blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0a23df6de654d24c8cfd4468b5bb389cc25b8f20
|
3a93a50bf80668a6ede701534f1567c3653729b0
|
/Full-time-interview-preparation/Graph/redundant_connection.py
|
8b8b2bb52cc5b8bd3f503e7f63d43db58f4c181e
|
[] |
no_license
|
Tadele01/Competitive-Programming
|
c16778298b6c1b4c0b579aedd1b5f0d4106aceeb
|
125de2b4e23f78d2e9f0a8fde90463bed0aed70f
|
refs/heads/master
| 2023-09-01T06:00:09.068940
| 2021-09-13T18:04:30
| 2021-09-13T18:04:30
| 325,728,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
from typing import List
class Solution:
    """Union-Find solution to LeetCode 684 (Redundant Connection)."""

    def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:
        """Return the last edge in `edges` whose addition closes a cycle."""
        # One slot per 1-indexed node; negative marks a set root.
        self.parents = [-1] * (len(edges) + 1)
        for a, b in edges:
            # union() fails exactly when a and b are already connected,
            # i.e. this edge is the redundant one.
            if not self.union(a, b):
                return [a, b]

    def find(self, x):
        """Follow parent links up to the root of x's set."""
        while self.parents[x] > 0:
            x = self.parents[x]
        return x

    def union(self, x, y):
        """Merge the sets containing x and y; False if already merged."""
        root_x = self.find(x)
        root_y = self.find(y)
        if root_x == root_y:
            return False
        self.parents[root_x] = root_y
        return True
|
[
"tadeleyednkachw@gmail.com"
] |
tadeleyednkachw@gmail.com
|
d22ab5a62f8a9bbd77dc270f6a368adcf4a6a639
|
9c16d6b984c9a22c219bd2a20a02db21a51ba8d7
|
/chrome/test/media_router/media_router_tests.gypi
|
c1211c49edbec838fba1408930357c7773b8918d
|
[
"BSD-3-Clause"
] |
permissive
|
nv-chromium/chromium-crosswalk
|
fc6cc201cb1d6a23d5f52ffd3a553c39acd59fa7
|
b21ec2ffe3a13b6a8283a002079ee63b60e1dbc5
|
refs/heads/nv-crosswalk-17
| 2022-08-25T01:23:53.343546
| 2019-01-16T21:35:23
| 2019-01-16T21:35:23
| 63,197,891
| 0
| 0
|
NOASSERTION
| 2019-01-16T21:38:06
| 2016-07-12T22:58:43
| null |
UTF-8
|
Python
| false
| false
| 1,115
|
gypi
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'media_router_integration_test_resources': [
'resources/basic_test.html',
'resources/common.js',
'resources/fail_create_route.html',
'resources/fail_create_route.json',
'resources/fail_join_session.html',
'resources/fail_join_session.json',
'resources/no_provider.html',
'resources/no_provider.json',
],
}, # end of variables
'targets': [
{
'target_name': 'media_router_integration_test_files',
'type': 'none',
'variables': {
'output_dir': '<(PRODUCT_DIR)/media_router/browser_test_resources',
'resource_files': [
'<@(media_router_integration_test_resources)',
]
},
'copies': [
{
'destination': '<(output_dir)',
'files': [
'<@(resource_files)',
],
},
],
}, # end of target 'media_router_integration_test_files'
], # end of targets
}
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
c49e277824f5d81797c03152796abe6a7b4fb545
|
d4cd7da93ef93b32ae30c6c96b0612ffca758c0b
|
/0x0F-python-object_relational_mapping/5-filter_cities.py
|
e1edee838b931fa9c1737f3e78038bf11adce3a0
|
[] |
no_license
|
jfangwang/holbertonschool-higher_level_programming
|
afde26b71104b1a0ecb6cb1c99736a5286a51f08
|
32f7396181fac7c7495def24af72346d6ba07249
|
refs/heads/master
| 2023-04-24T17:16:38.731772
| 2021-05-06T15:49:39
| 2021-05-06T15:49:39
| 319,357,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
#!/usr/bin/python3
""" Filter states """
# Prints every city of the state named in argv[4], comma-separated on one
# line. Usage: ./5-filter_cities.py username password database state_name
if __name__ == "__main__":
    import MySQLdb
    import sys
    argv = sys.argv
    state_id = 0  # NOTE(review): assigned but never used
    index = 0     # scan position for the sanitization loop below
    if len(argv) != 5:
        print("USAGE: ./0-select_states.py username password\
database_name state_name")
        exit()
    try:
        db = MySQLdb.connect(host="localhost", user=argv[1], charset="utf8",
                             passwd=argv[2], db=argv[3], port=3306)
    except:
        # NOTE(review): bare except — a failed connection only prints a blank
        # line, and `db` is still used below, which would raise NameError.
        print()
    # Check for injections: truncate argv[4] to its leading alphabetic prefix.
    while argv[4][index].isalpha() and index < len(argv[4]) - 1:
        index += 1
    if argv[4][index].isalpha():
        index += 1
    argv[4] = argv[4][slice(index)]
    cur = db.cursor()
    # NOTE(review): the state name is interpolated with .format(); the prefix
    # filter above is the only guard against SQL injection — verify intent.
    cur.execute("SELECT cities.name FROM cities WHERE cities.state_id\
        IN (SELECT states.id FROM states\
        WHERE states.name = '{}')\
        ORDER BY id ASC".format(argv[4]))
    rows = cur.fetchall()
    if len(rows) > 0:
        # All but the last city get a trailing ", "; the final print (using
        # the leaked loop variable `a`) emits the last city with a newline.
        for a in range(0, len(rows)):
            if len(rows) - 1 > a:
                print(rows[a][0], end=', ')
        print(rows[a][0])
    else:
        print()
    cur.close()
    db.close()
|
[
"qbs1864@gmail.com"
] |
qbs1864@gmail.com
|
4855001625f98e9d580537235844adc6d370c7db
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2500/60621/258426.py
|
9373b12868d7d20dccd2542fe5e09e61dd730079
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
# Pancake sorting: read a permutation (e.g. "[3,2,4,1]") and print the list
# of prefix-flip sizes that sorts it ascending.
a=list(eval(input()))  # NOTE(review): eval on raw input — assumes trusted input
b=[x for x in a]  # unused backup copy of the original list
c=[i+1 for i in range(len(a))]  # the sorted target [1..n]
d=[]  # sequence of flip sizes performed
for i in range(len(a)):
    if a==c:
        break  # already sorted
    # position of the largest value not yet in place, i.e. len(a)-i
    index=a.index(len(a)-i)
    if index==len(a)-i-1:
        continue  # already in its final slot — no flips needed
    else:
        if index+1!=1:
            # bring the value to the front first (a size-1 flip is a no-op)
            d.append(index+1)
            temp=[x for x in a[0:index+1]]
            temp.reverse()
            a[0:index+1]=temp
        # flip it from the front into its final position
        d.append(len(a)-i)
        temp=[x for x in a[0:len(a)-i]]
        temp.reverse()
        a[0:len(a)-i]=temp
print(d)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
ee6d90c3bc03a44f8fa4910336872a00a09153e6
|
9519f459f8622ce209ba61f601df6b567b321f1a
|
/metrics/migrations/0001_initial.py
|
910358f99305fbaf34be89da0e5ba842ae1dc1c1
|
[
"MIT"
] |
permissive
|
compsoc-ssc/compsocssc
|
3b478b2a9542ac62e0b3a0c8e13b84f289a5eb40
|
b61d490077b6ddf4798ce9ac30ca60bc63923080
|
refs/heads/master
| 2020-04-05T22:57:38.698446
| 2017-08-04T09:25:21
| 2017-08-04T09:25:21
| 30,790,913
| 7
| 8
|
MIT
| 2017-12-29T10:43:21
| 2015-02-14T08:05:48
|
Python
|
UTF-8
|
Python
| false
| false
| 673
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the ``Hit`` table for page-view metrics."""

    # First migration of this app — nothing to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Hit',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # client address of the recorded hit
                ('ip', models.GenericIPAddressField()),
                # user-agent string, truncated to 200 chars by the field
                ('ua', models.CharField(max_length=200)),
                # set once, automatically, when the row is created
                ('stamp', models.DateTimeField(auto_now_add=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
[
"arjoonn.94@gmail.com"
] |
arjoonn.94@gmail.com
|
bb1016bce463941647fcdb119e9254f4af8aff17
|
a867b1c9da10a93136550c767c45e0d8c98f5675
|
/D3_LC_74_Search_a_2D_Matrix.py
|
08d04637b85bca5f33d4a675c5a8fa58d6c01547
|
[] |
no_license
|
Omkar02/FAANG
|
f747aacc938bf747129b8ff35b6648fb265d95b6
|
ee9b245aa83ea58aa67954ab96442561dbe68d06
|
refs/heads/master
| 2023-03-25T19:45:08.153403
| 2021-03-28T07:13:08
| 2021-03-28T07:13:08
| 280,783,785
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,866
|
py
|
# import __main__ as main
# from Helper.TimerLogger import CodeTimeLogging
# fileName = main.__file__
# fileName = fileName.split('\\')[-1]
# CodeTimeLogging(Flag='F', filename=fileName, Tag='Matrix', Difficult='Medium')
"""Integers in each row are sorted from left to right."""
def searchMatrix(matrix, target):
    """Return True if `target` occurs in `matrix`, else False.

    Rows are sorted left-to-right and columns top-to-bottom, so a staircase
    search from the top-right corner visits at most rows + cols cells.

    Bug fixes vs. the original:
    - the second test is now `elif`: the original re-read matrix[r][c] after
      `c -= 1`, and once c reached -1 the `matrix[r][-1]` wraparound made the
      trailing `else` return True for absent targets (e.g. [[5, 10]], 3);
    - returns False when the search space is exhausted (original fell off
      the loop and returned None);
    - removed the dead `cnt` counter and the debug print.
    """
    if not matrix or not matrix[0]:
        return False
    r, c = 0, len(matrix[0]) - 1
    while r < len(matrix) and c >= 0:
        if target < matrix[r][c]:
            c -= 1  # everything below in this column is even larger
        elif target > matrix[r][c]:
            r += 1  # everything left in this row is even smaller
        else:
            return True
    return False
# mat = [[1, 3, 5, 7],
# [10, 11, 16, 20],
# [23, 30, 34, 50]]
# target = 30
# 20x20 test fixture: rows sorted left-to-right, columns top-to-bottom.
mat = [[-8, -6, -5, -4, -2, -1, -1, 0, 2, 4, 5, 7, 7, 7, 7, 9, 9, 9, 9, 11],
       [12, 14, 15, 16, 18, 20, 20, 20, 21, 21, 22, 23, 23, 25, 25, 25, 26, 27, 29, 30],
       [31, 31, 32, 32, 33, 35, 37, 39, 39, 39, 40, 41, 43, 44, 46, 48, 48, 48, 48, 50],
       [52, 54, 55, 57, 57, 58, 58, 60, 62, 64, 65, 65, 65, 67, 69, 71, 71, 73, 74, 74],
       [75, 76, 78, 78, 80, 82, 82, 82, 84, 85, 85, 87, 87, 89, 90, 90, 91, 93, 93, 94],
       [96, 98, 100, 102, 104, 105, 107, 109, 111, 113, 113, 115, 115, 117, 119, 119, 120, 122, 122, 124],
       [126, 127, 128, 130, 130, 130, 130, 132, 132, 133, 134, 136, 137, 138, 140, 141, 141, 143, 144, 146],
       [148, 150, 151, 152, 154, 156, 157, 158, 159, 161, 161, 162, 162, 164, 164, 165, 167, 168, 169, 169],
       [171, 173, 173, 175, 176, 178, 179, 181, 182, 183, 184, 184, 184, 185, 186, 186, 186, 186, 187, 189],
       [190, 192, 192, 193, 195, 196, 197, 197, 198, 198, 198, 198, 198, 199, 201, 203, 204, 206, 208, 208],
       [209, 210, 211, 212, 212, 212, 214, 214, 216, 217, 218, 218, 219, 221, 222, 224, 225, 227, 229, 229],
       [230, 230, 230, 231, 233, 233, 234, 235, 237, 237, 238, 238, 240, 240, 242, 242, 244, 246, 246, 247],
       [249, 251, 252, 252, 254, 254, 256, 256, 257, 258, 259, 260, 260, 261, 263, 265, 266, 267, 267, 269],
       [271, 273, 273, 274, 274, 274, 276, 276, 276, 278, 279, 280, 280, 280, 282, 284, 284, 286, 286, 287],
       [289, 290, 290, 291, 293, 293, 293, 293, 295, 296, 296, 297, 298, 299, 299, 301, 302, 304, 306, 308],
       [309, 310, 311, 311, 312, 312, 314, 315, 317, 319, 320, 322, 323, 324, 324, 324, 326, 328, 329, 331],
       [332, 334, 335, 337, 337, 339, 341, 343, 345, 347, 348, 348, 348, 348, 348, 350, 350, 350, 351, 352],
       [353, 355, 355, 356, 357, 358, 360, 361, 361, 361, 362, 364, 364, 364, 365, 366, 368, 370, 370, 372],
       [374, 376, 378, 380, 382, 382, 383, 384, 385, 385, 387, 388, 388, 390, 392, 394, 394, 396, 397, 399],
       [400, 402, 403, 403, 405, 405, 407, 409, 411, 411, 413, 414, 415, 417, 418, 419, 419, 419, 421, 422]]
# 271 is present (row 13, col 0) — expect True.
target = 271
print(searchMatrix(mat, target))
|
[
"omkarjoshi4031@live.com"
] |
omkarjoshi4031@live.com
|
61297baec7f9634e81732d93964cde44437a232f
|
1c790b0adc648ff466913cf4aed28ace905357ff
|
/applications/vision/data/mnist/__init__.py
|
e4bdf2b45dfba9e56aea7e90fcee004bb9fd170b
|
[
"Apache-2.0"
] |
permissive
|
LLNL/lbann
|
04d5fdf443d6b467be4fa91446d40b620eade765
|
e8cf85eed2acbd3383892bf7cb2d88b44c194f4f
|
refs/heads/develop
| 2023-08-23T18:59:29.075981
| 2023-08-22T22:16:48
| 2023-08-22T22:16:48
| 58,576,874
| 225
| 87
|
NOASSERTION
| 2023-09-11T22:43:32
| 2016-05-11T20:04:20
|
C++
|
UTF-8
|
Python
| false
| false
| 2,472
|
py
|
import gzip
import os
import os.path
import urllib.request
import google.protobuf.text_format
import lbann
# Paths
data_dir = os.path.dirname(os.path.realpath(__file__))
def download_data():
    """Download the MNIST data files into ``data_dir`` if they are missing.

    Each file is fetched compressed from the ossci-datasets S3 mirror and
    gunzipped next to this module. Files already on disk are left alone.
    """
    # File name on disk -> download URL.
    urls = {
        'train-images-idx3-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz',
        'train-labels-idx1-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz',
        't10k-images-idx3-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz',
        't10k-labels-idx1-ubyte': 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz',
    }
    for name, url in urls.items():
        target_path = os.path.join(data_dir, name)
        if os.path.isfile(target_path):
            continue  # already downloaded and unpacked
        gz_path = target_path + '.gz'
        # A custom User-Agent is sent with the request (mirrors may reject
        # the urllib default).
        req = urllib.request.Request(
            url,
            headers={'User-Agent': 'LBANN/vision-app'},
        )
        with urllib.request.urlopen(req) as response:
            with open(gz_path, 'wb') as out_file:
                out_file.write(response.read())
        with gzip.open(gz_path, 'rb') as in_file:
            with open(target_path, 'wb') as out_file:
                out_file.write(in_file.read())
def make_data_reader(validation_fraction=0.1):
    """Build the Protobuf data-reader message for MNIST.

    The dataset is downloaded first if it is not already on disk.

    Args:
        validation_fraction (float): Fraction of the training samples held
            out for validation; ``None`` leaves the prototext value as-is.
    """
    # Make sure the IDX files exist before configuring the reader.
    download_data()

    # Parse the prototext file that ships next to this module.
    message = lbann.lbann_pb2.LbannPB()
    protobuf_file = os.path.join(data_dir, 'data_reader.prototext')
    with open(protobuf_file, 'r') as f:
        google.protobuf.text_format.Merge(f.read(), message)
    message = message.data_reader

    # Override the validation split on the training reader if requested.
    if validation_fraction is not None:
        assert message.reader[0].role == "train"
        message.reader[0].validation_fraction = validation_fraction

    # Point every reader at the directory holding the downloaded files.
    for reader in message.reader:
        reader.data_filedir = data_dir
    return message
|
[
"noreply@github.com"
] |
LLNL.noreply@github.com
|
a14567cc685925ddb6144b5bcefab69fcbc2dd61
|
7cf8cc1f944946f0378da2e6af4ba1c89466dfb4
|
/dbselectprgrm.py
|
36776142444cd630a74ce82002009a999d6df7f8
|
[] |
no_license
|
ashilz/pythonnew
|
8abd164f757efaefa2216d663db2082c241cf4f5
|
5b57e0f1211a67671999bd3a1cae064318ab1e2f
|
refs/heads/master
| 2022-12-10T21:22:02.597080
| 2020-09-16T06:01:53
| 2020-09-16T06:01:53
| 292,829,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
import mysql.connector

# Select and print every row of the EMPLOYEE table from the luminarpython DB.
# NOTE(review): credentials are hard-coded; move them to config/env vars.
db = mysql.connector.connect(
    host="localhost",
    user="root",
    password="Ashil333!",
    auth_plugin="mysql_native_password",
    database="luminarpython"
)
print(db)
cursor = db.cursor()
try:
    query = "SELECT * FROM EMPLOYEE"
    cursor.execute(query)
    result = cursor.fetchall()
    for x in result:
        print(x)
except Exception as e:
    print(e.args)
finally:
    # Bug fix: the original said `db.close` (attribute access, never called),
    # so the connection was never actually closed.
    db.close()
|
[
"ashilantony333@gmail.com"
] |
ashilantony333@gmail.com
|
acfe4f324a502158f5c16d5b7d61048a3e4eac8c
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_jimmied.py
|
ab4838861245bf47f9fd504bf6da78a0bd7b2e15
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
from xai.brain.wordbase.verbs._jimmy import _JIMMY
# class header
class _JIMMIED(_JIMMY):
    """Word-base entry for "jimmied" (verb, base form "jimmy")."""

    def __init__(self):
        _JIMMY.__init__(self)
        # Metadata fields used by the word-base lookup machinery.
        self.name = "JIMMIED"
        self.specie = 'verbs'
        self.basic = "jimmy"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9d55651d7155c6be5e3d04e0bb02204342ea9cd5
|
49afe5ff0a10354e0c2c91f805f57dd02a24537d
|
/cashe/s4day118/s4.py
|
8c42aac83a3c6c7986e6689b0b09cb758182bd9f
|
[] |
no_license
|
shaoqianliang/scrapy
|
d155f103fdda0553981649aa7fa9aa9c9457b9a6
|
9aba8835640ddddd9ab4e1e54b83f6cafaeb8b9e
|
refs/heads/master
| 2020-03-07T05:12:49.933792
| 2018-04-25T07:59:28
| 2018-04-25T07:59:28
| 127,289,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
import redis

# Shared connection pool for the Redis server at 192.168.11.81:6379.
pool = redis.ConnectionPool(host='192.168.11.81', port=6379)
conn = redis.Redis(connection_pool=pool)

# --- earlier string/list exercises, kept commented for reference ---
# r.set('foo', '友情并阿斯顿发生地方')
# print(r.get('foo'))
# v = r.getrange('foo',0,3)
# print(v)
# v = r.strlen('foo')
# print(v)
#
# r.lpush('names','alex')
# r.lpush('names','eric')
# r.lpush('names','egon')
# v = r.lindex('names',1)
# print(v)
# aa 0 ba 0 ca 0 da 0 ea 0 fa 0 ga 0
#
# Add three members to sorted set 'zz' with scores -1, -2, -3.
# NOTE(review): this positional member/score form requires redis-py < 3.0;
# redis-py 3.x changed zadd to take a {member: score} mapping — confirm the
# installed client version.
conn.zadd('zz', '友情并', -1, '阮国栋', -2,'成汤',-3)
# v = conn.zrange('zz',0,0)
# print(v[0].decode('utf-8'))
# print(v[0])
# Rank (0-based, ascending score) of member "成汤" in 'zz'.
v = conn.zrank('zz',"成汤")
print(v)
|
[
"1132424753@qq.com"
] |
1132424753@qq.com
|
e079a60a0203f845eb514f4beba68b66be5303fa
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/429/usersdata/309/103346/submittedfiles/jogoDaVelha_BIB.py
|
e2a9991dee25881eef4f01f86be054f83d14d35f
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,485
|
py
|
# -*- coding: utf-8 -*-
from datetime import datetime
from random import randint
# autenticação do simbolo para a jogada humano
def solicitaSimboloDoHumano():
    """Ask the human player which symbol to use and return it as "X" or "O".

    Keeps prompting until the input is one of X/x/O/o, then normalizes the
    answer to upper case.
    """
    # nome=input('Qual seu nome(ou apelido)? ')
    simbH = (input("Qual o simbolo que você deseja utilizar no jogo? "))
    while simbH != "X" and simbH != "O" and simbH != "o" and simbH != "x":
        print("Ops! Simbolo inválido")
        simbH = input("Informe um simbolo válido que deseja utilizar para a partida: X ou O : ")
    if simbH == "X" or simbH == "x":
        simbH = "X"
    else:
        # Bug fix: the original used `simbH=="O"` (a comparison with no
        # effect), so a lowercase "o" answer was returned unnormalized.
        simbH = "O"
    return simbH
#sorteio
def sorteioPrimeiraJogada (simbM, simbH, tabuleiro, nome):
    """Decide who plays first based on the parity of the current second.

    Returns 1 when the computer starts (its opening move is played and the
    board is shown immediately), 2 when the human starts (the caller asks
    for the human's move). `simbH` and `nome` are kept for interface
    compatibility with the callers.
    """
    now = datetime.now()
    a = now.second
    # Bug fix: the original initialized a misspelled variable `pro` that was
    # never read; `prop` is the value actually returned.
    prop = 0
    if a % 2 == 0:
        print("Vencedor do sorteio para inicio do jogo: Computador")
        prop = 1
        # the computer makes its opening move right away
        tabuleiro = jogada1computer(tabuleiro, simbM)
        mostraTabuleiro(tabuleiro)
    else:
        print("Vencedor do sorteio para inicio do jogo: Jogador")
        prop = 2
        # the human's first move is requested by the caller
        # tabuleiro=jogadaHumana(nome, simbH, tabuleiro)
    return prop
#Função para printar o tabuleiro:
def mostraTabuleiro(tabuleiro):
    """Print the three board rows, cells separated by '|'."""
    for idx in range(3):
        linha = tabuleiro[idx]
        print(linha[0] + '|' + linha[1] + '|' + linha[2])
#Função da jogada do humano
def jogadaHumana(nome, simbH, tabuleiro):
    """Prompt `nome` for a move and place `simbH` on the board.

    The move is read as a string like "1 2": row = character 0,
    column = character 2. The board is mutated via validarJogada().
    """
    casa = input(" Qual a sua jogada, %s ?" % nome)
    i = int(casa[0])
    j = int(casa[2])
    # Bug fixes: the original loop condition `i>2 and j>2 and i<0 and j<0`
    # can never be true, so out-of-range moves were silently accepted; and
    # the retry path wrapped input() in int(), which would crash on the
    # casa[0] indexing below.
    while i > 2 or j > 2 or i < 0 or j < 0:
        print('Ops! Jogada invalida... ')
        casa = input(" Qual a sua jogada, %s ?" % nome)
        i = int(casa[0])
        j = int(casa[2])
    validarJogada(nome, simbH, tabuleiro, i, j)
    return tabuleiro
#Função para validar uma jogada
def validarJogada(nome, simbH, tabuleiro, i, j):
    """Place `simbH` at (i, j) if the square is free; otherwise re-prompt.

    The board is mutated in place. On an occupied square the whole move is
    re-requested through jogadaHumana(), whose validation mutates the board.
    """
    if tabuleiro[i][j] != "X" and tabuleiro[i][j] != "O":
        tabuleiro[i][j] = simbH
    else:
        print("OPS!!! Essa jogada não está disponível. Tente novamente!")
        jogadaHumana(nome, simbH, tabuleiro)
    # Bug fix: the original ended with `while i>2 and j>2: print(...)`, an
    # infinite loop whenever it triggered; bounds are already validated by
    # the caller, so the loop was removed.
#Função da Jogada do computador
#def jogadaComputador(tabuleiro, simbM):
# if tabuleiro
#Função caso computador inicie o jogo
def jogada1computer(tabuleiro, simbM):
sortL=randint(0, 2)
sortC=randint(0, 2)
while tabuleiro[sortL][sortC] !=" " :
sortL=randint(0, 2)
sortC=randint(0, 2)
tabuleiro[sortL][sortC]=simbM
return tabuleiro
#Função que verifica o vencedor
def VerificaVencedor(tab, simbH, nome):
    """Return the match state for a 3x3 board.

    2 -> the human (`simbH`) completed a line
    4 -> the machine completed a line
    6 -> board full with no winner ("deu velha")
    1 -> game still in progress

    Bug fix: the original accepted any three equal cells as a line, so three
    blanks (e.g. an empty board) scored as a machine victory. Every line now
    must also be non-blank. The "Vencedor: Máquina" message, which the
    original printed only for a row-0 win, is now printed for every machine
    win for consistency.
    """
    # All eight winning lines: rows, columns, both diagonals.
    linhas = [
        (tab[0][0], tab[0][1], tab[0][2]),
        (tab[1][0], tab[1][1], tab[1][2]),
        (tab[2][0], tab[2][1], tab[2][2]),
        (tab[0][0], tab[1][0], tab[2][0]),
        (tab[0][1], tab[1][1], tab[2][1]),
        (tab[0][2], tab[1][2], tab[2][2]),
        (tab[0][0], tab[1][1], tab[2][2]),
        (tab[0][2], tab[1][1], tab[2][0]),
    ]
    for a, b, c in linhas:
        if a != " " and a == b == c:
            if a == simbH:
                return 2
            print("Vencedor: Máquina")
            return 4
    # Full board with no winner is a draw.
    if all(tab[l][c] != " " for l in range(3) for c in range(3)):
        print('Deu velha')
        return 6
    return 1
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
ffd2dbbfe0e5759bc19b443279803a036bc898b0
|
dbdf5d6e9e1e04066bcf13b3e81d00578c6dc25d
|
/Trakttv.bundle/Contents/Libraries/Shared/shove/caches/memcached.py
|
f3c2064a367f862001df0ba1a9826389eee781be
|
[] |
no_license
|
cnstudios/Plex-Trakt-Scrobbler
|
59dfd0b1361d5b1d0f638b1a2009cffe0d5da421
|
73557f52bdba172c0b7261454536641d9c65edb8
|
refs/heads/master
| 2021-01-22T11:04:06.080191
| 2015-03-29T23:58:35
| 2015-03-29T23:58:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 982
|
py
|
# -*- coding: utf-8 -*-
'''
"memcached" cache.
The shove URI for a memcache cache is:
memcache://<memcache_server>
'''
try:
import memcache
except ImportError:
raise ImportError("requires 'python-memcached' library")
from shove.base import Base
__all__ = ['MemCache']
class MemCache(Base):
    '''Memcached-based cache frontend.'''

    def __init__(self, engine, **kw):
        super(MemCache, self).__init__(engine, **kw)
        # Accept both "memcache://host[;host2]" URIs and bare host strings;
        # ';' separates multiple memcached servers.
        if engine.startswith('memcache://'):
            engine = engine.split('://')[1]
        self._store = memcache.Client(engine.split(';'))
        # Expiry (seconds) applied to every stored entry; default 5 minutes.
        self.timeout = kw.get('timeout', 300)

    def __getitem__(self, key):
        cached = self._store.get(key)
        # memcached has no "missing" sentinel besides None, so None maps to
        # a KeyError to honor the mapping protocol.
        if cached is None:
            raise KeyError(key)
        return self.loads(cached)

    def __setitem__(self, key, value):
        self._store.set(key, self.dumps(value), self.timeout)

    def __delitem__(self, key):
        self._store.delete(key)
|
[
"gardiner91@gmail.com"
] |
gardiner91@gmail.com
|
6fa2dd1b962d93710df683eaac29099f951a25c2
|
786232b3c9eac87728cbf2b5c5636d7b6f10f807
|
/Leetcode/medium/162.py
|
b6de70d172ced69a49525fc35b79a96011180de2
|
[] |
no_license
|
luoyanhan/Algorithm-and-data-structure
|
c9ada2e123fae33826975665be37ca625940ddd4
|
fb42c3a193f58360f6b6f3b7d5d755cd6e80ad5b
|
refs/heads/master
| 2021-12-22T15:45:28.260386
| 2021-12-02T03:08:35
| 2021-12-02T03:08:35
| 251,007,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
class Solution:
    def findPeakElement(self, nums: List[int]) -> int:
        """Return an index whose element is greater than its neighbors.

        Scans for the first "drop" nums[i] > nums[i+1]; everything before
        the first drop is ascending, so that i is a peak. If the sequence
        never drops, the last element is the peak.

        Bug fix: at i == 0 the original's else-branch read nums[i-1], which
        wraps around to the last element, and at the last index it could
        read nums[i+1] (IndexError) when neighbors are equal. The first-drop
        scan has no wraparound and never indexes out of range.
        """
        for i in range(len(nums) - 1):
            if nums[i] > nums[i + 1]:
                return i
        return len(nums) - 1
|
[
"707025023@qq.com"
] |
707025023@qq.com
|
36738b0db03c6d09b59fe47a634737de972a1946
|
ea622960f82fbc374ff3ac300ef670b56820af4e
|
/f2b_venv/bin/gunicorn
|
6e02cafa859287fd91e63e48b1999cf57e125ead
|
[] |
no_license
|
Zacharilius/Food2Beer-Django
|
d1b22e58d5d4c8fab914915428063d66d23958cd
|
691f30822cc80b47cb1bf58eb8521bcf19720b98
|
refs/heads/master
| 2021-01-19T00:44:44.093599
| 2015-04-10T13:59:08
| 2015-04-10T13:59:08
| 32,464,569
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
#!/home/zacharilius/Documents/GitHub/Food2Beer-Django/f2b_venv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim that hands control to gunicorn's CLI entry point.
# NOTE(review): this matches the standard pip/setuptools-generated launcher —
# regenerate rather than hand-edit.
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
    # Strip a trailing "-script.pyw"/".exe" suffix from argv[0] (Windows
    # launcher convention) so gunicorn reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"zabensley@gmail.com"
] |
zabensley@gmail.com
|
|
932d9be49faba3e76429f78de93191bcabbaa964
|
6fab071f4b3f3852a3f7fb7f87e7d033d5ea9425
|
/4_Demo_Django/2_Django_Test/1_Django_login/APPS/4_Django项目实战.py
|
ebdf6404cd02fc9bc3b921d1de0831d2ff6784fd
|
[] |
no_license
|
pythonzhangfeilong/Python_WorkSpace
|
5d76026d0553bb85346264fc6375b1fc0a388729
|
646b460c79bedc80010185a240c8cd23342093bc
|
refs/heads/master
| 2020-08-26T09:51:43.763751
| 2020-07-07T07:23:20
| 2020-07-07T07:23:20
| 216,998,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,520
|
py
|
# 1、Django安装:直接采用的是pip install Django
'''
1、如果之前安装的Django版本较低,可以采用pip uninstall Django卸载,再重新安装
2、安装过程中出现问题是,要记得更新pip,更新命令python -m pip install --upgrade pip
3、pip安装时要使用系统的最高管理员权限,否则会出错
'''
# 2、使用pycharm创建项目时,注意在More Settings中加上app的名字,这样的项目直接就有app,不用再次命令行创建
'''
1、创建好Diango文件后,个文件夹的名字含义:
__init__ 空文件
settings 主配置文件
urls 主路由文件
wsgi 网关接口
templates HTML文件编辑目录
manage 项目个管理脚本(也就是使用命令行的时候会用到)
注意:如果在写前端时,一些导入性的文件要自己新建一个static的文件夹,把他们放进去
2、创建APP时在More Settings编写后就不用管了,但是没有编写就亚奥采用下面命令行的模式去创建
创建app的命令行python manage.py startapp login (这样就是创建了一个叫login的文件夹,文件夹中有各个相关联的文件)
'''
# 3、路由编写urls
'''
1、路由是浏览器输入url,在Django服务器响应url的转发中心。
路由都写在urls文件里,它将浏览器输入的url映射到相应的业务处理逻辑也就是视图
2、要在urls中导入APP中的views文件,from APPS import views
3、接下来就是在urlpatterns中写path('index/',views.index) 最重要的是视图后面的函数
'''
# 4、编写视图函数views,路由转发用户请求到视图函数。视图函数处理用户请求,也就是编写业务处理逻辑,一般都在views.py文件里
'''
1、首先要导入一个HTTP模块,也就是from django.shortcuts import HttpResponse
注意:函数的名字要与urls中的名字一样,第一个参数最好是使用request
def index(request):
不能直接返回字符串,必须要由HttpResponse封装起来,才能被HTTP识别到
return HttpResponse('Hello world')
###通过上面的操作将index这个url指向了views里的index()视图函数,它接收用户请求,并返回一个“hello world”字符串。
'''
# 5、运行web服务
'''
1、采用命令行运行是 python manage.py runserver 127.0.0.1:8000
2、在pycharm中直接在右上角有个雷氏播放的绿箭头,运行就行
3、或者点击向下的箭头,在Edit Configurations中编辑运行的内容
4、运行时出现404报错,在url后面拼接执行的文件刷新就好,https://127.0.0.1:8000/index
'''
# 6、返回HTML文件操作
'''
1、首先在templates中创建一个index.html文件
2、然后再在views中导入from django.shortcuts import render
def index(request):
render方法使用数据字典和请求元数据,渲染一个指定的HTML模板,其中多个参数,第一个参数必须是request,第二个参数是HTML
return render(request,'func.html')
3、为了让Django知道HTML文件在哪里,需要在settings中设置
在settings中找到TEMPLATES=[{
'DIRS':[os.path.join(BASE_DIR,'templates')]
}]
'''
# 7、使用静态文件
'''
1、将HTML文件返还给用户了,但是这还不够,前端三大块HTML、CSS、JavaScript,还有各种插件,它们齐全才是一个完整的页面。
在Django中,创建一个static目录,将这些静态文件放在static目录中。
2、为了让Django找到static这个目录,需要在settings中,
找到STATIC_URL='/static/'下面编写 STATIC_URL='/static/'的作用是浏览器访问静态文件时加载的前缀部分,比如https://127.0.0.1:8000/static/login.jpg
STATICFILES_DIRS=[
os.path.join(BASE_DIR,'static')
]
3、上面的bain写好就可以在template文件夹的index.html文件中引用静态文件了
<script src='/static/js/jquery-3.2.1.min.js'></script>
'''
# 8、接受用户发送的数据
'''
将一个要素齐全的HTML文件返还给了用户浏览器。但这还不够,因为web服务器和用户之间没有动态交互。
下面设计一个表单,让用户输入用户名和密码,提交给index这个url,服务器将接收到这些数据
1、首先修改index.html,修改时注意action是html的名字,不带后缀名
<h1>兄弟,你好</h1>
<form action="/index/" method="post">
{% csrf_token %}
<div>
<p>用户名:<input type="text" name="username" title="请输入用户名"></p>
<p>密码:<input type="text" name="password" title="请输入密码"></p>
<p><button type="submit">提交</button></p>
</div>
</form>
2、修改完html后是不能直接输入信息的,这时需要修改views
if request.method=='POST':
username=request.POST.get('username')
password=request.POST.get('password')
print(username,password)
return render(request,'func.html')
3、这是刷新页面是会报403错误的,因为Django有一个跨站请求保护机制,要在html的form表单中加入{%csrf_token%}
'''
# 9、返回动态页面,收到了用户的数据,但是返回给用户依然是一个静态页面,通常会根据用户的数据,进行处理后再返回给用户
'''
1、先修改views
# 创建一个空列表
user_list=[]
def index(request):
if request.method=='POST':
username=request.POST.get('username')
password=request.POST.get('password')
print(username,password)
# 将用户发送过来的数据,构建成一个字典
temp={'user':username,'pwd':password}
# 将字典内容添加到列表中
user_list.append(temp)
# 将用户的数据返回给html
return render(request,'func.html',{'date':user_list})
再修改index.html
<div class="bk">
<h1>用户输入</h1>
<form action="/index/" method="post">
{% csrf_token %}
<div>
<p>用户名:<input type="text" name="username" title="请输入用户名"></p>
<p>密码:<input type="text" name="password" title="请输入密码"></p>
<p><button type="submit">提交</button></p>
</div>
</form>
</div>
<div class="bk">
<h1>用户展示</h1>
<table>
<thead>
<tr>用户名</tr>
<tr>密码</tr>
</thead>
<tbody>
{% for item in date %}
<tr>
<td>{{ item.user }}</td>
<td>{{ item.pwd }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
'''
# 10、 数据库的使用,使用数据库的需求是毫无疑问的,Django通过自带的ORM框架操作数据库,并且原生支持轻量级的sqlite3数据库。
'''
1、在使用个数据库的时候,首先要在settings中的INSTALLED_APPS添加自己的app名字,不注册它,数据库就不知道给那个app创建数据库
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'APPS.apps.AppConfig',
]
2、在settings中的DATABASES配置,默认的是sqlite3
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'django_database',
'USER':'root',
'PASSWORD':'',
'HOST':'127.0.0.1',
'PORT':'3306',
}
}
3、再编辑models
#首先要继承这个类是固定的写法
class UserInfo(models.Model):
user=models.CharField(max_length=32)
pwd=models.CharField(max_length=32)
# 创建上面的俩个字段,用来保存用户名和密码
1、在pycharm的Terminal中输入python manage.py makemigrations,
会在login目录中的migrations目录中生成一个0001_initial.py迁移记录文件。
2、再输入python manage.py migrate,执行成功就会创建好数据库表
4、再编辑views
# 将提交的用户名和密码保存在数据库中
from APPS import models
def index(request):
if request.method=='POST':
username=request.POST.get('username')
password=request.POST.get('password')
# 将数据保存在数据库中
models.UserInfo.objects.create(user=username,pwd=password)
# 从数据库中读取所有的数据
user_list=models.UserInfo.objects.all()
return render(request,'func.html',{'date':user_list})
'''
|
[
"feilong@feilongdeMacBook-Pro.local"
] |
feilong@feilongdeMacBook-Pro.local
|
4455e95b78174d01b70882d3db82c199191ef89c
|
f1be5da6283270803c3f0cbb80f7d11ff9260655
|
/scripts/vectorize.py
|
cddbfbce4c15edf2d095fa082351dc434767d35e
|
[
"Apache-2.0"
] |
permissive
|
mogproject/tutte-polyn
|
1a1cf371dd434991a41c73ab4e9a3936d9b93d5c
|
991ce12619f86484ffac8a57186b5eea22d01f0a
|
refs/heads/master
| 2022-07-12T05:00:35.493640
| 2020-05-12T07:25:07
| 2020-05-12T07:25:07
| 247,176,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,056
|
py
|
#!/usr/bin/env python3
"""
Converts output from the tuttepoly program into coefficient vectors for each graph.
"""
__author__ = 'Yosuke Mizutani'
__version__ = '0.0.1'
__license__ = 'Apache License, Version 2.0'
# imports standard libraries
import sys
import argparse
def get_parser():
    """Build and return the command-line argument parser."""
    parser = argparse.ArgumentParser(description='<program description>')
    # -n is mandatory: the vertex count fixes the coefficient-vector shape.
    parser.add_argument('-n', type=int, required=True, help='number of vertices')
    parser.add_argument('path', help='input file path')
    return parser
def parse_tp_line(line):
    """Parse one "TP[gid] := c*x^a*y^b + ...:" line from tuttepoly output.

    Returns (gid, terms) where each term is a (coefficient, x_degree,
    y_degree) tuple; a bare 'x' or 'y' factor counts as degree 1 and a
    missing factor as degree 0.
    """
    assert(line[:3] == 'TP[')
    pieces = line.split(':=')
    gid = int(pieces[0][3:-2])
    terms = []
    for raw_term in pieces[1].rstrip(':\n').split('+'):
        factors = raw_term.strip().split('*')
        x_deg, y_deg = 0, 0
        for factor in factors[1:]:
            if factor[0] == 'x':
                x_deg = int(factor[2:]) if factor[1:2] == '^' else 1
            elif factor[0] == 'y':
                y_deg = int(factor[2:]) if factor[1:2] == '^' else 1
        terms.append((int(factors[0]), x_deg, y_deg))
    return gid, terms
def parse_graph_line(line):
    """Parse one "G[gid] := {a--b,c--d,...}" line into (gid, edge list).

    Bug fix: the original stripped braces with '\\}' / '\\{' — the
    unrecognized escape keeps the backslash (a DeprecationWarning today and
    a SyntaxError in future Python versions) and made rstrip/lstrip strip
    backslashes as well. Plain '}' / '{' character sets are used instead.
    """
    assert(line[:2] == 'G[')
    tokens = line.split(':=')
    gid = int(tokens[0][2:-2])
    edges = tokens[1].strip().rstrip('}').lstrip('{').split(',')
    ret = []
    for edge in edges:
        vs = edge.split('--')
        ret += [(int(vs[0]), int(vs[1]))]
    return gid, ret
def main(args):
    """Entry point: print one "gid: coefficients" line per TP record.

    The Tutte polynomial of an n-vertex graph has x-degree at most n-1 and
    y-degree at most 1 + C(n-1, 2); the vector is laid out row-major by y.
    """
    nx = args.n                                 # x degrees run 0 .. n-1
    ny = 1 + (args.n - 1) * (args.n - 2) // 2   # y degrees run 0 .. C(n-1, 2)
    with open(args.path) as f:
        for line in f:
            if line[0] != 'T':
                continue  # only TP[...] lines carry polynomial data
            gid, terms = parse_tp_line(line)
            vec = [0] * (nx * ny)
            for coeff, dx, dy in terms:
                assert(dx < nx)
                assert(dy < ny)
                vec[dy * nx + dx] = coeff
            print('%d: %s' % (gid, ' '.join(map(str, vec))))
if __name__ == '__main__':
main(get_parser().parse_args())
|
[
"mogproj@gmail.com"
] |
mogproj@gmail.com
|
633868dbf7071b0ab8e9a8d69163295e0e39e2f9
|
146cd740649b87032cbbfb97cde6ae486f76230b
|
/venv/lib/python3.6/site-packages/matplotlib/backends/__init__.py
|
06cad2d1ad10fdbfb2c19ff6f42036aef101cece
|
[] |
no_license
|
shellyhuang18/plank-filter-master
|
8b7024c46334062496f05d31eefc618ebae50b4e
|
8993a5b00f45841c3385fe997857bfdd10b71a84
|
refs/heads/master
| 2020-03-30T18:14:45.017957
| 2018-12-27T20:51:25
| 2018-12-27T20:51:25
| 151,490,556
| 0
| 1
| null | 2018-12-19T22:42:26
| 2018-10-03T22:50:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,720
|
py
|
import importlib
import logging
import os
import sys
import traceback
import matplotlib
from matplotlib import cbook
from matplotlib.backend_bases import _Backend
_log = logging.getLogger(__name__)
# NOTE: plt.switch_backend() (called at import time) will add a "backend"
# attribute here for backcompat.
def _get_running_interactive_framework():
    """
    Return the interactive framework whose event loop is currently running, if
    any, or "headless" if no event loop can be started, or None.

    Returns
    -------
    Optional[str]
        One of the following values: "qt5", "qt4", "gtk3", "wx", "tk",
        "macosx", "headless", ``None``.
    """
    # Only consult sys.modules so this never *imports* a toolkit; a toolkit
    # counts as running only if its application/loop object actually exists.
    QtWidgets = (sys.modules.get("PyQt5.QtWidgets")
                 or sys.modules.get("PySide2.QtWidgets"))
    if QtWidgets and QtWidgets.QApplication.instance():
        return "qt5"
    QtGui = (sys.modules.get("PyQt4.QtGui")
             or sys.modules.get("PySide.QtGui"))
    if QtGui and QtGui.QApplication.instance():
        return "qt4"
    # A positive main_level() means Gtk.main() is currently running.
    Gtk = (sys.modules.get("gi.repository.Gtk")
           or sys.modules.get("pgi.repository.Gtk"))
    if Gtk and Gtk.main_level():
        return "gtk3"
    wx = sys.modules.get("wx")
    if wx and wx.GetApp():
        return "wx"
    # Tk exposes no "is the loop running" API, so walk every thread's stack
    # looking for tkinter.mainloop's code object.
    tkinter = sys.modules.get("tkinter")
    if tkinter:
        for frame in sys._current_frames().values():
            while frame:
                if frame.f_code == tkinter.mainloop.__code__:
                    return "tk"
                frame = frame.f_back
    try:
        from matplotlib.backends import _macosx
    except ImportError:
        pass
    else:
        if _macosx.event_loop_is_running():
            return "macosx"
    # No toolkit is running; without a DISPLAY on Linux no GUI can start.
    if sys.platform.startswith("linux") and not os.environ.get("DISPLAY"):
        return "headless"
    return None
@cbook.deprecated("3.0")
def pylab_setup(name=None):
    """
    Return new_figure_manager, draw_if_interactive and show for pyplot.

    This provides the backend-specific functions that are used by pyplot to
    abstract away the difference between backends.

    Parameters
    ----------
    name : str, optional
        The name of the backend to use.  If `None`, falls back to
        ``matplotlib.get_backend()`` (which return :rc:`backend`).

    Returns
    -------
    backend_mod : module
        The module which contains the backend of choice
    new_figure_manager : function
        Create a new figure manager (roughly maps to GUI window)
    draw_if_interactive : function
        Redraw the current figure if pyplot is interactive
    show : function
        Show (and possibly block) any unshown figures.
    """
    # Import the requested backend into a generic module object.
    if name is None:
        name = matplotlib.get_backend()
    # "module://pkg.mod" names an external backend module directly; anything
    # else is resolved to a bundled matplotlib.backends.backend_* module.
    backend_name = (name[9:] if name.startswith("module://")
                    else "matplotlib.backends.backend_{}".format(name.lower()))
    backend_mod = importlib.import_module(backend_name)
    # Create a local Backend class whose body corresponds to the contents of
    # the backend module. This allows the Backend class to fill in the missing
    # methods through inheritance.
    Backend = type("Backend", (_Backend,), vars(backend_mod))
    # Need to keep a global reference to the backend for compatibility reasons.
    # See https://github.com/matplotlib/matplotlib/issues/6092
    global backend
    backend = name
    _log.debug('backend %s version %s', name, Backend.backend_version)
    return (backend_mod,
            Backend.new_figure_manager,
            Backend.draw_if_interactive,
            Backend.show)
|
[
"shellyhuang81@gmail.com"
] |
shellyhuang81@gmail.com
|
f5fcee0e713532c5d2bae4ea721d31bb5f801dea
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/next_child_and_first_case/bad_way/group/right_problem.py
|
33aff9e2e1183bf56ce9cb9ad48a6b8731a0d934
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
#! /usr/bin/env python
def able_government(str_arg):
    """Echo *str_arg* through day(), then print the literal 'eye'."""
    day(str_arg)
    print('eye')
def day(str_arg):
    """Write *str_arg* to stdout."""
    print(str_arg)
# Demo run: prints 'right_case' followed by 'eye'.
if __name__ == '__main__':
    able_government('right_case')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
e2e86cef61265cda1718cdef84254ca06d3b8814
|
56fe9508764866a9bab08d1e13bc34777fb50e1b
|
/tests/mock_commands/nextpnr-gowin
|
f967efea61107e7fd199a3213e20b3395417e115
|
[
"BSD-2-Clause"
] |
permissive
|
GregAC/edalize
|
402970d32a708fb2268cc8c03c88527e958ebc7a
|
f4b3cc5bccf0c10375a4dcd101273423de470a08
|
refs/heads/master
| 2021-09-08T06:06:10.281868
| 2021-07-07T11:39:27
| 2021-07-28T22:05:51
| 247,763,731
| 0
| 0
|
BSD-2-Clause
| 2020-03-16T16:27:06
| 2020-03-16T16:27:05
| null |
UTF-8
|
Python
| false
| false
| 245
|
#!/usr/bin/env python3
# Mock of the nextpnr-gowin binary used by the edalize test suite: it only
# touches the requested output file and records the command line it was
# invoked with, so tests can assert on both.
import os
import sys
# The argument following '--write' names the expected bitstream output file.
output_file = sys.argv[sys.argv.index('--write')+1]
# Open in append mode (creating if needed) and bump its mtime, like touch(1).
with open(output_file, 'a'):
    os.utime(output_file, None)
# Log the full argument list for the test to inspect.
with open('nextpnr-gowin.cmd', 'w') as f:
    f.write(' '.join(sys.argv[1:]) + '\n')
|
[
"olof.kindgren@gmail.com"
] |
olof.kindgren@gmail.com
|
|
a1d224b156b32482685c38df145d7ec196174f7f
|
3b9b4049a8e7d38b49e07bb752780b2f1d792851
|
/src/mojo/public/tools/manifest/manifest_collator.py
|
9a6d0e9b01049681a0f8e0c309faa288aeea1b23
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
webosce/chromium53
|
f8e745e91363586aee9620c609aacf15b3261540
|
9171447efcf0bb393d41d1dc877c7c13c46d8e38
|
refs/heads/webosce
| 2020-03-26T23:08:14.416858
| 2018-08-23T08:35:17
| 2018-09-20T14:25:18
| 145,513,343
| 0
| 2
|
Apache-2.0
| 2019-08-21T22:44:55
| 2018-08-21T05:52:31
| null |
UTF-8
|
Python
| false
| false
| 2,909
|
py
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" A collator for Mojo Application Manifests """
import argparse
import json
import os
import shutil
import sys
import urlparse
eater_relative = '../../../../../tools/json_comment_eater'
eater_relative = os.path.join(os.path.abspath(__file__), eater_relative)
sys.path.insert(0, os.path.normpath(eater_relative))
try:
import json_comment_eater
finally:
sys.path.pop(0)
def ParseJSONFile(filename):
  # Loads |filename| as comment-tolerant JSON (comments are stripped by
  # json_comment_eater first). Returns the parsed value, or None after
  # printing a diagnostic if the file is not valid JSON.
  # NOTE: this file is Python 2 (print statement).
  with open(filename) as json_file:
    try:
      return json.loads(json_comment_eater.Nom(json_file.read()))
    except ValueError:
      print "%s is not a valid JSON document" % filename
      return None
def MergeDicts(left, right):
  """Recursively merges |right| into |left| and returns |left|.

  Keys present only in |right| are copied over. For keys present in both:
  dicts are merged recursively, lists are concatenated, and any other
  conflicting value raises TypeError.
  """
  # .items() works on both Python 2 and 3 (the original's iteritems() is
  # Python-2-only; semantics for iteration are identical).
  for k, v in right.items():
    if k not in left:
      left[k] = v
    else:
      if isinstance(v, dict):
        assert isinstance(left[k], dict)
        MergeDicts(left[k], v)
      elif isinstance(v, list):
        assert isinstance(left[k], list)
        left[k].extend(v)
      else:
        # Bug fix: the original raised a bare string, which is itself a
        # TypeError at runtime on Python >= 2.6 (string exceptions were
        # removed). Raise a real exception with the intended message.
        raise TypeError("Refusing to merge conflicting non-collection values.")
  return left
def MergeBaseManifest(parent, base):
  """Folds |base|'s capabilities, applications and process-group into
  |parent|, modifying |parent| in place."""
  MergeDicts(parent["capabilities"], base["capabilities"])
  if "applications" in base:
    parent.setdefault("applications", []).extend(base["applications"])
  if "process-group" in base:
    parent["process-group"] = base["process-group"]
def main():
  # Collates a parent Mojo application manifest with an optional base
  # manifest and any number of child manifests, then writes the combined
  # JSON to --output. Returns a process exit code (0 on success).
  parser = argparse.ArgumentParser(
      description="Collate Mojo application manifests.")
  parser.add_argument("--parent")
  parser.add_argument("--output")
  parser.add_argument("--application-name")
  parser.add_argument("--base-manifest", default=None)
  # Positional leftovers are treated as child manifest paths.
  args, children = parser.parse_known_args()
  parent = ParseJSONFile(args.parent)
  # NOTE(review): '== None' should be 'is None' (also below); kept as-is.
  if parent == None:
    return 1
  if args.base_manifest:
    base = ParseJSONFile(args.base_manifest)
    if base == None:
      return 1
    MergeBaseManifest(parent, base)
  # The manifest 'name' looks like 'scheme:path'; validate the path part
  # against the build-supplied application name.
  app_path = parent['name'].split(':')[1]
  if app_path.startswith('//'):
    raise ValueError("Application name path component '%s' must not start " \
                     "with //" % app_path)
  if args.application_name != app_path:
    raise ValueError("Application name '%s' specified in build file does not " \
                     "match application name '%s' specified in manifest." %
                     (args.application_name, app_path))
  applications = []
  for child in children:
    application = ParseJSONFile(child)
    if application == None:
      return 1
    applications.append(application)
  if len(applications) > 0:
    parent['applications'] = applications
  with open(args.output, 'w') as output_file:
    json.dump(parent, output_file)
  return 0
# Propagate main()'s return value as the process exit code.
if __name__ == "__main__":
  sys.exit(main())
|
[
"changhyeok.bae@lge.com"
] |
changhyeok.bae@lge.com
|
0b7c562f6f37bd3f506e5bbdc00055ef7da3bb3b
|
ddb185b0cf581d85a1dd733a6d1e5d027ba3e0ca
|
/phase4/260.py
|
651673aea432957ccfbb450b4c912f91b85e1222
|
[] |
no_license
|
GavinPHR/code
|
8a319e1223a307e755211b7e9b34c5abb00b556b
|
b1d8d49633db362bbab246c0cd4bd28305964b57
|
refs/heads/master
| 2020-05-16T04:09:19.026207
| 2020-04-30T10:00:06
| 2020-04-30T10:00:06
| 182,766,600
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
# Single Number III
from typing import List
class Solution:
    def singleNumber(self, nums: List[int]) -> List[int]:
        """Return the two values that appear exactly once in *nums*.

        Membership in *singles* is toggled on every occurrence via symmetric
        difference, so values seen an even number of times cancel out and
        only the unique ones remain.
        """
        singles = set()
        for value in nums:
            singles ^= {value}
        return list(singles)
|
[
"gavinsweden@gmail.com"
] |
gavinsweden@gmail.com
|
b7bc73e1f99352427bf7b271d7a94b320b253ffb
|
a4deea660ea0616f3b5ee0b8bded03373c5bbfa2
|
/executale_binaries/register-variants/vmovapd_ymm_ymm.gen.vex.py
|
2c67bcd6beddc36d418754a140f4a9d4de0e869a
|
[] |
no_license
|
Vsevolod-Livinskij/x86-64-instruction-summary
|
4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd
|
c276edab1b19e3929efb3ebe7514489f66087764
|
refs/heads/master
| 2022-02-02T18:11:07.818345
| 2019-01-25T17:19:21
| 2019-01-25T17:19:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
# Python 2 script: loads the compiled vmovapd test binary with angr and
# dumps the VEX IR of the basic block at the program entry point.
import angr
proj = angr.Project('vmovapd_ymm_ymm.exe')
print proj.arch
print proj.entry
print proj.filename
# Lift the entry basic block to VEX and pretty-print it.
irsb = proj.factory.block(proj.entry).vex
irsb.pp()
|
[
"sdasgup3@illinois.edu"
] |
sdasgup3@illinois.edu
|
4bde09ff5b4dcd8af235f043ea5b05674c5e581d
|
35b6013c1943f37d1428afd2663c8aba0a02628d
|
/functions/v2/log/helloworld/main_test.py
|
4a2633ea2fdedff862401585737661a6830f6308
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/python-docs-samples
|
d2a251805fbeab15d76ed995cf200727f63f887d
|
44e819e713c3885e38c99c16dc73b7d7478acfe8
|
refs/heads/main
| 2023-08-28T12:52:01.712293
| 2023-08-28T11:18:28
| 2023-08-28T11:18:28
| 35,065,876
| 7,035
| 7,593
|
Apache-2.0
| 2023-09-14T20:20:56
| 2015-05-04T23:26:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 715
|
py
|
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import main
def test_hello_world(capsys):
    """hello_world should print its greeting to stdout."""
    main.hello_world(None)
    captured = capsys.readouterr()
    assert "Hello, stdout!" in captured.out
|
[
"noreply@github.com"
] |
GoogleCloudPlatform.noreply@github.com
|
b3ed0d89e201a0a78da0131558224b973b5b5960
|
80b2700b6f9940ee672f42124b2cb8a81836426e
|
/exception/test1.py
|
a4c837a4d50be53be15bca32d9979f2a5b1b85fd
|
[
"Apache-2.0"
] |
permissive
|
Vayne-Lover/Python
|
6c1ac5c0d62ecdf9e3cf68d3e659d49907bb29d4
|
79cfe3d6971a7901d420ba5a7f52bf4c68f6a1c1
|
refs/heads/master
| 2020-04-12T08:46:13.128989
| 2017-04-21T06:36:40
| 2017-04-21T06:36:40
| 63,305,306
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
#!/usr/local/bin/python
#class MuffledCalculator:
# muffled=False
# def calc(self,expr):
# try:
# return eval(expr)
# except (ZeroDivisionError,TypeError):
# if self.muffled:
# print "There are errors."
# else:
# raise
#a=MuffledCalculator()
#print a.calc('2/1')
##print a.calc('2/"dsf"')
##print a.calc('1/0')
#a.muffled=True
#print a.calc('1/0')
#class Test:
# def init(self):
# try:
# x=1
# y='sg'
# print x/y
# except (ZeroDivisionError,TypeError),e:
# print e
#a=Test()
#a.init()
#class Test1:
# def init(self):
# try:
# x=1
# y='sg'
# print x/y
# except Exception,e:
# print e
#a=Test1()
#a.init()
#try:
# print 'Go!'
#except Exception,e:
# print e
#else:
# print 'Planned.'
# Demonstrates try/except/finally ordering (Python 2: print statements and
# the `except Exception,e` syntax). Expected output: 2, 3, the error text,
# then 4.
x=1
try:
    x=2
    print x
    # 1/0 raises ZeroDivisionError, transferring control to except.
    x=1/0
except Exception,e:
    x=3
    print x
    print e
finally:
    # finally always runs; this assignment is the last one to take effect.
    x=4
print x
|
[
"406378362@qq.com"
] |
406378362@qq.com
|
af969107fa1b60317809a206e8afae54a0ac999b
|
25e989e986522cf91365a6cc51e3c68b3d29351b
|
/app/http/controllers/TeamController.py
|
1f26e8f10cb79cc093d37b55e0170ed7b77e05c1
|
[
"MIT"
] |
permissive
|
josephmancuso/gbaleague-masonite2
|
ff7a3865927705649deea07f68d89829b2132d31
|
b3dd5ec3f20c07eaabcc3129b0c50379a946a82b
|
refs/heads/master
| 2022-05-06T10:47:21.809432
| 2019-03-31T22:01:04
| 2019-03-31T22:01:04
| 136,680,885
| 0
| 1
|
MIT
| 2022-03-21T22:16:43
| 2018-06-09T01:33:01
|
Python
|
UTF-8
|
Python
| false
| false
| 898
|
py
|
''' A Module Description '''
from app.Team import Team
from masonite import Upload
class TeamController:
    ''' Handles the team-creation page and form submission.

    NOTE(review): request(), view() and auth() appear to be Masonite's
    global helpers injected at runtime — confirm against the framework
    version in use.
    '''
    def show(self):
        # Preserve an optional "back" redirect target across the request
        # via a flash message before rendering the creation form.
        if request().has('back'):
            request().session.flash('back', request().input('back'))
        return view('create/team')
    def store(self, upload: Upload):
        # The logo input is optional; a missing file input raises
        # AttributeError on .filename, which we treat as "no logo".
        try:
            logo = request().input('logo').filename
        except AttributeError:
            logo = ''
        create_team = Team.create(
            name=request().input('name'),
            owner_id=auth().id,
            picture=logo
        )
        # upload logo
        if logo:
            upload.store(request().input('logo'))
        if create_team:
            return request().back(default='create/team?message=Created Successfully')
        return request().redirect('/create/team?message=Could Not Create Team')
|
[
"idmann509@gmail.com"
] |
idmann509@gmail.com
|
188bb4f99769e74fbcea03d37051f0bf96544b9b
|
51885da54b320351bfea42c7dd629f41985454cd
|
/abc181/d.py
|
9a094d524253759e71cbe9da0075af9308c3e941
|
[] |
no_license
|
mskt4440/AtCoder
|
dd266247205faeda468f911bff279a792eef5113
|
f22702e3932e129a13f0683e91e5cc1a0a99c8d5
|
refs/heads/master
| 2021-12-15T10:21:31.036601
| 2021-12-14T08:19:11
| 2021-12-14T08:19:11
| 185,161,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
#
# abc181 d
#
import sys
from io import StringIO
import unittest
from collections import Counter
class TestClass(unittest.TestCase):
    # Harness that feeds `input` to resolve() via a fake stdin and compares
    # captured stdout against `output`.
    def assertIO(self, input, output):
        """Run resolve() with *input* on stdin; assert stdout == *output*."""
        # Swap in StringIO streams, restoring the real ones afterwards.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        # [:-1] drops the trailing newline produced by print().
        out = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)
    def test_入力例_1(self):
        input = """1234"""
        output = """Yes"""
        self.assertIO(input, output)
    def test_入力例_2(self):
        input = """1333"""
        output = """No"""
        self.assertIO(input, output)
    def test_入力例_3(self):
        input = """8"""
        output = """Yes"""
        self.assertIO(input, output)
def resolve():
    """Print "Yes" if some permutation of the digits of n is divisible by 8.

    A number is divisible by 8 iff its last three digits are, so for inputs
    of three or more digits it suffices to check whether any 3-digit
    multiple of 8 can be assembled from n's digits. One- and two-digit
    inputs are checked directly (the number and its reversal).
    """
    n = input()
    if len(n) <= 2:
        if int(n) % 8 == 0 or int(n[::-1]) % 8 == 0:
            print("Yes")
        else:
            print("No")
        exit()
    cnt = Counter(n)
    for i in range(112, 1000, 8):
        # Counter subtraction is empty iff every digit of i (with
        # multiplicity) is available among the digits of n.
        # Bug fix: removed the stray debug `print(tmp)` that corrupted the
        # output on every iteration.
        if not Counter(str(i)) - cnt:
            print("Yes")
            break
    else:
        # Bug fix: the original printed nothing at all when no multiple
        # matched; the expected answer is "No".
        print("No")
if __name__ == "__main__":
    # Run the unit tests by default; swap for resolve() when submitting.
    unittest.main()
    # resolve()
|
[
"mskt4440@gmail.com"
] |
mskt4440@gmail.com
|
cbeb14309190629a8358f2fec79c0b07079b6bd8
|
1f620140538728b25fd0181e493975534aa0e1fb
|
/project/basis/test/test_admin.py
|
b83678eb911cbb41cdc1dfa79cf4df613d45e9b4
|
[] |
no_license
|
YukiUmetsu/recipe-app-api-python-django
|
2a22f63871489cd073d5c312e20fd9fe49eee5a5
|
abaf4a0826e840e990781b20aaa5d7f0577c54c5
|
refs/heads/master
| 2022-11-30T03:11:16.129881
| 2020-03-03T20:04:00
| 2020-03-03T20:04:00
| 244,045,701
| 0
| 0
| null | 2022-11-22T05:21:23
| 2020-02-29T21:41:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    # Exercises the Django admin pages for the custom user model
    # (list, change and add views under the 'basis' app label).
    def setUp(self):
        """set up admin user (logged in) and normal user"""
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin@testadmin.com',
            password='password1234'
        )
        # Authenticate the test client as the superuser for all requests.
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='admin@testadminsite.com',
            password='password1234',
            name='Test user full name'
        )
    def test_users_listed(self):
        """Test that users are listed on user page"""
        url = reverse('admin:basis_user_changelist')
        res = self.client.get(url)
        # assertContains also checks for a 200 status code.
        self.assertContains(res, self.user.name)
        self.assertContains(res, self.user.email)
    def test_user_change_page(self):
        # The per-user edit page should render for an existing user.
        url = reverse('admin:basis_user_change', args=[self.user.id])
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
    def test_create_user_page(self):
        """Test that the create user page works"""
        url = reverse('admin:basis_user_add')
        res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
|
[
"yuuki.umetsu@gmail.com"
] |
yuuki.umetsu@gmail.com
|
da9bbbbba39f5dc6085d921ab3cf3fe4c283bf0e
|
f3b5c4a5ce869dee94c3dfa8d110bab1b4be698b
|
/controller/src/xmpp/test/SConscript
|
8358c3272343acd70e14c431f4e7437de65829ec
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
pan2za/ctrl
|
8f808fb4da117fce346ff3d54f80b4e3d6b86b52
|
1d49df03ec4577b014b7d7ef2557d76e795f6a1c
|
refs/heads/master
| 2021-01-22T23:16:48.002959
| 2015-06-17T06:13:36
| 2015-06-17T06:13:36
| 37,454,161
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,075
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
# -*- mode: python; -*-
# SCons build script for the XMPP unit tests: clones the shared build
# environment, adds library search paths/link libraries, declares one
# UnitTest target (plus alias) per test, and groups them into a stable
# suite and a flaky suite.
Import('BuildEnv')
import sys
env = BuildEnv.Clone()
env.Append(LIBPATH = ['#/' + Dir('..').path,
                      '../../base',
                      '../../base/test',
                      '../../bgp',
                      '../../control-node',
                      '../../db',
                      '../../ifmap',
                      '../../io',
                      '../../xml',
                      '../../schema',
                      '.'
                      ])
# Helper library shared by the tests below.
libxmpptest = env.Library('xmpptest',
                          ['xmpp_sample_peer.cc'])
env.Prepend(LIBS = ['task_test', 'gunit', 'xmpp', 'xml', 'pugixml', 'sandesh',
                    'http', 'http_parser', 'curl', 'process_info',
                    'io', 'ssl', 'sandeshvns', 'base', 'peer_sandesh',
                    'boost_regex', 'xmpptest', 'control_node'])
# librt (clock/timer APIs) is needed everywhere except macOS.
if sys.platform != 'darwin':
    env.Append(LIBS = ['rt'])
xmpp_server_test = env.UnitTest('xmpp_server_test', ['xmpp_server_test.cc'])
env.Alias('controller/xmpp:xmpp_server_test', xmpp_server_test)
xmpp_regex_test = env.UnitTest('xmpp_regex_test', ['xmpp_regex_test.cc'])
env.Alias('controller/xmpp:xmpp_regex_test', xmpp_regex_test)
xmpp_pubsub_test = env.UnitTest('xmpp_pubsub_test', ['xmpp_pubsub_test.cc'])
env.Alias('controller/xmpp:xmpp_pubsub_test', xmpp_pubsub_test)
xmpp_pubsub_client = env.UnitTest('xmpp_pubsub_client', ['xmpp_pubsub_client.cc'])
env.Alias('controller/xmpp:xmpp_pubsub_client', xmpp_pubsub_client)
xmpp_session_test = env.UnitTest('xmpp_session_test', ['xmpp_session_test.cc'])
env.Alias('controller/xmpp:xmpp_session_test', xmpp_session_test)
xmpp_client_standalone_test = env.UnitTest('xmpp_client_standalone_test',
                                           ['xmpp_client_standalone.cc'])
env.Alias('controller/xmpp:xmpp_client_standalone_test', xmpp_client_standalone_test)
xmpp_server_standalone_test = env.UnitTest('xmpp_server_standalone_test',
                                           ['xmpp_server_standalone.cc'])
env.Alias('controller/xmpp:xmpp_server_standalone_test', xmpp_server_standalone_test)
xmpp_server_sm_test = env.UnitTest('xmpp_server_sm_test',['xmpp_server_sm_test.cc'])
env.Alias('controller/xmpp:xmpp_server_sm_test', xmpp_server_sm_test)
xmpp_server_auth_sm_test = env.UnitTest('xmpp_server_auth_sm_test',
                                        ['xmpp_server_auth_sm_test.cc'])
env.Alias('controller/xmpp:xmpp_server_auth_sm_test', xmpp_server_auth_sm_test)
xmpp_client_sm_test = env.UnitTest('xmpp_client_sm_test', ['xmpp_client_sm_test.cc'])
env.Alias('controller/xmpp:xmpp_client_sm_test', xmpp_client_sm_test)
xmpp_client_auth_sm_test = env.UnitTest('xmpp_client_auth_sm_test',
                                        ['xmpp_client_auth_sm_test.cc'])
env.Alias('controller/xmpp:xmpp_client_auth_sm_test', xmpp_client_auth_sm_test)
xmpp_stream_message_client_test = env.UnitTest('xmpp_stream_message_client_test',
                                               ['xmpp_stream_message_client.cc'])
env.Alias('controller/xmpp:xmpp_stream_message_client_test', xmpp_stream_message_client_test)
xmpp_stream_message_server_test = env.UnitTest('xmpp_stream_message_server_test',
                                               ['xmpp_stream_message_server.cc'])
env.Alias('controller/xmpp:xmpp_stream_message_server_test', xmpp_stream_message_server_test)
# Stable tests run in the default suite.
test_suite = [
    xmpp_client_sm_test,
    xmpp_pubsub_test,
    xmpp_regex_test,
    xmpp_server_sm_test,
    xmpp_server_test,
    xmpp_session_test,
    xmpp_server_auth_sm_test,
    xmpp_client_auth_sm_test
    ]
# Known-flaky tests are kept out of the default suite but still buildable.
flaky_test_suite = [
    xmpp_stream_message_client_test,
    xmpp_stream_message_server_test,
    ]
test = env.TestSuite('xmpp-test', test_suite)
env.Alias('controller/src/xmpp:test', test)
flaky_test = env.TestSuite('xmpp-flaky-test', flaky_test_suite)
env.Alias('controller/src/xmpp:flaky-test', flaky_test)
env.Alias('controller/src/xmpp:all-test', [test, flaky_test])
Return('test_suite')
|
[
"pan2za@live.com"
] |
pan2za@live.com
|
|
cad3020fe5ed05dd5f2adb766e25e7be8f6dbf96
|
9d41f4df737dc2e6fd3fcf4c6f50028fd483cdd0
|
/python_basic/section08.py
|
c3e3a9de8ba928d719982f41ef6e0dcd6599505e
|
[] |
no_license
|
Ha-Young/byte_degree_python
|
33a730f4c1f4a99fea03fb923ad73edee2dd1d48
|
7fcbfed832dec3d7cb8503b86d9457e1f2ae0ccf
|
refs/heads/master
| 2022-11-16T16:54:52.978443
| 2020-07-04T14:32:16
| 2020-07-04T14:32:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
# Section08
# Python modules and packages
# Package example
# Relative paths
# .. : parent directory
# .  : current directory
# Usage 1 (class)
# from <package> import <class>
from pkg.fibonacci import Fibonacci
Fibonacci.fib(300)
print("ex2 : ", Fibonacci.fib2(400))
print("ex2 : ", Fibonacci("hoho").title)
# Usage 2 (class) -> wildcard import; wasteful on memory, not recommended
from pkg.fibonacci import * # imports everything from the module
Fibonacci.fib(600)
print("ex2 : ", Fibonacci.fib2(600))
print("ex2 : ", Fibonacci("use2").title)
# Usage 3 (class) -> recommended; uses an alias
from pkg.fibonacci import Fibonacci as Fib
Fib.fib(1000)
print("ex3 : ", Fib.fib2(1400))
print("ex3 : ", Fib("use3").title)
# Usage 4 (function)
# import <module> -> brings in everything defined in that module
import pkg.calculations as c
print("ex4 : ", c.add(10, 100))
print("ex4 : ", c.mul(10, 100))
# Usage 5 (function) -> recommended
# from <module> import <function> as <alias>
from pkg.calculations import div as d
print("ex5 : ", int(d(100,10)))
# Usage 6
import pkg.prints as p
import builtins # names that are always available by default
p.prt1()
p.prt2()
print(dir(builtins))
|
[
"hayeong28@naver.com"
] |
hayeong28@naver.com
|
096024ea08a007418474f21229662d03091ef468
|
5253ecc76e493afea8935be6ed7926a431f1721d
|
/sovrin_client/test/cli/test_pool_upgrade.py
|
a088aed75e7690e92fb0e012c68ff4dd722c0fd2
|
[
"Apache-2.0"
] |
permissive
|
peacekeeper/sovrin-client
|
a34f7d1edc1722e4805cbe36e61d031dc6574dc4
|
4d408d16ee2d8aca2d3065c0302431cc5c5386c1
|
refs/heads/master
| 2021-01-11T14:43:46.240473
| 2017-02-16T10:02:21
| 2017-02-16T10:02:21
| 80,198,885
| 2
| 1
| null | 2017-01-27T10:31:04
| 2017-01-27T10:31:03
| null |
UTF-8
|
Python
| false
| false
| 1,916
|
py
|
from copy import copy
import pytest
from plenum.common.eventually import eventually
from plenum.common.txn import VERSION
from sovrin_common.txn import ACTION, CANCEL, JUSTIFICATION
from sovrin_node.test.upgrade.helper import checkUpgradeScheduled, \
checkNoUpgradeScheduled
from sovrin_node.test.upgrade.conftest import validUpgrade
@pytest.fixture(scope='module')
def nodeIds(poolNodesStarted):
    # Node ids as known to the pool manager of (any) one started node.
    return next(iter(poolNodesStarted.nodes.values())).poolManager.nodeIds
@pytest.fixture(scope="module")
def poolUpgradeSubmitted(be, do, trusteeCli, validUpgrade, trusteeMap):
    # Submit a POOL_UPGRADE transaction through the trustee CLI.
    do('send POOL_UPGRADE name={name} version={version} sha256={sha256} '
       'action={action} schedule={schedule} timeout={timeout}',
       within=10,
       expect=['Pool upgrade successful'], mapper=validUpgrade)
@pytest.fixture(scope="module")
def poolUpgradeScheduled(poolUpgradeSubmitted, poolNodesStarted, validUpgrade):
    # Wait until every node has actually scheduled the submitted upgrade.
    nodes = poolNodesStarted.nodes.values()
    poolNodesStarted.looper.run(
        eventually(checkUpgradeScheduled, nodes,
                   validUpgrade[VERSION], retryWait=1, timeout=10))
@pytest.fixture(scope="module")
def poolUpgradeCancelled(poolUpgradeScheduled, be, do, trusteeCli,
                         validUpgrade, trusteeMap):
    # Re-send the same upgrade with action=cancel (copy to avoid mutating
    # the shared validUpgrade fixture).
    validUpgrade = copy(validUpgrade)
    validUpgrade[ACTION] = CANCEL
    validUpgrade[JUSTIFICATION] = '"not gonna give you one"'
    do('send POOL_UPGRADE name={name} version={version} sha256={sha256} '
       'action={action} justification={justification}',
       within=10,
       expect=['Pool upgrade successful'], mapper=validUpgrade)
def testPoolUpgradeSent(poolUpgradeScheduled):
    # Success is the fixture completing: the upgrade was scheduled.
    pass
def testPoolUpgradeCancelled(poolUpgradeCancelled, poolNodesStarted):
    # After cancellation no node should still have an upgrade scheduled.
    nodes = poolNodesStarted.nodes.values()
    poolNodesStarted.looper.run(
        eventually(checkNoUpgradeScheduled,
                   nodes, retryWait=1, timeout=10))
|
[
"lovesh.harchandani@evernym.com"
] |
lovesh.harchandani@evernym.com
|
c3122503563ac8940d8246dba442f246f956d3bb
|
6f560247d031db5ab0bbf5d1d3ad6bd7b12f6e14
|
/aiormq/tools.py
|
42ec7dd8b7d84c995a3b6330edebcf611edb79ae
|
[
"Apache-2.0"
] |
permissive
|
tchalupnik/aiormq
|
c7482a224447aabac734e5963e8ffba9c80872c0
|
8aea0ecd15f695ae74fdafe0dfb1626a56412130
|
refs/heads/master
| 2023-09-04T11:42:16.178616
| 2021-10-03T19:23:12
| 2021-10-03T19:23:12
| 422,150,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,634
|
py
|
import asyncio
from functools import wraps
from typing import AsyncContextManager, Awaitable, TypeVar
from yarl import URL
from aiormq.abc import TimeoutType
T = TypeVar("T")
def censor_url(url: URL):
    """Return *url* with any password component masked as '******'."""
    if url.password is None:
        return url
    return url.with_password("******")
def shield(func):
    """Decorator: run *func* shielded from cancellation of the caller."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        return asyncio.shield(awaitable(func)(*args, **kwargs))
    return wrapper
def awaitable(func):
    """Wrap *func* so its result can always be awaited.

    Coroutine functions are returned unchanged (avoids a Python 3.8+
    "coroutine was never awaited"-style warning). Otherwise the wrapper
    awaits the result when it is awaitable and passes plain values through.
    """
    if asyncio.iscoroutinefunction(func):
        return func

    @wraps(func)
    async def wrapper(*args, **kwargs):
        ret = func(*args, **kwargs)
        needs_await = (
            hasattr(ret, "__await__")
            or asyncio.iscoroutine(ret)
            or asyncio.isfuture(ret)
        )
        return await ret if needs_await else ret
    return wrapper
def _inspect_await_method():
async def _test():
pass
coro = _test()
method_await = getattr(coro, "__await__", None)
method_iter = getattr(coro, "__iter__", None)
for _ in (method_await or method_iter)():
pass
return bool(method_await)
HAS_AWAIT_METHOD = _inspect_await_method()
class Countdown:
    """Tracks a deadline on the event loop's clock.

    Constructed with ``timeout=None`` the countdown is inert; otherwise
    awaitables passed through it are bounded by the time remaining.
    """

    def __init__(self, timeout: TimeoutType = None):
        self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
        self.deadline: TimeoutType = None
        if timeout is not None:
            self.deadline = self.loop.time() + timeout

    def get_timeout(self) -> TimeoutType:
        """Seconds left until the deadline, or None when unbounded.

        Raises asyncio.TimeoutError once the deadline has passed.
        """
        if self.deadline is None:
            return None
        remaining = self.deadline - self.loop.time()
        if remaining <= 0:
            raise asyncio.TimeoutError
        return remaining

    def __call__(self, coro: Awaitable[T]) -> Awaitable[T]:
        """Bound *coro* by the remaining time (no-op without a deadline)."""
        if self.deadline is None:
            return coro
        return asyncio.wait_for(coro, timeout=self.get_timeout())

    def enter_context(
        self, ctx: AsyncContextManager[T],
    ) -> AsyncContextManager[T]:
        """Wrap an async context manager so enter/exit honour the deadline."""
        return CountdownContext(self, ctx)
class CountdownContext(AsyncContextManager):
    """Async context manager proxy that applies a Countdown's deadline to
    both __aenter__ and __aexit__ of the wrapped manager."""

    def __init__(self, countdown: Countdown, ctx: AsyncContextManager):
        self.countdown = countdown
        self.ctx = ctx

    def __aenter__(self):
        enter = self.ctx.__aenter__()
        if self.countdown.deadline is None:
            return enter
        return self.countdown(enter)

    def __aexit__(self, exc_type, exc_val, exc_tb):
        leave = self.ctx.__aexit__(exc_type, exc_val, exc_tb)
        if self.countdown.deadline is None:
            return leave
        return self.countdown(leave)
|
[
"me@mosquito.su"
] |
me@mosquito.su
|
d50c1835fb54d1533e75bd2db34d814ec732a697
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02613/s924626133.py
|
1ea35f5c1bd5d4cd2ec8b5efdd1f5791376114f6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
def judge_status_summary():
    """Read N verdicts from stdin and print '<verdict> x <count>' for each
    of AC, WA, TLE and RE (in that fixed order)."""
    total = int(input())
    counts = {"AC": 0, "WA": 0, "TLE": 0, "RE": 0}
    for _ in range(total):
        counts[input()] += 1
    for verdict, count in counts.items():
        print("{} x {}".format(verdict, count))
# Entry point: consume the judge log from stdin and emit the summary.
judge_status_summary()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6547446b3422447b4e2918a7979c12e3681e4daa
|
5a3547772b61f7d1b3a81f76dd1397eb92c68e7b
|
/lunzi/config.py
|
c2d1f52b43355983c9b8b2247f0da3a96e3fa5db
|
[
"MIT"
] |
permissive
|
suen049/AdMRL
|
483440f0ded14e471d879b300da9afbab68fbe66
|
50a22d4d480e99125cc91cc65dfcc0df4a883ac6
|
refs/heads/master
| 2023-03-12T23:15:05.154003
| 2021-03-06T15:31:21
| 2021-03-06T15:31:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,745
|
py
|
import argparse
import os
import yaml
from lunzi.Logger import logger
_frozen = False
_initialized = False
def expand(path):
    """Expand a leading '~' in *path* and return the absolute path."""
    home_expanded = os.path.expanduser(path)
    return os.path.abspath(home_expanded)
class MetaFLAGS(type):
    # Metaclass that turns a class body into a hierarchical, dict-like
    # configuration container. Writes are rejected once the module-global
    # _frozen flag is set (see set_frozen()).
    _initialized = False
    def __setattr__(self, key, value):
        assert not _frozen, 'Modifying FLAGS after dumping is not allowed!'
        super().__setattr__(key, value)
    def __getitem__(self, item):
        return self.__dict__[item]
    def __iter__(self):
        # Iterate public entries as (key, value); nested FLAGS classes are
        # flattened to plain dicts.
        for key, value in self.__dict__.items():
            if not key.startswith('_') and not isinstance(value, classmethod):
                if isinstance(value, MetaFLAGS):
                    value = dict(value)
                yield key, value
    def as_dict(self):
        """Return the flags as a plain (recursively converted) dict."""
        return dict(self)
    def merge(self, other: dict):
        """Recursively overwrite existing keys from *other*; unknown keys fail."""
        for key in other:
            assert key in self.__dict__, f"Can't find key `{key}`"
            if isinstance(self[key], MetaFLAGS) and isinstance(other[key], dict):
                self[key].merge(other[key])
            else:
                setattr(self, key, other[key])
    def set_value(self, path, value):
        """Set a nested value; *path* is a list of keys, e.g. ['a', 'b']."""
        key, *rest = path
        assert key in self.__dict__, f"Can't find key `{key}`"
        if not rest:
            setattr(self, key, value)
        else:
            self[key]: MetaFLAGS
            self[key].set_value(rest, value)
    @staticmethod
    def set_frozen():
        # Flip the module-wide freeze switch; affects every FLAGS class.
        global _frozen
        _frozen = True
    def freeze(self):
        """Recursively finalize all nested FLAGS, then this one."""
        for key, value in self.__dict__.items():
            if not key.startswith('_'):
                if isinstance(value, MetaFLAGS):
                    value.freeze()
        self.finalize()
    def finalize(self):
        # Hook for subclasses to validate/derive values at freeze time.
        pass
class BaseFLAGS(metaclass=MetaFLAGS):
    # Base class for user-defined FLAGS containers; all behaviour lives in
    # the MetaFLAGS metaclass.
    pass
def parse(cls):
    # Populate *cls* (a MetaFLAGS container) from -c/--config YAML files and
    # -s/--set dotted-path overrides on the command line. Idempotent: the
    # module-global _initialized flag makes repeat calls no-ops.
    global _initialized
    if _initialized:
        return
    parser = argparse.ArgumentParser(description='Stochastic Lower Bound Optimization')
    parser.add_argument('-c', '--config', type=str, help='configuration file (YAML)', nargs='+', action='append')
    parser.add_argument('-s', '--set', type=str, help='additional options', nargs='*', action='append')
    args, unknown = parser.parse_known_args()
    for a in unknown:
        logger.info('unknown arguments: %s', a)
    # logger.info('parsed arguments = %s, unknown arguments: %s', args, unknown)
    if args.config:
        # sum(..., []) flattens the list-of-lists produced by action='append'.
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # can execute arbitrary Python from an untrusted config; consider
        # yaml.safe_load if configs never rely on python-object tags.
        for config in sum(args.config, []):
            cls.merge(yaml.load(open(expand(config))))
    else:
        logger.info('no config file specified.')
    if args.set:
        # Each override looks like `a.b.c=value`; the value is YAML-parsed.
        for instruction in sum(args.set, []):
            path, *value = instruction.split('=')
            cls.set_value(path.split('.'), yaml.load('='.join(value)))
    _initialized = True
|
[
"linzichuan12@163.com"
] |
linzichuan12@163.com
|
05ee44b1e5c2c238d8118d81872b9810cb17608a
|
cd64e9076ab81f4b2b42215289b1791c8cc3a1dd
|
/LogHadoopJob/py/MR/MR_KWLIVEAD_POPUP_CLICK.py
|
94452bf28c31743ea62110d84f629538799e61c3
|
[] |
no_license
|
tonygodspeed/py
|
a8396c31fa31cfeb47ebc98dc86e3298e76d5dfa
|
eb38514c540b92903d53434bddc26d35bf67148d
|
refs/heads/master
| 2020-04-02T13:26:27.029232
| 2018-10-24T10:28:35
| 2018-10-24T10:28:35
| 154,481,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
#!/usr/bin/env python
#coding=utf8
from MR_BASE import *
reload(sys)
sys.setdefaultencoding("utf-8")
str_act = "ACT_KWLIVEAD_POPUP_CLICK"
class MR_KWLIVEAD_POPUP_CLICK(mr_base_ex):
    # Log-parsing MapReduce job for the KWLIVEAD popup click action.
    # NOTE(review): field extraction appears to be driven by the base class
    # (mr_base_ex, not visible here) using the parallel type/name lists below.
    def __init__(self):
        mr_base_ex.__init__(self,str_act)
        # Field types: "s" = string, "i" = integer (presumably; defined by
        # mr_base_ex — confirm against MR_BASE).
        self.res_type = ["s","s","s","i","i"];
        self.res_name = ["VER","CHID","MAC","closeret","time"];
        #self.res_spec_str = "S:1010";
mr_obj = MR_KWLIVEAD_POPUP_CLICK()
if __name__ == '__main__':
#test_str = r'49:06| [INFO]: <SRC:MUSIC_8.5.2.0_BCS16|ACT:HIFI_LOG|S:KwMusic|TYPE:ENTER_HIFI_DOWNLOAD_PAGE|CHANNEL_NAME:10001_01|PROD:MUSIC|VER:8cs_20161208.exe}|K:546529252|RESEND:0|U:92204504>(60.181.172.98)TM:1481611747'
test_str = r'<SRC:KWSHELLEXT_1.0.6.9051_MUSIC8500PT|S:1010|PROD:KWSHELLEXT|DISVER:1.0.6.9072|OS:10.0.14393.2_|PLAT:X64|VER:2.0.2.17|GID:2562|CHID:MUSIC8500PT|PN:rundll32.exe|MAC:C860009C01D8|UAC:1|ADMIN:0|MVER:MUSIC_8.5.0.0_PT|MCID:57836354|ST:1481337017|CFGVER:14|ACT:ACT_KWLIVEAD_POPUP_CLOSE||autoclose:0|closeret:2|time:5|{}|U:>(222.87.155.143)TM:1481385606'
mr_obj.LocalTest(test_str)
pass
|
[
"412291198@qq.com"
] |
412291198@qq.com
|
ee191ec954ac1f34e4ac8adcfa289258e03fb944
|
b3f8a351f5d92469c987302661a3dcb0328de609
|
/fit_2D_LeastSq/fit_2Dpoly_mpfit.py
|
8712f33d9e1b3e64fe131f375ded4672311fbe7f
|
[
"MIT"
] |
permissive
|
Chloe20-cyber/PythonFitting
|
87412e5fd7c956f4bf0da77b0c235ad5f5059bff
|
54315e336593f7f105f516766fb323662eadd5e3
|
refs/heads/master
| 2022-04-24T02:46:38.224828
| 2020-03-31T05:04:50
| 2020-03-31T05:04:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,033
|
py
|
#!/usr/bin/env python
# Initial model parameters
inParms=[ {'value': 5.1,
'fixed': False,
'parname': 'amp',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'x1',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'x2',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'x3',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'y1',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'y2',
'limited': [False, False]},
{'value': 1.0,
'fixed': False,
'parname': 'y3',
'limited': [False, False]} ]
#=============================================================================#
import os, sys, shutil
import math as m
import numpy as np
import matplotlib as mpl
import pylab as pl
from mpfit import mpfit
#-----------------------------------------------------------------------------#
def main():
    """Fit a 2-D cubic polynomial surface to noisy sampled data with mpfit,
    then plot the sampled data, the model fit, and the residual side by side."""
    # Generate a noisy polynomial
    # [off, x1, x2, x3, y1, y2, y3]
    pIn = [2.0, 1.5, 0.1, 0.3, 1.0, 2.0, 0.05]
    # NOTE(review): this immediately overwrites the parameter set above —
    # confirm which ground-truth coefficients are intended.
    pIn = [1.0, 0.2, 0.0, 0.0, 0.1, 0.0, 0.0]
    shape = (200, 200)
    X, Y, Z, xyData = genpolydata(pIn, shape, 300, 10.2)
    # Define an function to evaluate the residual
    def errFn(p, fjac=None):
        status = 0
        # poly_surface' returns the 'rfunc' function and the X,Y data is
        # inserted via argument unpacking.
        return status, poly_surface(p)(*[Y, X]) - Z
    # Fit the data starting from an initial guess
    mp = mpfit(errFn, parinfo=inParms, quiet=False)
    print()
    # Report each fitted parameter with its mpfit 1-sigma uncertainty.
    for i in range(len(inParms)):
        print("%s = %f +/- %f" % (inParms[i]['parname'],
                                  mp.params[i],
                                  mp.perror[i]))
    p1 = mp.params
    #-------------------------------------------------------------------------#
    # Plot the original, fit & residual
    fig = pl.figure(figsize=(18,4.3))
    ax1 = fig.add_subplot(1,3,1)
    cax1 = ax1.imshow(xyData, origin='lower',cmap=mpl.cm.jet)
    cbar1=fig.colorbar(cax1, pad=0.0)
    # Overlay the discrete sample points on the noisy image.
    ax1.scatter(X, Y, c=Z, s=40, cmap=mpl.cm.jet)
    ax1.set_title("Sampled Data")
    ax1.set_xlim(0, shape[-1]-1)
    ax1.set_ylim(0, shape[-2]-1)
    ax1.set_aspect('equal')
    ax2 = fig.add_subplot(1,3,2)
    # Evaluate the fitted model on the full pixel grid.
    xyDataFit = poly_surface(p1, shape)
    cax2 = ax2.imshow(xyDataFit, origin='lower', cmap=mpl.cm.jet)
    cbar2=fig.colorbar(cax2, pad=0.0)
    ax2.set_title("Model Fit")
    ax3 = fig.add_subplot(1,3,3)
    xyDataRes = xyData - xyDataFit
    cax3 = ax3.imshow(xyDataRes, origin='lower', cmap=mpl.cm.jet)
    cbar2=fig.colorbar(cax3, pad=0.0)
    ax3.set_title("Residual")
    pl.show()
def poly_surface(params, shape=None):
    """Cubic polynomial surface z(y, x) with independent x and y terms.

    params: sequence of 7 coefficients [c0, x1, x2, x3, y1, y2, y3] giving
        z = c0 + x1*x + x2*x**2 + x3*x**3 + y1*y + y2*y**2 + y3*y**3.
    shape: if given, evaluate on the full (ny, nx) pixel grid and return
        the resulting array; if None, return the evaluator function itself.
    """
    def evaluate(y, x):
        # One shared offset plus a cubic in each coordinate.
        return params[0] + (params[1]*x + params[2]*x**2.0 + params[3]*x**3.0 +
                            params[4]*y + params[5]*y**2.0 + params[6]*y**3.0)
    if shape is None:
        return evaluate
    # np.indices yields (axis0, axis1) = (y, x) index grids.
    return evaluate(*np.indices(shape))
#-----------------------------------------------------------------------------#
def genpolydata(params, shape, nSamps=300, noiseFrac=0.2):
    """Create a noisy polynomial image and sample it at random pixels.

    Returns (X, Y, Z, xyData): integer pixel coordinates of the samples,
    the sampled values, and the full noisy image. Uses np.random, so the
    output is nondeterministic unless the global seed is set by the caller.
    """
    # Generate a noisy gaussian image
    xyData = poly_surface(params, shape)
    # Uniform noise in [-noiseFrac/2, +noiseFrac/2).
    xyData += (np.random.random(xyData.shape) - 0.5) * noiseFrac
    # Sample the data at discrete pixels
    # NOTE(review): the '-1' is applied after scaling, so a draw near 0 can
    # round to index -1, which wraps to the last pixel row/column — confirm
    # whether (shape - 1) scaling was intended instead.
    X = np.random.random(nSamps) * xyData.shape[-1] -1
    X = np.array(np.round(X), dtype='int')
    Y = np.random.random(nSamps) * xyData.shape[-2] -1
    Y = np.array(np.round(Y), dtype='int')
    Z = xyData[Y, X]
    return X, Y, Z, xyData
#-----------------------------------------------------------------------------#
main()
|
[
"cormac.r.purcell@gmail.com"
] |
cormac.r.purcell@gmail.com
|
fe75e56c1ef7300cbd5ff394f09c7970355079b6
|
51ec37fc8b633e90f699d4372e1301cf30b9d960
|
/angrmanagement/ui/toolbars/toolbar_action.py
|
b5297339032d4abc06afc2f05896b3ca27b13d43
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr-management
|
b7deffdefd53a99336c8da2cd21bd17f1eb689d7
|
f28bfb1c34313c74f99691d0b47de1d90ebfd4ec
|
refs/heads/master
| 2023-09-02T11:53:13.869102
| 2023-08-31T23:38:12
| 2023-08-31T23:38:12
| 40,425,410
| 727
| 125
|
BSD-2-Clause
| 2023-09-11T22:09:39
| 2015-08-09T04:35:26
|
Python
|
UTF-8
|
Python
| false
| false
| 585
|
py
|
class ToolbarAction:
    """Description of one toolbar entry.

    Identity (equality and hashing) is based solely on ``name``: two
    actions with the same name compare equal regardless of icon, tooltip,
    handler, checkability, or shortcut.
    """

    def __init__(self, icon, name, tooltip, triggered, checkable=False, shortcut=None):
        self.icon = icon
        self.name = name
        self.tooltip = tooltip
        self.triggered = triggered
        self.checkable = checkable
        self.shortcut = shortcut

    def __eq__(self, other):
        # Guard clause: anything that is not a ToolbarAction is unequal.
        if not isinstance(other, ToolbarAction):
            return False
        return other.name == self.name

    def __hash__(self):
        # Keyed on (class, name) to stay consistent with __eq__.
        return hash((ToolbarAction, self.name))
class ToolbarSplitter(ToolbarAction):
    """A visual separator: a pseudo-action with no icon, name, tooltip, or handler."""

    def __init__(self):
        super().__init__(icon=None, name=None, tooltip=None, triggered=None)
|
[
"noreply@github.com"
] |
angr.noreply@github.com
|
fd30b09fe8bc3641f1b3def960bfbf914b20883d
|
1d1f173d67a04b78f732aee99ef0e2d4e8284d63
|
/dev/phylografter_workaround.py
|
c2afcd323e6a6731ab85836b191cd5292163b922
|
[
"Python-2.0",
"BSD-2-Clause"
] |
permissive
|
rvosa/peyotl
|
8767165ec85129c8f25c56a572f0bd879158aa2a
|
98154af9832d18cbcb079f7e2db3b0e45893e1da
|
refs/heads/master
| 2021-01-18T19:48:31.273061
| 2015-09-03T15:30:13
| 2015-09-03T15:30:13
| 41,867,598
| 0
| 0
| null | 2015-09-03T15:29:00
| 2015-09-03T15:29:00
| null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
#!/usr/bin/env python
# Thin CLI wrapper: read a phylografter NexSON export and write a version
# with known export quirks worked around (via peyotl).
import sys, json, codecs
from peyotl.phylografter.nexson_workaround import workaround_phylografter_export_diffs
# argv[1] = input NexSON file, argv[2] = output path (written by the helper).
inpfn = sys.argv[1]
outfn = sys.argv[2]
# 'rU' universal-newline mode is a Python-2-era flag; deprecated on Python 3.
inp = codecs.open(inpfn, mode='rU', encoding='utf-8')
obj = json.load(inp)
workaround_phylografter_export_diffs(obj, outfn)
|
[
"mtholder@gmail.com"
] |
mtholder@gmail.com
|
ee9ca801490c9efbd0feeb7b4aef729657168eb5
|
cb0bde8ab641d5e411e91477728ade090836b729
|
/sdk/python/pulumi_azure_nextgen/web/latest/web_app_metadata.py
|
20a633976d48076e3187f390ed68311e3b95e47e
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
rchamorro/pulumi-azure-nextgen
|
7debd444063f0f9810ac0ee5fe11e7e8913b4886
|
09987cba1c466657730a23f5083aa62ec3dc8247
|
refs/heads/master
| 2023-03-03T09:32:59.634185
| 2021-02-10T16:13:24
| 2021-02-10T16:13:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,534
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = ['WebAppMetadata']
class WebAppMetadata(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
String dictionary resource.
Latest API Version: 2020-09-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: Settings.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['kind'] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
__props__['properties'] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppMetadata"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppMetadata")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppMetadata, __self__).__init__(
'azure-nextgen:web/latest:WebAppMetadata',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppMetadata':
"""
Get an existing WebAppMetadata resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return WebAppMetadata(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Mapping[str, str]]:
"""
Settings.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"noreply@github.com"
] |
rchamorro.noreply@github.com
|
e6ba30f501ef7ca6789ce6408a5692531d2ee3fa
|
14a913fce4b538b22f28409645cd6abe3455808f
|
/bigtable/quickstart_happybase/main_test.py
|
5f08c30b8b7bf068b92a8642ce32bef179ddf70a
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
iamLoi/Python-Random-Number-Generator
|
8da7dbd37cb13a01232c8ed49b9df35a99c63d73
|
7579e8b15130802aaf519979e475c6c75c403eda
|
refs/heads/master
| 2022-08-29T19:05:32.649931
| 2019-09-14T14:48:58
| 2019-09-14T14:48:58
| 208,454,877
| 2
| 1
|
Apache-2.0
| 2022-08-05T21:57:49
| 2019-09-14T14:51:05
|
Python
|
UTF-8
|
Python
| false
| false
| 873
|
py
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from main import main
PROJECT = os.environ['GCLOUD_PROJECT']
BIGTABLE_CLUSTER = os.environ['BIGTABLE_CLUSTER']
TABLE_NAME = 'my-table'
def test_main(capsys):
    # End-to-end test: runs the quickstart against a real Bigtable cluster
    # (configured via the env vars above) and checks the printed cell value.
    main(PROJECT, BIGTABLE_CLUSTER, TABLE_NAME)
    out, _ = capsys.readouterr()
    assert '"cf1:c1": "test-value"' in out
|
[
"noreply@github.com"
] |
iamLoi.noreply@github.com
|
e7648cbf466cc94ea89fc72b4f47f5aa9e8ef1c9
|
301ff8012353185db7d1ad76f05e1b7972306f19
|
/pickup/pickup2ground.py
|
6a81bcf215cca8cf7db2c4c578143fa76cc5bcaf
|
[] |
no_license
|
msyriac/tenki
|
36cc9e7cee8f5bbc688ac0c946e0cc5c3be1df2c
|
b727f0f40f00a431679fea41e5dd693f07cc496b
|
refs/heads/master
| 2021-01-22T07:32:03.015010
| 2017-08-23T14:15:28
| 2017-08-23T14:15:28
| 102,306,661
| 0
| 0
| null | 2017-09-04T01:36:40
| 2017-09-04T01:36:40
| null |
UTF-8
|
Python
| false
| false
| 2,095
|
py
|
# Project a single pickup map into horizontal coordinates
import numpy as np, os
from enlib import enmap, utils, config, array_ops
from enact import actdata, filedb
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("pickup_map")
parser.add_argument("template")
parser.add_argument("sel_repr")
parser.add_argument("el", type=float)
parser.add_argument("ofile")
args = parser.parse_args()
filedb.init()
nrow, ncol = 33, 32
# Read our template, which represents the output horizontal coordinates
template = enmap.read_map(args.template)
# Use our representative selector to get focalplane offsets and polangles
entry = filedb.data[filedb.scans[args.sel_repr][0]]
d = actdata.read(entry, ["boresight", "point_offsets", "polangle"])
d.boresight[2] = args.el # In degrees, calibrated in next step
d = actdata.calibrate(d, exclude=["autocut"])
def reorder(map, nrow, ncol, dets):
    # Reindex detector rows of `map` into (nrow, ncol) focal-plane order for
    # the given detector list, preserving the WCS via enmap.samewcs.
    # NOTE(review): the parameter `map` shadows the Python builtin.
    return enmap.samewcs(map[utils.transpose_inds(dets,nrow,ncol)],map)
# Read our map, and give each row a weight
pickup = enmap.read_map(args.pickup_map)
pickup = reorder(pickup, nrow, ncol, d.dets)
weight = np.median((pickup[:,1:]-pickup[:,:-1])**2,-1)
weight[weight>0] = 1/weight[weight>0]
# Find the output pixel for each input pixel
baz = pickup[:1].posmap()[1,0]
bel = baz*0 + args.el * utils.degree
ipoint = np.array([baz,bel])
opoint = ipoint[:,None,:] + d.point_offset.T[:,:,None]
opix = template.sky2pix(opoint[::-1]).astype(int) # [{y,x},ndet,naz]
opix = np.rollaxis(opix, 1) # [ndet,{y,x},naz]
omap = enmap.zeros((3,)+template.shape[-2:], template.wcs)
odiv = enmap.zeros((3,3)+template.shape[-2:], template.wcs)
for det in range(d.ndet):
omap += utils.bin_multi(opix[det], template.shape[-2:], weight[det]*pickup[det]) * d.det_comps[det,:,None,None]
odiv += utils.bin_multi(opix[det], template.shape[-2:], weight[det]) * d.det_comps[det,:,None,None,None] * d.det_comps[det,None,:,None,None]
odiv = enmap.samewcs(array_ops.svdpow(odiv, -1, axes=[0,1]), odiv)
omap = enmap.samewcs(array_ops.matmul(odiv, omap, axes=[0,1]), omap)
enmap.write_map(args.ofile, omap)
|
[
"sigurdkn@astro.uio.no"
] |
sigurdkn@astro.uio.no
|
db082c6ccc44d72a76219a522cda7b5f8c59f543
|
b9acbd83aca1f147db64620127a5ea2518910265
|
/calvin-base-master/calvin/runtime/north/control_apis/runtime_api.py
|
9d6fb6a3278a678546f6b9dea69e960f54ec8238
|
[
"Apache-2.0"
] |
permissive
|
skyjan0428/WorkSpace
|
c1484bde0e4a79a02486f45c518113ba4e3072bf
|
837be7be75f06b6823df1cb63128506d9ce0016e
|
refs/heads/main
| 2023-02-27T11:15:56.974778
| 2021-01-31T16:53:42
| 2021-01-31T16:53:42
| 321,545,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,959
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from calvin.requests import calvinresponse
from calvin.utilities.calvinlogger import get_logger
from calvin.utilities.calvin_callback import CalvinCB
from calvin.runtime.south.async import async
from routes import handler, register
from authentication import authentication_decorator
from calvin.runtime.north.calvinsys import get_calvinsys
from calvin.runtime.north.calvinlib import get_calvinlib
_log = get_logger(__name__)
#Can't be access controlled, as it is needed to find authorization server
# @authentication_decorator
@handler(method="GET", path="/id")
def handle_get_node_id(self, handle, connection, match, data, hdr):
    """
    GET /id
    Get id of this calvin node
    Response status code: OK
    Response: node-id
    """
    # Deliberately unauthenticated (see note above): clients need /id to
    # locate the authorization server before they can authenticate.
    self.send_response(handle, connection, json.dumps({'id': self.node.id}))
@handler(method="GET", path="/capabilities")
def handle_get_node_capabilities(self, handle, connection, match, data, hdr):
    """
    GET /capabilities
    Get capabilities of this calvin node
    Response status code: OK
    Response: list of capabilities
    """
    # Concatenation of calvinsys and calvinlib capability names, as one JSON list.
    self.send_response(handle, connection, json.dumps(get_calvinsys().list_capabilities() + get_calvinlib().list_capabilities()))
@handler(method="POST", path="/peer_setup")
def handle_peer_setup(self, handle, connection, match, data, hdr):
    """
    POST /peer_setup
    Add calvin nodes to network
    Body: {"peers: ["calvinip://<address>:<port>", ...] }
    Response status code: OK or SERVICE_UNAVAILABLE
    Response: {<peer control uri>: [<peer node id>, <per peer status>], ...}
    """
    _log.analyze(self.node.id, "+", data)
    # Asynchronous: the HTTP response is sent later by handle_peer_setup_cb.
    self.node.peersetup(data['peers'], cb=CalvinCB(self.handle_peer_setup_cb, handle, connection))
@register
def handle_peer_setup_cb(self, handle, connection, status=None, peer_node_ids=None):
    """Completion callback for POST /peer_setup: report per-peer results as JSON."""
    _log.analyze(self.node.id, "+", status.encode())
    if peer_node_ids:
        # Map each peer key to (node id, numeric status code).
        data = json.dumps({k: (v[0], v[1].status) for k, v in peer_node_ids.items()})
    else:
        data = None
    self.send_response(handle, connection, data, status=status.status)
@handler(method="GET", path="/nodes")
@authentication_decorator
def handle_get_nodes(self, handle, connection, match, data, hdr):
    """
    GET /nodes
    List nodes in network (excluding self) known to self
    Response status code: OK
    Response: List of node-ids
    """
    # Presumably network.list_links() returns the currently linked peers —
    # verify against the network module if an exhaustive list is expected.
    self.send_response(handle, connection, json.dumps(self.node.network.list_links()))
@handler(method="DELETE", path="/node", optional=["/now", "/migrate", "/clean"])
@authentication_decorator
def handle_quit(self, handle, connection, match, data, hdr):
    """
    DELETE /node{/now|/migrate|/clean}
    Stop (this) calvin node
    now: stop the runtime without handling actors on the runtime
    migrate: migrate any actors before stopping the runtime
    clean: stop & destroy all actors before stopping [default]
    Response status code: ACCEPTED
    Response: none
    """
    if match.group(1) == "now":
        stop_method = self.node.stop
    elif match.group(1) == "migrate":
        stop_method = self.node.stop_with_migration
    else: # Clean up
        stop_method = self.node.stop_with_cleanup
    # Defer the stop by 0.2s so the ACCEPTED response below is sent first.
    async.DelayedCall(.2, stop_method)
    self.send_response(handle, connection, None, status=calvinresponse.ACCEPTED)
@handler(method="OPTIONS", path=r"/{path}")
@authentication_decorator
def handle_options(self, handle, connection, match, data, hdr):
    """
    OPTIONS /url
    Request for information about the communication options available on url
    Response status code: OK
    Response: Available communication options
    """
    # Hand-built CORS preflight reply; bypasses send_response on purpose so
    # the raw header block can be written verbatim.
    response = "HTTP/1.1 200 OK\n"
    # Copy the content of Access-Control-Request-Headers to the response
    if 'access-control-request-headers' in hdr:
        response += "Access-Control-Allow-Headers: " + \
                    hdr['access-control-request-headers'] + "\n"
    response += "Content-Length: 0\n" \
                "Access-Control-Allow-Origin: *\n" \
                "Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS\n" \
                "Content-Type: *\n" \
                "\n\r\n"
    if connection is None:
        # Tunneled client: push the raw response over the control tunnel.
        msg = {"cmd": "httpresp", "msgid": handle, "header": response, "data": None}
        self.tunnel_client.send(msg)
    else:
        connection.send(response)
|
[
"skyjan0428@gmail.com"
] |
skyjan0428@gmail.com
|
b7486ff523640c2dc9848f2de261a32473eedb3a
|
9e549ee54faa8b037f90eac8ecb36f853e460e5e
|
/venv/lib/python3.6/site-packages/pylint/test/functional/bad_continuation.py
|
34c7c864c86ed2230807c6ccdf8b1eb41b67afdf
|
[
"MIT"
] |
permissive
|
aitoehigie/britecore_flask
|
e8df68e71dd0eac980a7de8c0f20b5a5a16979fe
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
refs/heads/master
| 2022-12-09T22:07:45.930238
| 2019-05-15T04:10:37
| 2019-05-15T04:10:37
| 177,354,667
| 0
| 0
|
MIT
| 2022-12-08T04:54:09
| 2019-03-24T00:38:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,862
|
py
|
"""Regression test case for bad-continuation."""
# pylint: disable=print-statement,implicit-str-concat-in-sequence,using-constant-test,missing-docstring,wrong-import-position
# Various alignment for brackets
from __future__ import print_function
LIST0 = [1, 2, 3]
LIST1 = [1, 2, 3]
LIST2 = [1, 2, 3] # [bad-continuation]
# Alignment inside literals
W0 = [
1,
2,
3,
4,
5,
6,
7, # [bad-continuation]
8,
9,
10,
11,
12,
13,
# and a comment
14,
15,
16,
]
W1 = {"a": 1, "b": 2, "c": 3} # [bad-continuation]
W2 = {"a": 1, "b": 2, "c": 3} # [bad-continuation]
W2 = [
"some",
"contents" # with a continued comment that may be aligned
# under the previous comment (optionally)
"and",
"more", # but this
# [bad-continuation] is not accepted
"contents", # [bad-continuation] nor this.
]
# Values in dictionaries should be indented 4 spaces further if they are on a
# different line than their key
W4 = {
"key1": "value1", # Grandfather in the old style
"key2": "value2", # [bad-continuation]
"key3": "value3", # Comma here
}
# And should follow the same rules as continuations within parens
W5 = {
"key1": "long value" "long continuation",
"key2": "breaking" "wrong", # [bad-continuation]
"key3": 2 * (2 + 2),
"key4": ("parenthesis", "continuation"), # No comma here
}
# Allow values to line up with their keys when the key is next to the brace
W6 = {"key1": "value1", "key2": "value2"}
# Or allow them to be indented
W7 = {"key1": "value1", "key2": "value2"}
# Bug that caused a warning on the previous two cases permitted these odd
# incorrect indentations
W8 = {"key1": "value1"} # [bad-continuation]
W9 = {"key1": "value1"} # [bad-continuation]
# Alignment of arguments in function definitions
def continue1(some_arg, some_other_arg):
"""A function with well-aligned arguments."""
print(some_arg, some_other_arg)
def continue2(some_arg, some_other_arg):
"""A function with well-aligned arguments."""
print(some_arg, some_other_arg)
def continue3(some_arg, some_other_arg): # [bad-continuation] # [bad-continuation]
"""A function with misaligned arguments"""
print(some_arg, some_other_arg)
def continue4(arg1, arg2): # pylint:disable=missing-docstring
print(arg1, arg2)
def callee(*args):
"""noop"""
print(args)
callee("a", "b")
callee("a", "b") # [bad-continuation]
callee(5, {"a": "b", "c": "d"})
if 1:
pass
if 1:
pass
if 1:
pass # [bad-continuation]
if 1 and 2: # [bad-continuation]
pass
while 1 and 2:
pass
while 1 and 2 and 3: # [bad-continuation]
pass
if 2:
pass # [bad-continuation]
if 1 or 2 or 3:
pass
if 1 or 2 or 3: # [bad-continuation]
print(1, 2)
if 1 and 2:
pass # [bad-continuation]
if 2:
pass
if 2: # [bad-continuation]
pass
L1 = lambda a, b: a + b
if not (1 and 2):
print(3)
if not (1 and 2): # [bad-continuation]
print(3)
continue2("foo", some_other_arg="this " "is " "fine")
from contextlib import contextmanager
@contextmanager
def mycontext(*args):
yield args
with mycontext("this is", "great stuff", "mane"):
pass
# pylint: disable=using-constant-test
# More indentation included to distinguish this from the rest.
def long_function_name(var_one, var_two, var_three, var_four):
print(var_one, var_two, var_three, var_four)
def short_func_name(first, second, third):
# Add some extra indentation on the conditional continuation line.
if first and second == first == "some_big_long_statement_that_should_not_trigger":
third()
# Some normal multi-line statements with double-indented continuation lines.
LARGE_COLLECTION = ["spam", "eggs", "beans"]
long_function_name("1", "2", "3", "4")
CONCATENATED_TEXT = "spam" "eggs" "beans"
|
[
"aitoehigie@gmail.com"
] |
aitoehigie@gmail.com
|
e7ad02c46c836321fc801c6337ecc6989ac40f3a
|
6a609bc67d6a271c1bd26885ce90b3332995143c
|
/exercises/array/kth_largest_element_in_a_stream.py
|
9e85a56efb00d9c855375c9d8927a296ae770bdc
|
[] |
no_license
|
nahgnaw/data-structure
|
1c38b3f7e4953462c5c46310b53912a6e3bced9b
|
18ed31a3edf20a3e5a0b7a0b56acca5b98939693
|
refs/heads/master
| 2020-04-05T18:33:46.321909
| 2016-07-29T21:14:12
| 2016-07-29T21:14:12
| 44,650,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
# -*- coding: utf-8 -*-
"""
Design a data structure that supports the following two operations:
void addNum(int num) - Add a integer number from the data stream to the data structure.
int findKthLargest() - Return the Kth largest number.
"""
class StreamData(object):
    """Maintain the k largest numbers seen so far in a data stream.

    A bounded min-heap holds the current k largest values, so heap[0] is
    always the k-th largest. addNum is O(log k); findKthLargest is O(1).
    """

    def __init__(self, k):
        # Min-heap of at most k elements.
        self.heap = []
        self.k = k

    def addNum(self, num):
        """Add one integer from the stream."""
        import heapq
        if len(self.heap) < self.k:
            heapq.heappush(self.heap, num)
        elif num > self.heap[0]:
            # Replace the current k-th largest in one O(log k) operation.
            heapq.heappushpop(self.heap, num)

    def findKthLargest(self):
        """Return the k-th largest number seen, or None if fewer than k exist."""
        if len(self.heap) < self.k:
            return None
        return self.heap[0]
if __name__ == '__main__':
    # Smoke test: feed a short stream and report the 3rd largest value (4).
    sd = StreamData(3)
    sd.addNum(3)
    sd.addNum(6)
    sd.addNum(2)
    sd.addNum(1)
    sd.addNum(10)
    sd.addNum(4)
    sd.addNum(1)
    # Fix: the Python-2-only `print x` statement is a SyntaxError on Python 3;
    # the call form works on both interpreters.
    print(sd.findKthLargest())
|
[
"wanghan15@gmail.com"
] |
wanghan15@gmail.com
|
1b8071621e5a807dfbf32f489db90293f1e03389
|
96148bf17555c028f5650d51f496f349c89e8c79
|
/build/cob_common/cob_srvs/catkin_generated/pkg.develspace.context.pc.py
|
e9d6f4abd9b3c97cf7a0411b20a4e7c5eea11669
|
[] |
no_license
|
kerekare/ros_hydra_libphidgetsupdated
|
239daed94a95f60743c5659f1102183641761240
|
e05e58417fb03a14d627bc80d09af3b2a0fcceab
|
refs/heads/master
| 2016-09-05T23:35:43.792883
| 2014-03-25T16:32:01
| 2014-03-25T16:32:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/kerekare/workspace/care-o-bot/devel/include".split(';') if "/home/kerekare/workspace/care-o-bot/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs;trajectory_msgs;geometry_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "cob_srvs"
PROJECT_SPACE_DIR = "/home/kerekare/workspace/care-o-bot/devel"
PROJECT_VERSION = "0.5.0"
|
[
"kerekare@i60sr2.(none)"
] |
kerekare@i60sr2.(none)
|
76fe0a3f5aa81d72157f8f98e1075cbfe5e407f8
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_243/ch170_2020_06_15_20_16_56_108338.py
|
806f3ed123f5f56aca42de8307af5b88fd84f731
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
def apaga_repetidos(letras):
    """Return `letras` with every repeated character replaced by '*'.

    The first occurrence of each character is kept; every later occurrence
    becomes '*'. Comparison is case-sensitive.
    """
    vistos = set()   # characters already emitted once (was a dict used as a set)
    partes = []      # collected output pieces; joined once to avoid O(n^2) +=
    for letra in letras:
        if letra in vistos:
            partes.append("*")
        else:
            partes.append(letra)
            vistos.add(letra)
    return "".join(partes)
|
[
"you@example.com"
] |
you@example.com
|
4c2b7947b41201f8cef1f67b6c56bb5f479c0730
|
993cf64df4795e7912a7f9157bd8bf02aa985506
|
/Tasks/2_AD/190401-5251-MinDis.py
|
fc43dbd939d9cd03400ba6661775334357519819
|
[] |
no_license
|
jiwonjulietyoon/Algorithm
|
b541e630c5b01b47cc05b538970d2b73d452baf5
|
a11be16f4700e7e55382d4dcfd88d534a232f024
|
refs/heads/master
| 2020-04-24T01:54:05.200538
| 2019-11-09T03:56:47
| 2019-11-09T03:56:47
| 171,616,523
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
# 5251. [Python S/W Problem Solving: Implementation] Day 7 - Minimum travel distance
# (SW Expert Academy problem 5251: single-source shortest path via Dijkstra.)
import sys
sys.stdin = open('../Input/5251.txt', 'r')  # redirect stdin so input() reads the fixture file
"""
# source, destination, length
# start: node 0
# goal: node N
"""
def min_not_vis(vis, dis):
    """Return the index of the unvisited node with the smallest distance.

    Args:
        vis: per-node visited flags (truthy = already finalized).
        dis: per-node tentative distances; same length as vis.

    Returns:
        Index of the closest unvisited node, or -1 when every node is
        visited or all unvisited distances are >= the 11111 sentinel.
    """
    min_idx = -1
    min_dis = 11111  # sentinel "infinity"; edge weights are at most 10
    # Fix: iterate over len(dis) instead of the module-global N, so the
    # helper no longer silently depends on script state (same range, since
    # the script sizes dis/vis as N+1).
    for i in range(len(dis)):
        if vis[i]:
            continue
        if dis[i] < min_dis:
            min_dis = dis[i]
            min_idx = i
    return min_idx
TC = int(input())
for T in range(1, TC+1):
    N, E = map(int, input().split()) # N+1 nodes including 0, E edges
    # Adjacency list: edges[u] = [(v, w), ...] for directed edges u -> v.
    edges = [[] for _ in range(N+1)]
    for _ in range(E): # E edges
        tmp = list(map(int, input().split()))
        edges[tmp[0]] += [(tmp[1], tmp[2])]
    dis = [0] + [11111 for _ in range(N)] # start at 0; max given weight is 10
    vis = [0]*(N+1)
    # Dijkstra without a heap: repeatedly finalize the closest unvisited node.
    while 0 in vis: # c: current node; run while() until all nodes are marked visited
        c = min_not_vis(vis, dis)
        vis[c] = 1
        # Relax every outgoing edge of c.
        for x in edges[c]:
            if dis[c] + x[1] < dis[x[0]]:
                dis[x[0]] = dis[c] + x[1]
    print(f"#{T} {dis[-1]}")
|
[
"jiwonjulietyoon@gmail.com"
] |
jiwonjulietyoon@gmail.com
|
94bb0ba1f6e517ca8fff0ea8bc4b7a8fea66af16
|
a0e0bd0aacc93aa1b494a220aa79a6f99f6e8565
|
/trunk/src/appserver/lib/console.py
|
6306dba962b50cf2a5d9b94cd759694065dd53be
|
[] |
no_license
|
newguangzhou/now-hello
|
b3f21c5b07c7f5b0b69eb91430c760d059499a03
|
a0b970711318547afaa5d6ce1b500c089a2ded59
|
refs/heads/master
| 2021-08-20T03:21:42.927049
| 2017-11-28T02:40:59
| 2017-11-28T02:40:59
| 112,283,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,770
|
py
|
# -*- coding: utf-8 -*-
import logging
import tornado.tcpserver
import tornado.ioloop
from tornado import gen
class Console(tornado.tcpserver.TCPServer):
    """Minimal line-oriented TCP console on top of Tornado's TCPServer.

    Subclasses override handle_cmd(); returning a falsy value closes the
    client connection.
    """
    def __init__(self):
        tornado.tcpserver.TCPServer.__init__(self)

    @gen.coroutine
    def _read_line(self, stream, address):
        # Prompt, then read one newline-terminated line (at most 1024 bytes).
        yield stream.write(">>")
        line = yield stream.read_until("\n", None, 1024)
        line = line.strip()
        self.handle_line(stream, address, line)

    def handle_stream(self, stream, address):
        # TCPServer hook: called once per new client connection.
        logging.debug("A new console client, peer=%s", str(address))
        self._read_line(stream, address)

    def handle_line(self, stream, address, line):
        """Tokenize one input line and dispatch it to handle_cmd."""
        logging.debug("Receive a console line \"%s\", peer=%s", line, address)
        cmd = []
        if line:
            cmd = line.split(' ')
        if not self.handle_cmd(stream, address, cmd):
            # Falsy return from handle_cmd means "disconnect this client".
            stream.close()
        # NOTE(review): _read_line is re-armed even after close() above —
        # confirm whether the extra read on a closed stream is intended.
        self._read_line(stream, address)

    def handle_cmd(self, stream, address, cmd):
        # Subclass hook. The default rejects everything, which (via
        # handle_line) closes the connection.
        return False

    @gen.coroutine
    def send_response(self, stream, response):
        # CRLF-terminated reply to the client.
        yield stream.write(response + "\r\n")
if __name__ == "__main__":
    # Manual smoke test: run a console on 127.0.0.1:9090 that understands
    # only the "quit" command; everything else prints an error and re-prompts.
    class _MyConsole(Console):
        def handle_cmd(self, stream, address, cmd):
            if len(cmd) == 1 and cmd[0] == "quit":
                self.send_response(stream, "Byte!")
                return False   # falsy -> handle_line closes the connection
            elif len(cmd) == 0:
                return True    # blank line: just re-prompt
            else:
                self.send_response(stream, "Invalid command!")
                return True
    import tornado.options
    tornado.options.parse_command_line()
    c = _MyConsole()
    c.bind(9090, "127.0.0.1")
    c.start()
    tornado.ioloop.IOLoop.current().start()
|
[
"bingodongtian@gmail.com"
] |
bingodongtian@gmail.com
|
38ac9b5d20a5a72d267ad789829aecd19e2a9e44
|
c492c405f0535cb4eada74d9099b395f8e9701c3
|
/demo/migrations/0002_auto_20191124_1646.py
|
1c60f482747ab41c3f3a68ea11fd2022d3aa6277
|
[] |
no_license
|
Spirovanni/PyTut
|
a6a0713dcd100bbd35af21022e5b95f0894badf0
|
51489b7550ad8b4a70548de268624f806f827dc4
|
refs/heads/master
| 2020-09-16T15:39:55.081384
| 2019-11-29T03:39:42
| 2019-11-29T03:39:42
| 223,816,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Generated by Django 2.2.7 on 2019-11-25 00:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('demo', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='book',
name='title',
field=models.CharField(default='', max_length=32, unique=True),
),
]
|
[
"blackshieldsx@gmail.com"
] |
blackshieldsx@gmail.com
|
6155e5faa1425f554bea5aec1808a9bf81438e7b
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tests/layer_tests/tensorflow_lite_tests/test_tfl_ReverseV2.py
|
66a615945471d1c82044abccff6a5518cdc6b829
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 1,671
|
py
|
import pytest
import tensorflow as tf
from common.tflite_layer_test_class import TFLiteLayerTest
test_params = [
{'shape': [1], 'axis': [-1]},
{'shape': [1], 'axis': [0]},
{'shape': [2, 6], 'axis': [-1, -2]},
{'shape': [2, 6], 'axis': [1]},
{'shape': [2, 4, 6], 'axis': [0, -2]},
{'shape': [2, 4, 6], 'axis': [2]},
{'shape': [2, 4, 6, 8], 'axis': [0, 3, -3, 2]},
{'shape': [2, 4, 6, 8], 'axis': [-3]},
{'shape': [2, 3, 1, 2, 2], 'axis': [0, 3, -3, 1, -1]},
{'shape': [2, 3, 1, 2, 2], 'axis': [4]},
{'shape': [2, 1, 1, 1, 2, 3, 2, 2], 'axis': [-1]},
{'shape': [2, 1, 1, 1, 2, 3, 2, 2], 'axis': [0, 1, 2, 3, 4, 5, 6, 7]},
]
class TestTFLiteReverseV2LayerTest(TFLiteLayerTest):
inputs = ["Input"]
outputs = ["ReverseV2"]
allowed_ops = ['REVERSE_V2']
def make_model(self, params):
assert len(set(params.keys()).intersection({'shape', 'axis'})) == 2, \
'Unexpected parameters for test: ' + ','.join(params.keys())
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
place_holder = tf.compat.v1.placeholder(params.get('dtype', tf.float32), params['shape'],
name=self.inputs[0])
tf.reverse(place_holder, params['axis'], name=self.outputs[0])
net = sess.graph_def
return net
@pytest.mark.parametrize("params", test_params)
@pytest.mark.nightly
def test_reverse_v2(self, params, ie_device, precision, temp_dir):
if len(params['axis']) > 1:
pytest.xfail('CVS-109932')
self._test(ie_device, precision, temp_dir, params)
|
[
"noreply@github.com"
] |
openvinotoolkit.noreply@github.com
|
cbb758a402a190f2d6e6f0bac3ba7dba9a42a43b
|
3e24611b7315b5ad588b2128570f1341b9c968e8
|
/pacbiolib/pacbio/pythonpkgs/pysiv2/bin/testkit_to_json
|
d59af355fd78ac17429a55e647e303afeb146872
|
[
"BSD-2-Clause"
] |
permissive
|
bioCKO/lpp_Script
|
dc327be88c7d12243e25557f7da68d963917aa90
|
0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2
|
refs/heads/master
| 2022-02-27T12:35:05.979231
| 2019-08-27T05:56:33
| 2019-08-27T05:56:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,634
|
#! python
import os
import argparse
import sys
import logging
from logging import log
from pysiv2 import utils
__author__ = 'gconcepcion'
log = logging.getLogger()
def setup_log(alog, file_name=None, level=logging.DEBUG):
if file_name is None:
handler = logging.StreamHandler(sys.stdout)
else:
handler = logging.FileHandler(file_name)
str_formatter = '[%(levelname)s] %(asctime)-15s [%(name)s %(funcName)s %(lineno)d] %(message)s'
formatter = logging.Formatter(str_formatter)
handler.setFormatter(formatter)
alog.addHandler(handler)
alog.setLevel(level)
def get_parser():
desc = ["Generate a pbservice analysis.json from a testkit.cfg"]
parser = argparse.ArgumentParser(description="\n".join(desc))
parser.add_argument('testkit_cfg', help="Path to testkit.cfg")
parser.add_argument("-o", "--output", dest="output", action="store",
help="Output file name - will be automatically "+
"generated if not specified")
parser.add_argument(
'--debug', action='store_true', help='Debug to stdout.')
return parser
def main():
parser = get_parser()
args = parser.parse_args()
testkit = args.testkit_cfg
if args.debug:
setup_log(log, level=logging.DEBUG)
else:
log.addHandler(logging.NullHandler())
if os.path.exists(testkit):
file_name = utils.testkit_to_analysis_json(testkit, output=args.output)
return 0
else:
log.error("Unable to find file: {f}".format(f=testkit))
return 1
if __name__ == "__main__":
sys.exit(main())
|
[
"409511038@qq.com"
] |
409511038@qq.com
|
|
b4982047c820e343197ea0f6d3ad9ca252d41425
|
5984fdb0c07861f3d3b3a3b1944201b1b7217c1b
|
/github_contents.py
|
135e27621a92afa23f802f284642b8de6771eb77
|
[
"Apache-2.0"
] |
permissive
|
jaywgraves/github-contents
|
3e9db50f235b3a7a79551d5c48cd76d94f4cca33
|
8bb91a13d6d483227839a603489c67a83325ce63
|
refs/heads/master
| 2021-04-02T03:27:28.165486
| 2020-03-13T21:48:55
| 2020-03-13T21:48:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,781
|
py
|
import base64
from requests import Session
class GithubContents:
class NotFound(Exception):
pass
class UnknownError(Exception):
pass
def __init__(self, owner, repo, token, branch=None):
self.owner = owner
self.repo = repo
self.token = token
self.branch = branch
self.session = Session()
def base_url(self):
return "https://api.github.com/repos/{}/{}".format(self.owner, self.repo)
def headers(self):
return {"Authorization": "token {}".format(self.token)}
def read(self, filepath):
"Returns (file_contents_in_bytes, sha1)"
# Try reading using content API
content_url = "{}/contents/{}".format(self.base_url(), filepath)
response = self.session.get(content_url, headers=self.headers())
if response.status_code == 200:
data = response.json()
return base64.b64decode(data["content"]), data["sha"]
elif response.status_code == 404:
raise self.NotFound(filepath)
elif response.status_code == 403:
# It's probably too large
if response.json()["errors"][0]["code"] != "too_large":
raise self.UnknownError(response.content)
else:
return self.read_large(filepath)
else:
raise self.UnknownError(response.content)
def read_large(self, filepath):
master = self.session.get(
self.base_url() + "/git/trees/master?recursive=1", headers=self.headers()
).json()
try:
tree_entry = [t for t in master["tree"] if t["path"] == filepath][0]
except IndexError:
raise self.NotFound(filepath)
data = self.session.get(tree_entry["url"], headers=self.headers()).json()
return base64.b64decode(data["content"]), data["sha"]
def write(
self, filepath, content_bytes, sha=None, commit_message="", committer=None
):
if not isinstance(content_bytes, bytes):
raise TypeError("content_bytes must be a bytestring")
github_url = "{}/contents/{}".format(self.base_url(), filepath)
payload = {
"path": filepath,
"content": base64.b64encode(content_bytes).decode("latin1"),
"message": commit_message,
}
if sha:
payload["sha"] = sha
if committer:
payload["committer"] = committer
response = self.session.put(github_url, json=payload, headers=self.headers())
if (
response.status_code == 403
and response.json()["errors"][0]["code"] == "too_large"
):
return self.write_large(filepath, content_bytes, commit_message, committer)
elif (
sha is None
and response.status_code == 422
and "sha" in response.json().get("message", "")
):
# Missing sha - we need to figure out the sha and try again
_, old_sha = self.read(filepath)
return self.write(
filepath,
content_bytes,
sha=old_sha,
commit_message=commit_message,
committer=committer,
)
elif response.status_code in (201, 200):
updated = response.json()
return updated["content"]["sha"], updated["commit"]["sha"]
else:
raise self.UnknownError(
str(response.status_code) + ":" + repr(response.content)
)
def write_large(self, filepath, content_bytes, commit_message="", committer=None):
if not isinstance(content_bytes, bytes):
raise TypeError("content_bytes must be a bytestring")
# Create a new blob with the file contents
created_blob = self.session.post(
self.base_url() + "/git/blobs",
json={
"encoding": "base64",
"content": base64.b64encode(content_bytes).decode("latin1"),
},
headers=self.headers(),
).json()
# Retrieve master tree sha
master_sha = self.session.get(
self.base_url() + "/git/trees/master?recursive=1", headers=self.headers()
).json()["sha"]
# Construct a new tree
created_tree = self.session.post(
self.base_url() + "/git/trees",
json={
"base_tree": master_sha,
"tree": [
{
"mode": "100644", # file (blob),
"path": filepath,
"type": "blob",
"sha": created_blob["sha"],
}
],
},
headers=self.headers(),
).json()
# Create a commit which references the new tree
payload = {
"message": commit_message,
"parents": [master_sha],
"tree": created_tree["sha"],
}
if committer:
payload["committer"] = committer
created_commit = self.session.post(
self.base_url() + "/git/commits", json=payload, headers=self.headers()
).json()
# Move HEAD reference on master to the new commit
self.session.patch(
self.base_url() + "/git/refs/heads/master",
json={"sha": created_commit["sha"]},
headers=self.headers(),
).json()
return created_blob["sha"], created_commit["sha"]
def branch_exists(self):
assert self.branch
return (
self.session.get(
self.base_url() + "/git/refs/heads/{}".format(self.branch),
headers=self.headers(),
).status_code
== 200
)
|
[
"swillison@gmail.com"
] |
swillison@gmail.com
|
0ea25180c5dc115b5b37177068603cec2d6827f4
|
4bb1a23a62bf6dc83a107d4da8daefd9b383fc99
|
/work/agc016_a2.py
|
750469c7c38ef8a70ef749bb3d8fa1439c1b0a6b
|
[] |
no_license
|
takushi-m/atcoder-work
|
0aeea397c85173318497e08cb849efd459a9f6b6
|
f6769f0be9c085bde88129a1e9205fb817bb556a
|
refs/heads/master
| 2021-09-24T16:52:58.752112
| 2021-09-11T14:17:10
| 2021-09-11T14:17:10
| 144,509,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
s = input()
n = len(s)
res = 10**5
for c in set(list(s)):
ss = list(s)
cnt = 0
while len(set(ss))>1:
cnt += 1
for i in range(len(ss)-1):
if c==ss[i+1]:
ss[i] = c
ss.pop()
res = min(res, cnt)
print(res)
|
[
"takushi-m@users.noreply.github.com"
] |
takushi-m@users.noreply.github.com
|
733ae494a4ecbad01c04af80d49ec3f90f6c6b46
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/44/usersdata/98/14637/submittedfiles/desvpad.py
|
070c267968854b9ffecc983f6ff646f40f3e8c8c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
#comece abaixo
n=input('Digite a quantidade de valores: ')
l=[]
soma=0
soma2=0
for i in range(0,n,1):
l.append(input('Digite um valor: '))
soma=soma+l[i]
media= soma/n
for i in range(1,n+1,1):
soma2=(i-media)**2
s= ((1/(n-1))*soma2)**(1/2)
print ('%.2f' %l[0])
print ('%.2f' %l[n-1])
print ('%.2f' %media)
print ('%.2f'%s)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
2e06e1980cde49ce41b1e7da5a288d1e723ff451
|
d83118503614bb83ad8edb72dda7f449a1226f8b
|
/src/dprj/platinumegg/app/cabaret/kpi/csv/scouteventgachapointconsume.py
|
3e3e67c5ad7740f436e5a8d6a34b403592ebd208
|
[] |
no_license
|
hitandaway100/caba
|
686fe4390e182e158cd9714c90024a082deb8c69
|
492bf477ac00c380f2b2758c86b46aa7e58bbad9
|
refs/heads/master
| 2021-08-23T05:59:28.910129
| 2017-12-03T19:03:15
| 2017-12-03T19:03:15
| 112,512,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
# -*- coding: utf-8 -*-
import settings
from platinumegg.app.cabaret.util.db_util import ModelRequestMgr
from platinumegg.app.cabaret.kpi.models.scoutevent import ScoutEventGachaPointConsumeHash
from platinumegg.app.cabaret.kpi.csv.scoutevent import ScoutEventCSVBase
backup_db = getattr(settings, 'DB_BACKUP', settings.DB_READONLY)
class Manager(ScoutEventCSVBase):
"""スカウトイベントガチャポイント消費量.
"""
def __init__(self, date, output_dir):
ScoutEventCSVBase.__init__(self, date, output_dir)
def get_data(self):
model_mgr = ModelRequestMgr()
eventid = self.getScoutEventId(model_mgr)
data = ScoutEventGachaPointConsumeHash.aggregate(eventid)
if data:
return list(data.items())
else:
return None
def delete(self):
model_mgr = ModelRequestMgr()
if self.isScoutEventPresentEnd(model_mgr):
eventid = self.getScoutEventId(model_mgr)
ScoutEventGachaPointConsumeHash.getDB().delete(ScoutEventGachaPointConsumeHash.makeKey(eventid))
|
[
"shangye@mail.com"
] |
shangye@mail.com
|
8e77edd8dbfd0120efcb306a4b71746957ff489b
|
3bed14fe6abcd8370916de178daff9746335b999
|
/PythonProjects/02-ObjectsFunctionsAndMethods/src/m4_functions.py
|
0e66614b27a449f865d2e95fd574fa3359682bc4
|
[
"MIT"
] |
permissive
|
sanaebrahimi/csse120-public
|
346912d13aa1721dd780bfb9f0403c8ea2e5c9d0
|
128199b278e5cc5386bdfe5a9151b738ce09f8ff
|
refs/heads/master
| 2022-12-14T12:58:45.803734
| 2020-09-08T03:33:44
| 2020-09-08T03:33:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,907
|
py
|
"""
Practice DEFINING and CALLING
FUNCTIONS
Authors: David Mutchler, Sana Ebrahimi, Mohammed Noureddine, Vibha Alangar,
Matt Boutell, Dave Fisher, their colleagues, and
PUT_YOUR_NAME_HERE.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# TODO: 2.
# Allow this module to use the rosegraphics.py module by marking the
# src
# folder in this project as a "Sources Root", as follows:
# _
# In the Project window (to the left), right click on the src folder,
# then select Mark Directory As ~ Sources Root.
###############################################################################
import rosegraphics as rg
def main():
"""
TESTS the functions that you will write below.
You write the tests per the _TODO_s below.
"""
window = rg.TurtleWindow()
# Put your TESTS immediately below this line, as directed by _TODO_s below.
window.close_on_mouse_click()
###############################################################################
# TODO: 3a. Define a function immediately below this _TODO_.
# It takes two arguments that denote, for a right triangle,
# the lengths of the two sides adjacent to its right angle,
# and it returns the length of the hypotenuse of that triangle.
# HINT: Apply the Pythagorean theorem.
# _
# You may name the function and its parameters whatever you wish,
# but choose DESCRIPTIVE (self-documenting) names.
#
# TODO: 3b. In main, CALL your function TWICE (with different values
# for the arguments) and print the returned values,
# to test whether you defined the function correctly.
###############################################################################
###############################################################################
# TODO: 4a. Define a function immediately below this _TODO_.
# It takes two arguments:
# -- a string that represents a color (e.g. "red")
# -- a positive integer that represents the thickness of a Pen.
# _
# The function should do the following (in the order listed):
# a. Constructs two SimpleTurtle objects, where:
# - one has a Pen whose color is "green" and has the GIVEN thickness
# - the other has a Pen whose color is the GIVEN color
# and whose thickness is 5
# _
# Note: the "GIVEN" color means the PARAMETER that represents a color.
# Likewise, the "GIVEN" thickness means the PARAMETER for thickness.
# _
# b. Makes the first (green) SimpleTurtle move FORWARD 100 pixels.
# _
# c. Makes the other (thickness 5) SimpleTurtle move BACKWARD 100 pixels.
# _
# You may name the function and its parameters whatever you wish,
# but choose DESCRIPTIVE (self-documenting) names.
#
# TODO: 4b. In main, CALL your function at least TWICE (with different values
# for the arguments) to test whether you defined the function correctly.
###############################################################################
###############################################################################
# TODO: 5.
# COMMIT-and-PUSH your work (after changing this _TODO_ to DONE).
# _
# As a reminder, here is how you should do so:
# 1. Select VCS from the menu bar (above).
# 2. Choose Commit from the pull-down menu that appears.
# 3a. In the Commit Changes window that pops up,
# - If there is no message in the
# Commit Message
# sub-box, put one there, e.g. "Done."
# 3b: In that same Commit Changes window that pops up:
# - Press the Commit and Push button.
# (Note: If you see only a Commit button:
# - HOVER over the Commit button
# (in the lower-right corner of the window)
# - CLICK on Commit and Push.)
# _
# COMMIT adds the changed work to the version control system on your COMPUTER.
# PUSH adds the changed work into your repository in the "cloud".
# _
# Always PUSH (in addition to the COMMIT) so that your work
# is backed-up in the cloud. If you COMMIT but forget to PUSH,
# you can subsequently do the PUSH by:
# VCS ~ Git ~ Push...
# _
# Oh, one more thing:
# Do you have any blue bars on the scrollbar-like thing to the
# right? If so, click on each blue bar and change its _TODO_ to
# DONE and then run the module (to make sure you did not break
# anything) and COMMIT-and-PUSH again.
# _
# You can COMMIT-and-PUSH as often as you like.
# DO IT FREQUENTLY; AT LEAST once per module.
###############################################################################
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
|
[
"mutchler@rose-hulman.edu"
] |
mutchler@rose-hulman.edu
|
d356ff767d243dca741ca1a7a526a9ab397e7661
|
564d6a4d305a8ac6a7e01c761831fb2081c02d0f
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_load_balancer_network_interfaces_operations.py
|
5e351840793d45108df3555656e21cf0f89b9bd3
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
paultaiton/azure-sdk-for-python
|
69af4d889bac8012b38f5b7e8108707be679b472
|
d435a1a25fd6097454b7fdfbbdefd53e05029160
|
refs/heads/master
| 2023-01-30T16:15:10.647335
| 2020-11-14T01:09:50
| 2020-11-14T01:09:50
| 283,343,691
| 0
| 0
|
MIT
| 2020-07-28T22:43:43
| 2020-07-28T22:43:43
| null |
UTF-8
|
Python
| false
| false
| 5,623
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerNetworkInterfacesOperations:
"""LoadBalancerNetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["models.NetworkInterfaceListResult"]:
"""Gets associated load balancer network interfaces.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/networkInterfaces'} # type: ignore
|
[
"noreply@github.com"
] |
paultaiton.noreply@github.com
|
e5cbe4defe3eb48759672a185f2e75739378bb9d
|
3a891a79be468621aae43defd9a5516f9763f36e
|
/desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl/writer/excel.py
|
b95245ec2ba4408c6f85a9b4e3915339b51d9ddc
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"BSD-Advertising-Acknowledgement",
"MIT"
] |
permissive
|
oyorooms/hue
|
b53eb87f805063a90f957fd2e1733f21406269aa
|
4082346ef8d5e6a8365b05752be41186840dc868
|
refs/heads/master
| 2020-04-15T20:31:56.931218
| 2019-01-09T19:02:21
| 2019-01-09T19:05:36
| 164,998,117
| 4
| 2
|
Apache-2.0
| 2019-01-10T05:47:36
| 2019-01-10T05:47:36
| null |
UTF-8
|
Python
| false
| false
| 6,332
|
py
|
# file openpyxl/writer/excel.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Write a .xlsx file."""
# Python stdlib imports
from zipfile import ZipFile, ZIP_DEFLATED
from ....compat import BytesIO as StringIO
# package imports
from ..shared.ooxml import ARC_SHARED_STRINGS, ARC_CONTENT_TYPES, \
ARC_ROOT_RELS, ARC_WORKBOOK_RELS, ARC_APP, ARC_CORE, ARC_THEME, \
ARC_STYLE, ARC_WORKBOOK, \
PACKAGE_WORKSHEETS, PACKAGE_DRAWINGS, PACKAGE_CHARTS
from ..writer.strings import create_string_table, write_string_table
from ..writer.workbook import write_content_types, write_root_rels, \
write_workbook_rels, write_properties_app, write_properties_core, \
write_workbook
from ..writer.theme import write_theme
from ..writer.styles import StyleWriter
from ..writer.drawings import DrawingWriter, ShapeWriter
from ..writer.charts import ChartWriter
from ..writer.worksheet import write_worksheet, write_worksheet_rels
class ExcelWriter(object):
"""Write a workbook object to an Excel file."""
def __init__(self, workbook):
self.workbook = workbook
self.style_writer = StyleWriter(self.workbook)
def write_data(self, archive):
"""Write the various xml files into the zip archive."""
# cleanup all worksheets
shared_string_table = self._write_string_table(archive)
archive.writestr(ARC_CONTENT_TYPES, write_content_types(self.workbook))
archive.writestr(ARC_ROOT_RELS, write_root_rels(self.workbook))
archive.writestr(ARC_WORKBOOK_RELS, write_workbook_rels(self.workbook))
archive.writestr(ARC_APP, write_properties_app(self.workbook))
archive.writestr(ARC_CORE,
write_properties_core(self.workbook.properties))
archive.writestr(ARC_THEME, write_theme())
archive.writestr(ARC_STYLE, self.style_writer.write_table())
archive.writestr(ARC_WORKBOOK, write_workbook(self.workbook))
self._write_worksheets(archive, shared_string_table, self.style_writer)
def _write_string_table(self, archive):
for ws in self.workbook.worksheets:
ws.garbage_collect()
shared_string_table = create_string_table(self.workbook)
archive.writestr(ARC_SHARED_STRINGS,
write_string_table(shared_string_table))
for k, v in shared_string_table.items():
shared_string_table[k] = bytes(v)
return shared_string_table
def _write_worksheets(self, archive, shared_string_table, style_writer):
drawing_id = 1
chart_id = 1
shape_id = 1
for i, sheet in enumerate(self.workbook.worksheets):
archive.writestr(PACKAGE_WORKSHEETS + '/sheet%d.xml' % (i + 1),
write_worksheet(sheet, shared_string_table,
style_writer.get_style_by_hash()))
if sheet._charts or sheet.relationships:
archive.writestr(PACKAGE_WORKSHEETS +
'/_rels/sheet%d.xml.rels' % (i + 1),
write_worksheet_rels(sheet, drawing_id))
if sheet._charts:
dw = DrawingWriter(sheet)
archive.writestr(PACKAGE_DRAWINGS + '/drawing%d.xml' % drawing_id,
dw.write())
archive.writestr(PACKAGE_DRAWINGS + '/_rels/drawing%d.xml.rels' % drawing_id,
dw.write_rels(chart_id))
drawing_id += 1
for chart in sheet._charts:
cw = ChartWriter(chart)
archive.writestr(PACKAGE_CHARTS + '/chart%d.xml' % chart_id,
cw.write())
if chart._shapes:
archive.writestr(PACKAGE_CHARTS + '/_rels/chart%d.xml.rels' % chart_id,
cw.write_rels(drawing_id))
sw = ShapeWriter(chart._shapes)
archive.writestr(PACKAGE_DRAWINGS + '/drawing%d.xml' % drawing_id,
sw.write(shape_id))
shape_id += len(chart._shapes)
drawing_id += 1
chart_id += 1
def save(self, filename):
"""Write data into the archive."""
archive = ZipFile(filename, 'w', ZIP_DEFLATED)
self.write_data(archive)
archive.close()
def save_workbook(workbook, filename):
"""Save the given workbook on the filesystem under the name filename.
:param workbook: the workbook to save
:type workbook: :class:`openpyxl.workbook.Workbook`
:param filename: the path to which save the workbook
:type filename: string
:rtype: bool
"""
writer = ExcelWriter(workbook)
writer.save(filename)
return True
def save_virtual_workbook(workbook):
"""Return an in-memory workbook, suitable for a Django response."""
writer = ExcelWriter(workbook)
temp_buffer = StringIO()
try:
archive = ZipFile(temp_buffer, 'w', ZIP_DEFLATED)
writer.write_data(archive)
finally:
archive.close()
virtual_workbook = temp_buffer.getvalue()
temp_buffer.close()
return virtual_workbook
|
[
"abraham@elmahrek.com"
] |
abraham@elmahrek.com
|
0d69b1cca7ac402b2bf0126e0fbc92837503b45f
|
0f85db2a2bda863359ad7c81ec6ebba5b42ad939
|
/36-challenges/ex115.py
|
590d9cf00f800827b703040bb59b5bcfdda09f1e
|
[] |
no_license
|
ferreret/python-bootcamp-udemy
|
894a08ba086bad41ba02f2015112956545f3b581
|
ce499458d7da9ff64f9113114cf855afbc6f8163
|
refs/heads/master
| 2022-11-18T00:12:24.613797
| 2020-07-15T19:46:45
| 2020-07-15T19:46:45
| 257,936,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
'''
includes([1, 2, 3], 1) # True
includes([1, 2, 3], 1, 2) # False
includes({ 'a': 1, 'b': 2 }, 1) # True
includes({ 'a': 1, 'b': 2 }, 'a') # False
includes('abcd', 'b') # True
includes('abcd', 'e') # False
'''
def includes(collection, value, start_index=0):
if (isinstance(collection, dict)):
return any(x == value for x in collection.values())
return any(x == value for x in collection[start_index:])
print(includes([1, 2, 3], 1)) # True
print(includes([1, 2, 3], 1, 2)) # False
print(includes({'a': 1, 'b': 2}, 1)) # True
print(includes({'a': 1, 'b': 2}, 'a')) # False
print(includes('abcd', 'b')) # True
print(includes('abcd', 'e')) # False
|
[
"ferreret@gmail.com"
] |
ferreret@gmail.com
|
a1f84e1cd613f2db636b17cd80dc027b0d1e3c59
|
d43100d78daa1a8167e462e0faaa7d2a0fe97671
|
/touchtechnology/common/tests/test_models.py
|
a7a26375da8cde01ddd7115cf63998b17a077224
|
[
"BSD-3-Clause"
] |
permissive
|
goodtune/vitriolic
|
1d6ee4758ed41f1674b70311be88c7135b2d1ed0
|
d4b3da0a8c9b5ccbda4d898003d82934ccad6a7b
|
refs/heads/main
| 2023-08-22T21:29:05.488534
| 2023-07-23T03:00:16
| 2023-07-23T03:00:16
| 73,355,905
| 0
| 0
|
BSD-3-Clause
| 2023-09-04T16:25:44
| 2016-11-10T07:06:41
|
Python
|
UTF-8
|
Python
| false
| false
| 355
|
py
|
from django.utils.encoding import smart_str
from test_plus import TestCase
from touchtechnology.common.tests import factories
class SitemapNodeTests(TestCase):
def setUp(self):
self.object = factories.SitemapNodeFactory.create()
def test_string_representation(self):
self.assertEqual(self.object.title, smart_str(self.object))
|
[
"gary@touch.asn.au"
] |
gary@touch.asn.au
|
ca12ea91f5c1cfafc228d306af427cdb5e2fd9fe
|
5b28005b6ee600e6eeca2fc7c57c346e23da285f
|
/nomadic_recording_lib/ui/qt/bases/qtsimple.py
|
b93a45beb012474f93e49d848144358df034b7e6
|
[] |
no_license
|
nocarryr/wowza_logparse
|
c31d2db7ad854c6b0d13495a0ede5f406c2fce3f
|
d6daa5bf58bae1db48ac30031a845bf975c7d5cc
|
refs/heads/master
| 2021-01-17T07:19:00.347206
| 2017-06-24T16:57:32
| 2017-06-24T16:57:32
| 25,835,704
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
from ...bases import simple
class Color(simple.Color):
pass
class EntryBuffer(simple.EntryBuffer):
pass
class Spin(simple.Spin):
pass
class Radio(simple.Radio):
pass
class Toggle(simple.Toggle):
pass
class Fader(simple.Fader):
pass
class ScaledFader(simple.ScaledFader):
pass
|
[
"matt@nomadic-recording.com"
] |
matt@nomadic-recording.com
|
030b8a4210f6316bcd16d65f28a93e647afdd838
|
7d58cb5bb403d394e609a1f4be8f438cfcaa3895
|
/queue/arrayqueue.py
|
1f9d7b91dcefda86a4f23f09064ecd934caeccc8
|
[] |
no_license
|
TechInTech/dataStructure
|
7a07a0ca3f4ccf4b60f766df536908a36520dd51
|
eb4d6c315867ebb676a1119a5099024aa37988eb
|
refs/heads/master
| 2020-05-03T04:23:55.319238
| 2019-05-24T07:27:47
| 2019-05-24T07:27:47
| 178,420,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,129
|
py
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2019/2/21 11:04
# @Author : Despicable Me
# @Email :
# @File : arrayqueue.py
# @Software: PyCharm
# @Explain :
from arrays import Array
from abstractqueue import AbstractQueue
class ArrayQueue1(AbstractQueue):
"""移动队尾rear"""
DEFAULT_CAPACITY = 10
def __init__(self, sourceCollection = None):
self._items = Array(ArrayQueue1.DEFAULT_CAPACITY)
AbstractQueue.__init__(self, sourceCollection)
self._front = 0
self._rear = max(len(self) - 1, 0)
def __iter__(self):
cursor = 0
while cursor < len(self):
yield self._items[cursor]
cursor += 1
def clear(self):
self._size = 0
self._items = Array(ArrayQueue1.DEFAULT_CAPACITY)
def pop(self):
if self.isEmpty():
raise KeyError(" queue is empty.")
cursor = 0
oldItem = self._items[cursor]
for i range(0, len(self) - 1):
self._items[i] = self._items[i + 1]
self._size -= 1
return oldItem
def peek(self):
if self.isEmpty():
raise KeyError("queue is empty.")
return self._items[self._front]
def __contains__(self, item):
if self.isEmpty():
raise KeyError("queue is empty.")
for i in range(len(self)):
if self._items[i] == item:
return True
return False
def add(self, item):
if self.isEmpty():
self._items[0] = item
self._size += 1
elif (self._rear + 1) < ArrayQueue1.DEFAULT_CAPACITY:
self._items[self._rear] = item
self._rear += 1
self._size += 1
else:
raise KeyError("the queue is full.")
class ArrayQueue2(AbstractQueue):
"""移动队头front"""
DEFAULT_CAPACITY = 10
def __init__(self, sourceCollection = None):
self._items = Array(ArrayQueue2.DEFAULT_CAPACITY)
AbstractQueue.__init__(self, sourceCollection)
self._front = 0
self._rear = max(len(self) - 1, 0)
def __iter__(self):
cursor = self._front
while cursor <= self._rear:
yield self._items[cursor]
cursor += 1
def clear(self):
self._size = 0
self._items = Array(ArrayQueue2.DEFAULT_CAPACITY)
def pop(self):
if self.isEmpty():
raise KeyError(" queue is empty.")
oldItem = self._items[self._front]
if self._front < self._rear:
self._front += 1
elif self._front == self._rear:
self._front = 0
self._rear = 0
self._size -= 1
return oldItem
def peek(self):
if self.isEmpty():
raise KeyError("queue is empty.")
return self._items[self._front]
def __contains__(self, item):
if self.isEmpty():
raise KeyError("queue is empty.")
for i in range(self._front, self._rear + 1):
if self._items[i] == item:
return True
return False
def add(self, item):
if self.isEmpty():
self._items[self._front] = item
self._size += 1
elif (self._rear + 1) < ArrayQueue2.DEFAULT_CAPACITY:
self._items[self._rear] = item
self._rear += 1
self._size += 1
if self._front > 0 and self._rear == (ArrayQueue2.DEFAULT_CAPACITY - 1):
for i in range(self._front, self._rear + 1):
self._items[i - 1] = self._items[i]
self._front -= 1
self._rear -= 1
else:
raise KeyError("the queue is full.")
class ArrayQueue3(AbstractQueue):
"""循环队列"""
DEFAULT_CAPACITY = 10
def __init__(self, sourceCollection=None):
self._items = Array(ArrayQueue3.DEFAULT_CAPACITY)
AbstractQueue.__init__(self, sourceCollection)
self._front = 0
self._rear = max(len(self) - 1, 0)
def __iter__(self):
if self._front <= self._rear:
cursor = self._front
while cursor <= self._rear:
yield self._items[cursor]
cursor += 1
else:
cursor = self._front
while cursor <= len(self) - 1:
yield self._items[cursor]
cursor += 1
cursor = 0
while cursor <= self._rear:
yield self._items[cursor]
cursor += 1
def clear(self):
self._size = 0
self._items = Array(ArrayQueue3.DEFAULT_CAPACITY)
def pop(self):
if self.isEmpty():
raise KeyError(" queue is empty.")
oldItem = self._items[self._front]
if self._front == (len(self) -1):
self._front = 0
else:
self._front += 1
self._size -= 1
return oldItem
def peek(self):
if self.isEmpty():
raise KeyError("queue is empty.")
return self._items[self._front]
def __contains__(self, item):
if self.isEmpty():
raise KeyError("queue is empty.")
elif self._front <= self._rear:
for i in range(self._front, self._rear + 1):
if self._items[i] == item:
return True
return False
else:
for i in range(0, self._rear + 1):
if self._items[i] == item:
return True
for i in range(self._front, len(self)):
if self._items[i] == item:
return True
return False
def add(self, item):
if self.isEmpty():
self._items[self._rear] = item
self._size += 1
elif len(self) == ArrayQueue3.DEFAULT_CAPACITY:
raise KeyError("the queue is full.")
elif self._rear == len(self) - 1:
self._items[0] = item
self._rear += 1
self._size += 1
else:
self._rear += 1
self._items[self._rear] = item
self._size += 1
|
[
"wdw_bluesky@163.com"
] |
wdw_bluesky@163.com
|
00e1101a2d4dffa2fae86d26f0dbd14410a93620
|
5d5b6a7bd7fffe46980d41e452fe92c28a08d5d1
|
/UDP--/asyncio-queue.py
|
57c5600728289fcd7729ba4f31fd87e3791cea9d
|
[] |
no_license
|
calllivecn/testing
|
ce21442f1398b177675ca2b655c4ed3aaf1edcb3
|
05c1d335d54bb12fbbcf3721260763e4537dcaf4
|
refs/heads/master
| 2023-08-20T21:51:15.511595
| 2023-06-13T13:26:37
| 2023-06-13T13:26:37
| 49,574,572
| 0
| 1
| null | 2022-12-08T09:29:09
| 2016-01-13T13:11:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,523
|
py
|
#!/usr/bin/env python3
# coding=utf-8
# date 2022-09-09 21:21:12
# author calllivecn <c-all@qq.com>
import queue
import asyncio
import threading
async def customer(q):
while (task := await q.get()) is not None:
q.task_done()
print(f"customer: {task}")
async def producter(q):
for i in range(10):
c = f"生产资料:{i}"
await q.put(c)
print(c)
await q.put(None)
def customer2(q):
while (task := q.get()) is not None:
q.task_done()
print(f"customer: {task}")
def producter2(q):
for i in range(10):
c = f"生产资料:{i}"
q.put(c)
print(c)
q.put(None)
class run(threading.Thread):
def __init__(self, queue):
super().__init__()
self.queue = queue
def run(self):
customer2(self.queue)
async def async_main():
q = asyncio.Queue(2)
print("启动消费者")
th = asyncio.create_task(customer(q))
print("启动生产者")
p = asyncio.create_task(producter(q))
# 这样才是并发的
await th
await p
async def async_main2():
q = asyncio.Queue(2)
print("启动消费者")
print("启动生产者")
L = await asyncio.gather(customer(q), producter(q))
print("结果:", L)
def main():
q = queue.Queue(2)
print("启动消费者")
th = run(q)
th.start()
print("启动生产者")
producter2(q)
if __name__ == "__main__":
# asyncio.run(async_main())
asyncio.run(async_main2())
# main()
|
[
"calllivecn@outlook.com"
] |
calllivecn@outlook.com
|
688d5d0aa4a544d0b6ebdd24b9ca3c2c5ebfae91
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/iothub/azure-mgmt-iothubprovisioningservices/azure/mgmt/iothubprovisioningservices/models/iot_dps_properties_description_py3.py
|
c8f57abf34739bf39900f01dedfbe64c8f796761
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,854
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class IotDpsPropertiesDescription(Model):
"""the service specific properties of a provisoning service, including keys,
linked iot hubs, current state, and system generated properties such as
hostname and idScope.
Variables are only populated by the server, and will be ignored when
sending a request.
:param state: Current state of the provisioning service. Possible values
include: 'Activating', 'Active', 'Deleting', 'Deleted',
'ActivationFailed', 'DeletionFailed', 'Transitioning', 'Suspending',
'Suspended', 'Resuming', 'FailingOver', 'FailoverFailed'
:type state: str or ~azure.mgmt.iothubprovisioningservices.models.State
:param provisioning_state: The ARM provisioning state of the provisioning
service.
:type provisioning_state: str
:param iot_hubs: List of IoT hubs assosciated with this provisioning
service.
:type iot_hubs:
list[~azure.mgmt.iothubprovisioningservices.models.IotHubDefinitionDescription]
:param allocation_policy: Allocation policy to be used by this
provisioning service. Possible values include: 'Hashed', 'GeoLatency',
'Static'
:type allocation_policy: str or
~azure.mgmt.iothubprovisioningservices.models.AllocationPolicy
:ivar service_operations_host_name: Service endpoint for provisioning
service.
:vartype service_operations_host_name: str
:ivar device_provisioning_host_name: Device endpoint for this provisioning
service.
:vartype device_provisioning_host_name: str
:ivar id_scope: Unique identifier of this provisioning service.
:vartype id_scope: str
:param authorization_policies: List of authorization keys for a
provisioning service.
:type authorization_policies:
list[~azure.mgmt.iothubprovisioningservices.models.SharedAccessSignatureAuthorizationRuleAccessRightsDescription]
"""
_validation = {
'service_operations_host_name': {'readonly': True},
'device_provisioning_host_name': {'readonly': True},
'id_scope': {'readonly': True},
}
_attribute_map = {
'state': {'key': 'state', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'iot_hubs': {'key': 'iotHubs', 'type': '[IotHubDefinitionDescription]'},
'allocation_policy': {'key': 'allocationPolicy', 'type': 'str'},
'service_operations_host_name': {'key': 'serviceOperationsHostName', 'type': 'str'},
'device_provisioning_host_name': {'key': 'deviceProvisioningHostName', 'type': 'str'},
'id_scope': {'key': 'idScope', 'type': 'str'},
'authorization_policies': {'key': 'authorizationPolicies', 'type': '[SharedAccessSignatureAuthorizationRuleAccessRightsDescription]'},
}
def __init__(self, *, state=None, provisioning_state: str=None, iot_hubs=None, allocation_policy=None, authorization_policies=None, **kwargs) -> None:
super(IotDpsPropertiesDescription, self).__init__(**kwargs)
self.state = state
self.provisioning_state = provisioning_state
self.iot_hubs = iot_hubs
self.allocation_policy = allocation_policy
self.service_operations_host_name = None
self.device_provisioning_host_name = None
self.id_scope = None
self.authorization_policies = authorization_policies
|
[
"noreply@github.com"
] |
YijunXieMS.noreply@github.com
|
1c83b0d6834355f9d556f510537c7ebea1b4ac9f
|
3fd8eae327323ea45d31909d537fd7ee8f49c2de
|
/torch_stft/util.py
|
e1a92295009ab8e82ce1f2060b0da5cb790e44c7
|
[
"BSD-3-Clause"
] |
permissive
|
taeminlee/torch-stft
|
4f61c754f8a953d51d492404de602310bfaa38ca
|
c6236f77af113207a78feff93b4b9cbeeccb9143
|
refs/heads/master
| 2020-11-29T19:55:50.041494
| 2019-12-26T06:15:14
| 2019-12-26T06:15:14
| 230,204,083
| 0
| 0
|
BSD-3-Clause
| 2019-12-26T05:58:20
| 2019-12-26T05:58:19
| null |
UTF-8
|
Python
| false
| false
| 1,657
|
py
|
import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
|
[
"prem@u.northwestern.edu"
] |
prem@u.northwestern.edu
|
5c9b1176bbf81a0b9b815632de912c5e83333052
|
2e60bdaf03181f1479701efebbb495f88615df4c
|
/nlp/ner/lstm/dataset/dataset.py
|
b55fb2e52e9c0e63d501499eba7771fc0cb3eac0
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
whatisnull/tensorflow_nlp
|
dc67589ee4069f7a71baa1640d796bac3445bb5c
|
0ecb1e12bbe1fc3d5a63e68d788547d0ae92aeef
|
refs/heads/master
| 2023-04-23T08:23:55.914154
| 2019-09-15T03:47:55
| 2019-09-15T03:47:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,471
|
py
|
# -*- coding:utf-8 -*-
import numpy as np
class Dataset(object):
def __init__(self, word_data, tag_data):
self._start = 0
self._cursor = 0
self._num_samples = word_data.shape[0]
self._word_data = word_data
self._tag_data = tag_data
@property
def word_data(self):
return self._word_data
@property
def tag_data(self):
return self._tag_data
@property
def num_samples(self):
return self._num_samples
def has_next(self):
return self._cursor < self._num_samples
def reset(self):
self._cursor = 0
self._start = 0
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
self._start = self._cursor
self._cursor += batch_size
if self._start + batch_size > self._num_samples:
rest_num_samples = self._num_samples - self._start
word_batch = np.zeros((batch_size, self._word_data.shape[1]), dtype=np.int32)
tag_batch = np.zeros((batch_size, self._word_data.shape[1]), dtype=np.int32)
word_batch[0:rest_num_samples] = self._word_data[self._start:self._num_samples]
tag_batch[0:rest_num_samples] = self._tag_data[self._start:self._num_samples]
return word_batch, tag_batch
else:
end = self._cursor
return self._word_data[self._start:end], self._tag_data[self._start:end]
|
[
"endymecy@sina.cn"
] |
endymecy@sina.cn
|
84064f13bfdd302ead83e4bef54ded511537fc93
|
87dae6d55c66df1d40d6881272009319a1600cb3
|
/Practica_6__Ejercicio_4__Inmigrantes2.py
|
f10e29a73ebd529401d44818335e229ff2f34447
|
[] |
no_license
|
abaldeg/EjerciciosPython
|
92a30a82c05ec75aa7f313c8a6fa0dd052a8db11
|
c8a3238587ebf6b10dbff32516c81bf00bb01630
|
refs/heads/master
| 2021-07-09T07:46:11.584855
| 2020-11-09T11:51:50
| 2020-11-09T11:51:50
| 210,438,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 900
|
py
|
# Practica 6 - Ejercicio 4
try:
entrada = open("Apellidos.txt")
armenia = open("armenia.txt", "wt")
italia = open("italia.txt", "wt")
españa = open("españa.txt", "wt")
print("\nLeyendo datos...")
datos = entrada.readline()
while datos:
if datos.upper().find("IAN,")!=-1:
armenia.write(datos.title()+"\n")
elif datos.upper().find("INI,")!=-1:
italia.write(datos.title()+"\n")
elif datos.upper().find("EZ,")!=-1:
españa.write(datos.title()+"\n")
datos = entrada.readline()
print("Archivos generados correctamente")
except FileNotFoundError:
print("No se encontró el archivo de entrada")
except OSError as error:
print("ERROR:",str(error))
finally:
try:
entrada.close()
armenia.close()
italia.close()
españa.close()
except NameError:
pass
|
[
"abaldeg@gmail.com"
] |
abaldeg@gmail.com
|
a84a3a1979c3cf029fedadcb95908324fb1a010c
|
fd4fac4c6001dcedee7d5e87327368b5050c45d5
|
/htseq/db/archive/fusion-tables-client-python-read-only/src/sql/sqlbuilder.py
|
799056bbeb57353eadef5ef07793ae90e6830b53
|
[] |
no_license
|
nickloman/omicsmaps
|
6b92e4dbe568287af1049d8d2814a5bad934942b
|
fd17a2f84d3dc4be86539e223c77f5e4bc5880ed
|
refs/heads/master
| 2021-01-23T13:32:07.623246
| 2015-01-04T12:55:11
| 2015-01-04T12:55:11
| 28,738,982
| 8
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,551
|
py
|
#!/usr/bin/python
#
# Copyright (C) 2010 Google Inc.
""" Builds SQL strings.
Builds SQL strings to pass to FTClient query method.
"""
__author__ = 'kbrisbin@google.com (Kathryn Hurley)'
class SQL:
""" Helper class for building SQL queries """
def showTables(self):
""" Build a SHOW TABLES sql statement.
Returns:
the sql statement
"""
return 'SHOW TABLES'
def describeTable(self, table_id):
""" Build a DESCRIBE <tableid> sql statement.
Args:
table_id: the ID of the table to describe
Returns:
the sql statement
"""
return 'DESCRIBE %d' % (table_id)
def createTable(self, table):
""" Build a CREATE TABLE sql statement.
Args:
table: a dictionary representing the table. example:
{
"tablename":
{
"col_name1":"STRING",
"col_name2":"NUMBER",
"col_name3":"LOCATION",
"col_name4":"DATETIME"
}
}
Returns:
the sql statement
"""
table_name = table.keys()[0]
cols_and_datatypes = ",".join(["'%s': %s" % (col[0], col[1])
for col in sorted(table.get(table_name).items())])
return "CREATE TABLE '%s' (%s)" % (table_name, cols_and_datatypes)
def select(self, table_id, cols=None, condition=None):
""" Build a SELECT sql statement.
Args:
table_id: the id of the table
cols: a list of columns to return. If None, return all
condition: a statement to add to the WHERE clause. For example,
"age > 30" or "Name = 'Steve'". Use single quotes as per the API.
Returns:
the sql statement
"""
stringCols = "*"
if cols: stringCols = ("'%s'" % ("','".join(cols))) \
.replace("\'rowid\'", "rowid") \
.replace("\'ROWID\'", "ROWID")
if condition: select = 'SELECT %s FROM %d WHERE %s' % (stringCols, table_id, condition)
else: select = 'SELECT %s FROM %d' % (stringCols, table_id)
return select
def update(self, table_id, cols, values=None, row_id=None):
""" Build an UPDATE sql statement.
Args:
table_id: the id of the table
cols: list of columns to update
values: list of the new values
row_id: the id of the row to update
OR if values is None and type cols is a dictionary -
table_id: the id of the table
cols: dictionary of column name to value pairs
row_id: the id of the row to update
Returns:
the sql statement
"""
if row_id == None: return None
if type(cols) == type({}):
updateStatement = ""
count = 1
for col,value in cols.iteritems():
if type(value).__name__ == 'int':
updateStatement = '%s%s=%d' % (updateStatement, col, value)
elif type(value).__name__ == 'float':
updateStatement = '%s%s=%f' % (updateStatement, col, value)
else:
updateStatement = "%s%s='%s'" % (updateStatement, col,
value.encode('string-escape'))
if count < len(cols): updateStatement = "%s," % (updateStatement)
count += 1
return "UPDATE %d SET %s WHERE ROWID = '%d'" % (table_id,
updateStatement, row_id)
else:
if len(cols) != len(values): return None
updateStatement = ""
count = 1
for i in range(len(cols)):
updateStatement = "%s'%s' = " % (updateStatement, cols[i])
if type(values[i]).__name__ == 'int':
updateStatement = "%s%d" % (updateStatement, values[i])
elif type(values[i]).__name__ == 'float':
updateStatement = "%s%f" % (updateStatement, values[i])
else:
updateStatement = "%s'%s'" % (updateStatement,
values[i].encode('string-escape'))
if count < len(cols): updateStatement = "%s," % (updateStatement)
count += 1
return "UPDATE %d SET %s WHERE ROWID = '%d'" % (table_id, updateStatement, row_id)
def delete(self, table_id, row_id):
""" Build DELETE sql statement.
Args:
table_id: the id of the table
row_id: the id of the row to delete
Returns:
the sql statement
"""
return "DELETE FROM %d WHERE ROWID = '%d'" % (table_id, row_id)
def insert(self, table_id, values):
""" Build an INSERT sql statement.
Args:
table_id: the id of the table
values: dictionary of column to value. Example:
{
"col_name1":12,
"col_name2":"mystring",
"col_name3":"Mountain View",
"col_name4":"9/10/2010"
}
Returns:
the sql statement
"""
stringValues = ""
count = 1
cols = values.keys()
values = values.values()
for value in values:
if type(value).__name__=='int':
stringValues = '%s%d' % (stringValues, value)
elif type(value).__name__=='float':
stringValues = '%s%f' % (stringValues, value)
else:
stringValues = "%s'%s'" % (stringValues, value.replace("'", "\\'"))
if count < len(values): stringValues = "%s," % (stringValues)
count += 1
str = 'INSERT INTO %d (%s) VALUES (%s)' % \
(int(table_id), ','.join(["'%s'" % col for col in cols]), stringValues)
return str
def dropTable(self, table_id):
""" Build DROP TABLE sql statement.
Args:
table_id: the id of the table
Returns:
the sql statement
"""
return "DROP TABLE %d" % (table_id)
if __name__ == '__main__':
pass
|
[
"n.j.loman@bham.ac.uk"
] |
n.j.loman@bham.ac.uk
|
83a367b329229b92477b1a88fa5d6ed59bce7bf3
|
9577a61a677142067b9c9f9b60e192e66904d8aa
|
/docs/images/plot.py
|
7879ac19e771ef5dbef1c2416913ed4a67c88706
|
[
"Apache-2.0"
] |
permissive
|
QuantumBFS/YaoAD.jl
|
9e9a8409e927564ebecaaae1218b9757e17c6abc
|
e591ccbe6927a907d6454458676e6035a966f09b
|
refs/heads/master
| 2020-05-26T10:01:25.824430
| 2020-03-11T02:09:47
| 2020-03-11T02:09:47
| 188,196,347
| 2
| 1
|
Apache-2.0
| 2020-02-23T21:54:54
| 2019-05-23T08:49:21
|
Julia
|
UTF-8
|
Python
| false
| false
| 534
|
py
|
#!/usr/bin/env python
import fire
from plotlib import *
class PLT(object):
def fig1(self, tp='pdf'):
nsite = 4
data = np.loadtxt("../data/loss_history_%d.dat"%nsite)
EG = -8
with DataPlt(figsize=(5,4), filename="fig1.%s"%tp) as dp:
plt.plot(np.arange(len(data)), data/nsite, lw=2)
plt.xlabel("Training Step")
plt.ylabel("Energy/Site")
plt.axhline(EG/nsite, ls='--', lw=2)
plt.tight_layout()
plt.xlim(0,200)
fire.Fire(PLT())
|
[
"cacate0129@gmail.com"
] |
cacate0129@gmail.com
|
80f65c753fd32363d2f48a628df5baae43e116fe
|
22e6dcbebad329b32579e531af8b33bc657088c9
|
/AtCoder/ABC111/ProbC.py
|
7857a4a06712c062e6574447a30cb76569137f91
|
[] |
no_license
|
daigo0927/ProgrammingContest
|
a63b74bb79ece46181b03dc359bf665604b11ea5
|
f54aa8f485ebfd30d5ee84fd74fa9e0661c2a7df
|
refs/heads/master
| 2021-06-21T09:26:23.699668
| 2019-06-22T18:51:32
| 2019-06-22T18:51:32
| 132,655,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
n = int(input())
v = list(map(int, input().split()))
ans = 0
even = v[::2]
nums_e = {}
for e in even:
if not e in nums_e.keys():
nums_e[e] = 1
else:
nums_e[e] += 1
e_sorted = []
for key, value in nums_e.items():
e_sorted.append([value, key])
e_sorted = sorted(e_sorted, reverse = True) + [[0, 0-1]]
odd = v[1::2]
nums_o = {}
for o in odd:
if not o in nums_o.keys():
nums_o[o] = 1
else:
nums_o[o] += 1
o_sorted = []
for key, value in nums_o.items():
o_sorted.append([value, key])
o_sorted = sorted(o_sorted, reverse = True) + [[-1, 0]]
if e_sorted[0][1] != o_sorted[0][1]:
print(n - e_sorted[0][0] - o_sorted[0][0])
else:
ans1 = n - e_sorted[1][0] - o_sorted[0][0]
ans2 = n - e_sorted[0][0] - o_sorted[1][0]
print(min(ans1, ans2))
|
[
"Daigo@Daigo-no-MacBook-Air.local"
] |
Daigo@Daigo-no-MacBook-Air.local
|
8be6496c26ae7209b534e5a00a5f87083b90ed55
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_1674486_0/Python/Epcylon/diamonds.py
|
2e2157e6604aa88f7abc29801ac943c856abe3e6
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,430
|
py
|
#!/usr/bin/python
"""
Author: Morten Lied Johansen - mortenjo@ifi.uio.no
Google CodeJam 2012
Round: 1C
Problem: Diamond inheritance
"""
import collections
import os
import sys
import time
from pprint import pformat
from cStringIO import StringIO
import unittest
import logging
from logging import info, debug, error
from multiprocessing import Pool
# Set up basic logging
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
def yield_line_of_items(reader):
for x in reader.readline().strip().split():
yield x
def read_line_of_items(reader):
return list(yield_line_of_items(reader))
def yield_line_of_ints(reader):
for i in yield_line_of_items(reader):
yield int(i)
def read_line_of_ints(reader):
return list(yield_line_of_ints(reader))
def yield_lines_of_items(reader, num=1):
for i in range(num):
yield read_line_of_items(reader)
def read_lines_of_items(reader, num=1):
return list(yield_lines_of_ints(reader, num))
def yield_lines_of_ints(reader, num=1):
for i in range(num):
yield read_line_of_ints(reader)
def read_lines_of_ints(reader, num=1):
return list(yield_lines_of_ints(reader, num))
def run_in_process(case_solver):
return case_solver.solve()
class Solver(object):
def __init__(self, input_name, use_mp=False):
self.input_name = input_name
self.output_name = self._make_output_name()
self.use_mp = use_mp
def _make_output_name(self):
basename, ext = os.path.splitext(self.input_name)
output_name = basename + ".out"
return output_name
def open_output(self):
return open(self.output_name, "w")
def open_input(self):
return open(self.input_name, "r")
def main(self):
input = self.open_input()
output = self.open_output()
self.solve(input, output)
def solve(self, input, output):
number_of_cases = read_line_of_ints(input)[0]
solvers = list()
for casenr in xrange(number_of_cases):
solvers.append(CaseSolver(casenr+1, *self.read_case_input(input)))
if self.use_mp:
p = Pool()
solutions = p.map(run_in_process, solvers)
else:
solutions = map(run_in_process, solvers)
for casenr, result in sorted(solutions):
output.write("Case #%d: %s\n" % (casenr, result))
output.flush()
def read_case_input(self, input):
number_of_classes = read_line_of_ints(input)[0]
debug("Number of classes: %d", number_of_classes)
nodes = [Node(0, [])]
roots = set(range(1, number_of_classes+1))
for id in xrange(1, number_of_classes+1):
node_list = read_line_of_ints(input)
if node_list[0]:
parent_ids = node_list[1:]
node = Node(id, parent_ids)
roots.difference_update(parent_ids)
else:
node = Node(id, list())
nodes.append(node)
debug("Roots: %r", roots)
debug("Nodes: %r", nodes)
return nodes, roots
class Node(object):
def __init__(self, id, parent_ids):
self.id = id
self.parent_ids = parent_ids
self.visited_by_root = collections.defaultdict(bool)
def __repr__(self):
return "<Node(%d, %r)>" % (self.id, self.parent_ids)
class DiamondFound(Exception):
pass
class CaseSolver(object):
def __init__(self, casenr, nodes, roots):
self.casenr = casenr
self.nodes = nodes
self.roots = roots
def solve(self):
info("Solving case %d", self.casenr)
result = "No"
try:
for id in self.roots:
self.process(id, id)
except DiamondFound:
result = "Yes"
debug("Result: %s", result)
return self.casenr, result
def process(self, id, root):
node = self.nodes[id]
for parent_id in node.parent_ids:
parent = self.nodes[parent_id]
if parent.visited_by_root[root]:
raise DiamondFound()
parent.visited_by_root[root] = True
for parent_id in node.parent_ids:
self.process(parent_id, root)
# === Verify correctness of sample data
class SampleTester(unittest.TestCase):
def setUp(self):
self.data = open("sample.correct", "r").read()
def test_sample(self):
output = StringIO()
solver = Solver("sample.in")
input = solver.open_input()
solver.solve(input, output)
self.assertEqual(self.data, output.getvalue())
if __name__ == "__main__":
if "--debug" in sys.argv:
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
use_mp = False
if "--use-mp" in sys.argv:
use_mp = True
input_name = sys.argv[1]
if input_name == "test":
suite = unittest.TestLoader().loadTestsFromTestCase(SampleTester)
unittest.TextTestRunner(verbosity=2).run(suite)
else:
start = time.time()
solver = Solver(input_name, use_mp)
solver.main()
end = time.time()
info("Time spent: %s" % time.strftime("%M minutes, %S seconds", time.gmtime(end-start)))
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
336de6e435da28ab320f65a19b891c0a296eeb6b
|
39398e12e41dd9574488af872c2a06546ddca4ad
|
/factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/notifications/consumers.py
|
2638d7ce68bdebcf6c1321c2bb511b38216a4d8d
|
[
"MIT"
] |
permissive
|
piyushka17/azure-intelligent-edge-patterns
|
612dc2ff9442fe37343844ca642308cf3d892240
|
0d088899afb0022daa2ac434226824dba2c997c1
|
refs/heads/master
| 2022-12-01T03:04:14.881931
| 2020-08-11T14:38:10
| 2020-08-11T14:38:10
| 286,632,773
| 0
| 0
|
MIT
| 2020-08-11T03:07:46
| 2020-08-11T03:07:46
| null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
# -*- coding: utf-8 -*-
"""Notification Consumer
"""
# pylint: disable=unused-import
import asyncio
# pylint: enable=unused-import
import logging
from channels.generic.websocket import AsyncJsonWebsocketConsumer
# from .models import Notification
logger = logging.getLogger(__name__)
class NotificationConsumer(AsyncJsonWebsocketConsumer):
"""NotificationConsumer
"""
async def websocket_connect(self, message):
"""websocket connect
"""
# Auth here
await self.accept()
#self.channel_name = "notification"
await self.channel_layer.group_add("notification", self.channel_name)
logger.info("connect %s", message)
async def websocket_receive(self, message):
"""websocket receive
"""
logger.info("recieve %s", message)
await self.send("Connected")
await self.channel_layer.group_send("link", {
"type": "link.send",
"message": "msg from websocket",
})
async def websocket_disconnect(self, message):
"""websocket close
"""
logger.info("disconnect %s", message)
await self.close()
await self.channel_layer.group_discard("link", self.channel_name)
async def notification_send(self, event):
"""websocket send
"""
logger.info("notification_send!!!!!!!!!!")
await self.send_json(event)
|
[
"peteeelol@gmail.com"
] |
peteeelol@gmail.com
|
54cc4616d2c462da1ff90769d605ca3f52d839c6
|
d6a0cc09c0fd86d95bc0ee8034acf09334a2c377
|
/plant/크롤링2.py
|
847846c5eb4d4617364ebcee452fe8201ec28bfc
|
[] |
no_license
|
Kimuksung/bigdata
|
e2b0e04e817a7113cba8c5d4acdd8cf20b664147
|
e7cce223eb55709d9ebcb631e39360b347a02764
|
refs/heads/master
| 2021-07-13T10:16:54.498961
| 2021-03-08T02:27:28
| 2021-03-08T02:27:28
| 241,530,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,264
|
py
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
# humidity  - https://www.weather.go.kr/weather/observation/currentweather.jsp?tm=2020.06.19.01:00&type=t13&mode=0&reg=100&auto_man=m&stn=129
# dew point - https://www.weather.go.kr/weather/observation/currentweather.jsp?tm=2020.06.19.01:00&type=t12&mode=0&reg=100&auto_man=m&stn=129
import datetime

# Scrape KMA (Korea Meteorological Administration) observation tables in
# 30-day steps between date1 and date2, then expand each scraped daily row
# into 3-hourly timestamped values.
date1 = '2016-05-01'
date2 = '2019-04-18'
start = datetime.datetime.strptime(date1, '%Y-%m-%d')
end = datetime.datetime.strptime(date2, '%Y-%m-%d')
step = datetime.timedelta(days=30)

tmp = {}  # "YYYY.MM.DD" -> list of the day's readings (8 three-hourly cells)
while start <= end:
    print(str(start.date()).replace("-", "."))
    # Fix: the query string previously contained a mojibake "(R)" symbol where
    # an HTML "&reg" entity collapsed during a copy/paste, which corrupted the
    # "reg" request parameter; restored to a literal "&reg=101".
    basic_path = "https://www.weather.go.kr/weather/observation/currentweather.jsp?type=t13&mode=2&stn=129&reg=101&auto_man=m&tm="
    basic_path2 = ".01:00&dtm=0"
    date = str(start.date()).replace("-", ".")
    url = basic_path + date + basic_path2
    html = urlopen(url)
    source = html.read()
    soup = BeautifulSoup(source, "html5lib")
    tr = soup.find_all("tr")
    html.close()
    # Rows 1..31 hold one table row per day; the <a> text is the day label and
    # the <td> cells (after the first) are the readings.
    # NOTE(review): a fixed range(1, 32) assumes the table always has 31 data
    # rows — confirm this holds for short months on the KMA page.
    for i in range(1, 32):
        tmp2 = []
        for j in tr[i].find_all("td")[1:]:
            tmp2.append(j.string)
        tmp[str(start.year) + "." + tr[i].a.string] = tmp2
    start += step

print(tmp.keys())

# Re-key by absolute timestamp: reading j of a day is taken at hour (j+1)*3.
answer = {}
for i in tmp.keys():
    print(i)
    for j in range(0, 8):
        answer[(datetime.datetime.strptime(i, '%Y.%m.%d') + datetime.timedelta(hours=(j + 1) * 3)).strftime('%Y-%m-%d %H')] = tmp[i][j]

'''
import pandas as pd
df = pd.DataFrame(answer.values() , index = answer.keys() , columns = ["dew_point"])
df
df.to_csv("dewpoint.csv")
print(df)
df2 = pd.read_csv("dewpoint.csv")
type(df2)
df2 = df2.set_index('Unnamed: 0')
df2
'''
|
[
"kimuksung2@gmail.com"
] |
kimuksung2@gmail.com
|
0e38d2a2bdbcdc5299f5b0bd9fd4047035b1acff
|
d88868386b529a7adecb1f0caf1db6cdf743951f
|
/triematcher.py
|
2bbfb3bf0e5e2006e2004960a8ad0aa5c738a320
|
[] |
no_license
|
hldai/ks-studio-el
|
4b87476846a8aa987203d882e51562b1847445f9
|
995ae4af67c360744170125da63472e940bed87d
|
refs/heads/master
| 2020-12-24T21:21:25.911325
| 2016-12-01T10:56:38
| 2016-12-01T10:56:38
| 58,045,365
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,897
|
py
|
class TrieNode:
    """Node of a character trie stored in left-child/right-sibling form.

    Children of a node form a singly linked list threaded through
    ``next_child``; ``rid`` is non-None on nodes where a dictionary term ends.
    """

    def __init__(self):
        self.ch = None          # character held by this node
        self.child = None       # head of this node's child list
        self.next_child = None  # next sibling in the parent's child list
        self.rid = None         # record id if a term terminates here

    def add_child(self, ch):
        """Return the child node for *ch*, creating it if it does not exist."""
        node = self.child
        if node is None:
            node = TrieNode()
            node.ch = ch
            self.child = node
            return node
        # Walk the sibling chain until we find ch or run off the end.
        while node.ch != ch and node.next_child is not None:
            node = node.next_child
        if node.ch == ch:
            return node
        tail = TrieNode()
        tail.ch = ch
        node.next_child = tail
        return tail

    def find_child(self, ch):
        """Return the child node for *ch*, or None if absent."""
        node = self.child
        while node is not None:
            if node.ch == ch:
                return node
            node = node.next_child
        return None
class MeshDetect:
    """Dictionary-driven term matcher over a character trie (Python 2 code).

    Loads a two-lines-per-entry dictionary (term name line followed by a
    record-id line) into a trie, then finds the longest boundary-aligned
    dictionary terms occurring in a document.
    """
    # characters that, besides whitespace, terminate a word
    word_sep = [',', '.', '"', '\'', '(', ')', '/', '-', '\n', ';']
    def __init__(self, dict_file, exclude_words_file):
        """Build the trie from dict_file; if exclude_words_file is given,
        skip terms whose lower-cased form appears in it."""
        exclude_words_set = None
        if exclude_words_file:
            exclude_words_set = MeshDetect.load_exclude_words(exclude_words_file)
        self.trie_root = TrieNode()
        print 'Loading mesh dict ...'
        fin = open(dict_file, 'rb')
        fin.readline()  # skip the header line
        cur_name = None
        line_idx = 0
        for line_idx, line in enumerate(fin):
            line = line.strip()
            if cur_name:
                # second line of the pair: the record id for cur_name
                cur_rid = line
                if cur_name.isupper():
                    # all-caps names (acronyms) are indexed verbatim only
                    self.add_term(cur_name, cur_rid)
                    cur_name = None
                    continue
                cur_name_lc = cur_name.lower()
                if not exclude_words_set or cur_name_lc not in exclude_words_set:
                    self.add_term(cur_name, cur_rid)
                    if cur_name_lc != cur_name:
                        # also index the lower-cased variant of the term
                        self.add_term(cur_name_lc, cur_rid)
                cur_name = None
            else:
                # first line of the pair: the term name
                cur_name = line.decode('utf-8')
        fin.close()
        print line_idx, 'lines'
    def add_term(self, term, rid):
        """Insert *term* into the trie, marking its final node with *rid*.

        The first rid seen for a term wins; later duplicates are ignored.
        """
        cur_node = self.trie_root
        for ch in term:
            cur_node = cur_node.add_child(ch)
        if not cur_node.rid:
            cur_node.rid = rid
    def match(self, text, beg_pos):
        """Longest-match lookup starting at beg_pos.

        Returns ([beg_pos, end_pos], rid) for the longest dictionary term
        beginning at beg_pos, or None if no term starts there.
        """
        cur_node = self.trie_root
        pos = beg_pos
        hit_node = None
        hit_pos = -1
        result_span = [beg_pos, -1]
        while pos < len(text) and cur_node:
            cur_node = cur_node.find_child(text[pos])
            if cur_node and cur_node.rid:
                # remember the deepest node so far that terminates a term
                hit_node = cur_node
                hit_pos = pos
            pos += 1
        if hit_node:
            result_span[1] = hit_pos
            return result_span, hit_node.rid
        return None
    def find_all_terms(self, doc_text):
        """Scan doc_text left to right; return (span_list, id_list) of
        non-overlapping longest matches whose end falls on a word boundary."""
        span_list = list()
        id_list = list()
        # results = list()
        pos = 0
        text_len = len(doc_text)
        while pos < text_len:
            # print doc_text[pos:]
            result = self.match(doc_text, pos)
            if result and (result[0][1] == text_len - 1 or MeshDetect.is_word_sep(doc_text[result[0][1] + 1])):
                # results.append(result)
                span_list.append(result[0])
                id_list.append(result[1])
                pos = result[0][1] + 1
            else:
                # no boundary-aligned match here: advance past the current word
                while pos < text_len and not MeshDetect.is_word_sep(doc_text[pos]):
                    pos += 1
                pos += 1
        return span_list, id_list
    @staticmethod
    def is_word_sep(ch):
        """True if *ch* ends a word: whitespace or a word_sep character."""
        if ch.isspace():
            return True
        return ch in MeshDetect.word_sep
    @staticmethod
    def load_exclude_words(file_name):
        """Read the exclusion list (header line skipped, one word per line)."""
        fin = open(file_name, 'rb')
        fin.readline()
        words_set = set()
        for line in fin:
            words_set.add(line.strip())
        fin.close()
        return words_set
|
[
"hldai@outlook.com"
] |
hldai@outlook.com
|
da5596d54ac00268281f5b368ce5bdb61bbf3e85
|
1575d5acc07eb67cb4e3cd523a24bb1d39efcb84
|
/pattenRecognition/mnist/demo-p2/train.py
|
6fa6b5694e1fb2743ed1bdcf92262344f744728e
|
[] |
no_license
|
ChenLiangbo/DeepLearning
|
4bd80ddb2a41b883ef70947a8b1fdb3b19656df0
|
3464c27116dc00bd597d2b9c25313964e1d89797
|
refs/heads/master
| 2020-12-24T12:39:27.666215
| 2017-05-09T13:49:44
| 2017-05-09T13:49:44
| 72,974,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 907
|
py
|
#!usr/bin/env/python
# -*- coding: utf-8 -*-
# Train the custom CNN on the first 55000 MNIST examples, run the prediction
# script in a subprocess, and append this run's configuration to a log file.
# (Python 2 code: uses print statements.)
# NOTE(review): the shebang above looks malformed — conventionally it would be
# "#!/usr/bin/env python"; confirm before relying on direct execution.
import numpy as np
import input_data
from cnnModel import MyCNNmodel
trainNumber = 55000  # number of training examples actually used
mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)
x_train, y_train, x_test, y_test = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
print "x_train.shape = ",x_train.shape
x_train = x_train[0:trainNumber]
y_train = y_train[0:trainNumber]
# reshape flat 784-vectors into (N, 28, 28, 1) image tensors for the CNN
x_train = x_train.reshape(-1, 28, 28, 1)
print "x_train.shape = ",(x_train.shape,y_train.shape)
myModel = MyCNNmodel()
myModel.iterTimes = 200
myModel.batchSize = 178
myModel.train(x_train,y_train)
import os
import json
content = {"iterTimes":myModel.iterTimes,"batchSize":myModel.batchSize,"trainNumber":trainNumber}
command = 'python predict.py'
# run the evaluation script before appending this run's settings to the log
os.system(command)
fp = open('./file/mnist_result.txt','ab')
content = json.dumps(content)
fp.write(content)
fp.write('\r\n')
fp.close()
|
[
"chenlb@polarwin.cn"
] |
chenlb@polarwin.cn
|
c2d224c8012c4eab20df0ed6980600ceaa4e0906
|
e1f941604b7a8bf4b86b700a2a0fd302dbe70434
|
/add_extinction_column_rejected.py
|
8f288639ed8d6c93a2bbc7373188bbcdf20a6e05
|
[] |
no_license
|
frenchd24/gt
|
fcc0bfee6b7f3c68871218f237e31c395889b5de
|
e0840800d8a3f906844b30701b294f102b030243
|
refs/heads/master
| 2021-06-03T18:05:10.797021
| 2020-07-06T04:22:37
| 2020-07-06T04:22:37
| 109,777,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,074
|
py
|
#!/usr/bin/env python
'''
By David French (frenchd@astro.wisc.edu)
$Id: add_extinction_column_rejected.py, v 2.1 09/08/17
does the thing below but on file rejected_results_redo.csv
Based on:
Id: add_extinction_column.py, v 2.0 04/24/17
Add extinction column to return_basic_full.csv based on new IRSA values
Returns: return_basic_full_extinc.csv
** Successfully made /usr/data/moosejaw/frenchd/GT_update2/return_basic_full_extinc.csv
on 04/24/17
**
ADOPTED FROM: add_extinction_column.py, v 1.0 02/06/14 =
"
Adds a column in NewGalaxyTable2.csv for E(B-V) based on values from IRSA.
**Successfully made NewGalaxyTable3.csv on 02/13/15**
"
'''
import sys
import os
import csv
# import string
# import warnings
# import urllib
# import numpy
from pylab import *
# import atpy
import math
import getpass
import itertools
from utilities import *
# import scipy.optimize as optimization
# import pickle
# import numpy as np
# import matplotlib.pyplot as plt
# from matplotlib.ticker import NullFormatter
def schechter(m,phi,mstar,alpha):
    """Evaluate the Schechter luminosity density function at magnitude m.

    phi is the normalization, mstar the characteristic magnitude, and alpha
    the faint-end slope; returns the associated number density.
    """
    # Express L/L* through the magnitude difference, then apply the
    # power-law-times-exponential Schechter form.
    lum_ratio = 10 ** (0.4 * (mstar - m))
    return 0.4 * math.log(10) * phi * lum_ratio ** (alpha + 1) * exp(-lum_ratio)
def absoluteMag(m,d):
    """Convert apparent magnitude *m* at distance *d* (in Mpc) to absolute
    magnitude via the distance modulus M = m - 5*log10(d_pc / 10)."""
    parsecs = float(d) * 10 ** 6  # Mpc -> pc
    return float(m) - 5 * log10(parsecs / 10)
def lstarValue(mstar,m):
    """Return the luminosity ratio L/L* implied by magnitudes m and mstar."""
    # Each magnitude of difference corresponds to a factor 10**0.4 in flux.
    return 10 ** (-0.4 * (m - mstar))
def readlines_into_lists(file, header):
    # take in a file. Reads each line and splits on spaces. returns a list of rows,
    # each of which is list of values
    # header is a list of characters that indicates header lines that should be skipped
    # example: header = ['\','|']
    # (Python 2 code: uses a print statement below.)
    outList = []
    lines = file.readlines()
    for l in lines:
        isHeader = False
        for c in header:
            # if a header character, 'c', is found at the start of the line, 'l'
            if str(l)[0] == c:
                isHeader = True
                print 'was header: ',str(l)
        # keep only non-header lines, whitespace-split into value lists
        if not isHeader:
            splitLine = l.split()
            outList.append(splitLine)
    return outList
def main():
    """Merge IRSA E(B-V) extinction values into the rejected-galaxies table.

    Reads rejected_results_redo.csv alongside the IRSA extinction output
    (rejected_results_redo_extinc.txt), matches rows pairwise by truncated
    RA/Dec strings, and writes rejected_results_redo_extinc.csv with the
    mean S&F E(B-V) inserted as the EBminusV column. Exits unless run as
    user 'frenchd'. (Python 2 code: uses print statements.)
    """
    # open and read the galaxy table
    if getpass.getuser() == 'frenchd':
        inputFilename = '/usr/data/moosejaw/frenchd/GT_update2/rejected_results_redo.csv'
        outputFilename = '/usr/data/moosejaw/frenchd/GT_update2/rejected_results_redo_extinc.csv'
        extincTableDirectory = '/usr/data/moosejaw/frenchd/GT_update2/extincTables/'
    else:
        print 'Could not determine username. Exiting.'
        sys.exit()
    # open the galaxy file
    inputFile = open(inputFilename,'rU')
    reader = csv.DictReader(inputFile)
    # new fieldnames for updated galaxy table
    fieldnames = ('preferredName',\
        'oldName',\
        'redshift',\
        'degreesJ2000RA_Dec',\
        'J2000RA_Dec',\
        'galacticLong_Lat',\
        'rIndependentDistMean_sd_min_max (Mpc)',\
        'morphology',\
        'distanceIndicator',\
        'luminosityClass',\
        'EBminusV',\
        'radialVelocity (km/s)',\
        'vcorr (km/s)',\
        'angDiameters (arcsec)',\
        'linDiameters (kpc)',\
        'distvcorr (Mpc)',\
        'inclination (deg)',\
        'photometry',\
        'alternativeNames')
    writerOutFile = open(outputFilename,'wt')
    writer = csv.DictWriter(writerOutFile, fieldnames=fieldnames)
    headers = dict((n,n) for n in fieldnames)
    writer.writerow(headers)
    # IRSA table header lines begin with '\' or '|'
    header = ['\\','|']
    # open output
    file1 = open('{0}/rejected_results_redo_extinc.txt'.format(extincTableDirectory),'rU')
    lines1 = readlines_into_lists(file1, header)
    allLines = lines1
    count = 0
    # for gline, eline in zip(reader, itertools.chain(lines1,lines2,lines3,lines4,lines5,lines6)):
    for gline, eline in zip(reader, allLines):
        # NOTE(review): eval() on CSV cell contents executes arbitrary text;
        # acceptable only because the table is locally generated — confirm.
        ra,dec = eval(gline['degreesJ2000RA_Dec'])
        # IRSA row: columns 0/1 are RA/Dec, 3/4 the S&F E(B-V) and its mean
        era,edec = eline[0],eline[1]
        EBminusV_SF = eline[3]
        meanEBminusV_SF = eline[4]
        # convert ra,dec to same format as the extinction values
        # (zero-pad the truncated RA string to a fixed width)
        rat = trunc(ra,9)
        lenRat = len(rat)
        if lenRat == 3:
            rat = str(rat)+'000000'
        if lenRat == 4:
            rat = str(rat)+'00000'
        if lenRat ==5:
            rat = str(rat)+'0000'
        if lenRat == 6:
            rat = str(rat)+'000'
        if lenRat == 7:
            rat = str(rat)+'00'
        if lenRat == 8:
            rat = str(rat)+'0'
        else:
            # NOTE(review): this else pairs only with the lenRat == 8 check
            # above (the earlier ifs are independent); here it merely
            # re-stringifies rat, which is harmless — but confirm intent.
            rat = str(rat)
        dect = trunc(dec,9)
        # compare on the first 5 characters of each coordinate string
        dect2 = trunc(str(dect),5)
        rat2 = trunc(str(rat),5)
        erat = trunc(str(era),5)
        edect = trunc(str(edec),5)
        # if count <20000:
        #     lines1
        #
        # if count>=20000 and count<40000:
        #     file2.write(' {0} {1}\n'.format(rat,dect))
        #
        # if count>=40000 and count<60000:
        #     file3.write(' {0} {1}\n'.format(rat,dect))
        #
        # if count>=60000 and count<80000:
        #     file4.write(' {0} {1}\n'.format(rat,dect))
        #
        # if count>=80000 and count <100000:
        #     file5.write(' {0} {1}\n'.format(rat,dect))
        #
        # if count>=100000:
        #     file6.write(' {0} {1}\n'.format(rat,dect))
        count+=1
        if erat == rat2 and edect == dect2:
            # print 'match ',count
            # coordinates agree: emit the galaxy row with the mean E(B-V)
            objectInfoList = (\
            gline['preferredName'],\
            gline['oldName'],\
            gline['redshift'],\
            gline['degreesJ2000RA_Dec'],\
            gline['J2000RA_Dec'],\
            gline['galacticLong_Lat'],\
            gline['rIndependentDistMean_sd_min_max (Mpc)'],\
            gline['morphology'],\
            gline['distanceIndicator'],\
            gline['luminosityClass'],\
            meanEBminusV_SF,\
            gline['radialVelocity (km/s)'],\
            gline['vcorr (km/s)'],\
            gline['angDiameters (arcsec)'],\
            gline['linDiameters (kpc)'],\
            gline['distvcorr (Mpc)'],\
            gline['inclination (deg)'],\
            gline['photometry'],\
            gline['alternativeNames'])
            row = dict((f,o) for f,o in zip(fieldnames,objectInfoList))
            writer.writerow(row)
            # update counter
            # NOTE(review): 130759 is presumably the full-table row count for
            # the progress estimate — confirm for this (rejected) subset.
            percentComplete = round((float(count)/130759)*100,2)
            sys.stdout.write('Percent complete: {0}\r'.format(percentComplete))
            sys.stdout.flush()
        else:
            print 'no match: {0},{1} != {2},{3}'.format(erat, edect, rat2, dect2)
            print 'count = ',count
    file1.close()
    inputFile.close()
    writerOutFile.close()
if __name__=="__main__":
main()
|
[
"frenchd24@gmail.com"
] |
frenchd24@gmail.com
|
125eda5c2ea26724993805d2bdd9694df6fbe0fb
|
ab9eac7d27788b98bd3d43577bf11658fa6c67c5
|
/src/clean_data.py
|
2e7beaeb134717e44de0b902f242bee563130bad
|
[] |
no_license
|
IkeyBenz/Instagram-Network-Graph
|
1b0d5163b945a56ec024af77419bc03c3088bbac
|
82ca93b94cb7b75b341683d4c20b489960c7378d
|
refs/heads/master
| 2023-02-02T01:52:44.235220
| 2020-12-20T21:12:00
| 2020-12-20T21:12:00
| 321,414,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
from os import listdir, path
from util import get_data_dir, get_mutual_followship_path, get_user_connections_path, get_authenticated_username
data_dir = get_data_dir()
authenticated_username = get_authenticated_username()
connections_path = get_user_connections_path()
def get_users_connections():
    """Return the set of usernames the authenticated user is connected to.

    Reads one username per line from *connections_path*.
    """
    # Fix: the original opened the file without closing it; use a context
    # manager so the handle is released deterministically.
    with open(connections_path) as f:
        return set(f.read().splitlines())
def correct_mutual_follwers():
    """Rewrite each account's mutual-followship file so it contains only
    usernames that are actually among the authenticated user's connections."""
    # Hoisted out of the loop: one disk read instead of one per account.
    connections = get_users_connections()
    for account in listdir(data_dir):
        mutuals_path = get_mutual_followship_path(account)
        # Fix: the original used `account is authenticated_username`, which
        # tests object identity and is effectively never true for two
        # separately-created strings; use equality so the skip works.
        if account == authenticated_username or not path.exists(mutuals_path):
            continue
        with open(mutuals_path) as f:
            mutuals = set(f.read().splitlines())
        corrected = mutuals.intersection(connections)
        with open(mutuals_path, 'w') as out:
            out.write("\n".join(corrected))
def check_mutual_correctness():
    """Report accounts whose stored mutual list still contains usernames
    that are not among the authenticated user's connections."""
    # Hoisted out of the loop: one disk read instead of one per account.
    connections = get_users_connections()
    for account in listdir(data_dir):
        mutuals_path = get_mutual_followship_path(account)
        # Fix: `is` compared string identity (never true for distinct str
        # objects), so the authenticated user was not actually skipped.
        if account == authenticated_username or not path.exists(mutuals_path):
            continue
        with open(mutuals_path) as f:
            stored_mutuals = set(f.read().splitlines())
        extras = stored_mutuals.difference(connections)
        if extras:
            print(account, "has extra mutuals:", extras)
# Script entry point: prune the stored mutual lists first, then verify that
# no out-of-network entries remain.
if __name__ == '__main__':
    correct_mutual_follwers()
    check_mutual_correctness()
|
[
"ikey.benz@gmail.com"
] |
ikey.benz@gmail.com
|
0afc429868366eb8eadd730a1566d020e31b6f46
|
dbb32a7d5b96a94533b27a6ccf2474c660a863b7
|
/containers/actor/sources/utils/__init__.py
|
756cad2b8abb42638833a16139c9961fc42fd77d
|
[] |
no_license
|
ankurhcu/FogBus2
|
772e8346c5e01e2aa8a02da9ef91fd696dd587a7
|
2cefabdd1d131fc8e9015ca31d414665e6014a69
|
refs/heads/main
| 2023-08-07T15:33:54.039724
| 2021-09-21T05:02:49
| 2021-09-21T05:02:49
| 410,610,212
| 1
| 0
| null | 2021-09-26T16:57:23
| 2021-09-26T16:57:22
| null |
UTF-8
|
Python
| false
| false
| 1,823
|
py
|
from .component import BasicComponent
from .component import PeriodicTaskRunner
from .config import ConfigActor
from .config import ConfigMaster
from .config import ConfigRemoteLogger
from .config import ConfigTaskExecutor
from .config import ConfigUser
from .connection import BasicMessageHandler
from .connection import MessageReceived
from .connection import MessageReceiver
from .connection import MessageSender
from .connection import MessageToSend
from .container import ContainerManager
from .container import ContainerManager
from .debugLogPrinter import DebugLogPrinter
from .resourceDiscovery import DiscoveredActors
from .resourceDiscovery import DiscoveredMasters
from .resourceDiscovery import DiscoveredRemoteLoggers
from .resourceDiscovery import ResourcesDiscovery
from .tools import camelToSnake
from .tools import decrypt
from .tools import encrypt
from .tools import filterIllegalCharacter
from .tools import newDebugLogger
from .tools import snakeToCamel
from .tools import terminate
from .types import ActorResources
from .types import Address
from .types import AutoDictionary
from .types import CannotBindAddr
from .types import Component
from .types import ComponentIdentity
from .types import ComponentRole
from .types import CPU
from .types import LoopSourceDestination
from .types import Memory
from .types import Message
from .types import MessageDoesNotContainSourceInfo
from .types import MessageDoesNotContainType
from .types import MessageSubSubType
from .types import MessageSubType
from .types import MessageType
from .types import PairsMedian
from .types import PeriodicTask
from .types import PeriodicTasks
from .types import ProcessingTime
from .types import Resources
from .types import SequenceMedian
from .types import SerializableDictionary
from .types import SynchronizedAttribute
|
[
"plocircle@live.com"
] |
plocircle@live.com
|
1a29ed7174a5e46688668e138299e976917f4743
|
34a9a91e6c3fbf427826d2cb2ad3d7c7a00ad0c0
|
/collision_detection_program/SBI/beans/__init__.py
|
87e717e7f04422002fd6fbaeabcf242107d76132
|
[
"MIT"
] |
permissive
|
structuralbioinformatics/SPServer
|
015d7ede4b2c439c648b663b9af56a0ca98e277b
|
946b7afdac16aef391ddd162daabfcc968eb9110
|
refs/heads/master
| 2021-04-23T14:02:10.935764
| 2020-07-24T09:00:19
| 2020-07-24T09:00:19
| 249,930,917
| 3
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
__all__ = [
"Singleton",
"Butler",
"File",
"FileError",
"StorableObject",
"Executable",
"Path",
"IndexedNum",
"JSONer"
]
from .singleton import Singleton
from .butler import Butler
from .file import (File, FileError)
from .StorableObject import StorableObject
from .Executable import Executable
from .Path import Path
from .IndexedNum import IndexedNum
from .JSONer import JSONer
|
[
"quim.aguirre@hotmail.com"
] |
quim.aguirre@hotmail.com
|
090b01787d67ad38963fba38a99e8b1e8a557d7c
|
15581a76b36eab6062e71d4e5641cdfaf768b697
|
/LeetCode_30days_challenge/2021/June/Pascal's Triangle.py
|
d5f72948364fa07f9c70d38dfef7769ff10d9ebb
|
[] |
no_license
|
MarianDanaila/Competitive-Programming
|
dd61298cc02ca3556ebc3394e8d635b57f58b4d2
|
3c5a662e931a5aa1934fba74b249bce65a5d75e2
|
refs/heads/master
| 2023-05-25T20:03:18.468713
| 2023-05-16T21:45:08
| 2023-05-16T21:45:08
| 254,296,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
from typing import List
class Solution:
    def generate(self, numRows: int) -> List[List[int]]:
        """Return the first *numRows* rows of Pascal's triangle.

        Row i (0-based) has i+1 entries; each interior entry is the sum of
        the two entries above it.
        """
        if numRows == 1:
            return [[1]]
        triangle = [[1], [1, 1]]
        for _ in range(2, numRows):
            prev = triangle[-1]
            # interior cells come from adjacent pairs of the previous row
            nxt = [1] + [prev[k - 1] + prev[k] for k in range(1, len(prev))] + [1]
            triangle.append(nxt)
        return triangle
|
[
"mariandanaila01@gmail.com"
] |
mariandanaila01@gmail.com
|
d9450370110654bbba361d0adb0ff18def6f3bf6
|
52f0984561895b48f3e6e40658a6e52c97705715
|
/python-folder/year-grade.py
|
5b6647ed5326a8d753ec1092b8476883e8bf511b
|
[] |
no_license
|
jsanon01/python
|
8da2755e7724850875518455c1760bb9f04dd873
|
edd52214e3578f18b71b0ad944c287411fb23dfb
|
refs/heads/master
| 2022-05-20T00:29:10.550169
| 2022-05-10T01:08:48
| 2022-05-10T01:08:48
| 165,682,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
# Interactive grade-name lookup: prompt for a school year (0-13) until the
# user enters 'q' to quit.

# Dispatch table replaces the original 14-branch if/elif ladder.
GRADE_NAMES = {
    0: 'Pre-School',
    1: 'Kindergarten',
    2: '1st Grade',
    3: '2nd Grade',
    4: '3rd Grade',
    5: '4th Grade',
    6: '5th Grade',
    7: '6th Grade',
    8: '7th Grade',
    9: '8th Grade',
    10: '9th Grade or Freshman',
    11: '10th Grade or Sophomore',
    12: '11th Grade or Junior',
    13: '12th Grade or Senior',
}

year = " "
while year != 'q':
    year = input('Enter a grade from 0 - 13 or q to quit: ')
    if year.isdigit():
        name = GRADE_NAMES.get(int(year))
        if name is not None:
            print('You are in ' + name)
        else:
            # Fix: digits outside 0-13 previously fell through with no output.
            print('You entered an invalid entry')
    elif year != 'q':
        # Fix: entering 'q' previously printed the invalid-entry message
        # on the way out; now quitting is silent.
        print('You entered an invalid entry')
|
[
"jeansanon180@gmail.com"
] |
jeansanon180@gmail.com
|
005fc965039152d62022c24120d51fc81fda661b
|
4bde2d1e2282014f71b8cfec4440cb062db172cb
|
/euler_021.py
|
1bbf96ecb93e58589edeffbbaf5d3fcf9c7699a2
|
[] |
no_license
|
MrDeshaies/NOT-projecteuler.net
|
6b107a515b1322fcd5f7d88e187ca2ea97edddcf
|
c6f0bd38d074b427345b4f5b41733bda38fbcdb4
|
refs/heads/master
| 2022-11-17T18:39:43.321814
| 2022-11-13T11:35:10
| 2022-11-13T11:35:10
| 201,793,983
| 0
| 0
| null | 2019-08-18T19:50:45
| 2019-08-11T17:20:18
|
Python
|
UTF-8
|
Python
| false
| false
| 923
|
py
|
from euler import *
import math
# Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).
# If d(a) = b and d(b) = a, where a != b, then a and b are an amicable pair and each of a and b are called
# amicable numbers.
#
# For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110;
# therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
#
# Evaluate the sum of all the amicable numbers under 10000.
def sumDivisors(x):
    # d(x): sum of proper divisors; factLessItself comes from the euler helper module
    return sum(factLessItself(x))
amicableSet = set()
# skip 1, since d(1) = 1, so does not satisfy a != b
for x in range(2,10000):
    if x in amicableSet:
        # already recorded as the partner of an earlier x
        continue
    d = sumDivisors(x)
    y = sumDivisors(d)
    # amicable pair: d(x) != x and d(d(x)) == x
    if d != x and y == x:
        print( str(x) + " and " + str(d) + " are best buds.")
        amicableSet.update([x,d])
print(amicableSet)
print(sum(amicableSet))
|
[
"benoit.deshaies@gmail.com"
] |
benoit.deshaies@gmail.com
|
206043e6d4c95bbf4afa57ff9b6d0fa29d8d4d3d
|
bc441bb06b8948288f110af63feda4e798f30225
|
/resource_monitor_sdk/model/resource_manage/filter_strategy_instance_data_pb2.py
|
754fef25b2c606e53ea2f232404bda2034096d3d
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 4,615
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: filter_strategy_instance_data.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from resource_monitor_sdk.model.resource_manage import filter_condition_group_pb2 as resource__monitor__sdk_dot_model_dot_resource__manage_dot_filter__condition__group__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='filter_strategy_instance_data.proto',
package='resource_manage',
syntax='proto3',
serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manage'),
serialized_pb=_b('\n#filter_strategy_instance_data.proto\x12\x0fresource_manage\x1aGresource_monitor_sdk/model/resource_manage/filter_condition_group.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xa2\x01\n\x1a\x46ilterStrategyInstanceData\x12\n\n\x02id\x18\x01 \x01(\t\x12\x1a\n\x12strategyInstanceId\x18\x02 \x01(\t\x12%\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x35\n\x06\x66ilter\x18\x04 \x03(\x0b\x32%.resource_manage.FilterConditionGroupBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manageb\x06proto3')
,
dependencies=[resource__monitor__sdk_dot_model_dot_resource__manage_dot_filter__condition__group__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_FILTERSTRATEGYINSTANCEDATA = _descriptor.Descriptor(
name='FilterStrategyInstanceData',
full_name='resource_manage.FilterStrategyInstanceData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='resource_manage.FilterStrategyInstanceData.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='strategyInstanceId', full_name='resource_manage.FilterStrategyInstanceData.strategyInstanceId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='resource_manage.FilterStrategyInstanceData.data', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter', full_name='resource_manage.FilterStrategyInstanceData.filter', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=160,
serialized_end=322,
)
_FILTERSTRATEGYINSTANCEDATA.fields_by_name['data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_FILTERSTRATEGYINSTANCEDATA.fields_by_name['filter'].message_type = resource__monitor__sdk_dot_model_dot_resource__manage_dot_filter__condition__group__pb2._FILTERCONDITIONGROUP
DESCRIPTOR.message_types_by_name['FilterStrategyInstanceData'] = _FILTERSTRATEGYINSTANCEDATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FilterStrategyInstanceData = _reflection.GeneratedProtocolMessageType('FilterStrategyInstanceData', (_message.Message,), {
'DESCRIPTOR' : _FILTERSTRATEGYINSTANCEDATA,
'__module__' : 'filter_strategy_instance_data_pb2'
# @@protoc_insertion_point(class_scope:resource_manage.FilterStrategyInstanceData)
})
_sym_db.RegisterMessage(FilterStrategyInstanceData)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
52cc436d976d9ead1d13b314196b6be9d9d8fc4c
|
c29eba01ce299ebb27b886a83e19e59add7e2f6b
|
/tests/pytest_extension/fixtures/test_issue_github_54.py
|
34ceadfa56fc64602d0e04f8a54879098f489c44
|
[
"BSD-3-Clause"
] |
permissive
|
smarie/python-pytest-cases
|
e87516e73d5067d5c307c7fdb37cc5f1f97c417e
|
ab3b7190d728b18512141b9f5f3a1c3dfc7cedf2
|
refs/heads/main
| 2023-07-08T11:41:57.278697
| 2023-02-23T13:11:25
| 2023-02-23T13:11:25
| 138,296,136
| 286
| 40
|
BSD-3-Clause
| 2023-07-03T14:57:02
| 2018-06-22T11:42:19
|
Python
|
UTF-8
|
Python
| false
| false
| 600
|
py
|
# Authors: Sylvain MARIE <sylvain.marie@se.com>
# + All contributors to <https://github.com/smarie/python-pytest-cases>
#
# License: 3-clause BSD, <https://github.com/smarie/python-pytest-cases/blob/master/LICENSE>
import pytest
from pytest_cases.fixture_core1_unions import InvalidParamsList
from pytest_cases import parametrize, fixture_ref
@pytest.fixture
def test():
    # Fixture deliberately named "test": test_invalid_argvalues below passes
    # it to fixture_ref() to reproduce the invalid-argvalues scenario.
    return ['a', 'b', 'c']
def test_invalid_argvalues():
    # parametrize must raise InvalidParamsList when the argvalues argument is
    # a bare fixture_ref rather than a list of parameter values
    # (regression test; presumably for GitHub issue #54, per the file name).
    with pytest.raises(InvalidParamsList):
        @parametrize('main_msg', fixture_ref(test))
        def test_prints(main_msg):
            print(main_msg)
|
[
"sylvain.marie@se.com"
] |
sylvain.marie@se.com
|
a3e9a18765fad1e19b88ac4df2ef46b6ddef4d9b
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/detection/SOLOv1/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x.py
|
eba5902c5acd2a9c3bbb92f63de00ac450eb4f6b
|
[
"LicenseRef-scancode-proprietary-license",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,718
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# model settings
model = dict(
    type='FasterRCNN',
    pretrained='open-mmlab://resnet50_caffe',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        # caffe-style backbone: BN affine params frozen, BN kept in eval mode
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    # Guided-Anchoring RPN head: learns anchor location + shape instead of
    # relying only on a dense fixed anchor grid
    rpn_head=dict(
        type='GARPNHead',
        in_channels=256,
        feat_channels=256,
        octave_base_scale=8,
        scales_per_octave=3,
        octave_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],
        anchor_base_sizes=None,
        anchoring_means=[.0, .0, .0, .0],
        anchoring_stds=[0.07, 0.07, 0.14, 0.14],
        target_means=(.0, .0, .0, .0),
        target_stds=[0.07, 0.07, 0.11, 0.11],
        # discard predicted anchor locations below this probability
        loc_filter_thr=0.01,
        loss_loc=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='SharedFCBBoxHead',
        num_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        # 80 COCO classes + background
        num_classes=81,
        target_means=[0., 0., 0., 0.],
        target_stds=[0.05, 0.05, 0.1, 0.1],
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
    rpn=dict(
        # ga_* entries configure the guided-anchoring (anchor prediction)
        # branch; the plain assigner/sampler configure the regular RPN targets
        ga_assigner=dict(
            type='ApproxMaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        ga_sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=-1,
        pos_weight=-1,
        center_ratio=0.2,
        ignore_ratio=0.5,
        debug=False),
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=300,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.6,
            neg_iou_thr=0.6,
            min_pos_iou=0.6,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=300,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=1e-3, nms=dict(type='nms', iou_thr=0.5), max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# caffe-style preprocessing: BGR order, mean subtraction only (std of 1)
img_norm_cfg = dict(
    mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ga_faster_rcnn_r50_caffe_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
861755d3c8cbf83029189ac9a98f4896f67dafad
|
b0110e27e3162e2092259dd299481de1dafb4ea8
|
/parallel/p7red.test.key.py
|
44867a6a942964cfe29d902c74675fd2ad65708f
|
[
"MIT"
] |
permissive
|
mobarski/sandbox
|
f9be203bf7015f6df70badd605a40172b63a90f8
|
f9054fb3252488208e503a87efba5df74fc70538
|
refs/heads/master
| 2023-05-29T14:51:00.125028
| 2023-05-14T21:02:38
| 2023-05-14T21:02:38
| 86,854,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
from __future__ import print_function
import sys
lines = sys.stdin.readlines()
rows = [str.partition(x,' ') for x in lines if x.strip()]
key_sum = 0
key = rows[0][0]
for k,_,x in rows:
if k!=key:
print(key,key_sum)
key_sum = 0
key = k
key_sum += int(x)
print(key,key_sum)
|
[
"mobarski@pl.grupa.iti"
] |
mobarski@pl.grupa.iti
|
06bd77a00c108cd3162f43c0b8c735e395c7c330
|
a12a4be7e8c792b4c1f2765d3e7a43056e9196b0
|
/399-evaluate-division/399-evaluate-division.py
|
317cbb08733f23f1593c0c5e5836a04b160ea65c
|
[] |
no_license
|
fdas3213/Leetcode
|
d4b7cfab70446b3f6a961252a55b36185bc87712
|
1335d5759c41f26eb45c8373f33ee97878c4a638
|
refs/heads/master
| 2022-05-28T16:24:15.856679
| 2022-05-19T21:56:35
| 2022-05-19T21:56:35
| 94,024,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,198
|
py
|
class Solution:
def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
#step 1. initialize a graph
graph = defaultdict(defaultdict)
for pair, value in zip(equations, values):
v1, v2 = pair[0], pair[1]
graph[v1][v2] = value
graph[v2][v1] = 1/value
def evaluate(cur_node, target_node, product, visited):
visited.add(cur_node)
val = -1
neighbors = graph[cur_node]
if target_node in neighbors:
return product * neighbors[target_node]
else:
for neighbor, value in neighbors.items():
if neighbor not in visited:
val = evaluate(neighbor, target_node, product*value, visited)
if val != -1:
break
visited.remove(cur_node)
return val
def evaluate_bfs(cur_node, target_node):
visited = set()
queue = deque([(cur_node, 1)])
while queue:
cur_node, cur_val = queue.popleft()
visited.add(cur_node)
if target_node == cur_node:
return cur_val
neighbors = graph[cur_node]
for neighbor, val in neighbors.items():
if neighbor not in visited:
queue.append((neighbor, cur_val*val))
return -1
#step 2. evaluate the query
res = []
for n1,n2 in queries:
#if either of the node does not exist in the graph
if n1 not in graph or n2 not in graph:
res.append(-1)
continue
#if n1 and n2 is the same node
if n1 == n2:
res.append(1)
continue
#dfs
visited = set()
res.append(evaluate(n1, n2, 1, visited))
#bfs: res.append(evaluate_bfs(n1, n2))
return res
|
[
"szx9404@gmail.com"
] |
szx9404@gmail.com
|
4749db7324c75666dd8e25a25566092e3b09963e
|
dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5
|
/eggs/plone.app.kss-1.6.2-py2.7.egg/plone/app/kss/demo/bbb_oldkssdemo.py
|
112c7267a425cbc4ada948bfe06706153dd4619d
|
[] |
no_license
|
nacho22martin/tesis
|
ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5
|
e137eb6225cc5e724bee74a892567796166134ac
|
refs/heads/master
| 2020-12-24T13:20:58.334839
| 2013-11-09T12:42:41
| 2013-11-09T12:42:41
| 14,261,570
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
# XXX future BBB
# Provide a way for the old kss.demo version, not to fail
# with import error - even if it cannot execute these tests.
# This enables that the package can contain application level
# test setup, but it still does not fail with the old version.
try:
import kss.demo
from kss.demo import (
KSSSeleniumTestDirectory,
KSSDemo,
KSSSeleniumTestCase,
KSSSeleniumTestSuite,
KSSSeleniumTestLayerBase,
KSSSeleniumSandboxCreationTestCase,
)
except ImportError:
# nonexistent constructs. They will not work, but
# they will run without errors.
class Fake(object):
# test_directory is needed because the caller code
# will treat us as a TestDirectory. So, we give a
# directory that does not contain any *.html files.
test_directory = '/'
def __init__(self, *arg, **kw):
pass
#
import kss.demo.resource
# Provide the classes directly on kss.demo namespace
kss.demo.KSSSeleniumTestDirectory = kss.demo.resource.KSSSeleniumTestDirectory
kss.demo.KSSDemo = kss.demo.resource.KSSDemo
kss.demo.KSSSeleniumTestCase = Fake
kss.demo.KSSSeleniumTestSuite = Fake
kss.demo.KSSSeleniumTestLayerBase = Fake
kss.demo.KSSSeleniumSandboxCreationTestCase = Fake
|
[
"ignacio@plone.(none)"
] |
ignacio@plone.(none)
|
10688edc40347097c51ecda235be420e4c48ecaa
|
2bcc421ee345b00cf805c543b37d18b5d019dc04
|
/adafruit-circuitpython-bundle-6.x-mpy-20201126/examples/led_animation_group.py
|
011a019ee803683432760c85a49cbbacb6bfd77c
|
[] |
no_license
|
saewoonam/sc-current-source-titano
|
5a1ad46889c1b09c168424901fd71cb4eab5c61b
|
1c136aa8b61268d9ac0b5a682b30ece70ab87663
|
refs/heads/main
| 2023-03-02T22:12:26.685537
| 2021-02-09T03:28:01
| 2021-02-09T03:28:01
| 317,299,900
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,947
|
py
|
"""
This example shows three different ways to use AnimationGroup: syncing two animations, displaying
two animations at different speeds, and displaying two animations sequentially, across two separate
pixel objects such as the built-in NeoPixels on a Circuit Playground Bluefruit and a NeoPixel strip.
This example is written for Circuit Playground Bluefruit and a 30-pixel NeoPixel strip connected to
pad A1. It does not work on Circuit Playground Express.
"""
import board
import neopixel
from adafruit_circuitplayground import cp
from adafruit_led_animation.animation.blink import Blink
from adafruit_led_animation.animation.comet import Comet
from adafruit_led_animation.animation.chase import Chase
from adafruit_led_animation.group import AnimationGroup
from adafruit_led_animation.sequence import AnimationSequence
import adafruit_led_animation.color as color
strip_pixels = neopixel.NeoPixel(board.A1, 30, brightness=0.5, auto_write=False)
cp.pixels.brightness = 0.5
animations = AnimationSequence(
# Synchronized to 0.5 seconds. Ignores the second animation setting of 3 seconds.
AnimationGroup(
Blink(cp.pixels, 0.5, color.CYAN),
Blink(strip_pixels, 3.0, color.AMBER),
sync=True,
),
# Different speeds
AnimationGroup(
Comet(cp.pixels, 0.1, color.MAGENTA, tail_length=5),
Comet(strip_pixels, 0.01, color.MAGENTA, tail_length=15),
),
# Different animations
AnimationGroup(
Blink(cp.pixels, 0.5, color.JADE),
Comet(strip_pixels, 0.05, color.TEAL, tail_length=15),
),
# Sequential animations on the built-in NeoPixels then the NeoPixel strip
Chase(cp.pixels, 0.05, size=2, spacing=3, color=color.PURPLE),
Chase(strip_pixels, 0.05, size=2, spacing=3, color=color.PURPLE),
advance_interval=3.0,
auto_clear=True,
auto_reset=True,
)
while True:
animations.animate()
|
[
"nams@nist.gov"
] |
nams@nist.gov
|
e6aa1fc31893a65606e16abf84d605a55a52173a
|
e5a20362b2f9b17055cb95d56dc8dea2059205fb
|
/arrays_manipulations_algorithms/is_str_equal.py
|
4c1d539801e17f907e0371055984660ed94ffd56
|
[] |
no_license
|
uchenna-j-edeh/dailly_problems
|
0c97d1ab3c91756abf625a04e3bb6e0cd6e3405c
|
7bd47232704297851f8acdd9331f90da96c732af
|
refs/heads/master
| 2023-08-17T12:27:00.640834
| 2023-08-07T17:03:00
| 2023-08-07T17:03:00
| 158,981,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
# write a code to check if two str are equal
def is_equal(s1, s2):
for i in range(len(s1)):
# if len(s2) - 1 >= i:
# return False
if len(s2) - 1 >= i or (s1[i].lower() != s2[i].lower()):
return False
for i in range(len(s2)):
# if len(s1) - 1 >= i:
# return False
if len(s1) - 1 >= i or (s1[i].lower() != s2[i].lower()): # i = 4, len(s1) = 4
return False
return True
s1 = "abcd" # 4
s2 = "ABCDj" # 5
print(is_equal(s1, s2))
|
[
"uedeh@bethel.jw.org"
] |
uedeh@bethel.jw.org
|
edd919cfe5efef37c9386e8f94227f5bb2b80185
|
09ba5ae2edc51f3fd812b9205188b1b01e6bea77
|
/test/src/CPMel/core/metaclass.py
|
61cec13411e956af6f67e786d3014ce281188ff7
|
[] |
no_license
|
cpcgskill/Maya_tools
|
c6a43ad20eab3b97e82c9dfe40a1745b6098e5c4
|
93f9e66e5dc3bb51f33df0615415a56a60613ff1
|
refs/heads/main
| 2023-02-26T16:20:52.959050
| 2021-01-28T06:12:18
| 2021-01-28T06:12:18
| 325,512,423
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
#!/usr/bin/python
# -*-coding:utf-8 -*-
u"""
:创建时间: 2020/5/18 23:57
:作者: 苍之幻灵
:我的主页: https://cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
"""
import functools
def newClass(name, bases, attrs):
u"""
构建元类使用此元类的类在创建时自动创建对象
:param name:
:param bases:
:param attrs:
:return:
"""
cls = type(name, bases, attrs)
return cls()
def createClass(name, bases, attrs):
u"""
创建器元类
以此为元类的类在创建时将不会自动调用__init__
:param name:
:param bases:
:param attrs:
:return:
"""
cls = type(name, bases, attrs)
return functools.partial(cls.__new__, cls)
|
[
"www.cpcgskill.com"
] |
www.cpcgskill.com
|
053b8628f236c89b6e4071334424c1a57a3c1d50
|
5cbde24d02eea9e762994af976aff8b4fdc731b3
|
/actus/wsgi.py
|
657d6f36c1718379520c54b937aa9fb42599d2c5
|
[] |
no_license
|
paulo-romano/actus
|
f94e874ef3351181c79539ba69df9f7bbdb9e90f
|
d424afa6672f6f714f094b2080d0255bad257268
|
refs/heads/master
| 2021-01-17T15:06:40.486493
| 2016-12-17T00:47:03
| 2016-12-17T00:59:53
| 70,018,546
| 2
| 1
| null | 2016-11-04T00:05:47
| 2016-10-05T00:41:06
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 424
|
py
|
"""
WSGI config for actus project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "actus.settings")
application = Cling(get_wsgi_application())
|
[
"pauloromanocarvalho@gmail.com"
] |
pauloromanocarvalho@gmail.com
|
a8e18dcbe6113a775bc2a7239cc76ff8420db740
|
3fb0ce33f00b96ae3808a32da44de3e887434afb
|
/.提出一覧/AtCoder/ABC156/b/main.py
|
54120439e9bf75e86574bad0f396250ddd7c9bf0
|
[] |
no_license
|
Yukikazari/kyoupuro
|
ca3d74d8db024b1988cd0ff00bf069ab739783d7
|
343de455c4344dbcfa4524b492f7f6205c9db26f
|
refs/heads/master
| 2023-02-21T01:53:52.403729
| 2021-01-27T03:55:01
| 2021-01-27T03:55:01
| 282,222,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
#!/usr/bin/env python3
#import
#import math
#import numpy as np
#= int(input())
#= input()
N, K = map(int, input().split())
for i in range(1, 10 ** 6):
if K ** i > N:
print(i)
exit()
|
[
"haya_nanakusa793@yahoo.co.jp"
] |
haya_nanakusa793@yahoo.co.jp
|
263d88bc1127e17bd9788a19259e6a996b95f48f
|
8ce2b8314fd2e11f3118f7b57f15d1aeb661eec9
|
/backend/bagel_buoy_1801/settings.py
|
e5641f87d26096e7d8b6da1fdacf8134eb1580ba
|
[] |
no_license
|
crowdbotics-apps/bagel-buoy-1801
|
2b4f17b3ea8f56fc574f01736900a9d15a216ca8
|
d053cf93ff55a0dab5d6af49a6351fe740022ac0
|
refs/heads/master
| 2022-12-09T00:33:29.351845
| 2019-03-30T22:14:51
| 2019-03-30T22:14:51
| 178,616,820
| 0
| 0
| null | 2022-12-03T04:13:29
| 2019-03-30T22:14:47
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,758
|
py
|
"""
Django settings for bagel_buoy_1801 project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
env = environ.Env()
environ.Env.read_env(os.path.join(BASE_DIR, '.env'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=True)
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'bagel_buoy_1801.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bagel_buoy_1801.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bagel_buoy_1801',
'USER': 'bagel_buoy_1801',
'PASSWORD': 'bagel_buoy_1801',
'HOST': 'localhost',
'PORT': '5432',
}
}
if env.str('DATABASE_URL', default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = env.str('SENDGRID_USERNAME', '')
EMAIL_HOST_PASSWORD = env.str('SENDGRID_PASSWORD', '')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Import local settings
try:
from .local_settings import *
INSTALLED_APPS += DEBUG_APPS
except:
pass
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
d7600096286394a49b83fc56e6f04ee102c8d3b4
|
53e58c213232e02250e64f48b97403ca86cd02f9
|
/16/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisM3000_R_0-9.py
|
fa92ea4e8a878d40f96c26dc962f729463d22f78
|
[] |
no_license
|
xdlyu/fullRunII_ntuple_102X
|
32e79c3bbc704cfaa00c67ab5124d40627fdacaf
|
d420b83eb9626a8ff1c79af5d34779cb805d57d8
|
refs/heads/master
| 2020-12-23T15:39:35.938678
| 2020-05-01T14:41:38
| 2020-05-01T14:41:38
| 237,192,426
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
from WMCore.Configuration import Configuration
name = 'WWW/sig'
steam_dir = 'xulyu'
config = Configuration()
config.section_("General")
config.General.requestName = 'M3000_R0-9_off'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis_sig.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WkkToWRadionToWWW_M3000-R0-9-TuneCUETP8M1_13TeV-madgraph-pythia/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M3000_R0-9_off'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
|
[
"XXX@cern.ch"
] |
XXX@cern.ch
|
624480f7f2ed0cbdd5c554530d35447d513dcd1b
|
02778455d6c88a4e83bbad836f4598d49ebe81e5
|
/recipes/shared_logging/server.py
|
70d9aafd35f8e98eb1fdfc2509c9dea385db9c5a
|
[
"MIT"
] |
permissive
|
stjordanis/easyrpc
|
d703ad81e7c2a5cb83dab2e5a424baeea5d997c6
|
1c0d6f8c33aaf70ccf62d75777f5e4ca8c55fedc
|
refs/heads/main
| 2023-08-13T05:52:18.459507
| 2021-10-13T20:15:44
| 2021-10-13T20:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# central logging server
import logging
from fastapi import FastAPI
from easyrpc.server import EasyRpcServer
logging.basicConfig()
server = FastAPI()
@server.on_event('startup')
async def setup():
logger = logging.getLogger()
rpc_server = EasyRpcServer(server, '/ws/server', server_secret='abcd1234', debug=True)
rpc_server.register_logger(logger, namespace='logger')
|
[
"joshjamison1@gmail.com"
] |
joshjamison1@gmail.com
|
ce0994b51ccee45b1b6cb2f4bcb1f11296c7c002
|
538833a15b119ca835b82886ca047dc25e71f134
|
/app/bin/file/text_remove_duplicate.py
|
76f01c83d8fd0252f6345e396e259a54a5368c1d
|
[] |
no_license
|
buxizhizhoum/tool_scripts
|
901ffb3749aa9521912636039bc897f969759d67
|
d13b9217b4cde6b626451e9638d737911a0911c5
|
refs/heads/master
| 2021-01-01T15:39:01.396282
| 2018-12-11T06:53:29
| 2018-12-11T06:53:29
| 97,667,877
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
def text_remove_duplicate(original_file, processed_file):
file_buffer = []
with open(original_file, "r") as f:
for line in f.readlines():
if line not in file_buffer:
file_buffer.append(line)
with open(processed_file, "w") as f:
f.writelines(file_buffer)
text_remove_duplicate("a.txt", "b.txt")
|
[
"mapeaks@126.com"
] |
mapeaks@126.com
|
5cd33b20e5bc4c1c4b6e25e9df92b6fdc8d17e1a
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/python/generated/swaggeraemosgi/model/com_adobe_granite_system_monitoring_impl_system_stats_m_bean_impl_info.py
|
3e05ccd3726996c38d3fdaef1f4e610603a6ae96
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
Python
| false
| false
| 7,630
|
py
|
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0-pre.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from swaggeraemosgi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from swaggeraemosgi.model.com_adobe_granite_system_monitoring_impl_system_stats_m_bean_impl_properties import ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplProperties
globals()['ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplProperties'] = ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplProperties
class ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'pid': (str,), # noqa: E501
'title': (str,), # noqa: E501
'description': (str,), # noqa: E501
'properties': (ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplProperties,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'pid': 'pid', # noqa: E501
'title': 'title', # noqa: E501
'description': 'description', # noqa: E501
'properties': 'properties', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
pid (str): [optional] # noqa: E501
title (str): [optional] # noqa: E501
description (str): [optional] # noqa: E501
properties (ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplProperties): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
[
"cliffano@gmail.com"
] |
cliffano@gmail.com
|
e7eda5397bfd521186cf038a7a0de9700c42024a
|
871d2a367e45164f21ecdbefe52bf442b563b33c
|
/tests/tests/correctness/EPLAnalytics/Streaming_Calculations/FFT/fft_cor_003/run.py
|
9d18dc0589837f3b625b0e287ef5aa58bf669523
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
SoftwareAG/apama-industry-analytics-kit
|
c0f6c30badf31411a29bc6daa4a7125b76f4e737
|
a3f6039915501d41251b6f7ec41b0cb8111baf7b
|
refs/heads/master
| 2022-02-19T20:47:27.180233
| 2022-02-02T12:58:23
| 2022-02-02T12:58:23
| 185,572,282
| 3
| 2
|
Apache-2.0
| 2022-02-02T12:58:24
| 2019-05-08T09:14:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,472
|
py
|
# $Copyright (c) 2015 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or Terracotta Inc., San Francisco, CA, USA, and/or Software AG (Canada) Inc., Cambridge, Ontario, Canada, and/or, Software AG (UK) Ltd., Derby, United Kingdom, and/or Software A.G. (Israel) Ltd., Or-Yehuda, Israel and/or their licensors.$
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
from industry.framework.AnalyticsBaseTest import AnalyticsBaseTest
from pysys.constants import *
class PySysTest(AnalyticsBaseTest):
def execute(self):
# Start the correlator
correlator = self.startTest()
self.injectAnalytic(correlator)
self.injectFFTAnalysis(correlator)
self.ready(correlator)
correlator.injectMonitorscript(['test.mon'], self.input)
self.waitForSignal('correlator.out', expr='TEST COMPLETE', condition='==1', timeout=5)
def validate(self):
# Basic sanity checks
self.checkSanity()
# Ensure the test output was correct
exprList=[]
exprList.append('FAILED TO CREATE ANALYTIC: 1')
exprList.append('TEST PASSED: 2')
exprList.append('FAILED TO CREATE ANALYTIC: 3')
exprList.append('FAILED TO CREATE ANALYTIC: 4')
exprList.append('FAILED TO CREATE ANALYTIC: 5')
exprList.append('FAILED TO CREATE ANALYTIC: 6')
exprList.append('FAILED TO CREATE ANALYTIC: 7')
exprList.append('FAILED TO CREATE ANALYTIC: 8')
exprList.append('FAILED TO CREATE ANALYTIC: 9')
exprList.append('FAILED TO CREATE ANALYTIC: 10')
exprList.append('FAILED TO CREATE ANALYTIC: 11')
exprList.append('FAILED TO CREATE ANALYTIC: 12')
exprList.append('FAILED TO CREATE ANALYTIC: 13')
exprList.append('FAILED TO CREATE ANALYTIC: 14')
exprList.append('FAILED TO CREATE ANALYTIC: 15')
exprList.append('FAILED TO CREATE ANALYTIC: 16')
exprList.append('FAILED TO CREATE ANALYTIC: 17')
exprList.append('FAILED TO CREATE ANALYTIC: 18')
exprList.append('FAILED TO CREATE ANALYTIC: 19')
exprList.append('FAILED TO CREATE ANALYTIC: 20')
exprList.append('TEST PASSED: 21')
self.assertOrderedGrep("correlator.out", exprList=exprList)
# Make sure that the we got the right number of actions/listeners called
self.assertLineCount('correlator.out', expr='TEST PASSED', condition='==2')
self.assertLineCount('correlator.out', expr='FAILED TO CREATE ANALYTIC:', condition='==19')
|
[
"Richard.Peach@softwareag.com"
] |
Richard.Peach@softwareag.com
|
a25599fcc363658ae14985fb1168f14a33ecb67e
|
ef7a5e1445706482a0e20d2632f6cd3d0e279031
|
/amy/extrequests/migrations/0026_auto_20201107_1428.py
|
f1264cda61cc04a509b387aff45fb9e84eeac2d9
|
[
"MIT"
] |
permissive
|
pbanaszkiewicz/amy
|
7bf054463f4ecfa217cc9e52a7927d22d32bcd84
|
f97631b2f3dd8e8f502e90bdb04dd72f048d4837
|
refs/heads/develop
| 2022-11-17T18:56:18.975192
| 2022-11-03T23:19:41
| 2022-11-03T23:19:41
| 28,005,098
| 0
| 3
|
MIT
| 2018-03-20T18:48:55
| 2014-12-14T19:25:22
|
Python
|
UTF-8
|
Python
| false
| false
| 714
|
py
|
# Generated by Django 2.2.13 on 2020-11-07 14:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('extrequests', '0025_auto_20201105_1949'),
]
operations = [
migrations.AddField(
model_name='selforganisedsubmission',
name='end',
field=models.DateField(null=True, verbose_name='Workshop end date'),
),
migrations.AddField(
model_name='selforganisedsubmission',
name='start',
field=models.DateField(help_text='Please provide the dates that your Self-Organised workshop will run.', null=True, verbose_name='Workshop start date'),
),
]
|
[
"piotr@banaszkiewicz.org"
] |
piotr@banaszkiewicz.org
|
c298137ca5f8ba3d23d361dc3cc858f6eb4f2f2e
|
15a0797f087a9c05b7a679f47fefeeb875affab5
|
/fermipy/validate/utils.py
|
af79b178f1eb202b6ff272a7d3fa1304526c98b8
|
[
"BSD-3-Clause"
] |
permissive
|
XanAstia/fermipy
|
2496a6a07980faff20958f1a20ad1a3171bf7b35
|
8d9995934fd44959d51ad7bdcd2981b3694fa35e
|
refs/heads/master
| 2021-01-05T20:03:15.590334
| 2020-07-22T12:35:18
| 2020-07-22T12:35:18
| 257,225,629
| 0
| 0
|
BSD-3-Clause
| 2020-06-24T13:45:52
| 2020-04-20T09:00:16
|
Python
|
UTF-8
|
Python
| false
| false
| 4,675
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import copy
import re
import yaml
import sys
import mimetypes
import tempfile
import string
import random
from os.path import splitext, basename
import xml.etree.cElementTree as ElementTree
import argparse
import numpy as np
def rand_str(size=7):
chars = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(chars) for x in range(size))
def replace_aliases(cut_dict, aliases):
"""Substitute aliases in a cut dictionary."""
for k, v in cut_dict.items():
for k0, v0 in aliases.items():
cut_dict[k] = cut_dict[k].replace(k0, '(%s)' % v0)
def strip(input_str):
"""Strip newlines and whitespace from a string."""
return str(input_str.replace('\n', '').replace(' ', ''))
def get_files(files, extnames=['.root']):
"""Extract a list of file paths from a list containing both paths
and file lists with one path per line."""
files_out = []
for f in files:
mime = mimetypes.guess_type(f)
if os.path.splitext(f)[1] in extnames:
files_out += [f]
elif mime[0] == 'text/plain':
files_out += list(np.loadtxt(f, unpack=True, dtype='str'))
else:
raise Exception('Unrecognized input type.')
return files_out
def load_chain(chain, files, nfiles=None):
    """Add a subset of *files* to *chain* and return the chain.

    Parameters
    ----------
    chain : `ROOT.TChain`
        Chain to which the files are added via ``Add``.
    files : list of str
        Candidate file paths.
    nfiles : None, int, or list
        ``None`` adds every file; an int ``N`` (or one-element list
        ``[N]``) adds the first N files; a list ``[start, stop]`` adds
        the slice ``files[start:stop]``.
    """
    if isinstance(nfiles, list) and len(nfiles) == 1:
        selected = files[:nfiles[0]]
    elif isinstance(nfiles, list) and len(nfiles) >= 2:
        selected = files[nfiles[0]:nfiles[1]]
    elif nfiles is None:
        selected = files
    else:
        selected = files[:nfiles]
    print("Loading %i files..." % len(selected))
    for path in selected:
        chain.Add(path)
    return chain
def load_aliases(alias_files):
    """Load alias definitions from a list of XML and/or YAML files.

    Parameters
    ----------
    alias_files : list of str
        Paths ending in ``.xml`` (parsed with ``get_cuts_from_xml``) or
        ``.yaml`` (parsed as a plain YAML mapping).

    Returns
    -------
    dict
        Merged alias dict; later files override earlier keys.

    Raises
    ------
    ValueError
        If a path has an unrecognized extension.  (ValueError
        subclasses Exception, so existing broad handlers still work.)
    """
    aliases = {}
    for f in alias_files:
        if f.endswith('.xml'):
            aliases.update(get_cuts_from_xml(f))
        elif f.endswith('.yaml'):
            # Context manager closes the handle (the original leaked
            # it); safe_load avoids arbitrary object construction from
            # untrusted YAML, which bare yaml.load permits.
            with open(f, 'r') as fh:
                aliases.update(yaml.safe_load(fh))
        else:
            raise ValueError('Invalid file type for aliases option: %s' % f)
    return aliases
def get_cuts_from_xml(xmlfile):
    """Extract event selection strings from the XML file.

    Parses the ``EventMap`` elements for the EVENT_CLASS and EVENT_TYPE
    categories and the ``AliasDict`` element for named aliases, resolves
    alias references, and returns a single dict mapping selection names
    to cut-expression strings.
    """
    root = ElementTree.ElementTree(file=xmlfile).getroot()
    event_maps = root.findall('EventMap')
    # NOTE(review): assumes at least one AliasDict element exists;
    # raises IndexError otherwise.
    alias_maps = root.findall('AliasDict')[0]

    event_classes = {}
    event_types = {}
    event_aliases = {}
    for m in event_maps:
        if m.attrib['altName'] == 'EVENT_CLASS':
            for c in m.findall('EventCategory'):
                event_classes[c.attrib['name']] = strip(
                    c.find('ShortCut').text)
        elif m.attrib['altName'] == 'EVENT_TYPE':
            for c in m.findall('EventCategory'):
                event_types[c.attrib['name']] = strip(c.find('ShortCut').text)

    for m in alias_maps.findall('Alias'):
        event_aliases[m.attrib['name']] = strip(m.text)

    # Two passes so aliases that reference other aliases get expanded;
    # presumably two levels of nesting is the maximum that occurs in
    # these files — deeper nesting would be left unresolved.
    replace_aliases(event_aliases, event_aliases.copy())
    replace_aliases(event_aliases, event_aliases.copy())

    # Expand the (now resolved) aliases inside the class/type cuts.
    replace_aliases(event_classes, event_aliases)
    replace_aliases(event_types, event_aliases)

    # Merge everything into one lookup table; later updates win on
    # duplicate keys (aliases override classes/types).
    event_selections = {}
    event_selections.update(event_classes)
    event_selections.update(event_types)
    event_selections.update(event_aliases)

    return event_selections
def set_event_list(tree, selection=None, fraction=None, start_fraction=None):
    """
    Set the event list for a tree or chain.

    Parameters
    ----------
    tree : `ROOT.TTree`
        Input tree/chain.

    selection : str
        Cut string defining the event list.

    fraction : float
        Fraction of the total file to include in the event list
        starting from the *end* of the file.

    start_fraction : float
        When given, the event list instead starts at this fraction of
        the file and spans ``fraction`` of the entries from there.

    Returns
    -------
    int
        Number of entries selected into the event list (for the
        ``start_fraction`` branch this is the count returned by the
        restricted ``Draw`` call).
    """
    import ROOT

    # Random name so concurrent/repeated calls don't clobber each
    # other's event list in ROOT's global directory.
    elist = rand_str()
    if selection is None:
        cuts = ''
    else:
        cuts = selection

    if fraction is None or fraction >= 1.0:
        # Whole file: build the list from every entry.
        n = tree.Draw(">>%s" % elist, cuts, "goff")
        tree.SetEventList(ROOT.gDirectory.Get(elist))
    elif start_fraction is None:
        # Last `fraction` of the file.
        nentries = int(tree.GetEntries())
        first_entry = min(int((1.0 - fraction) * nentries), nentries)
        n = tree.Draw(">>%s" % elist, cuts, "goff", nentries, first_entry)
        tree.SetEventList(ROOT.gDirectory.Get(elist))
    else:
        # Window of `fraction` entries beginning at `start_fraction`.
        nentries = int(tree.GetEntries())
        first_entry = min(int(start_fraction * nentries), nentries)
        n = first_entry + int(nentries * fraction)
        n = tree.Draw(">>%s" % elist, cuts, "goff",
                      n - first_entry, first_entry)
        tree.SetEventList(ROOT.gDirectory.Get(elist))

    return n
|
[
"mdwood137@gmail.com"
] |
mdwood137@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.