blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
03dd33c5872c44c363516af41041b942fc4b82c7
|
a6ed990fa4326c625a2a02f0c02eedf758ad8c7b
|
/meraki/sdk/python/removeNetworkSwitchSwitchStack.py
|
ea22d146e97d6bbedda21ccbaa78bfaab2c71d73
|
[] |
no_license
|
StevenKitavi/Meraki-Dashboard-API-v1-Documentation
|
cf2352976c6b6c00c17a5f6442cedf0aeed46c22
|
5ed02a7def29a2ce455a3f2cfa185f76f44789f5
|
refs/heads/main
| 2023-03-02T08:49:34.846055
| 2021-02-05T10:31:25
| 2021-02-05T10:31:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
# Removes one switch (by serial) from a switch stack via the Meraki Dashboard API.
import meraki
# Defining your API key as a variable in source code is not recommended
API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
# Instead, use an environment variable as shown under the Usage section
# @ https://github.com/meraki/dashboard-api-python/
dashboard = meraki.DashboardAPI(API_KEY)
network_id = 'L_646829496481105433'
# NOTE(review): switch_stack_id is an empty placeholder -- the API call will
# fail until a real stack id is filled in; confirm before running.
switch_stack_id = ''
serial = 'QBZY-XWVU-TSRQ'
response = dashboard.switch.removeNetworkSwitchSwitchStack(
    network_id, switch_stack_id, serial
)
print(response)
|
[
"shiychen@cisco.com"
] |
shiychen@cisco.com
|
18eb37c2ffe3434f8bcd511d3c748630d8feec5c
|
d6458a979207e00da6dc653c278b9bfb818ce18d
|
/Additional Stuff/Medium Stuff/PythonCrypto/crypto9.py
|
c0eb73e93ad223663400383fdecbc56cad757bcf
|
[] |
no_license
|
Hackman9912/05-Python-Programming
|
61ce7bb48188b4cd3cd8e585480325fdd02e579b
|
d03a319c952794b2f298a3ef4ddd09c253e24d36
|
refs/heads/master
| 2020-08-29T14:28:48.403323
| 2019-12-18T21:30:55
| 2019-12-18T21:30:55
| 218,061,276
| 0
| 0
| null | 2019-10-28T14:07:31
| 2019-10-28T14:07:31
| null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def caesar_encrypt(text, shift=3):
    """Encrypt *text* with a Caesar cipher over the uppercase alphabet.

    Each uppercase letter is shifted by *shift* positions, wrapping around
    at 'Z'. Characters outside A-Z are passed through unchanged.

    Fixes two defects in the original loop:
    - letters near the end of the alphabet (X, Y, Z) indexed past the end
      and raised IndexError; the shift now wraps with % 26.
    - characters not found by .find() returned -1 and were silently mapped
      to the wrong letter; they are now kept as-is.
    """
    out = []
    for ch in text:
        idx = ALPHABET.find(ch)
        if idx == -1:
            out.append(ch)
        else:
            out.append(ALPHABET[(idx + shift) % 26])
    return "".join(out)


if __name__ == "__main__":
    string_input = input("Enter a string: ")
    print("Encrypted text: ", caesar_encrypt(string_input))
|
[
"charleshackett89@gmail.com"
] |
charleshackett89@gmail.com
|
6dbf65dea55f3575b84c21c3e7a60a933815fa0e
|
87b4c1e282782ddfa22df95d8f494322bf2f2fb9
|
/Flower Classification with Image Histogram/dataset.py
|
1b47f756b06a5dd1afd718f35f291a0afe4c1872
|
[] |
no_license
|
janFrancoo/Python-Projects
|
34e9515ae167bdca2f8e601c3ccc4bd4a6cb48cb
|
875ed126e4adb7cd4c2884660f24d6515086995c
|
refs/heads/master
| 2021-06-26T17:40:47.740967
| 2021-01-31T15:27:25
| 2021-01-31T15:27:25
| 199,189,125
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
import os
import cv2
# Class names for the 17-category flower set. The image folder is assumed to
# hold 80 consecutive files per class, so every 80th file starts a new label.
labels = ["Daffodil", "Snowdrop", "Lilly Valley", "Bluebell", "Crocus", "Iris", "Tigerlily", "Tulip", "Fritillary",
          "Sunflower", "Daisy", "Colts' Foot", "Dandelion", "Cowslip", "Buttercup", "Windflower", "Pansy"]


def get_flowers(flowers_path, masks_path):
    """Load flower images and their grayscale masks.

    Only images that have a matching '<stem>.png' mask under *masks_path*
    are kept. Returns three parallel lists: (flowers, masks, classes).
    """
    flowers, masks, classes = [], [], []
    label_idx = -1
    for position, file_name in enumerate(os.listdir(flowers_path)):
        # Advance to the next label at every 80-image boundary.
        if position % 80 == 0:
            label_idx += 1
        mask_name = file_name.split(".")[0] + ".png"
        mask_file = os.path.join(masks_path, mask_name)
        if os.path.exists(mask_file):
            gray_mask = cv2.cvtColor(cv2.imread(mask_file), cv2.COLOR_BGR2GRAY)
            masks.append(gray_mask)
            flowers.append(cv2.imread(os.path.join(flowers_path, file_name)))
            classes.append(labels[label_idx])
    return flowers, masks, classes
|
[
"noreply@github.com"
] |
janFrancoo.noreply@github.com
|
4aaad55843e277a02646a91c6816ac641bb76a96
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_171/ch4_2019_04_03_14_50_06_906813.py
|
13c4f9117194d74ac4dc2b5209ab49e9cc9ef2fc
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
def classifica_idade(x):
    """Print the age class for age *x* and return *x* unchanged.

    'crianca' for x <= 11, 'adolescente' for 12..17, 'adulto' otherwise.
    """
    if x <= 11:
        categoria = 'crianca'
    elif x <= 17:
        # reaching this branch already implies x > 11
        categoria = 'adolescente'
    else:
        categoria = 'adulto'
    print(categoria)
    return x
|
[
"you@example.com"
] |
you@example.com
|
15f466d20b51d0e199a6bca4759d7a97d12b9d39
|
e1aeede7cecf2bdb3317954e042f41810745b980
|
/winston/commands/__init__.py
|
879f94514a8039ff04a915527499ca075f99746c
|
[] |
no_license
|
G10DRAS/winston
|
b0f50822af077d374e864f2eefa559275c673fef
|
c72c7f77a89f77d1de31cd0f401b3dc836338b36
|
refs/heads/master
| 2021-01-15T16:04:40.719122
| 2014-02-27T22:31:56
| 2014-02-27T22:31:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
import re
class Command(object):
    """
    A command executed in response to external events such as a voice
    command, a change of state or a notification.
    """

    # Attention word every command begins with (a word or a regex),
    # e.g. "Jenkins! Turn on the lights!".
    signal = "winston"

    def on_event(self, event, sender):
        """Handle events from the interpreter and other sources."""
        # Subclasses put their reaction here.


class RegexCommand(Command):
    """
    A command matched against a regex string.

    With polite=True the compiled pattern also tolerates decorations such
    as "can you" / "please" around the command text.
    """

    # Politeness decorations inserted before/after the command regex.
    prefixes = "( can you| could you)?( please)?"
    suffixes = "( please)?"

    def __init__(self, regex, polite=False):
        super(RegexCommand, self).__init__()
        if not polite:
            self.regex = re.compile(regex)
        else:
            pattern = "{signal}{prefix} {command}{suffix}".format(
                signal=self.signal,
                prefix=self.prefixes,
                command=regex,
                suffix=self.suffixes,
            )
            self.regex = re.compile(pattern)

    def match(self, text):
        """Return a match object if *text* matches from the start, else None."""
        return self.regex.match(text)
|
[
"contact@nicolasbouliane.com"
] |
contact@nicolasbouliane.com
|
8acc9bc358a8f92477e4d4014cb1f0dd864c69da
|
375c87462c4ed200cecce0aeab09c6161ac10dcd
|
/pwg_ls2/RV/dict_2_changes.py
|
dd80bde0aefbda7ab8b5fe3967bd41d33ad19f5b
|
[] |
no_license
|
sanskrit-lexicon/PWG
|
2e7ab371ec7e4da43d81d50663b06fa2e2b44806
|
d32d701366cff1156b7f7bb0aea8ea27cd7fb7dd
|
refs/heads/master
| 2023-02-07T02:49:53.179915
| 2023-02-03T19:53:25
| 2023-02-03T19:53:25
| 15,903,655
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,421
|
py
|
#-*- coding:utf-8 -*-
""" dict_2_changes.py
"""
import sys,re,codecs
## https:##stackoverflow.com/questions/27092833/unicodeencodeerror-charmap-codec-cant-encode-characters
## This required by git bash to avoid error
## UnicodeEncodeError: 'charmap' codec cannot encode characters
## when run in a git bash script.
sys.stdout.reconfigure(encoding='utf-8')
class Change(object):
    """One differing line between two parallel versions of a dictionary file."""
    def __init__(self, metaline, iline, old, new):
        self.metaline = metaline  # most recent '<L>' headword line seen before iline, or None
        self.iline = iline        # 0-based index of the differing line
        self.old = old            # text of the line in the first file
        self.new = new            # text of the line in the second file


def init_changes(lines1, lines2):
    """Compare two equal-length line lists and return a list of Change objects.

    Line 0 is always skipped (it is the file banner, e.g.
    "%***This File is E:\\APTE.ALL, Last update 11.09.06"). Each '<L>' line
    updates the metaline that identifies subsequent changes.

    Removed the unused locals `imetaline1` and `page` from the original.
    """
    changes = []
    metaline = None
    for iline, line1 in enumerate(lines1):
        line2 = lines2[iline]
        if iline == 0:
            continue  # banner line: never treated as a change
        if line1.startswith('<L>'):
            metaline = line1
        if line1 == line2:
            continue
        changes.append(Change(metaline, iline, line1, line2))
    print(len(changes), 'changes found')
    return changes
def change_out(change, ichange):
    """Format one Change as the text lines of a change transaction.

    Returns a list of strings: an identifying ';' comment line, the old and
    new line text (with 1-based line numbers), and a closing ';'.
    """
    outarr = []
    case = ichange + 1  # 1-based case number, kept for the commented TODO line below
    #outarr.append('; TODO Case %s: (reason = %s)' % (case,change.reason))
    try:
        ident = change.metaline
    except AttributeError:  # was a bare except; only a missing attribute can raise here
        print('ERROR:', change.iline, change.old)
        exit(1)
    if ident is None:  # was `== None`; identity test is the correct idiom
        ident = 'No metaline available'
    outarr.append('; ' + ident)
    # report 1-based line numbers for the change
    lnum = change.iline + 1
    line = change.old
    new = change.new
    outarr.append('%s old %s' % (lnum, line))
    outarr.append('%s new %s' % (lnum, new))
    outarr.append(';')
    return outarr
def write_changes(fileout, changes, filein1, filein2):
    """Write every change transaction to *fileout* as UTF-8 text.

    filein1/filein2 are accepted for interface compatibility with callers;
    the output content comes solely from *changes*.
    """
    with codecs.open(fileout, "w", "utf-8") as fout:
        for idx, chg in enumerate(changes):
            for text in change_out(chg, idx):
                fout.write(text + '\n')
    print(len(changes), "changes written to", fileout)
if __name__=="__main__":
    # Usage: dict_2_changes.py <version1.txt> <version2.txt> <changes_out.txt>
    filein1 = sys.argv[1] # xxx.txt (first version)
    filein2 = sys.argv[2] # xxx.txt (second version)
    fileout = sys.argv[3] # possible change transactions
    # Read both versions, stripping only line terminators so content spaces survive.
    with codecs.open(filein1,"r","utf-8") as f:
        lines1 = [x.rstrip('\r\n') for x in f]
    with codecs.open(filein2,"r","utf-8") as f:
        lines2 = [x.rstrip('\r\n') for x in f]
    # The comparison is strictly line-by-line, so both files must align exactly.
    if len(lines1) != len(lines2):
        print('ERROR: require same number of lines in the two input files')
        exit(1)
    print(len(lines1),'lines compared')
    changes = init_changes(lines1,lines2)
    write_changes(fileout,changes,filein1,filein2)
|
[
"funderburkjim@gmail.com"
] |
funderburkjim@gmail.com
|
a879df24d86dc8af1ae7633235f859be1a1e0509
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_QC1759.py
|
bcb20cc7d58111256fe3f74a18f02994896b444e
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,797
|
py
|
# qubit number=5
# total number=60
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Zf for boolean function *f* on *n* qubits.

    For every bitstring where f(rep) == "1", the marked basis state is
    surrounded by X gates and a multi-controlled phase flip (mcu1 with
    angle pi) so that only that state acquires a -1 phase.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # map the marked state to |11...1>, flip phase, then undo
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                oracle.mcu1(pi, controls[1:], controls[0])
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
        # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build an n-qubit Grover-style search circuit using the Zf oracle for *f*.

    NOTE(review): the long gate sequence below is auto-generated (the
    `# number=k` tags index the generator's mutation history) and its exact
    order is significant -- kept verbatim.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.x(input_qubit[4]) # number=53
    prog.cx(input_qubit[2],input_qubit[0]) # number=45
    prog.z(input_qubit[2]) # number=46
    prog.h(input_qubit[0]) # number=54
    prog.cz(input_qubit[2],input_qubit[0]) # number=55
    prog.h(input_qubit[0]) # number=56
    prog.h(input_qubit[1]) # number=4
    prog.rx(2.664070570244145,input_qubit[1]) # number=39
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[2]) # number=49
    prog.cz(input_qubit[3],input_qubit[2]) # number=50
    prog.h(input_qubit[2]) # number=51
    prog.h(input_qubit[4]) # number=21
    # Grover iterations: apply the oracle roughly floor(sqrt(2^n)*pi/4) times.
    Zf = build_oracle(n, f)
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
    prog.h(input_qubit[0]) # number=1
    prog.h(input_qubit[3]) # number=40
    prog.y(input_qubit[4]) # number=35
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=25
    prog.cz(input_qubit[1],input_qubit[0]) # number=26
    prog.h(input_qubit[0]) # number=27
    prog.h(input_qubit[0]) # number=36
    prog.cz(input_qubit[1],input_qubit[0]) # number=37
    prog.h(input_qubit[0]) # number=38
    prog.cx(input_qubit[1],input_qubit[0]) # number=41
    prog.x(input_qubit[0]) # number=42
    prog.cx(input_qubit[1],input_qubit[0]) # number=43
    prog.cx(input_qubit[1],input_qubit[0]) # number=34
    prog.cx(input_qubit[1],input_qubit[0]) # number=24
    prog.cx(input_qubit[0],input_qubit[1]) # number=29
    prog.cx(input_qubit[2],input_qubit[3]) # number=44
    prog.x(input_qubit[1]) # number=30
    prog.h(input_qubit[1]) # number=57
    prog.cz(input_qubit[0],input_qubit[1]) # number=58
    prog.h(input_qubit[1]) # number=59
    prog.x(input_qubit[2]) # number=11
    prog.x(input_qubit[3]) # number=12
    # multi-controlled phase flip (diffusion-style step)
    if n>=2:
        prog.mcu1(pi,input_qubit[1:],input_qubit[0])
    prog.x(input_qubit[0]) # number=13
    prog.x(input_qubit[1]) # number=14
    prog.x(input_qubit[2]) # number=15
    prog.x(input_qubit[3]) # number=16
    prog.h(input_qubit[0]) # number=17
    prog.h(input_qubit[1]) # number=18
    prog.h(input_qubit[2]) # number=19
    prog.h(input_qubit[3]) # number=20
    prog.z(input_qubit[1]) # number=52
    # circuit end
    # Measure every qubit into its matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Search for the single marked key "00000" on real IBMQ hardware.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    # Pick the least busy real (non-simulator) device with enough qubits.
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Depth/diagram are reported against a mock backend after transpilation.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit_QC1759.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
7f95dc0c757ee5c602eda0c84f0a8b39f5e022ba
|
bc181d3e95743e498a1ec0cfbdac369a01d95218
|
/apps/accounts/migrations/0001_initial.py
|
7daca24efb48c0d09b39887195357f9e09d5df77
|
[] |
no_license
|
roman-oxenuk/welltory_test
|
09bbbd8502735adb3662318affa3df10ef47f5af
|
853dff24bbf38d5c2d6dce75dd5713ab6347a00d
|
refs/heads/master
| 2021-01-21T23:23:55.809175
| 2017-06-23T18:50:54
| 2017-06-23T18:50:54
| 95,241,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,255
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: create the custom 'User' model (email is the unique
    login field; several verbose_name values are Russian, escaped as \\uXXXX).

    NOTE(review): migration files are historical records -- do not edit the
    field definitions below after this migration has been applied anywhere.
    """
    dependencies = [
        ('auth', '0006_require_contenttypes_0002'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(unique=True, max_length=255, verbose_name='email')),
                ('first_name', models.CharField(max_length=255, null=True, verbose_name='\u0438\u043c\u044f', blank=True)),
                ('last_name', models.CharField(max_length=255, null=True, verbose_name='\u0444\u0430\u043c\u0438\u043b\u0438\u044f', blank=True)),
                ('is_active', models.BooleanField(default=True, verbose_name='\u0430\u043a\u0442\u0438\u0432\u043d\u044b\u0439')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0434\u0430\u0442\u0430 \u0440\u0435\u0433\u0438\u0441\u0442\u0440\u0430\u0446\u0438\u0438')),
                ('is_staff', models.BooleanField(default=False, verbose_name='is staff')),
                ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
        ),
    ]
|
[
"roman.oxenuk@gmail.com"
] |
roman.oxenuk@gmail.com
|
35d4289b0d5b7197676570e63cb452d1d2bfd5cb
|
be5b91588f198a665160a574e2eba2dd0be84783
|
/database/write_nlu.py
|
5a4e3c8356059d789dcec0516b42e509f4a727a7
|
[] |
no_license
|
swqsd218219/rasa_uncertain_slot
|
f60f9162cc629552f2deef0fb6cd6ea8cb93ae42
|
ec7a0912b9058e3b19acce6ae05b8e871d720141
|
refs/heads/main
| 2023-03-08T18:13:26.853865
| 2021-03-01T01:23:20
| 2021-03-01T01:23:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,783
|
py
|
'''
定义模板:
query attribute:
1. what is the cpu?
- what kind of entity do you want to ask?
- server
- please tell me about the ip of the entity
- 1.2.3.4
- 4 cores
2. what is the cpu of the 1.1.1.1?
- please tell me about the entity of the ip
- server
- 4 cores
3. what is the cpu of the server 1.2.3.1
- 5 cores
query relation
1. list all the server host in ?
- what kind of entity do you ask?(datacenter, cluster)
- cluster
- please tell me about the ip of entity
- 1.1.1.1
- dataframe of servers
2. list all the server host in datacenter?
- please tell me about the ip of entity
- 1.1.1.1
- dataframe of servers
3. list all the server host in datacenter 1.1.1.1
- dataframe of servers
'''
# Load the three entity CSVs. Assumes each file's row 0 is the header row --
# its cells are stored as the entity's *attribute names*; every later row
# contributes only its first column (the ip) to entity2ip. TODO confirm
# the column layouts against the actual CSV files.
with open('cluster.csv','r',encoding='utf-8') as f1:
    cluster = f1.readlines()
with open('datacenter.csv','r',encoding='utf-8') as f2:
    datacenter = f2.readlines()
with open('server.csv','r',encoding='utf-8') as f3:
    server = f3.readlines()
# entity type -> list of attribute (column) names, taken from each header row
entity2attribute = {}
# entity type -> list of instance ips, taken from each data row's first column
entity2ip = {}
entity2ip['cluster'] = []
entity2ip['datacenter'] = []
entity2ip['server'] = []
for index,line in enumerate(cluster):
    if index == 0:
        # header row: columns ip,name,business,city,datacenter_ip
        line = line.strip()
        line = line.split(',')
        ip = line[0]
        name = line[1]
        business = line[2]
        city = line[3]
        datacenter_ip = line[4]
        entity2attribute['cluster'] = [name,business,city,datacenter_ip]
    else:
        line = line.strip()
        line = line.split(',')
        # print(line)
        ip = line[0]
        entity2ip['cluster'].append(ip)
for index,line in enumerate(datacenter):
    if index == 0:
        # header row: columns ip,name,longitude,latitude,region,cpu
        line = line.strip()
        line = line.split(',')
        ip = line[0]
        name = line[1]
        longitude = line[2]
        latitude = line[3]
        region = line[4]
        cpu = line[5]
        entity2attribute['datacenter'] = [name, longitude, latitude, region,cpu]
    else:
        line = line.strip()
        line = line.split(',')
        ip = line[0]
        entity2ip['datacenter'].append(ip)
for index,line in enumerate(server):
    if index == 0:
        # header row: columns ip,name,cpu,memory,disk,server_ip,datacenter_ip
        line = line.strip()
        line = line.split(',')
        ip = line[0]
        name = line[1]
        cpu = line[2]
        memory = line[3]
        disk = line[4]
        server_ip = line[5]
        datacenter_ip = line[6]
        entity2attribute['server'] = [name, cpu, memory, disk,server_ip,datacenter_ip]
    else:
        line = line.strip()
        line = line.split(',')
        ip = line[0]
        entity2ip['server'].append(ip)
# relation -> {subject entity type: [object entity types it can relate to]}
relation2entity = {
    'host in':{'server':['cluster','datacenter'],'cluster':['datacenter']},
    'configuration by':{'datacenter':['cluster','server'],'cluster':['server']}
}
def write_query_attribute(f):
    """Write the 'query_attribute' intent section in Rasa markdown to *f*.

    For every (entity, attribute, ip) combination from the module-level
    entity2attribute / entity2ip tables, three training examples of
    increasing specificity are emitted.
    """
    f.write('## intent: query_attribute' + '\n')
    for entity_name, attributes in entity2attribute.items():
        for attr in attributes:
            for addr in entity2ip[entity_name]:
                samples = (
                    '- what is the [' + attr + '](attribute) ?',
                    '- what is the [' + attr + '](attribute) of the [' + addr + '](ip) ?',
                    '- what is the [' + attr + '](attribute) of the [' + entity_name + '](entity) [' + addr + '](ip) ?',
                )
                for sample in samples:
                    f.write(sample + '\n')
def write_query_ralation(f):
    """Write one 'query_<relation>' intent section per relation to *f*.

    (Function name keeps the original 'ralation' spelling -- callers use it.)
    Uses the module-level relation2entity / entity2ip tables; emits three
    training examples of increasing specificity per combination.
    """
    for rel, mapping in relation2entity.items():
        f.write('## intent: query_' + rel.replace(' ', '_') + '\n')
        for subj, objs in mapping.items():
            for obj in objs:
                for addr in entity2ip[obj]:
                    samples = (
                        '- list all the [' + subj + '](s_entity) ' + rel + ' ?',
                        '- list all the [' + subj + '](s_entity) ' + rel + ' [' + obj + '](o_entity) ?',
                        '- list all the [' + subj + '](s_entity) ' + rel + ' [' + obj + '](o_entity) [' + addr + '](ip) ?',
                    )
                    for sample in samples:
                        f.write(sample + '\n')
def write_lookup(f):
    """Write the Rasa lookup-table references (one per entity slot) to *f*.

    NOTE(review): the indented lines point at external lookup files under
    data/lookup/ that must exist for training -- confirm they are present.
    """
    f.write('## lookup:entity' + '\n')
    f.write('  data/lookup/entity.txt' + '\n')
    f.write('## lookup:attribute' + '\n')
    f.write('  data/lookup/attribute.txt' + '\n')
    f.write('## lookup:s_entity' + '\n')
    f.write('  data/lookup/s_entity.txt' + '\n')
    f.write('## lookup:o_entity' + '\n')
    f.write('  data/lookup/o_entity.txt' + '\n')
    f.write('## lookup:ip' + '\n')
    f.write('  data/lookup/ip.txt' + '\n')
if __name__ == '__main__':
    # NOTE(review): 'a' (append) mode means re-running grows nlu.md with
    # duplicate sections -- confirm that is intended. The handle is also
    # never closed explicitly; a `with` block would be safer.
    f = open('./nlu.md','a',encoding='utf-8')
    write_query_attribute(f)
    write_query_ralation(f)
    write_lookup(f)
|
[
"zhangmw_play@163.com"
] |
zhangmw_play@163.com
|
545da2d80571e4c8539199e79b3b92fa018cd91d
|
8629b45d5cec27fa701c76644db2a1ac9a090b07
|
/016/16.py
|
e848effd4500e3781e5281f0b148d840ea536535
|
[
"MIT"
] |
permissive
|
bsamseth/project-euler
|
96e3a7a94cc605ded3edf7176a93147f9836350e
|
60d70b117960f37411935bc18eab5bb2fca220e2
|
refs/heads/master
| 2021-04-06T06:16:23.425225
| 2018-11-05T09:50:21
| 2018-11-05T09:50:21
| 59,105,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
"""
2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2^1000?
"""
print sum([int(char) for char in str(2**1000)])
|
[
"b.samseth@gmail.com"
] |
b.samseth@gmail.com
|
852bcee70e02a31eea4fdda750582f430f99ea17
|
11ca0c393c854fa7212e783a34269f9dae84e8c7
|
/Python/226. 翻转二叉树.py
|
38463da19db06e4efb8634aea7b35a3f18030818
|
[] |
no_license
|
VictoriqueCQ/LeetCode
|
dc84d81163eed26fa9dbc2114bba0b5c2ea881f4
|
a77b3ead157f97f5d9599badb4d4c5da69de44ba
|
refs/heads/master
| 2021-06-05T06:40:24.659909
| 2021-03-31T08:31:51
| 2021-03-31T08:31:51
| 97,978,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def invertTree(self, root: TreeNode) -> TreeNode:
        """Mirror the binary tree rooted at *root* in place and return it.

        Every node's left and right children are swapped, recursively.
        """
        if not root:
            return None
        # Invert both subtrees and swap them in a single assignment.
        root.left, root.right = (
            self.invertTree(root.right),
            self.invertTree(root.left),
        )
        return root
|
[
"1997Victorique0317"
] |
1997Victorique0317
|
0a57942b9958442ababf76cf5c5edea1a6dacd8a
|
13f4a06cd439f579e34bf38406a9d5647fe7a0f3
|
/nn_ns/parse/MyLL1L/ProcessMatchResult_MyLL1L_of_SRRTL.py
|
0aef1359eb2e3c4617306d57faaef7e442c70f50
|
[] |
no_license
|
edt-yxz-zzd/python3_src
|
43d6c2a8ef2a618f750b59e207a2806132076526
|
41f3a506feffb5f33d4559e5b69717d9bb6303c9
|
refs/heads/master
| 2023-05-12T01:46:28.198286
| 2023-05-01T13:46:32
| 2023-05-01T13:46:32
| 143,530,977
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,673
|
py
|
from .tools_for_id2infoID_SRRTL import *
from .SRRTL_in_MyLL1L import mainID_MyLL1L_of_SRRTL
from .ProcessMatchResult_MyLL1L import ProcessMatchResult_MyLL1L
from .raw_tokenize_SRRTL import RawTokenizer_SRRTL
#from .id2infoID_MyLL1L_of_MyLL1L import tIDDict_MyLL1L_of_MyLL1L
class ProcessMatchResult_MyLL1L_of_SRRTL(ProcessMatchResult_MyLL1L):
    """Post-process a MyLL1L parse of an SRRTL grammar into Info* objects.

    Walks the match result bottom-up (via the base-class process machinery)
    and rewrites each node's ns.data into the Info* record matching its
    token/rule id, ultimately producing a RawTokenizer_SRRTL.

    NOTE(review): the `_post_process` dispatch depends on the exact grammar
    ids of SRRTL_in_MyLL1L (defined elsewhere) -- the branch order below is
    significant and kept verbatim.
    """
    def __init__(self, tIDDict_MyLL1L_of_SRRTL, tokens, pos2rc = None):
        super().__init__(tIDDict_MyLL1L_of_SRRTL, tokens, pos2rc)
        return
    def to_id2infoID(self, match_result):
        # Map each produced Info object by its ID attribute.
        info_ls = self.process(match_result)
        id2infoID = {info.ID : info for info in info_ls}
        return id2infoID
    def to_raw_tokenizer(self, mainID, match_result):
        # Build the tokenizer; mainID must be one of the parsed definitions.
        id2infoID = self.to_id2infoID(match_result)
        assert mainID in id2infoID
        return RawTokenizer_SRRTL(mainID, id2infoID)
    def _pre_process(self, match_result):pass
    def _process_leaf(self, match_result):pass
    # match_result2raw_id2info
    def _get_result(self, match_result):
        # The final result is the data accumulated on the last node.
        raw_id2info = {}
        ns = match_result[-1]
        info_ls = ns.data
        return info_ls
    def _post_process(self, match_result):
        """Rewrite one node's ns.data according to its (ID, *rID) tag."""
        explain = self.explain
        e = self.explain(match_result)
        tID = e.tID
        ID, *rID = tID
        ns = e.ns
        case = e.define_type
        self.gen_ns_data(e)
        self.outbox_optional_Item(e)
        if case == 'Token':
            if ID == 'string':
                # NOTE(review): eval() turns the quoted source literal into a
                # str -- safe only for trusted grammar text.
                ns.data = eval(ns.data)
                assert type(ns.data) == str
            elif ID == 'idstring':
                ns.data = eval(ns.data[2:])
                assert type(ns.data) == str
            elif tID == ('define', 'otherwise'):
                ns.data = None # rex anything
            elif ID == 'state_op':
                assert tID == ('state_op', 'return')
                ns.data = InfoReturn()
        elif case == 'Item':
            pass
        elif case == 'Block':
            pass
            #print(ID, repr(ns.data))
        elif ID == 'strings':
            ns.data = ''.join(ns.data)
        elif ID == 'name':
            ID, = rID
            assert ID == 'idstrings'
            ns.data = ''.join(ns.data)
            ns.data = repr(ns.data)
        elif ID == 'if_clause':
            assert not rID
            ns.data = ns.data[1]
        elif ID == 'state_op':
            # goto/call/error carry their target in the second element.
            ID, = rID
            #print(tID)
            if ID == 'goto':
                state_id = ns.data[1]
                ns.data = InfoGoto(state_id)
            elif ID == 'call':
                state_id = ns.data[1]
                ns.data = InfoCall(state_id)
            else:
                assert ID == 'error'
                err = ns.data[1]
                ns.data = InfoError(err)
## elif ID == 'define':
## ID, = rID
## assert ID == 'rex'
## rex, = e[0].ns.data
## ns.data = rex
        elif ID == 'define_body':
            ID, = rID
            if ID == 'normal_define':
                rex, _, children = ns.data
                if not children:
                    children = []
                ns.data = InfoNormalDefine(rex, children)
            else:
                assert ID == 'define_if_clause'
                rex, state_op, _ = ns.data # rex - None - match all
                #print(state_op, rex)
                ns.data = InfoDefineIfClause(state_op, rex)
        elif ID == 'name_eq':
            assert not rID
            ns.data, _ = ns.data
        elif ID == 'define_token_type':
            ID, = rID
            _id = None
            if ID == 'named_define':
                _id, body = ns.data
            else:
                body = ns.data
            ns.data = InfoDefineTypeID(_id, body)
        elif ID == 'sub_define_block':
            assert not rID
            _, ns.data, _ = ns.data
        elif ID == 'define_state':
            assert not rID
            _id, _, children = ns.data
            ns.data = InfoDefineStateID(_id, children)
## elif ID in {'rex', 'state_id', 'type_id', 'id'}:
## assert not rID
## ns.data, = e[0].ns.data
## #print(ID, ns.data)
## elif ID in {mainID_MyLL1L_of_SRRTL, 'define_block'}:
## assert not rID
## ns.data = e[0].ns.data
## #print(ID, ns.data)
#def lang_text2raw_id2info():
def test_ProcessMatchResult_MyLL1L_of_SRRTL():
    """Self-test: tokenizing SRRTL's own grammar through the rebuilt
    RawTokenizer must reproduce the tokens from raw_tokenize_SRRTL."""
    from .parser_MyLL1L_of_SRRTL import parser_MyLL1L_of_SRRTL
    from .SRRTL_in_MyLL1L import SRRTL_in_MyLL1L, mainID_MyLL1L_of_SRRTL
    from .raw_tokenize_SRRTL import raw_tokenize_SRRTL
    from .id2infoID_SRRTL_of_SRRTL import id2infoID_SRRTL_of_SRRTL
    from .SRRTL_in_SRRTL import mainID_SRRTL_of_SRRTL, SRRTL_in_SRRTL
    # Reference tokens produced by the hand-written tokenizer path.
    raw_tokens = list(raw_tokenize_SRRTL(SRRTL_in_SRRTL, \
        mainID_SRRTL_of_SRRTL, id2infoID_SRRTL_of_SRRTL))
    _tokenize = parser_MyLL1L_of_SRRTL.tokenize
    _parse = parser_MyLL1L_of_SRRTL.parse_tokens
    tIDDict = parser_MyLL1L_of_SRRTL.tIDDict
    tokens = _tokenize(SRRTL_in_SRRTL)
    _match_result = _parse(tokens)
    # Tokens produced by the tokenizer rebuilt from the parsed grammar.
    raw_tokenizer = ProcessMatchResult_MyLL1L_of_SRRTL(tIDDict, tokens)\
        .to_raw_tokenizer(mainID_SRRTL_of_SRRTL, _match_result)
    _raw_tokens = list(raw_tokenizer.raw_tokenize(SRRTL_in_SRRTL))
    if not _raw_tokens == raw_tokens:
        # Print both sequences for diagnosis before failing.
        assert repr(_raw_tokens) == repr(raw_tokens)
        print(_raw_tokens)
        print(raw_tokens)
    assert _raw_tokens == raw_tokens
if __name__ == '__main__':
    test_ProcessMatchResult_MyLL1L_of_SRRTL()
|
[
"wuming_zher@zoho.com.cn"
] |
wuming_zher@zoho.com.cn
|
1ac9526b04e496e36c8caa591056247ab113c9a8
|
fea444217851a92510651da2b60035b73344d7da
|
/todo/setup.py
|
ee4284355e4449097dd3991ca5c42f45b5f04dbb
|
[] |
no_license
|
fuzzygwalchmei/scratchingPost
|
c70d4f3f37d3d4d6490edfbbae603305b2bb5764
|
b232c54aac975aebb0945d66a841db3f241b7cd2
|
refs/heads/master
| 2023-01-29T13:02:22.615813
| 2020-12-15T00:47:56
| 2020-12-15T00:47:56
| 176,823,898
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
# SQLAlchemy wiring: a local SQLite file in the working directory.
engine = create_engine('sqlite:///todo.db')
Session = sessionmaker(bind=engine)
session = Session()  # module-level session shared by this script
Base = declarative_base()
class ToDo(Base):
    """A single to-do item persisted in the 'todos' table."""
    __tablename__ = 'todos'
    id = Column(Integer, primary_key=True)
    subject = Column(String)
    note = Column(String)
    def __repr__(self):
        # Close the repr with ')>' -- the original f-string left
        # '<ToDo(...' unterminated.
        return f'<ToDo(id: {self.id} - note: {self.note})>'
# Create the mapped tables if they do not exist yet; the commit is a no-op
# here (nothing pending) but leaves the session in a clean state.
Base.metadata.create_all(engine)
session.commit()
|
[
"marc.falzon@gmail.com"
] |
marc.falzon@gmail.com
|
ba801aca965089f72776e5998d259a23802b74e6
|
8f3336bbf7cd12485a4c52daa831b5d39749cf9b
|
/Python/sliding-puzzle.py
|
22a02e080a64c277d48f26a549604f17dc5dba51
|
[] |
no_license
|
black-shadows/LeetCode-Topicwise-Solutions
|
9487de1f9a1da79558287b2bc2c6b28d3d27db07
|
b1692583f7b710943ffb19b392b8bf64845b5d7a
|
refs/heads/master
| 2022-05-30T22:16:38.536678
| 2022-05-18T09:18:32
| 2022-05-18T09:18:32
| 188,701,704
| 240
| 110
| null | 2020-05-08T13:04:36
| 2019-05-26T15:41:03
|
C++
|
UTF-8
|
Python
| false
| false
| 4,140
|
py
|
# Time: O((m * n) * (m * n)!)
# Space: O((m * n) * (m * n)!)
import heapq
import itertools
# A* Search Algorithm
class Solution(object):
    def slidingPuzzle(self, board):
        """
        :type board: List[List[int]]
        :rtype: int

        A*-like search using two bucket lists ('closer' / 'detour') instead
        of a heap. NOTE(review): Python 2 code (xrange, list+list for range
        results) -- kept verbatim; it will not run unmodified on Python 3.
        """
        def dot(p1, p2):
            return p1[0]*p2[0]+p1[1]*p2[1]
        def heuristic_estimate(board, R, C, expected):
            # Sum of Manhattan distances of every tile from its goal cell.
            result = 0
            for i in xrange(R):
                for j in xrange(C):
                    val = board[C*i + j]
                    if val == 0: continue
                    r, c = expected[val]
                    result += abs(r-i) + abs(c-j)
            return result
        R, C = len(board), len(board[0])
        begin = tuple(itertools.chain(*board))
        end = tuple(range(1, R*C) + [0])
        # goal position of each tile value (0 maps to the last cell)
        expected = {(C*i+j+1) % (R*C) : (i, j)
                    for i in xrange(R) for j in xrange(C)}
        min_steps = heuristic_estimate(begin, R, C, expected)
        # 'closer' holds moves that reduce the heuristic; each swap of the
        # two buckets costs 2 extra steps (a detour move and its return).
        closer, detour = [(begin.index(0), begin)], []
        lookup = set()
        while True:
            if not closer:
                if not detour:
                    return -1
                min_steps += 2
                closer, detour = detour, closer
            zero, board = closer.pop()
            if board == end:
                return min_steps
            if board not in lookup:
                lookup.add(board)
                r, c = divmod(zero, C)
                for direction in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                    i, j = r+direction[0], c+direction[1]
                    if 0 <= i < R and 0 <= j < C:
                        new_zero = i*C+j
                        tmp = list(board)
                        tmp[zero], tmp[new_zero] = tmp[new_zero], tmp[zero]
                        new_board = tuple(tmp)
                        # A move is 'closer' if the moved tile heads toward
                        # its goal cell (positive dot product).
                        r2, c2 = expected[board[new_zero]]
                        r1, c1 = divmod(zero, C)
                        r0, c0 = divmod(new_zero, C)
                        is_closer = dot((r1-r0, c1-c0), (r2-r0, c2-c0)) > 0
                        (closer if is_closer else detour).append((new_zero, new_board))
        return min_steps
# Time: O((m * n) * (m * n)! * log((m * n)!))
# Space: O((m * n) * (m * n)!)
# A* Search Algorithm
class Solution2(object):
    def slidingPuzzle(self, board):
        """
        :type board: List[List[int]]
        :rtype: int

        Classic A* with a heap keyed on f = g + heuristic.
        NOTE(review): Python 2 code (xrange, list+list) kept verbatim.
        """
        def heuristic_estimate(board, R, C, expected):
            # Sum of Manhattan distances of every tile from its goal cell.
            result = 0
            for i in xrange(R):
                for j in xrange(C):
                    val = board[C*i + j]
                    if val == 0: continue
                    r, c = expected[val]
                    result += abs(r-i) + abs(c-j)
            return result
        R, C = len(board), len(board[0])
        begin = tuple(itertools.chain(*board))
        end = tuple(range(1, R*C) + [0])
        # The state with the last two tiles swapped is in the unsolvable
        # parity class; reaching it proves the puzzle has no solution.
        end_wrong = tuple(range(1, R*C-2) + [R*C-1, R*C-2, 0])
        expected = {(C*i+j+1) % (R*C) : (i, j)
                    for i in xrange(R) for j in xrange(C)}
        min_heap = [(0, 0, begin.index(0), begin)]
        lookup = {begin: 0}  # best known f-score per state
        while min_heap:
            f, g, zero, board = heapq.heappop(min_heap)
            if board == end: return g
            if board == end_wrong: return -1
            if f > lookup[board]: continue  # stale heap entry
            r, c = divmod(zero, C)
            for direction in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                i, j = r+direction[0], c+direction[1]
                if 0 <= i < R and 0 <= j < C:
                    new_zero = C*i+j
                    tmp = list(board)
                    tmp[zero], tmp[new_zero] = tmp[new_zero], tmp[zero]
                    new_board = tuple(tmp)
                    f = g+1+heuristic_estimate(new_board, R, C, expected)
                    if f < lookup.get(new_board, float("inf")):
                        lookup[new_board] = f
                        heapq.heappush(min_heap, (f, g+1, new_zero, new_board))
        return -1
|
[
"noreply@github.com"
] |
black-shadows.noreply@github.com
|
7029d9404d228661a4e2e7d27618a58caefe3e98
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.SHP/Sans_8/pdf_to_json_test_Latn.SHP_Sans_8.py
|
a9ff90b902a1274df32bc2575bf41aceb7fb70ec
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.SHP/Sans_8/udhr_Latn.SHP_Sans_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
d2a3619d1a99b718458ffed7e6bdd3f373536969
|
04eaab6d9a6707b950d7ec4688707a883a009889
|
/where/cleaners/__init__.py
|
d225d68de08cd820429206b62eea119429a5ee10
|
[
"MIT"
] |
permissive
|
skjaeve/where
|
3eae1036419e5f9c6b824b5f9b1dcedbe9d4da93
|
690558f64d54ce46c55a0bc3ef26f6fd992a3737
|
refs/heads/master
| 2020-04-05T03:35:01.737430
| 2018-11-28T11:04:59
| 2018-11-28T11:04:59
| 156,520,078
| 0
| 0
| null | 2018-11-07T09:13:35
| 2018-11-07T09:13:35
| null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
"""Framework for cleaning data
Description:
------------
Each data cleaner should be defined in a one of two directories:
+ `editors` - Editors can add new fields to the dataset.
+ `removers` - These cleaners only remove observations.
"""
# Make the apply-functions in subpackages available
from where.cleaners.editors import apply_editors # noqa
from where.cleaners.removers import apply_removers # noqa
# Do not support * imports
__all__ = []
|
[
"geirarne@gmail.com"
] |
geirarne@gmail.com
|
ed5f6cde139950405c6ec1728493c26afb9a6799
|
9531e597cd3f865cc6b6f780498a18281c2413f8
|
/comments/models.py
|
956bf210ee9ab176d9e93f98dac9fd3202ac60d4
|
[] |
no_license
|
dpitkevics/DevNet
|
7133b80ce5d56b9c11aa4c500d530faed7cb13f4
|
98ebc3916346e6c2bda79711a3896f7c2a8e2ac8
|
refs/heads/master
| 2020-04-15T12:04:00.245848
| 2015-09-14T17:45:39
| 2015-09-14T17:45:39
| 41,320,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from model_utils.models import TimeStampedModel
class Comment(TimeStampedModel):
user = models.ForeignKey(User)
parent_comment = models.ForeignKey('Comment')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
comment_text = models.TextField()
|
[
"daniels.pitkevics@gmail.com"
] |
daniels.pitkevics@gmail.com
|
1ec7d95d1793fcef3900410021a4866f130286d4
|
9e715dea01dc637ed91cde345df8ae81267f60a9
|
/webapp/apps/taxbrain/migrations/0069_auto_20150314_2139.py
|
ffaec203e84da216bcbc53279e1dc8272924d4d0
|
[
"MIT"
] |
permissive
|
kdd0211/webapp-public
|
f08b76201a6a59116bcfdc382ba995a46dd629cd
|
bcf94d5d6458ac5c6e89d0cf33d7fed06c85030d
|
refs/heads/master
| 2021-01-16T21:07:44.059049
| 2016-01-14T05:09:50
| 2016-01-14T05:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0068_auto_20150314_2137'),
]
operations = [
migrations.RenameField(
model_name='taxsaveinputs',
old_name='long_rate_one',
new_name='_CG_rt1',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='long_threshold_one_single',
new_name='_CG_thd1_0',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='long_threshold_one_jointly',
new_name='_CG_thd1_1',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='long_threshold_one_head',
new_name='_CG_thd1_2',
),
migrations.RenameField(
model_name='taxsaveinputs',
old_name='long_threshold_one_separately',
new_name='_CG_thd1_3',
),
]
|
[
"tj.alumbaugh@continuum.io"
] |
tj.alumbaugh@continuum.io
|
91bf99a2e6bbb2f2dbb60eb172f61a1ec01f2632
|
a5a7a70348420b5815d4a49d74aa42e4ca41b4ba
|
/SAN/lib/utils/box_utils.py
|
ab81dc6b92ea5b149614c3bfed69a410239b17bb
|
[
"MIT"
] |
permissive
|
738654805/landmark-detection
|
18f8692b0f81bb4198cb6a5baca42a3f9ec89e59
|
70f647752147592fd5f62f99e64c685a6cf45b4a
|
refs/heads/master
| 2020-06-06T10:53:50.751520
| 2019-06-13T08:41:15
| 2019-06-13T08:41:15
| 192,720,661
| 1
| 0
|
MIT
| 2019-06-19T11:39:25
| 2019-06-19T11:39:25
| null |
UTF-8
|
Python
| false
| false
| 1,836
|
py
|
##############################################################
### Copyright (c) 2018-present, Xuanyi Dong ###
### Style Aggregated Network for Facial Landmark Detection ###
### Computer Vision and Pattern Recognition, 2018 ###
##############################################################
import numpy as np
def bboxcheck_TLBR(bbox):
'''
check the input bounding box to be TLBR format
parameter:
bbox: N x 4 numpy array, TLBR format
return:
True or False
'''
OK1 = isinstance(bbox, np.ndarray) and bbox.shape[1] == 4 and bbox.shape[0] > 0
OK2 = (bbox[:, 3] >= bbox[:, 1]).all() and (bbox[:, 2] >= bbox[:, 0]).all()
return OK1 and OK2
def bbox2center(bbox):
'''
convert a bounding box to a point, which is the center of this bounding box
parameter:
bbox: N x 4 numpy array, TLBR format
return:
center: 2 x N numpy array, x and y correspond to first and second row respectively
'''
assert bboxcheck_TLBR(bbox), 'the input bounding box should be TLBR format'
num_bbox = bbox.shape[0]
center = np.zeros((num_bbox, 2), dtype='float32')
center[:, 0] = (bbox[:, 0] + bbox[:, 2]) / 2.
center[:, 1] = (bbox[:, 1] + bbox[:, 3]) / 2.
return np.transpose(center)
def bbox_TLBR2TLWH(bbox):
'''
transform the input bounding box with TLBR format to TLWH format
parameter:
bbox: N X 4 numpy array, TLBR format
return
bbox: N X 4 numpy array, TLWH format
'''
assert bboxcheck_TLBR(bbox), 'the input bounding box should be TLBR format'
bbox_TLWH = np.zeros_like(bbox)
bbox_TLWH[:, 0] = bbox[:, 0]
bbox_TLWH[:, 1] = bbox[:, 1]
bbox_TLWH[:, 2] = bbox[:, 2] - bbox[:, 0]
bbox_TLWH[:, 3] = bbox[:, 3] - bbox[:, 1]
return bbox_TLWH
|
[
"280835372@qq.com"
] |
280835372@qq.com
|
8989148a1e906ae9fa35e8e5f99f07891fdd0d91
|
17e9441138f8ad09eab3d017c0fa13fa27951589
|
/blog17-networkx/test02.py
|
837cd862a077033de44e123fefe0dbd0a98117bc
|
[] |
no_license
|
My-lsh/Python-for-Data-Mining
|
159a09e76b35efd46ca3e32ad6dd2174847d5ec4
|
f2dd0b8f3c4f5f51a10613dff99041bca4fd64c5
|
refs/heads/master
| 2023-03-26T08:48:32.088713
| 2021-03-25T14:57:07
| 2021-03-25T14:57:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,187
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 02 10:33:58 2017
@author: eastmount CSDN 杨秀璋
"""
import pandas as pd
import numpy as np
import codecs
import networkx as nx
import matplotlib.pyplot as plt
""" 第一步:读取数据并获取姓名 """
data = pd.read_csv("data.csv",encoding ="gb2312") #中文乱码
print data[:4]
print data[u'姓名'] #获取某一列数据
print type(data[u'姓名'])
name = []
for n in data[u'姓名']:
name.append(n)
print name[0]
""" 第二步:计算共现矩阵 定义函数实现 """
a = np.zeros([2,3])
print a
print len(name)
word_vector = np.zeros([len(name),len(name)]) #共现矩阵
#1.计算学院共线矩阵
i = 0
while i<len(name): #len(name)
academy1 = data[u'学院'][i]
j = i + 1
while j<len(name):
academy2 = data[u'学院'][j]
if academy1==academy2: #学院相同
word_vector[i][j] += 1
word_vector[j][i] += 1
j = j + 1
i = i + 1
print word_vector
np_data = np.array(word_vector) #矩阵写入文件
pd_data = pd.DataFrame(np_data)
pd_data.to_csv('result.csv')
#2.计算大数据金融班级共线矩阵
#3.计算性别共线矩阵
#4.计算宿舍楼层共线矩阵
"""
i = 0
while i<len(name): #len(name)
academy1 = data[u'宿舍楼层'][i]
j = i + 1
while j<len(name):
academy2 = data[u'宿舍楼层'][j]
if academy1==academy2: #相同
word_vector[i][j] += 1
word_vector[j][i] += 1
j = j + 1
i = i + 1
print word_vector
"""
""" 第三步:共现矩阵计算(学生1 学生2 共现词频)文件 """
words = codecs.open("word_node.txt", "a+", "utf-8")
i = 0
while i<len(name): #len(name)
student1 = name[i]
j = i + 1
while j<len(name):
student2 = name[j]
#判断学生是否共现 共现词频不为0则加入
if word_vector[i][j]>0:
words.write(student1 + " " + student2 + " "
+ str(word_vector[i][j]) + "\r\n")
j = j + 1
i = i + 1
words.close()
""" 第四步:图形生成 """
a = []
f = codecs.open('word_node.txt','r','utf-8')
line = f.readline()
print line
i = 0
A = []
B = []
while line!="":
a.append(line.split()) #保存文件是以空格分离的
print a[i][0],a[i][1]
A.append(a[i][0])
B.append(a[i][1])
i = i + 1
line = f.readline()
elem_dic = tuple(zip(A,B))
print type(elem_dic)
print list(elem_dic)
f.close()
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['font.family']='sans-serif'
colors = ["red","green","blue","yellow"]
G = nx.Graph()
G.add_edges_from(list(elem_dic))
#nx.draw(G,with_labels=True,pos=nx.random_layout(G),font_size=12,node_size=2000,node_color=colors) #alpha=0.3
#pos=nx.spring_layout(G,iterations=50)
pos=nx.random_layout(G)
nx.draw_networkx_nodes(G, pos, alpha=0.2,node_size=1200,node_color=colors)
nx.draw_networkx_edges(G, pos, node_color='r', alpha=0.3) #style='dashed'
nx.draw_networkx_labels(G, pos, font_family='sans-serif', alpha=0.5) #font_size=5
plt.show()
|
[
"noreply@github.com"
] |
My-lsh.noreply@github.com
|
458e17eed0bc39f02d890a755f9aa6207076f831
|
2a9a136296e3d2abebf3a3dbfbbb091076e9f15f
|
/env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/sax.py
|
59b0a8ff79ffd1467ad8d32e2074685db1ed7e20
|
[] |
no_license
|
Lisukod/planet-tracker
|
a865e3920b858000f5d3de3b11f49c3d158e0e97
|
6714e6332b1dbccf7a3d44430620f308c9560eaa
|
refs/heads/master
| 2023-02-18T19:26:16.705182
| 2021-01-23T01:51:58
| 2021-01-23T01:51:58
| 328,032,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,697
|
py
|
from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
if prefix is not None:
prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker
:arg walker: the treewalker to use to walk the tree to convert it
:arg handler: SAX handler to use
"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"], unadjustForeignAttributes)
handler.startElementNS(
(token["namespace"], token["name"]), token["name"], attrs
)
if type == "EmptyTag":
handler.endElementNS(
(token["namespace"], token["name"]), token["name"]
)
elif type == "EndTag":
handler.endElementNS(
(token["namespace"], token["name"]), token["name"]
)
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
|
[
"45397160+Lisukod@users.noreply.github.com"
] |
45397160+Lisukod@users.noreply.github.com
|
4ed8aacb5d5e8e915a445cc8c33ffb7f42a8ec4c
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r8/Gen/DecFiles/options/11134020.py
|
d61de03b13fd229fd8d73ea102ddc4195d7175b6
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/11134020.py generated: Fri, 27 Mar 2015 15:47:57
#
# Event Type: 11134020
#
# ASCII decay Descriptor: {[[B0]nos -> (J/psi(1S) -> p+ p~-) (rho(770)0 -> pi+ pi-)]cc, [[B0]os -> (J/psi(1S) -> p+ p~-) (rho(770)0 -> pi- pi+)]cc}
#
from Configurables import Generation
Generation().EventType = 11134020
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_Jpsirho0,pp=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
1d14a6463b2ceaf9f8bc13e5d1c1c6450675751c
|
49b048b05330fcc7ebd1ea6d3b619085af46b433
|
/exe01.py
|
751128bf3dd018f9c1442b0a37477fe9a947ef8a
|
[] |
no_license
|
andreplacet/reinforcement-tasks-python-strings
|
a26e2c8544a2dbb161ffd27c4f806398c2096b8f
|
1ee8f16bbc97bca138feb41992205674a4e07a57
|
refs/heads/master
| 2023-01-08T23:09:40.872807
| 2020-11-06T17:54:51
| 2020-11-06T17:54:51
| 310,668,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
# Exercicio 01
print('Comparador de strings')
string_1 = str(input('Digite uma frase: ')).strip().split()
print(f'String 1: {string_1}')
string_1 = ''.join(string_1)
string_2 = str(input('Digite uma frase: ')).strip().split()
print(f'String 2: {string_2 = }')
string_2 = ''.join(string_2)
print(f'Tamanho da String 1 :{len(string_1)}\n'
f'Tamanho da String 2: {len(string_2)}')
if string_1 == string_2:
print('As strings possuem o mesmo conteudo!')
else:
print('As strings não possuem o mesmo conteudo!')
|
[
"andreplacet@gmail.com"
] |
andreplacet@gmail.com
|
71ef0ac38df7ff3711365479429d3a21f262af87
|
1b48b3980abbe11691310a7f35efef62bc0ae831
|
/_msic/py/_fp/rxpy/test_rx.py
|
7ae445bd4cd75655f4c4f14080afa7efe81709e5
|
[] |
no_license
|
FXTD-ODYSSEY/MayaScript
|
7619b1ebbd664988a553167262c082cd01ab80d5
|
095d6587d6620469e0f1803d59a506682714da17
|
refs/heads/master
| 2022-11-05T08:37:16.417181
| 2022-10-31T11:50:26
| 2022-10-31T11:50:26
| 224,664,871
| 45
| 11
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
from rx import Observable
from random import randint
three_emissions = Observable.range(1, 3)
(
three_emissions.map(lambda i: randint(1, 100000))
.subscribe(lambda i: print("Subscriber 1 Received: {0}".format(i)))
.subscribe(lambda i: print("Subscriber 2 Received: {0}".format(i)))
)
|
[
"timmyliang@tencent.com"
] |
timmyliang@tencent.com
|
dc081a3bdcb41c1fec957a206f7cd2c2a8b97677
|
3f6c16ea158a8fb4318b8f069156f1c8d5cff576
|
/.PyCharm2019.1/system/python_stubs/-1850396913/lxml/etree/_Comment.py
|
d1bc71dd97ac493d449fb08c86cc8fe73d2b8f6e
|
[] |
no_license
|
sarthak-patidar/dotfiles
|
08494170d2c0fedc0bbe719cc7c60263ce6fd095
|
b62cd46f3491fd3f50c704f0255730af682d1f80
|
refs/heads/master
| 2020-06-28T23:42:17.236273
| 2019-10-01T13:56:27
| 2019-10-01T13:56:27
| 200,369,900
| 0
| 0
| null | 2019-08-03T12:56:33
| 2019-08-03T11:53:29
|
Shell
|
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
# encoding: utf-8
# module lxml.etree
# from /var/www/newsbytes/CricketPlayerDataScrapper/venv/lib/python3.6/site-packages/lxml/etree.cpython-36m-x86_64-linux-gnu.so
# by generator 1.147
""" The ``lxml.etree`` module implements the extended ElementTree API for XML. """
# imports
import builtins as __builtins__ # <module 'builtins' (built-in)>
from .__ContentOnlyElement import __ContentOnlyElement
class _Comment(__ContentOnlyElement):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
tag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__pyx_vtable__ = None # (!) real value is '<capsule object NULL at 0x7f578a4838d0>'
|
[
"sarthakpatidar15@gmail.com"
] |
sarthakpatidar15@gmail.com
|
db860acf670514cdb3a4a8ac172160bfbafee046
|
a8e3ddb269a8b959b3bce38e7b21aaa1a7e69dd4
|
/tensorpack/trainv1/config.py
|
abc02ccaa6e417f64904e35095a3b993806a2fc4
|
[
"Apache-2.0"
] |
permissive
|
myelintek/tensorpack
|
55945c7ea9d661b31f28c83e5477870d2f3dac86
|
fcbf5869d78cf7f3b59c46318b6c883a7ea12056
|
refs/heads/master
| 2018-10-25T05:50:15.302077
| 2018-04-09T03:24:27
| 2018-04-09T03:24:27
| 114,971,878
| 0
| 2
|
Apache-2.0
| 2022-09-29T03:16:20
| 2017-12-21T06:39:29
|
Python
|
UTF-8
|
Python
| false
| false
| 150
|
py
|
# -*- coding: utf-8 -*-
# File: config.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
__all__ = ['TrainConfig']
from ..train.config import TrainConfig
|
[
"ppwwyyxxc@gmail.com"
] |
ppwwyyxxc@gmail.com
|
9488d6f82af89e6350f8e311867f201ac9056640
|
06d882216885b4cc82ef131afc27baa8a797537a
|
/food_api/zomato_api/restaurant_url.py
|
f3399ee659e6f39d9d23973d5b8cccebc3ea0faa
|
[] |
no_license
|
bopopescu/restaurant_data_crawler
|
7de91844ae51b71b1c64af57cf82067f28996940
|
dd14839cabd114ab22c86eff15428143a310da5f
|
refs/heads/master
| 2022-11-06T21:52:22.941089
| 2017-10-09T12:10:41
| 2017-10-09T12:10:41
| 282,031,811
| 0
| 0
| null | 2020-07-23T18:54:44
| 2020-07-23T18:54:43
| null |
UTF-8
|
Python
| false
| false
| 3,633
|
py
|
from bs4 import BeautifulSoup
from urllib2 import Request, urlopen, URLError
import re
from errorhandler import typec
from re import search
from re import sub
import json
def crawlRestaurants(restaurant_url):
try:
menu_url = []
restaurant_menu_url_with_unicode = restaurant_url + "/menu#food"
restaurant_menu_url_with_unicode = restaurant_menu_url_with_unicode.replace(unichr(233),'e')
restaurant_menu_url = sub(r"[^\x00-\x7F]+","",restaurant_menu_url_with_unicode)
try:
response = urlopen(restaurant_menu_url)
html = response.read()
# print html
rest_soup = BeautifulSoup(html)
for javascript_code in rest_soup.find_all("script",{"type":"text/javascript"}):
text = javascript_code.text
pat = "zomato.menuPages"
index = text.find(pat)
if index >= 0:
menu_items = search("zomato.menuPages = (.+?);",text).group(1)
menu_dict = json.loads(menu_items)
for urls in menu_dict:
menu_url.append(str(urls['url']))
return menu_url
except URLError as error:
print restaurant_menu_url
return restaurantsDB
except URLError as error:
print error
# <<<<<<< HEAD
# def crawlRestaurants(city_name,locality_name):
# try:
# restaurantsDB = []
# searchUrl = "https://www.zomato.com/" + city_name + "/" + locality_name.replace(" ","-").lower() + "-restaurants"
# response = urlopen(searchUrl)
# html = response.read()
# soup = BeautifulSoup(html)
# # Extracting no. of pages
# for pages in soup.find("div",{"class":"col-l-3 mtop0 alpha tmargin pagination-number"}):
# text = pages.text
# tokens = text.split(" ")
# flag = 0
# page_no = 1
# for token in tokens:
# if token.isdigit():
# if flag == 1:
# page_no = int(token) + 1
# flag = 1
# # Crawling on each page of restaurant locality
# for page in range(1,page_no):
# searchUrl = "https://www.zomato.com/" + city_name + "/" + locality_name.replace(" ","-").lower() + "-restaurants?page="+str(page)
# response = urlopen(searchUrl)
# html = response.read()
# soup = BeautifulSoup(html)
# for rest_div in soup.find_all("li",{"class":"resZS mbot0 pbot0 bb even status1"}) + soup.find_all("li",{"class":"resZS mbot0 pbot0 bb even near status1"}):
# restDB = {}
# restDB['id'] = rest_div['data-res_id']
# rest_url_a = rest_div.find("a",{"class":"result-title"})
# rest_url = rest_url_a["href"]
# rest_url = rest_url.replace(unichr(233),'e')
# rest_url = sub(r"[^\x00-\x7F]+","",rest_url)
# restDB['url'] = str(rest_url)
# restaurant_menu_url_with_unicode = restDB['url'] + "/menu#food"
# restaurant_menu_url_with_unicode = restaurant_menu_url_with_unicode.replace(unichr(233),'e')
# restaurant_menu_url = sub(r"[^\x00-\x7F]+","",restaurant_menu_url_with_unicode)
# try:
# response = urlopen(restaurant_menu_url)
# html = response.read()
# # print html
# rest_soup = BeautifulSoup(html)
# for javascript_code in rest_soup.find_all("script",{"type":"text/javascript"}):
# text = javascript_code.text
# pat = "zomato.menuPages"
# index = text.find(pat)
# if index >= 0:
# menu_items = search("zomato.menuPages = (.+?);",text).group(1)
# menu_dict = json.loads(menu_items)
# menu_url = []
# for urls in menu_dict:
# menu_url.append(str(urls['url']))
# restDB['menu'] = menu_url
# restaurantsDB.append(restDB)
# except URLError as error:
# print restaurant_menu_url
# return restaurantsDB
# except URLError as error:
# print error
# print crawlRestaurants(city_name,locality_name)
# =======
|
[
"nitesh.surtani0606@gmail.com"
] |
nitesh.surtani0606@gmail.com
|
77007c1c919ffc67963fee14634b26ee9856e131
|
a1bffb2795728a6369c4447ca58e9a60620a1e7d
|
/intro/matplotlib/examples/plot_aliased.py
|
91281736e7c3d601518f28e84fe5b8b6f7ae0e36
|
[
"CC-BY-4.0",
"CC-BY-3.0"
] |
permissive
|
imieza/scipy-lecture-notes
|
03a4e0615f4fc4fdea3583d9557742fc1798ba65
|
74c8b7b491ceae0ce5be1745497b7adc0bad1406
|
refs/heads/master
| 2021-01-16T20:30:57.735341
| 2015-09-21T17:28:35
| 2015-09-21T17:28:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
"""
Aliased versus anti-aliased
=============================
This example demonstrates aliased versus anti-aliased text.
"""
import pylab as pl
size = 128, 16
dpi = 72.0
figsize= size[0] / float(dpi), size[1] / float(dpi)
fig = pl.figure(figsize=figsize, dpi=dpi)
fig.patch.set_alpha(0)
pl.axes([0, 0, 1, 1], frameon=False)
pl.rcParams['text.antialiased'] = False
pl.text(0.5, 0.5, "Aliased", ha='center', va='center')
pl.xlim(0, 1)
pl.ylim(0, 1)
pl.xticks(())
pl.yticks(())
pl.show()
|
[
"gael.varoquaux@normalesup.org"
] |
gael.varoquaux@normalesup.org
|
bde24f32962bc7daa1d653fc2bfc6b034f25a563
|
4f972877da14226125440b3da9bdb058764d8a54
|
/pythonDemo/argparseOpt/add_arg.py
|
e43e60028c712b282fd0fa4373dee4ad04ff9d48
|
[] |
no_license
|
ZhiYinZhang/study
|
16c29990cb371e7e278c437aa0abc7c348614063
|
8c085310b4f65e36f2d84d0acda4ca257b7389af
|
refs/heads/master
| 2021-07-09T16:05:02.925343
| 2020-06-30T07:53:05
| 2020-06-30T07:53:05
| 153,767,096
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,403
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# datetime:2020/5/18 14:51
import argparse
if __name__=="__main__":
parser=argparse.ArgumentParser(description="在参数帮助文档之前显示的文本")
#参数需要使用--b或-b
parser.add_argument("--by","-b", #参数变量,‘-’表示缩写
action="store", #将命令行参数与action相关联,默认store:存储参数的值
type=str, #可以指定数据类型
help="b help", #帮助信息
const="1111", #给了-b/--b,但是-b后面没接参数值时,默认的值
default="2222", #没给-b/--b时,默认值,结合nargs使用
nargs="?", #"?"表示消耗一个参数,没有命令行参数,会使用default
required=False, #该参数是否可选,为True表示必须
dest="bb", #parse_args()返回的属性名,默认是和参数变量一样:by or b
metavar="3333", #参数示例
choices=["1111","2222","3333"] #参数范围
)
#按位置的参数,add_arg.py -b 1 2,a=2
parser.add_argument("a",type=int,help="a help",default=2)
# parser.print_help()
args=parser.parse_args()
print(args)
|
[
"2454099127@qq.com"
] |
2454099127@qq.com
|
f04c44f3b9373ead505307d3d465a8862a926482
|
d541422113225e372b2d645fb1e8731b24d12d75
|
/hello_name.py
|
86c938b89f7a20f62469916e2fcd9bdfa3724004
|
[] |
no_license
|
vdpham326/Python_Coding_Exercises
|
8c1d5d4cd87f57b94600138649dc865dc9282be3
|
9efd64b24671df1c56ccfac50582d6fd71bc14fc
|
refs/heads/master
| 2022-11-10T09:56:36.252673
| 2020-06-26T09:36:47
| 2020-06-26T09:36:47
| 274,311,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
# Given a string name, e.g. "Bob", return a greeting of the form "Hello Bob!".
def hello_name(name):
return "Hello " + name + '!'
print(hello_name('Bob'))
print(hello_name('Alice'))
print(hello_name('X'))
|
[
"vdpham326@gmail.com"
] |
vdpham326@gmail.com
|
d3f4f28a07d725a745058165f9fa71a5072d5e6b
|
c8b095adbbea29211d699f4113a91bc89fa54493
|
/jury/models.py
|
d1b52c51d89d1786c9bd0a9c7582d0bfc7f37143
|
[
"MIT"
] |
permissive
|
maribedran/speakerfight
|
9e554e7ea557c5bc44aafb616e46f0878fe8e2d5
|
26e3e70e1d06ec0be004a9b1598c2b55f9823a7d
|
refs/heads/master
| 2021-07-18T04:13:18.974661
| 2017-10-19T17:46:36
| 2017-10-19T17:46:36
| 106,606,011
| 2
| 0
| null | 2017-10-11T20:29:57
| 2017-10-11T20:29:55
|
Python
|
UTF-8
|
Python
| false
| false
| 371
|
py
|
from django.utils.translation import ugettext as _
from django.db import models
from django.conf import settings
class Jury(models.Model):
# relations
users = models.ManyToManyField(to=settings.AUTH_USER_MODEL,
related_name='juries')
class Meta:
verbose_name = _('Jury')
verbose_name_plural = _('Juries')
|
[
"luanfonceca@gmail.com"
] |
luanfonceca@gmail.com
|
738f5c35424d9fc3c0c2579254d86e5fe343b5e4
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/6c38443a9c1f79ebf131d120be0f36ccfbe963c6-<main>-bug.py
|
191038ac35ab613322148994a91dc9df1e97b7c1
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(names={
'default': [],
'type': 'list',
}))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if (not HAS_BOTO):
module.fail_json(msg='boto required for this module')
try:
(region, ec2_url, aws_connect_params) = get_aws_connection_info(module)
if (not region):
module.fail_json(msg='region must be specified')
names = module.params['names']
elb_information = ElbInformation(module, names, region, **aws_connect_params)
ec2_facts_result = dict(changed=False, elbs=elb_information.list_elbs())
except BotoServerError as err:
module.fail_json(msg='{0}: {1}'.format(err.error_code, err.error_message), exception=traceback.format_exc())
module.exit_json(**ec2_facts_result)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
327cab1f61b7fc63a691fa1106537977cd19c625
|
e273ac58c34f6a0fba8360aef75f52a7ef03d5bb
|
/ansiblemetrics/playbook/num_unique_names.py
|
86e83089e546f11c6247dd51d9b902c4e8b68bfe
|
[
"Apache-2.0"
] |
permissive
|
valeriapontillo/radon-ansible-metrics
|
e25b6c848fd40eb4b5802f540a6fd1ad20a77ce4
|
8a8e27d9b54fc1578d00526c8663184a2e686cb2
|
refs/heads/master
| 2023-09-06T06:21:43.417616
| 2021-11-04T14:28:04
| 2021-11-04T14:28:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,760
|
py
|
import re
from collections import Counter
from ansiblemetrics.ansible_metric import AnsibleMetric
from ansiblemetrics.utils import key_value_list
class NumUniqueNames(AnsibleMetric):
""" This class measures the number of plays and tasks with unique a name.
"""
def count(self):
"""Return the number of plays and tasks with a unique name.
Example
-------
.. highlight:: python
.. code-block:: python
from ansiblemetrics.general.num_unique_names import NumUniqueNames
playbook = '''
---
- name: demo the logic # unique name
hosts: localhost
gather_facts: false
vars:
num1: 10
num3: 10
tasks:
- name: logic and comparison # duplicate
debug:
msg: "Can you read me?"
when: num1 >= num3 and num1 is even and num2 is not defined
- name: logic and comparison # duplicate
debug:
msg: "Can you read me again?"
when: num3 >= num1
'''
NumUniqueNames(playbook).count()
>> 1
Returns
-------
int
number of plays and tasks with a unique name
"""
names = []
for item in key_value_list(self.playbook): # [(key, value)]
if item[0] == 'name':
item = re.sub(r'\s+', '', str(item[1]))
names.append(item.strip())
frequencies = Counter(names).values() # counts the elements' frequency
unique = sum(1 for v in frequencies if v == 1)
return unique
|
[
"stefano.dallapalma0@gmail.com"
] |
stefano.dallapalma0@gmail.com
|
a45a07dd66cbbfa57b6a3b8f8445747b4300de28
|
1d9e681b204e6ec2d7a710ef45b7dec082239491
|
/venv/Lib/site-packages/od_python/models/inline_response_200_33.py
|
2f87d5fa2b2a17141b43a2b9c133a4e168221558
|
[] |
no_license
|
1chimaruGin/DotaAnalysis
|
0e0b85805cc83e4cc491d46f7eadc014e8d6b1f1
|
6a74cde2ee400fc0dc96305203d60c5e56d7ecff
|
refs/heads/master
| 2020-07-21T20:48:07.589295
| 2019-09-07T12:20:15
| 2019-09-07T12:20:15
| 206,972,180
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,902
|
py
|
# coding: utf-8
"""
OpenDota API
# Introduction The OpenDota API provides Dota 2 related data including advanced match data extracted from match replays. Please keep request rate to approximately 1/s. **Begining 4/22/2018, the OpenDota API will be limited to 50,000 free calls per month.** We'll be offering a Premium Tier with unlimited API calls and higher rate limits. Check out the [API page](https://www.opendota.com/api-keys) to learn more.
OpenAPI spec version: 17.6.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class InlineResponse20033(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'table_name': 'str',
'column_name': 'str',
'data_type': 'str'
}
attribute_map = {
'table_name': 'table_name',
'column_name': 'column_name',
'data_type': 'data_type'
}
def __init__(self, table_name=None, column_name=None, data_type=None):
"""
InlineResponse20033 - a model defined in Swagger
"""
self._table_name = None
self._column_name = None
self._data_type = None
if table_name is not None:
self.table_name = table_name
if column_name is not None:
self.column_name = column_name
if data_type is not None:
self.data_type = data_type
@property
def table_name(self):
"""
Gets the table_name of this InlineResponse20033.
table_name
:return: The table_name of this InlineResponse20033.
:rtype: str
"""
return self._table_name
@table_name.setter
def table_name(self, table_name):
"""
Sets the table_name of this InlineResponse20033.
table_name
:param table_name: The table_name of this InlineResponse20033.
:type: str
"""
self._table_name = table_name
@property
def column_name(self):
"""
Gets the column_name of this InlineResponse20033.
column_name
:return: The column_name of this InlineResponse20033.
:rtype: str
"""
return self._column_name
@column_name.setter
def column_name(self, column_name):
"""
Sets the column_name of this InlineResponse20033.
column_name
:param column_name: The column_name of this InlineResponse20033.
:type: str
"""
self._column_name = column_name
@property
def data_type(self):
"""
Gets the data_type of this InlineResponse20033.
data_type
:return: The data_type of this InlineResponse20033.
:rtype: str
"""
return self._data_type
@data_type.setter
def data_type(self, data_type):
"""
Sets the data_type of this InlineResponse20033.
data_type
:param data_type: The data_type of this InlineResponse20033.
:type: str
"""
self._data_type = data_type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, InlineResponse20033):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"kyitharhein18@gmail.com"
] |
kyitharhein18@gmail.com
|
7a203d32c16d289fef8f26566ec33d36956c6123
|
b11b16bf88d4d9be80986631ba161883cd9a28a4
|
/examples/rc/packages/gnu.py
|
1d096f2cec2adb7a006c51d7ab8534210bfb4da8
|
[
"Apache-2.0"
] |
permissive
|
simone-campagna/zapper
|
8ec11f68fdf6904cab3031789cd7553aa71f7869
|
fee2aaddcb13f789768a30761670c8c142d2b54d
|
refs/heads/master
| 2020-04-26T01:42:32.180173
| 2013-12-07T14:45:57
| 2013-12-07T14:45:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
from zapper.package_file import *
gnu = Suite('gnu', NULL_VERSION)
gnu.add_conflicting_tag('compiler-suite')
for version in '4.1.2', '4.5.2', '4.7.0':
version_name = version.replace('.', '_')
gnu_version = Suite(version_name, NULL_VERSION, suite=gnu)
gnu_version.add_conflicting_tag('gnu-suite')
libfoo = PackageFamily('libfoo', 'library')
libfoo_0_5 = Package(libfoo, '0.5', suite=gnu_version)
libfoo_0_5.var_set("FOO_HOME", "/gnu-{0}/foo-0.5".format(version))
libfoo_0_5_3 = Package(libfoo, '0.5.3', suite=gnu_version)
libfoo_0_5_3.var_set("FOO_HOME", "/gnu-{0}/foo-0.5.3".format(version))
libbar = PackageFamily('libbar', 'library')
libbar_1_0_2 = Package(libbar, '1.0.2', suite=gnu_version)
libbar_1_0_2.var_set("BAR_HOME", "/gnu-{0}/bar-1.0.2".format(version))
baz = PackageFamily('baz', 'tool')
baz_1_1 = Package(baz, '1.1', suite=gnu_version)
baz_1_1.var_set("BAZ_HOME", "/gnu-{0}/baz-1.1".format(version))
baz_1_1.requires('libfoo', VERSION > '0.5')
baz_1_1.requires(libbar_1_0_2)
hello_world = PackageFamily("hello_world", 'application')
hello_world_0_0_1_beta = Package(hello_world, '0.0.1-beta', suite=gnu_version)
hello_world_0_0_1_beta.var_set("HELLO_WORLD_HOME", "/gnu-{0}/hello_world-0.0.1-beta".format(version))
|
[
"simone.campagna@tiscali.it"
] |
simone.campagna@tiscali.it
|
80df44273e2f313dce7038b7329a31df34e2b601
|
7358fef64817a640f224f6a1b0ef22f7e4812d4b
|
/Materi/Materi 8 Fungsi/isGenap.py
|
926f99fc36b94ff6225596af70dc71181e8fc136
|
[] |
no_license
|
bimarakajati/Dasar-Pemrograman
|
8d4124701c61900c2cc41ec89be2b08c492c8541
|
af5e7abf122b8b151625504ac6739ab98996fb7f
|
refs/heads/master
| 2023-08-24T19:32:00.591820
| 2021-10-13T20:10:12
| 2021-10-13T20:10:12
| 302,336,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
def is_Genap(i):
"""
diberikan suatu bilangan i dengan tipe
integer untuk mengecek apakah bilangan
tersebut bilangan genap atau bukan
"""
print('keterangan didalam fungsi is_Genap')
return i%2 == 0
is_Genap(4)
|
[
"bimandugal@gmail.com"
] |
bimandugal@gmail.com
|
d27c1611bd3737bd6b4d8b6aecbf4c536cec23b3
|
015098bcb0e7b5595337c1b3e702942ed5b01272
|
/setup.py
|
6036b2725a5bdc962a01ccd158e2d7961c4446af
|
[] |
no_license
|
ASSNAKE/assnake-core-binning
|
f0e0676aa6bcdc4fc60fa98fcdb49d0f5fa960a7
|
7b7e539722e18226b1dd9cd23231a4fda4ba78c9
|
refs/heads/master
| 2022-12-25T18:39:22.303737
| 2020-10-09T09:53:07
| 2020-10-09T09:53:07
| 236,690,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
import os, shutil
setup(
name='assnake-core-binning',
version='0.0.1',
packages=find_packages(),
entry_points = {
'assnake.plugins': ['assnake-core-binning = assnake_core_binning.snake_module_setup:snake_module']
}
)
|
[
"fedorov.de@gmail.com"
] |
fedorov.de@gmail.com
|
b86275ae56f9d0014b5c3a45b2b8249d042a0397
|
c74b29b68211a51d7283d57b24d7cf83422a8ceb
|
/historischekranten2folia.py
|
49a1dadee9ba395be694155de271a6c80da1c684
|
[] |
no_license
|
proycon/nlpsandbox
|
63359e7cdd709dd81d66aed9bf1437f8ecf706a0
|
22e5f85852b7b2a658c6b94c3dedd425a5d6396f
|
refs/heads/master
| 2020-12-09T19:37:10.040962
| 2019-04-23T17:17:15
| 2019-04-23T17:17:15
| 2,347,265
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
#!/usr/bin/env python3
import csv
import sys
from bs4 import BeautifulSoup
from pynlpl.formats import folia
for filename in sys.argv[1:]:
with open(filename, 'r',encoding='utf-8') as f:
reader = csv.DictReader(f, delimiter='\t', quotechar='"')
for row in reader:
docid = "historischekranten_" + row['id'] + '_' + row['article_id'] + '_' + row['paper_id']
print("Processing " + docid,file=sys.stderr)
doc = folia.Document(id=docid)
for key in ('id', 'article_id', 'article_title', 'paper_id', 'paper_title', 'date','article', 'err_text_type', 'colophon', 'colophon_text'):
doc.metadata[key] = row[key]
doc.declare(folia.Paragraph, "https://raw.githubusercontent.com/proycon/folia/master/setdefinitions/nederlab-historischekranten-par.ttl")
body = doc.append(folia.Text(doc, id=docid+".text"))
div = body.append(folia.Division, id=docid+".div")
if row['header'].strip():
head = div.append(folia.Head, BeautifulSoup(row['header'].strip(),'lxml').text, id=docid+".text.head")
if row['subheader'].strip():
div.append(folia.Paragraph, BeautifulSoup(row['subheader'].strip(), 'lxml').text, id=docid+".text.subheader", cls="subheader")
for i, partext in enumerate(row['article_text'].split('\n\n')):
partext = BeautifulSoup(partext.replace("=\n","").replace("\n"," "), "lxml").text.strip()
if partext:
paragraph = div.append(folia.Paragraph, partext, id=docid+".text.p." + str(i+1), cls="normal")
doc.save(docid + ".folia.xml")
|
[
"proycon@anaproy.nl"
] |
proycon@anaproy.nl
|
b776e05c4aebbeae77ba412fb2ebf0fec81ef261
|
d3aef2ce0ee88c92516e64018f6d9f880911438c
|
/demo/urls.py
|
0137d9575b7afac8bf893f382ea0ac49ae67e9f8
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
Apkawa/django-material
|
648451d28a21270ddff937abf92931592ab9a56e
|
426e845ac27db0e1351bbb7f68377949581dfbd7
|
refs/heads/master
| 2021-01-15T17:51:49.304338
| 2016-02-26T10:34:15
| 2016-02-26T10:34:15
| 52,146,120
| 0
| 0
| null | 2016-02-20T09:29:25
| 2016-02-20T09:29:24
| null |
UTF-8
|
Python
| false
| false
| 6,434
|
py
|
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views import generic
from django.shortcuts import render
from formtools.wizard.views import SessionWizardView
from material.frontend import urls as frontend_urls
from . import forms, widget_forms
def index_view(request):
context = {
'login': forms.LoginForm(),
'registration': forms.RegistrationForm(),
'checkout': forms.CheckoutForm(),
'order': forms.OrderForm(),
'comment': forms.CommentForm(),
'bank': forms.BankForm(),
'hospital': forms.HospitalRegistrationForm(),
}
return render(request, 'index.html', context)
class Wizard(SessionWizardView):
form_list = [forms.WizardForm1, forms.WizardForm2]
def done(self, form_list, **kwargs):
return render(self.request, 'formtools/wizard/wizard_done.html', {
'form_data': [form.cleaned_data for form in form_list],
})
class WidgetFormView(generic.FormView):
template_name = 'widgets_demo.html'
def form_valid(self, form):
return self.render_to_response(
self.get_context_data(form=form))
urlpatterns = [
url(r'^$', index_view),
# demo
url(r'^demo/login/$', generic.FormView.as_view(
form_class=forms.LoginForm, success_url='/demo/login/', template_name="demo.html")),
url(r'^demo/registration/$', generic.FormView.as_view(
form_class=forms.RegistrationForm, success_url='/demo/registration/', template_name="demo.html")),
url(r'^demo/contact/$', generic.FormView.as_view(
form_class=forms.ContactForm, success_url='/demo/contact/', template_name="demo.html")),
url(r'^demo/order/$', generic.FormView.as_view(
form_class=forms.OrderForm, success_url='/demo/order/', template_name="demo.html")),
url(r'^demo/checkout/$', generic.FormView.as_view(
form_class=forms.CheckoutForm, success_url='/demo/checkout/', template_name="demo.html")),
url(r'^demo/comment/$', generic.FormView.as_view(
form_class=forms.CommentForm, success_url='/demo/comment/', template_name="demo.html")),
url(r'^demo/bank/$', generic.FormView.as_view(
form_class=forms.BankForm, success_url='/demo/bank/', template_name="demo.html")),
url(r'^demo/wizard/$', Wizard.as_view()),
url(r'^demo/hospital/$', generic.FormView.as_view(
form_class=forms.HospitalRegistrationForm, success_url='/demo/hospital/', template_name="demo.html")),
url(r'^foundation/basic/', generic.RedirectView.as_view(url='/?cache=no', permanent=False)),
# widget test
url(r'^demo/widget/boolean/$', WidgetFormView.as_view(form_class=widget_forms.BooleanFieldForm)),
url(r'^demo/widget/char/$', WidgetFormView.as_view(form_class=widget_forms.CharFieldForm)),
url(r'^demo/widget/choice/$', WidgetFormView.as_view(form_class=widget_forms.ChoiceFieldForm)),
url(r'^demo/widget/date/$', WidgetFormView.as_view(form_class=widget_forms.DateFieldForm)),
url(r'^demo/widget/datetime/$', WidgetFormView.as_view(form_class=widget_forms.DateTimeFieldForm)),
url(r'^demo/widget/decimal/$', WidgetFormView.as_view(form_class=widget_forms.DecimalFieldForm)),
url(r'^demo/widget/duration/$', WidgetFormView.as_view(form_class=widget_forms.DurationFieldForm)),
url(r'^demo/widget/email/$', WidgetFormView.as_view(form_class=widget_forms.EmailFieldForm)),
url(r'^demo/widget/file/$', WidgetFormView.as_view(form_class=widget_forms.FileFieldForm)),
url(r'^demo/widget/filepath/$', WidgetFormView.as_view(form_class=widget_forms.FilePathFieldForm)),
url(r'^demo/widget/float/$', WidgetFormView.as_view(form_class=widget_forms.FloatFieldForm)),
url(r'^demo/widget/image/$', WidgetFormView.as_view(form_class=widget_forms.ImageFieldForm)),
url(r'^demo/widget/integer/$', WidgetFormView.as_view(form_class=widget_forms.IntegerFieldForm)),
url(r'^demo/widget/ipaddress/$', WidgetFormView.as_view(form_class=widget_forms.GenericIPAddressFieldForm)),
url(r'^demo/widget/multiplechoice/$', WidgetFormView.as_view(form_class=widget_forms.MultipleChoiceFieldForm)),
url(r'^demo/widget/nullbolean/$', WidgetFormView.as_view(form_class=widget_forms.NullBooleanFieldForm)),
url(r'^demo/widget/regex/$', WidgetFormView.as_view(form_class=widget_forms.RegexFieldForm)),
url(r'^demo/widget/slug/$', WidgetFormView.as_view(form_class=widget_forms.SlugFieldForm)),
url(r'^demo/widget/time/$', WidgetFormView.as_view(form_class=widget_forms.TimeFieldForm)),
url(r'^demo/widget/url/$', WidgetFormView.as_view(form_class=widget_forms.URLFieldForm)),
url(r'^demo/widget/uuid/$', WidgetFormView.as_view(form_class=widget_forms.UUIDField)),
url(r'^demo/widget/combo/$', WidgetFormView.as_view(form_class=widget_forms.ComboFieldForm)),
url(r'^demo/widget/splitdatetime/$', WidgetFormView.as_view(form_class=widget_forms.SplitDateTimeFieldForm)),
url(r'^demo/widget/modelchoice/$', WidgetFormView.as_view(form_class=widget_forms.ModelChoiceFieldForm)),
url(r'^demo/widget/modelmultichoice/$', WidgetFormView.as_view(form_class=widget_forms.ModelMultipleChoiceFieldForm)),
url(r'^demo/widget/password/$', WidgetFormView.as_view(form_class=widget_forms.PasswordInputForm)),
url(r'^demo/widget/hidden/$', WidgetFormView.as_view(form_class=widget_forms.HiddenInputForm)),
url(r'^demo/widget/textarea/$', WidgetFormView.as_view(form_class=widget_forms.TextareaForm)),
url(r'^demo/widget/radioselect/$', WidgetFormView.as_view(form_class=widget_forms.RadioSelectForm)),
url(r'^demo/widget/checkboxmultiple/$', WidgetFormView.as_view(
form_class=widget_forms.CheckboxSelectMultipleForm)),
url(r'^demo/widget/fileinput/$', WidgetFormView.as_view(form_class=widget_forms.FileInputForm)),
url(r'^demo/widget/splithiddendatetime/$', WidgetFormView.as_view(
form_class=widget_forms.SplitHiddenDateTimeWidgetForm)),
url(r'^demo/widget/selectdate/$', WidgetFormView.as_view(form_class=widget_forms.SelectDateWidgetForm)),
# admin
url(r'^admin/', include(admin.site.urls)),
# frontend
url(r'^frontend/$', generic.RedirectView.as_view(url='/frontend/accounting/', permanent=False), name="index"),
url(r'', include(frontend_urls)),
]
if 'zinnia' in settings.INSTALLED_APPS:
urlpatterns += [url(r'^weblog/', include('zinnia.urls', namespace='zinnia'))]
|
[
"kmmbvnr@gmail.com"
] |
kmmbvnr@gmail.com
|
8f6074736677b40ad3abc447c437659f71c7eb0f
|
fbf4f26a2b97d4fe35aa7b66e9cfed4cd0224e89
|
/chlamdb/eutils/sequence_exact_match.py
|
83e231040a3d492e29ca337400d07a339d4fe140
|
[] |
no_license
|
metagenlab/chlamdb
|
a100ab93407e15c33684b8d7175873adc6720d0b
|
f1829cf19ac1ded032d65689fbbff2d37489f739
|
refs/heads/master
| 2023-03-07T05:30:02.793914
| 2023-03-02T10:30:57
| 2023-03-02T10:30:57
| 179,291,344
| 6
| 1
| null | 2022-11-01T07:01:54
| 2019-04-03T13:02:40
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,816
|
py
|
#!/usr/bin/env python
def process_tag(tag):
return tag.split('}')[-1]
def get_UPI(seq):
for element in seq:
if element.tag == '{http://model.picr.ebi.ac.uk}UPI':
return element.text
def get_hit_attributes(hit):
accession = ''
version = ''
taxon_id = ''
db_name = ''
for element in hit:
if element.tag == '{http://model.picr.ebi.ac.uk}accession':
accession = element.text
if element.tag == '{http://model.picr.ebi.ac.uk}accessionVersion':
version = element.text
if element.tag == '{http://model.picr.ebi.ac.uk}databaseName':
db_name = element.text
if element.tag == '{http://model.picr.ebi.ac.uk}taxonId':
taxon_id = element.text
return {"%s.%s" % (accession, version) : [db_name, taxon_id]}
def accession2exact_matches(sequence, target_databases):
'''
Givent an input AA sequence and target(s) database name(s), return:
- the uniparc accession of the sequence (if exists)
- a dictionary with accession(s) of identical sequence(s) and their taxon ID and source database.
(Accession.version keys)
Return None if no identical squence was found.
:param sequence: input AA sequence
:param target_databases: Input database name (see http://www.ebi.ac.uk/Tools/picr/)
'''
import urllib2
import xml.etree.cElementTree as ElementTree
database_string = '&database=' .join(target_databases)
link = "http://www.ebi.ac.uk/Tools/picr/rest/getUPIForSequence?sequence=%s&database=%s&includeattributes=true" % (sequence,
database_string)
print link
req = urllib2.Request(link)
try:
page = urllib2.urlopen(req)
tree = ElementTree.parse(page)
except:
import time
print 'connexion problem, trying again...'
time.sleep(60)
db2seq = {}
root = tree.getroot()
seq = root.find('{http://www.ebi.ac.uk/picr/AccessionMappingService}getUPIForSequenceReturn')
if seq is None:
return None
UPI = get_UPI(seq)
identical_seqs = seq.findall('{http://model.picr.ebi.ac.uk}identicalCrossReferences')
for seq in identical_seqs:
db2seq.update(get_hit_attributes(seq))
return UPI, db2seq
def fasta_corresp(fasta_file, target_database, n_keep=1):
from Bio import SeqIO
import sys
print 'keep', n_keep
with open(fasta_file, 'r') as f:
records = SeqIO.parse(f, 'fasta')
for record in records:
picr = accession2exact_matches(record.seq,
target_database)
if picr is None:
sys.stdout.write('%s\t%s\t%s\t%s\n' % (record.name, 'None', 'None', 'None'))
else:
uniparc_accession, matches = picr
database2count = {}
for accession in matches:
if matches[accession][0] not in database2count:
database2count[matches[accession][0]] = 1
else:
if database2count[matches[accession][0]] < n_keep:
database2count[matches[accession][0]] += 1
else:
break
sys.stdout.write('%s\t%s\t%s\t%s\n' % (record.name,
uniparc_accession,
accession,
matches[accession][1]))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-p", '--protein_seq', type=str, help="Protein sequence")
parser.add_argument("-d", '--database', type=str, help="Target database(s): 'REFSEQ', 'TREMBL', ...", nargs='+', default= ['TREMBL', 'SWISSPROT'])
parser.add_argument("-f", '--fasta_file', type=str, help="Fasta file")
parser.add_argument("-k", '--keep', type=int, help="Number of hit(s) to keep (default: 1)", default=1)
args = parser.parse_args()
if args.protein_seq and args.fasta_file:
raise(IOError('Input either a fasta file or a protein seqience, not both!'))
elif args.protein_seq:
picr = accession2exact_matches(args.protein_seq,
args.database)
if picr is not None:
uniparc_accession, matches = picr
print uniparc_accession, matches
else:
if len(args.database) > 1:
raise(IOError('Fasta file match is only possible for a single database!'))
else:
fasta_corresp(args.fasta_file, args.database, n_keep=args.keep)
|
[
"trestan.pillonel@gmail.com"
] |
trestan.pillonel@gmail.com
|
077d1f303b1d8e4453ccf710e00fdc43e75bd68c
|
1f38f3cd0ba6d42dd73f273e3dc9df4ebdc0dc9d
|
/BuzzScoreSite/manage.py
|
5463761455fffd46cd530a6382b889c5bc5c5ee1
|
[] |
no_license
|
MenshovSergey/BuzzScore
|
2a5f8cfd9b46a85665455c2a5cfa298c9a3a698b
|
348d1b2feb76a892e489016682f16e7a70a504a9
|
refs/heads/master
| 2021-01-16T19:49:40.996213
| 2013-10-12T09:43:46
| 2013-10-12T09:43:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BuzzScoreSite.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"snailandmail@gmail.com"
] |
snailandmail@gmail.com
|
9fdf1fcd02e82e69e482cbdf80c02a24fcb02aef
|
01200401ef046a917df1205268fa92f23cfd28d8
|
/tests/test_histogram.py
|
f59279d6ed0771159428df32ce8d3a52d8e06d3d
|
[
"BSD-3-Clause"
] |
permissive
|
murodin/pyclesperanto_prototype
|
5fa8922dcbbc98aa69e1aab779c62a326a6937d7
|
4687e3085a5f8bc12e798bf25acd295ee249fb5e
|
refs/heads/master
| 2023-01-20T14:34:47.858014
| 2020-11-30T11:56:47
| 2020-11-30T11:56:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
import pyclesperanto_prototype as cle
import numpy as np
def test_histogram():
test = cle.push_zyx(np.asarray([
[1, 2, 4, 4, 2, 3],
[3, 3, 4, 4, 5, 5]
]))
ref_histogram = [1, 2, 3, 4, 2]
my_histogram = cle.histogram(test, num_bins = 5)
print(my_histogram)
a = cle.pull(my_histogram)
assert (np.allclose(a, ref_histogram))
def test_histogram_3d():
test = cle.push_zyx(np.asarray([
[
[1, 2, 4, 4, 2, 3]
], [
[3, 3, 4, 4, 5, 5]
]
]))
ref_histogram = [1, 2, 3, 4, 2]
my_histogram = cle.histogram(test, num_bins = 5)
print(my_histogram)
a = cle.pull(my_histogram)
assert (np.allclose(a, ref_histogram))
def test_histogram_3d_2():
test = cle.push_zyx(np.asarray([
[
[1, 2, 4],
[4, 2, 3]
], [
[3, 3, 4],
[4, 5, 5]
]
]))
ref_histogram = [1, 2, 3, 4, 2]
my_histogram = cle.histogram(test, num_bins = 5)
print(my_histogram)
a = cle.pull(my_histogram)
assert (np.allclose(a, ref_histogram))
def test_histogram_against_scikit_image():
from skimage.data import camera
image = camera()
from skimage import exposure
hist, bc = exposure.histogram(image.ravel(), 256, source_range='image')
print(str(hist))
gpu_image = cle.push(image)
gpu_hist = cle.histogram(gpu_image, num_bins=256)
print(str(cle.pull_zyx(gpu_hist)))
assert (np.allclose(hist, cle.pull_zyx(gpu_hist)))
|
[
"haesleinhuepf@users.noreply.github.com"
] |
haesleinhuepf@users.noreply.github.com
|
8e6a40aabb5d98acecdf713ba9a997923ae08b27
|
7bf617f77a55d8ec23fa8156c1380b563a5ac7f6
|
/CG/SciPy/mm_color_cluster.py
|
c069d4d292db408ca47cdbeff36617ac590abb43
|
[] |
no_license
|
anyatran/school
|
c06da0e08b148e3d93aec0e76329579bddaa85d5
|
24bcfd75f4a6fe9595d790808f8fca4f9bf6c7ec
|
refs/heads/master
| 2021-06-17T10:45:47.648361
| 2017-05-26T12:57:23
| 2017-05-26T12:57:23
| 92,509,148
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,066
|
py
|
# Auto-clustering, suggested by Matt Terry
from skimage import io, color, exposure
from sklearn import cluster, preprocessing
import numpy as np
import matplotlib.pyplot as plt
url = 'http://blogs.mathworks.com/images/steve/2010/mms.jpg'
import os
if not os.path.exists('mm.png'):
print "Downloading M&M's..."
import urllib2
u = urllib2.urlopen(url)
f = open('mm.png', 'w')
f.write(u.read())
f.close()
print "Image I/O..."
mm = io.imread('mm.png')
mm_lab = color.rgb2lab(mm)
ab = mm_lab[..., 1:]
print "Mini-batch K-means..."
X = ab.reshape(-1, 2)
kmeans = cluster.MiniBatchKMeans(n_clusters=6)
y = kmeans.fit(X).labels_
labels = y.reshape(mm.shape[:2])
N = labels.max()
def no_ticks(ax):
ax.set_xticks([])
ax.set_yticks([])
# Display all clusters
for i in range(N):
mask = (labels == i)
mm_cluster = mm_lab.copy()
mm_cluster[..., 1:][~mask] = 0
ax = plt.subplot2grid((2, N), (1, i))
ax.imshow(color.lab2rgb(mm_cluster))
no_ticks(ax)
ax = plt.subplot2grid((2, N), (0, 0), colspan=2)
ax.imshow(mm)
no_ticks(ax)
# Display histogram
L, a, b = mm_lab.T
left, right = -100, 100
bins = np.arange(left, right)
H, x_edges, y_edges = np.histogram2d(a.flatten(), b.flatten(), bins,
normed=True)
ax = plt.subplot2grid((2, N), (0, 2))
H_bright = exposure.rescale_intensity(H, in_range=(0, 5e-4))
ax.imshow(H_bright,
extent=[left, right, right, left], cmap=plt.cm.gray)
ax.set_title('Histogram')
ax.set_xlabel('b')
ax.set_ylabel('a')
# Voronoi diagram
mid_bins = bins[:-1] + 0.5
L = len(mid_bins)
yy, xx = np.meshgrid(mid_bins, mid_bins)
Z = kmeans.predict(np.column_stack([xx.ravel(), yy.ravel()]))
Z = Z.reshape((L, L))
ax = plt.subplot2grid((2, N), (0, 3))
ax.imshow(Z, interpolation='nearest',
extent=[left, right, right, left],
cmap=plt.cm.Spectral, alpha=0.8)
ax.imshow(H_bright, alpha=0.2,
extent=[left, right, right, left],
cmap=plt.cm.gray)
ax.set_title('Clustered histogram')
no_ticks(ax)
plt.show()
|
[
"panhtran249@gmail.com"
] |
panhtran249@gmail.com
|
e2fa124d83cd3c760b0eff2d53eef09fec49c3aa
|
7e266469a84e06e3551a7ba0dca25e894f2f3111
|
/Bloomy_Core_CreateQualityInspection_TestCase/test_createqualityinspection_testcase.py
|
7f7b7aeeb1520353cfd2c98e5bd56a96ac51aa33
|
[] |
no_license
|
Bloomstack-Test-Automation/Bloomstack-Test-Automation
|
43862b6761951effee5f17d7428f5be0c34b4499
|
2450df2018715cf6f0ec080ca1dc0751a230d969
|
refs/heads/main
| 2023-06-06T10:52:57.695175
| 2021-06-30T11:33:30
| 2021-06-30T11:33:30
| 368,438,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,647
|
py
|
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from src.testproject.classes import DriverStepSettings, StepSettings
from src.testproject.decorator import report_assertion_errors
from src.testproject.enums import SleepTimingType
from src.testproject.sdk.drivers import webdriver
import pytest
"""
This pytest test was automatically generated by TestProject
Project: Bloomy_Core
Package: TestProject.Generated.Tests.BloomyCore
Test: CreateQualityInspection_TestCase
Generated by: Rahul Prakash (rahulprakash0862@gmail.com)
Generated on 05/26/2021, 10:11:04
"""
@pytest.fixture()
def driver():
driver = webdriver.Chrome(token="5o-UXmLZug6gaKmDcoeI6tT7NM19XyG1qnolFybLul4",
project_name="Bloomy_Core",
job_name="CreateQualityInspection_TestCase")
step_settings = StepSettings(timeout=15000,
sleep_time=500,
sleep_timing_type=SleepTimingType.Before)
with DriverStepSettings(driver, step_settings):
yield driver
driver.quit()
@report_assertion_errors
def test_main(driver):
"""Generated By: Rahul."""
# Test Parameters
# Auto generated application URL parameter
ApplicationURL = "https://epitest-demo.bloomstack.io/"
# 1. Navigate to '{ApplicationURL}'
# Navigates the specified URL (Auto-generated)
driver.get(f'{ApplicationURL}')
# 2. Is 'Login' visible?
login = driver.find_element(By.XPATH,
"//a[. = 'Login']")
assert login.is_displayed()
# 3. Click 'Login'
login = driver.find_element(By.XPATH,
"//a[. = 'Login']")
login.click()
# 4. Click 'Email Address'
email_address = driver.find_element(By.CSS_SELECTOR,
"#login_email")
email_address.click()
# 5. Type 'testautomationuser@bloomstack.com' in 'Email Address'
email_address = driver.find_element(By.CSS_SELECTOR,
"#login_email")
email_address.send_keys("testautomationuser@bloomstack.com")
# 6. Click 'Password'
password = driver.find_element(By.CSS_SELECTOR,
"#login_password")
password.click()
# 7. Type 'epi@123' in 'Password'
password = driver.find_element(By.CSS_SELECTOR,
"#login_password")
password.send_keys("epi@123")
# 8. Click 'Login1'
login1 = driver.find_element(By.XPATH,
"//button[. = '\n\t\t\t\tLogin']")
login1.click()
# 9. Click 'Search or type a command (Ctrl + G)'
search_or_type_a_command_ctrl_g_ = driver.find_element(By.CSS_SELECTOR,
"#navbar-search")
search_or_type_a_command_ctrl_g_.click()
# 10. Type 'quality ins' in 'Search or type a command (Ctrl + G)'
search_or_type_a_command_ctrl_g_ = driver.find_element(By.CSS_SELECTOR,
"#navbar-search")
search_or_type_a_command_ctrl_g_.send_keys("quality ins")
# 11. Click 'Quality Inspection List'
quality_inspection_list = driver.find_element(By.XPATH,
"//span[. = 'Quality Inspection List']")
quality_inspection_list.click()
# 12. Does 'Quality Inspection1' contain 'Quality Inspection'?
quality_inspection1 = driver.find_element(By.XPATH,
"//div[. = 'Quality Inspection']")
step_output = quality_inspection1.text
assert step_output and ("Quality Inspection" in step_output)
time.sleep(2)
# 13. Click 'New6'
new6 = driver.find_element(By.XPATH,
"//button[. = 'New']")
new6.click()
# 14. Is 'New Quality Inspection4' visible?
new_quality_inspection4 = driver.find_element(By.XPATH,
"//h4[. = 'New Quality Inspection']")
assert new_quality_inspection4.is_displayed()
# 15. Click 'SELECT19'
select19 = driver.find_element(By.XPATH,
"//div[3]/div/div[2]//select")
select19.click()
# 16. Select the 'Incoming' option in 'SELECT19'
select19 = driver.find_element(By.XPATH,
"//div[3]/div/div[2]//select")
Select(select19).select_by_value("Incoming")
# 17. Click 'SELECT19'
select19 = driver.find_element(By.XPATH,
"//div[3]/div/div[2]//select")
select19.click()
# 18. Click 'INPUT84'
input84 = driver.find_element(By.XPATH,
"//div[4]/div/div[2]//input")
input84.click()
# 19. Click 'P15'
p15 = driver.find_element(By.XPATH,
"//div/div/div/ul/li[1]/a/p")
p15.click()
# 20. Click 'INPUT12'
input12 = driver.find_element(By.XPATH,
"//div[5]/div/div[2]//input")
input12.click()
# 21. Type '3.00' in 'INPUT12'
input12 = driver.find_element(By.XPATH,
"//div[5]/div/div[2]//input")
input12.send_keys("3.00")
# 22. Click 'SELECT2'
select2 = driver.find_element(By.XPATH,
"//div[7]//select")
select2.click()
# 23. Select the 'Internal' option in 'SELECT2'
select2 = driver.find_element(By.XPATH,
"//div[7]//select")
Select(select2).select_by_value("Internal")
# 24. Click 'SELECT2'
select2 = driver.find_element(By.XPATH,
"//div[7]//select")
select2.click()
# 25. Click 'Save12'
save12 = driver.find_element(By.XPATH,
"//button[. = 'Save']")
save12.click()
# 26. Click 'Submit7'
submit7 = driver.find_element(By.XPATH,
"//button[. = 'Submit']")
submit7.click()
# 27. Click 'Settings1'
settings1 = driver.find_element(By.XPATH,
"//span[. = ' Settings']")
settings1.click()
# 28. Click 'Logout'
logout = driver.find_element(By.XPATH,
"//a[. = ' Logout']")
logout.click()
# 29. Does 'Login' contain 'Login'?
login = driver.find_element(By.XPATH,
"//a[. = 'Login']")
step_output = login.text
assert step_output and ("Login" in step_output)
|
[
"noreply@github.com"
] |
Bloomstack-Test-Automation.noreply@github.com
|
b9ad5d3f538a6de721c9603acde868d0da3788d0
|
bc167f434158921bcf2c678155c5cdfec1c9b0c9
|
/PI_code/simulator/behaviourGeneration/firstGenScripts_preyHunter/behav388.py
|
2b4f02b7722c2854864b1dddca35aacd975e7d93
|
[] |
no_license
|
s0217391/DifferentProjects
|
6450efc89c64ecd21b86c705737e89e5c69433a6
|
7f4da153660817b6cbf72d2e823aa29c0c2f95a9
|
refs/heads/master
| 2021-01-17T02:58:46.219240
| 2015-05-26T22:45:46
| 2015-05-26T22:45:46
| 34,995,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
#!/usr/bin/python
import sys
def compute(prey):
temp0 = prey[0] + prey[1]
if temp0 > prey[0]:
temp1 = min(temp0, temp0)
else:
if prey[1] != 0:
temp1 = prey[1] % prey[1]
else:
temp1 = prey[1]
temp0 = max(prey[0], temp0)
temp0 = temp0 - temp1
if prey[1] != 0:
temp2 = prey[0] / prey[1]
else:
temp2 = prey[1]
temp0 = prey[0] - prey[0]
temp1 = min(temp1, temp0)
temp0 = min(prey[1], prey[0])
temp3 = min(prey[0], temp0)
temp0 = min(prey[0], temp2)
temp0 = prey[1] + temp1
if prey[0] > temp1:
temp2 = prey[0] + temp0
else:
temp2 = -1 * prey[1]
if temp2 != 0:
temp2 = prey[0] / temp2
else:
temp2 = temp2
return [temp1, temp1]
|
[
"i7674211@bournemouth.ac.uk"
] |
i7674211@bournemouth.ac.uk
|
721c16210b081c6ce406706a8bf7b814db33d02e
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayDataAiserviceHellobikeSiteQueryModel.py
|
dc06ebbcb58713b563f9fd0994a388c9ab3da002
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 896
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayDataAiserviceHellobikeSiteQueryModel(object):
def __init__(self):
self._plan_id = None
@property
def plan_id(self):
return self._plan_id
@plan_id.setter
def plan_id(self, value):
self._plan_id = value
def to_alipay_dict(self):
params = dict()
if self.plan_id:
if hasattr(self.plan_id, 'to_alipay_dict'):
params['plan_id'] = self.plan_id.to_alipay_dict()
else:
params['plan_id'] = self.plan_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayDataAiserviceHellobikeSiteQueryModel()
if 'plan_id' in d:
o.plan_id = d['plan_id']
return o
|
[
"jiandong.jd@antfin.com"
] |
jiandong.jd@antfin.com
|
92aaa9f2c0851bde5ed7572fb8b8c62845c4c814
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/R4D59C9CQbJvqWaKd_6.py
|
ed52bb6e52badf15ab27956a07eb2844ef6a368d
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
"""
A baseball player's batting average is calculated by the following formula:
BA = (number of hits) / (number of official at-bats)
Batting averages are always expressed rounded to the nearest thousandth with
no leading zero. The top 3 MLB batting averages of all-time are:
1. Ty Cobb .366
2. Rogers Hornsby .358
3. Shoeless Joe Jackson .356
The given list represents a season of games. Each list item indicates a
player's `[hits, official at bats]` per game. Return a string with the
player's seasonal batting average rounded to the nearest thousandth.
### Examples
batting_avg([[0, 0], [1, 3], [2, 2], [0, 4], [1, 5]]) ➞ ".286"
batting_avg([[2, 5], [2, 3], [0, 3], [1, 5], [2, 4]]) ➞ ".350"
batting_avg([[2, 3], [1, 5], [2, 4], [1, 5], [0, 5]]) ➞ ".273"
### Notes
* The number of hits will not exceed the number of official at-bats.
* The list includes official at-bats only. No other plate-appearances (walks, hit-by-pitches, sacrifices, etc.) are included in the list.
* HINT: Think in terms of total hits and total at-bats.
"""
def batting_avg(lst):
    """Return the seasonal batting average as a string rounded to 3 places.

    Each item of *lst* is a ``[hits, at_bats]`` pair for one game.  The
    average is (total hits) / (total at-bats), formatted to exactly three
    decimal places with the conventional leading zero dropped (".286").

    Fix: the old ``str(round(avg, 3))[1:]`` approach stripped the first
    character unconditionally, so a perfect 1.0 average came out as ".000".
    Formatting with ``:.3f`` always yields three decimals, and the leading
    "0" is stripped only when the average is below 1.

    Raises ZeroDivisionError when there are no official at-bats (same as
    the original behaviour).
    """
    total_hits = sum(game[0] for game in lst)
    total_at_bats = sum(game[1] for game in lst)
    formatted = f"{total_hits / total_at_bats:.3f}"
    # "0.286" -> ".286", but keep "1.000" intact.
    return formatted[1:] if formatted.startswith("0") else formatted
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
861619f37d3f45ca55feb13d85f1c0ec4990fcef
|
52a3beeb07ad326115084a47a9e698efbaec054b
|
/horizon/.venv/bin/pyscss
|
80baac9d659e74f232c739e2139c1b9408819faa
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/sample_scripts
|
3dade0710ecdc8f9251dc60164747830f8de6877
|
f9edce63c0a4d636f672702153662bd77bfd400d
|
refs/heads/master
| 2022-11-17T19:19:34.210886
| 2018-06-11T04:14:27
| 2018-06-11T04:14:27
| 282,088,840
| 0
| 0
| null | 2020-07-24T00:57:31
| 2020-07-24T00:57:31
| null |
UTF-8
|
Python
| false
| false
| 319
|
#!/home/horizon/horizon/.venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pyScss==1.3.5','console_scripts','pyscss'
# Auto-generated setuptools console-script shim: resolves the 'pyscss'
# console_scripts entry point of the pinned pyScss distribution, runs it,
# and exits with whatever status it returns.
__requires__ = 'pyScss==1.3.5'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('pyScss==1.3.5', 'console_scripts', 'pyscss')()
    )
|
[
"Suhaib.Chishti@exponential.com"
] |
Suhaib.Chishti@exponential.com
|
|
df91c9a9b9937a18b50fc7a7be16c73b905500d8
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_true.py
|
ada6ea76ff35da675f31a995503e77327d2954a1
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
#calss header
class _TRUE():
def __init__(self,):
self.name = "TRUE"
self.definitions = [u'to not be in the correct position or to be slightly bent out of the correct shape: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
7d9b8ff5c86e7a469c4e54991a98f844dbd57066
|
e4cab6feadcee618f092f23020a157c8ded42ffc
|
/Basics/Matrix/homework.py
|
524046cb9ab52c37bb822c2aedc925bed9786d01
|
[] |
no_license
|
Larionov0/Group3_Lessons
|
7c314898a70c61aa445db37383076e211692b56b
|
628bc7efe6817d107cb39d3017cb7cee44b86ba4
|
refs/heads/master
| 2023-08-22T07:14:44.595963
| 2021-10-17T11:48:06
| 2021-10-17T11:48:06
| 339,141,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
# 4x4 demo matrix used for the indexing exercise.
matrix = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16]
]

# Interactive loop: repeatedly read a row/column pair and echo the cell.
# NOTE(review): the prompts say "from 0 to 4" but valid indices are 0..3;
# an index of 4 raises IndexError and a negative index wraps around.
while True:
    i = int(input('enter number from 0 to 4: i = '))
    j = int(input('enter number from 0 to 4: j = '))
    print(matrix[i][j])
|
[
"larionov1001@gmail.com"
] |
larionov1001@gmail.com
|
4aa619c5f0da271cf82f1c1c1edb77fb610b3181
|
4b17d98ad2a3ef018cfb33f7f1d645ede72eb808
|
/models.py
|
317ff17f52dc2e3d03d3556e07facbc26924d19b
|
[
"MIT"
] |
permissive
|
poshan0126/Facial-Keypoint-Detection
|
932ce0b85d7b1b0b893376537a5cf7c148704ee7
|
fc52574b4c006e3afd86f209369e1a3e704a65fa
|
refs/heads/master
| 2020-09-02T19:43:20.650541
| 2019-11-03T11:53:30
| 2019-11-03T11:53:30
| 219,292,492
| 0
| 0
|
MIT
| 2020-01-19T09:34:06
| 2019-11-03T11:47:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,030
|
py
|
## Output width of the network's final linear layer.
num_output = 136  # 2 coordinates (x, y) for each of the 68 facial keypoints
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
class Net(nn.Module):
    """CNN mapping a 1-channel (grayscale) square image to 136 values:
    one (x, y) pair for each of 68 facial keypoints.

    NOTE(review): fc1 expects a flattened 28x28x128 feature map, which
    corresponds to a 224x224 input after the three 2x2 max-pools
    (224 -> 112 -> 56 -> 28) -- confirm against the data pipeline.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Five 3x3 same-padding conv layers; channel widths 32->32->64->64->128.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=(3,3), stride=1, padding=1)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=(3,3), stride=1, padding=1)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=(3,3), stride=1, padding=1)
        self.conv4 = nn.Conv2d(64, 64, kernel_size=(3,3), stride=1, padding=1)
        self.conv5 = nn.Conv2d(64, 128, kernel_size=(3,3), stride=1, padding=1)
        # One shared 2x2 stride-2 pooling layer, applied three times in forward().
        self.pool = nn.MaxPool2d(kernel_size=(2,2), stride=2)
        # Final conv feature map is 28x28 with depth 128 (see class note).
        self.fc1 = nn.Linear(28*28*128, num_output)
        #self.fc2 = nn.Linear(10000, num_output)
        # Dropout on the fully connected head to reduce overfitting.
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        # conv1 -> pool -> conv2 -> conv3 -> pool -> conv4 -> conv5 -> pool
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = self.pool(x)
        x = F.relu(self.conv4(x))
        x = F.relu(self.conv5(x))
        x = self.pool(x)
        # Flatten the conv features for the fully connected head;
        # x.view(-1, 28*28*128) is equivalent to x.view(x.size(0), -1) here.
        x = x.view(-1, 28*28*128)
        # Single hidden linear layer produces the 136 keypoint coordinates.
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        #x = self.fc2(x)
        return x
|
[
"aakrist666@gmail.com"
] |
aakrist666@gmail.com
|
c11ee44633ac855b635d80da88d711d373e23c60
|
59886a1143cc4043b19e398fae1fddb5742b4b55
|
/src/main/python/rlbot/agents/base_java_agent.py
|
558144395485290f687591f9f3c43416c417fb28
|
[
"MIT"
] |
permissive
|
RLBot/RLBot
|
a6c4f502403f02822b3e4078b27583226584432e
|
c2f7c9a07911691b112b5338008e2ec932e7aee0
|
refs/heads/master
| 2023-08-16T06:04:35.384448
| 2023-07-01T11:21:26
| 2023-07-01T11:21:26
| 80,671,678
| 482
| 138
|
MIT
| 2023-07-01T11:21:28
| 2017-02-01T22:36:52
|
Python
|
UTF-8
|
Python
| false
| false
| 542
|
py
|
from rlbot.agents.base_independent_agent import BaseIndependentAgent
class BaseJavaAgent(BaseIndependentAgent):
def __init__(self, name, team, index):
super().__init__(name, team, index)
raise NotImplementedError(
f"Cannot run {name} because BaseJavaAgent is deprecated! "
f"Please migrate to ExecutableWithSocketAgent! For more details see "
f"https://github.com/RLBot/RLBotJavaExample/wiki/Py4j-Deprecation")
def run_independently(self, terminate_request_event):
pass
|
[
"noreply@github.com"
] |
RLBot.noreply@github.com
|
795c71f40d7e4b7b4ba2a1d84f255eb7b5f64b2d
|
faa965776fb422437332440a169d9980437e4fce
|
/text/cleaners.py
|
b2c8c9d1e2e3a65a3eb3e110beec2fb2eb299138
|
[] |
no_license
|
IMLHF/lpc-tracotron
|
752ac707568098c870bf5db107dc9d184a7f853d
|
5994f84bf828afe11da845fb5153080f673a653e
|
refs/heads/master
| 2020-07-02T16:50:18.803338
| 2019-09-03T03:20:41
| 2019-09-03T03:20:41
| 201,594,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')

# List of (regular expression, replacement) pairs for abbreviations:
# each pattern matches the abbreviation as a whole word followed by a
# period, case-insensitively (e.g. "Dr." -> "doctor").
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
  ('mrs', 'misess'),
  ('mr', 'mister'),
  ('dr', 'doctor'),
  ('st', 'saint'),
  ('co', 'company'),
  ('jr', 'junior'),
  ('maj', 'major'),
  ('gen', 'general'),
  ('drs', 'doctors'),
  ('rev', 'reverend'),
  ('lt', 'lieutenant'),
  ('hon', 'honorable'),
  ('sgt', 'sergeant'),
  ('capt', 'captain'),
  ('esq', 'esquire'),
  ('ltd', 'limited'),
  ('col', 'colonel'),
  ('ft', 'fort'),
]]
def expand_abbreviations(text):
  """Expand each known abbreviation in *text* ("Dr." -> "doctor")."""
  for pattern, full_form in _abbreviations:
    text = pattern.sub(full_form, text)
  return text
def expand_numbers(text):
  # Thin wrapper over numbers.normalize_numbers (spells out digits, etc.).
  return normalize_numbers(text)
def lowercase(text):
  # Unicode-aware lowercasing via str.lower.
  return text.lower()
def collapse_whitespace(text):
  """Replace every run of whitespace with a single space."""
  return _whitespace_re.sub(' ', text)
def convert_to_ascii(text):
  # Transliterate non-ASCII characters to their closest ASCII equivalents.
  return unidecode(text)
def basic_cleaners(text):
  '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
  # NOTE: lowercasing is intentionally disabled in this fork; only
  # whitespace collapsing remains active.
  # text = lowercase(text)
  return collapse_whitespace(text)
def transliteration_cleaners(text):
  '''Pipeline for non-English text that transliterates to ASCII.'''
  # NOTE: transliteration and lowercasing are disabled in this fork; only
  # whitespace collapsing remains active.
  #text = convert_to_ascii(text)
  # text = lowercase(text)
  return collapse_whitespace(text)
def english_cleaners(text):
  '''Pipeline for English text, including number and abbreviation expansion.'''
  # NOTE: every step except whitespace collapsing is disabled in this fork.
  #text = convert_to_ascii(text)
  # text = lowercase(text)
  #text = expand_numbers(text)
  #text = expand_abbreviations(text)
  return collapse_whitespace(text)
|
[
"red_wind@foxmail.com"
] |
red_wind@foxmail.com
|
fec187d97af48673db9a3cd1cb57dbaa81a53c2d
|
28a462a28f443c285ca5efec181ebe36b147c167
|
/tests/compile/basic/es2020/DeclarativeEnvironmentRecord.CreateImmutableBinding.spec
|
32137e54076b90b69a23ec493062ece2b21f0272
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kaist-plrg/jstar
|
63e71f9156860dc21cccc33a9f6c638dfee448ea
|
1282919127ea18a7e40c7a55e63a1ddaaf7d9db4
|
refs/heads/main
| 2022-07-22T08:12:34.947712
| 2022-02-27T04:19:33
| 2022-02-27T11:06:14
| 384,045,526
| 6
| 4
|
NOASSERTION
| 2022-02-27T11:05:26
| 2021-07-08T07:53:21
|
Python
|
UTF-8
|
Python
| false
| false
| 398
|
spec
|
1. Let _envRec_ be the declarative Environment Record for which the method was invoked.
1. Assert: _envRec_ does not already have a binding for _N_.
1. Create an immutable binding in _envRec_ for _N_ and record that it is uninitialized. If _S_ is *true*, record that the newly created binding is a strict binding.
1. Return NormalCompletion(~empty~).
|
[
"h2oche22@gmail.com"
] |
h2oche22@gmail.com
|
922afb74fdeb65bf3a731c7e2f814a52234e3f75
|
8fd07ea363ba4263bafe25d213c72cc9a93e2b3e
|
/devops/Day1_fork_thread/Thread/5.凑够一定数量才能继续执行.py
|
d73b1743ccd66793d4ab5dc684274cdd8d96cd03
|
[] |
no_license
|
ml758392/python_tedu
|
82e12ae014f0fc81230386fab07f901510fc8837
|
9f20798604db0ac8cd7b69d8c7a52ee361ebc7a7
|
refs/heads/master
| 2020-04-12T08:30:42.354663
| 2019-03-29T11:55:30
| 2019-03-29T11:55:30
| 162,386,878
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
# -*-coding:utf-8-*-
import threading
import time

# Demo: a Barrier releases its waiters only once `parties` threads arrive.
bar = threading.Barrier(6)


def run():
    """Worker: announce start, pause briefly, then rendezvous at the barrier."""
    print('%s---start' % threading.current_thread().name)
    time.sleep(1)
    bar.wait()  # blocks until bar.parties threads are waiting here
    print('%s---end' % threading.current_thread().name)


if __name__ == '__main__':
    # Fix: the original started only 5 threads against a 6-party barrier,
    # so every worker blocked forever in bar.wait() and the script never
    # finished.  Spawning exactly bar.parties workers lets the barrier
    # trip and the demo complete.
    for _ in range(bar.parties):
        threading.Thread(target=run).start()
|
[
"yy.tedu.cn"
] |
yy.tedu.cn
|
59f0627ece60217800e3c91abd0f3269841b99de
|
a3354726b126b85987a1455bd4b1ed0a4d05f5bb
|
/apps/posts/templatetags/urlify.py
|
dbcef20ecbbcf46d92a98266c40fc00add8b6040
|
[] |
no_license
|
RonaldTheodoro/django-blog
|
cea90ab619e69560013a995c8d67d65e4593e0a9
|
92b64aa93c495fef835e64a98c9619cba3f518c4
|
refs/heads/master
| 2020-04-01T06:05:08.492523
| 2018-10-20T15:28:53
| 2018-10-20T15:28:53
| 152,932,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
from urllib.parse import quote_plus
from django import template
register = template.Library()  # template-tag registry Django discovers for this app


@register.filter
def urlify(value):
    # URL-encode *value* for safe use in a query string ("a b" -> "a+b").
    return quote_plus(value)
|
[
"ronald.silva4@fatec.sp.gov.br"
] |
ronald.silva4@fatec.sp.gov.br
|
31642fa7ef14844e7529c37bd4b42f313d0a69bc
|
32f7392217c50e1ee5a41db0414cbd6ca2427753
|
/Tencent2020/txbase/emb.py
|
2bf25c681898d4882c0639e0dc5cc6a532c10b48
|
[] |
no_license
|
Stella2019/KDD2020
|
0f315cd14c26bbcedc69b3982ca58d848d5d4a13
|
2604208d8bcac47ef097e6469633430637149b31
|
refs/heads/main
| 2023-07-02T02:22:07.707798
| 2021-08-14T06:15:04
| 2021-08-14T06:15:04
| 395,909,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,577
|
py
|
from . import Cache
import numpy as np
class EmbBatchLoader:
    """Loads cached per-column embedding dicts and converts them into
    NN-ready embedding matrices aligned with the key2index vocabularies."""

    def __init__(self,
                 all_emb_cols,
                 emb_base_dir=None,
                 key2index=None,
                 outer_emb=False):
        """
        all_emb_cols: columns for which embedding caches exist.
        emb_base_dir: base directory handed to Cache.reload_cache.
        key2index: per-column {token -> integer id} vocabulary mappings.
        outer_emb: set True when loading externally produced embeddings;
            their vocabulary may then disagree with key2index and
            word_emb_dict, and by default the highest-index key was
            assumed to stand for low-frequency words.
        """
        self.all_emb_cols = all_emb_cols
        # Kept so methods can restore the list after pruning missing columns.
        self.all_emb_cols_backup = all_emb_cols
        self.emb_base_dir = emb_base_dir
        self.key2index = key2index
        self.outer_emb = outer_emb

    def _get_max_index(self, word_emb_dict):
        '''
        Deprecated: originally the OOV key was the max index; it was later
        changed to '-1', so this helper is no longer used.
        :param word_emb_dict:
        :return:
        '''
        return str(sorted(map(int, list(word_emb_dict.keys())))[-1])

    def get_emb_matrix(self, word_emb_dict, key2index_col):  # modify by zlh
        """
        prepare embedding for NN
        initializing the embedding... id => emb vectors
        the id is your own label encoding mapping...which stored in the self.key2index[col]
        """
        if self.outer_emb:
            # self._get_max_index(word_emb_dict)  # external embeddings used "max" as the rare-word key
            key_to_represent_rare = '-1'
        else:
            key_to_represent_rare = '-1'  # our own embeddings use '-1' as the rare-word key
        # NOTE(review): both branches now assign '-1', so the outer_emb
        # distinction is currently a no-op here.
        # Grab any one vector just to read the embedding dimensionality.
        for _, k in word_emb_dict.items():
            break
        emb_size = k.shape[0]
        voc_size = len(key2index_col)
        # The real vocabulary is encoded starting from 1 (see the sequence
        # preparation step), hence the +1 row; row 0 stays all-zero.
        emb_matrix = np.zeros((voc_size + 1, emb_size))  # TODO: better init for row 0 (mean?)
        # The embedding dict must contain a '-1' (rare-word) entry.
        if '-1' not in word_emb_dict.keys():
            # No '-1' present: this is a full-vocabulary embedding, so build
            # the rare-word vector ourselves from the words that appear in
            # the embedding but not in key2index_col.
            set_drop_words = list(
                set(word_emb_dict.keys()).difference(set(
                    key2index_col.keys())))
            if len(set_drop_words) > 0:
                # Average those words' vectors to represent the OOV bucket.
                vector_low_frequency_words = np.zeros((emb_size, ))
                for w in set_drop_words:
                    vector_low_frequency_words += word_emb_dict[w]
                vector_low_frequency_words = vector_low_frequency_words / len(
                    set_drop_words)
                # Register the synthetic rare-word vector under '-1'.
                word_emb_dict['-1'] = vector_low_frequency_words
                print(' file has ' + str(len(set_drop_words)) + \
                    ' low frequency words and fill vector as:', vector_low_frequency_words)
        for k, idx in key2index_col.items():
            try:
                emb_matrix[idx, :] = word_emb_dict[k]
            except KeyError:  # k missing from word_emb_dict: fall back to the rare-word vector
                # print('find oov:',(k, idx))
                emb_matrix[idx, :] = word_emb_dict[key_to_represent_rare]
        emb_matrix = np.float32(emb_matrix)
        return emb_matrix

    def load_batch_embedding(self, emb_base_name, pure_nm):
        """
        Load one cached embedding dict per column in self.all_emb_cols;
        columns whose cache file is missing are reported and dropped.
        """
        emb_dict = {}
        for col in self.all_emb_cols:
            file_nm = F'{emb_base_name}_{col}'
            try:
                emb_dict[col] = Cache.reload_cache(
                    file_nm=file_nm,
                    pure_nm=pure_nm,
                    base_dir=self.emb_base_dir)['word_emb_dict']
            except FileNotFoundError as e:
                print("[Error]" + " = =" * 30)
                print("ErrorMessage: ", e)
                print("col: ", col)
                print("file_nm:", file_nm)
                print("[Error]" + " = =" * 30)
        # Prune columns that failed to load so later steps stay consistent.
        print(f"Raw self.all_emb_cols: {self.all_emb_cols}")
        self.all_emb_cols = list(emb_dict.keys())
        print(f"Updated self.all_emb_cols: {self.all_emb_cols}")
        return emb_dict

    def load_emb_dict_with_raw_embs(self,
                                    marker=None,
                                    emb_base_name=None,
                                    sentence_id='user_id',
                                    pure_nm=True):
        # Resolve the cache base name from either an explicit name or a marker.
        if emb_base_name is None:
            if marker is None:
                raise ValueError(
                    "marker can't be None if emb_base_name is None!!")
            else:
                if marker.endswith("_advertiser_id") or marker.endswith(
                        "_user_id"):
                    # marker already carries the sentence_id suffix (only
                    # _advertiser_id and _user_id exist at the moment).
                    emb_base_name = F'EMB_DICT_{marker}'
                else:
                    # marker lacks the sentence_id suffix; append it.
                    emb_base_name = F'EMB_DICT_{marker}_{sentence_id}'
        else:
            emb_base_name = emb_base_name.rstrip('_')  # shared base name for the whole group
        emb_dict_with_raw_embs = self.load_batch_embedding(
            emb_base_name, pure_nm)
        return emb_dict_with_raw_embs

    def get_batch_emb_matrix(self,
                             marker=None,
                             emb_base_name=None,
                             sentence_id='user_id',
                             pure_nm=True):
        """Load every column's embedding dict and convert each to a matrix."""
        emb_dict_with_raw_embs = self.load_emb_dict_with_raw_embs(
            marker=marker,
            emb_base_name=emb_base_name,
            sentence_id=sentence_id,
            pure_nm=pure_nm)
        emb_matrix_ready_dict = {}
        for col in self.all_emb_cols:
            emb_matrix_ready_dict[col] = self.get_emb_matrix(
                emb_dict_with_raw_embs[col], key2index_col=self.key2index[col])
        print("-" * 6)
        print("Done!")
        # restore all_emb_cols to all_emb_cols_backup
        self.all_emb_cols = self.all_emb_cols_backup
        return emb_matrix_ready_dict

    def get_batch_emb_matrix_by_absolute_path(self,
                                              absolute_path_with_placeholder):
        """Same as get_batch_emb_matrix, but each cache is loaded from an
        explicit path template containing a {} placeholder for the column."""
        emb_matrix_ready_dict = {}
        for col in self.all_emb_cols:
            path = absolute_path_with_placeholder.format(col)
            try:
                i_raw_embs = Cache.reload_cache(
                    file_nm=path, base_dir=self.emb_base_dir)['word_emb_dict']
                emb_matrix_ready_dict[col] = self.get_emb_matrix(
                    i_raw_embs, key2index_col=self.key2index[col])
            except FileNotFoundError as e:
                print("[Error]" + " = =" * 30)
                print("ErrorMessage: ", e)
                print("col: ", col)
                print("file_nm:", path)
                print("[Error]" + " = =" * 30)
        # Prune columns that failed to load, mirroring load_batch_embedding.
        print(f"Raw self.all_emb_cols: {self.all_emb_cols}")
        self.all_emb_cols = list(emb_matrix_ready_dict.keys())
        print(f"Updated self.all_emb_cols: {self.all_emb_cols}")
        print("-" * 6)
        print("Done!")
        # restore all_emb_cols to all_emb_cols_backup
        self.all_emb_cols = self.all_emb_cols_backup
        return emb_matrix_ready_dict
|
[
"noreply@github.com"
] |
Stella2019.noreply@github.com
|
1a9627a465aa1f53187fe367e69589eff0cf6a31
|
a59d1faced9fe7348ca7143d2a8643e0ebad2132
|
/pyvisdk/do/application_quiesce_fault.py
|
cf32f58e3304afc4abc1a5f0d39e25a272834537
|
[
"MIT"
] |
permissive
|
Infinidat/pyvisdk
|
c55d0e363131a8f35d2b0e6faa3294c191dba964
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
refs/heads/master
| 2023-05-27T08:19:12.439645
| 2014-07-20T11:49:16
| 2014-07-20T11:49:16
| 4,072,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ApplicationQuiesceFault(vim, *args, **kwargs):
    '''This fault is thrown when creating a quiesced snapshot failed because the
    (user-supplied) custom pre-freeze script in the virtual machine exited with a
    non-zero return code.This indicates that the script failed to perform its
    quiescing task, which causes us to fail the quiesced snapshot operation.'''

    obj = vim.client.factory.create('{urn:vim25}ApplicationQuiesceFault')

    # do some validation checking...
    # Fix: the old message claimed "at least 5" arguments while the check
    # requires 4, and it reported only len(args), ignoring keyword args;
    # report the real threshold and the real total supplied.
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'dynamicProperty', 'dynamicType', 'faultCause', 'faultMessage' ]
    optional = [ ]

    # Positional args fill the required (then optional) attributes in order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword args must match a known attribute name exactly.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
[
"guy@rzn.co.il"
] |
guy@rzn.co.il
|
63f33a87835b8770a6f52247450c589c170448cc
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/kusto/v20210827/get_data_connection.py
|
10c402e25115ff6f6d20ff26cd07edd7efef74c5
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,654
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetDataConnectionResult',
'AwaitableGetDataConnectionResult',
'get_data_connection',
'get_data_connection_output',
]
warnings.warn("""Please use one of the variants: EventGridDataConnection, EventHubDataConnection, IotHubDataConnection.""", DeprecationWarning)
@pulumi.output_type
@pulumi.output_type
class GetDataConnectionResult:
    """
    Class representing an data connection.
    """
    def __init__(__self__, id=None, kind=None, location=None, name=None, type=None):
        # Validate each value is a str (or falsy) and stash it on the result
        # via pulumi.set so the typed getters below can read it back.
        for attr, val in (('id', id), ('kind', kind), ('location', location),
                          ('name', name), ('type', type)):
            if val and not isinstance(val, str):
                raise TypeError("Expected argument '%s' to be a str" % attr)
            pulumi.set(__self__, attr, val)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        Kind of the endpoint for the data connection
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetDataConnectionResult(GetDataConnectionResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` makes __await__ a generator so instances can be
        # awaited; it then immediately returns a plain result copy.
        if False:
            yield self
        return GetDataConnectionResult(
            id=self.id,
            kind=self.kind,
            location=self.location,
            name=self.name,
            type=self.type)
def get_data_connection(cluster_name: Optional[str] = None,
                        data_connection_name: Optional[str] = None,
                        database_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDataConnectionResult:
    """
    Class representing an data connection.


    :param str cluster_name: The name of the Kusto cluster.
    :param str data_connection_name: The name of the data connection.
    :param str database_name: The name of the database in the Kusto cluster.
    :param str resource_group_name: The name of the resource group containing the Kusto cluster.
    """
    pulumi.log.warn("""get_data_connection is deprecated: Please use one of the variants: EventGridDataConnection, EventHubDataConnection, IotHubDataConnection.""")
    # Build the invoke arguments as one literal instead of incremental inserts.
    __args__ = {
        'clusterName': cluster_name,
        'dataConnectionName': data_connection_name,
        'databaseName': database_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke('azure-native:kusto/v20210827:getDataConnection', __args__, opts=opts, typ=GetDataConnectionResult).value

    result = AwaitableGetDataConnectionResult(
        id=__ret__.id,
        kind=__ret__.kind,
        location=__ret__.location,
        name=__ret__.name,
        type=__ret__.type)
    return result
@_utilities.lift_output_func(get_data_connection)
def get_data_connection_output(cluster_name: Optional[pulumi.Input[str]] = None,
                               data_connection_name: Optional[pulumi.Input[str]] = None,
                               database_name: Optional[pulumi.Input[str]] = None,
                               resource_group_name: Optional[pulumi.Input[str]] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDataConnectionResult]:
    """
    Class representing an data connection.


    :param str cluster_name: The name of the Kusto cluster.
    :param str data_connection_name: The name of the data connection.
    :param str database_name: The name of the database in the Kusto cluster.
    :param str resource_group_name: The name of the resource group containing the Kusto cluster.
    """
    pulumi.log.warn("""get_data_connection is deprecated: Please use one of the variants: EventGridDataConnection, EventHubDataConnection, IotHubDataConnection.""")
    # Body intentionally empty: lift_output_func wraps get_data_connection to
    # accept pulumi Inputs and return an Output.
    ...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
4b8eff8148ed0ac19a6ac1759eb66417d0b8a4a0
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part010263.py
|
46c4f1fd19764edd4977285229bf635d77cfbf13
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher51281(CommutativeMatcher):
    # Machine-generated matchpy many-to-one matcher; accessed as a singleton
    # via get().  Do not edit by hand.
    _instance = None
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i3.2.1.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i3.2.1.2.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Lazily create and reuse the single matcher instance.
        if CommutativeMatcher51281._instance is None:
            CommutativeMatcher51281._instance = CommutativeMatcher51281()
        return CommutativeMatcher51281._instance

    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 51280
        # Generated state machine yields no matches for this pattern set:
        # `return` ends the generator immediately; the unreachable `yield`
        # below only marks the function as a generator.
        return
        yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
4c98e08132aeae3e18d23763c7ba5bf9f7915f22
|
3970706a16be81a63b2476222c1b061da9f11b70
|
/estimator/download_data.py
|
4cf480e781e68353a149d1325da327b6ec2ae348
|
[] |
no_license
|
sfujiwara/tensorflow-examples
|
3de3fb90c6204bec2c455f8f1b9aa98a14f393b9
|
6b9dd3ba27e1b0d021c322f5504e888b6b7ed4fb
|
refs/heads/master
| 2023-04-18T11:33:43.271751
| 2020-12-17T20:49:57
| 2020-12-17T20:49:57
| 126,787,804
| 1
| 0
| null | 2023-03-25T00:25:33
| 2018-03-26T07:06:44
|
Python
|
UTF-8
|
Python
| false
| false
| 426
|
py
|
import argparse
import tensorflow_datasets as tfds
# CLI flags: where to store TFDS data and which dataset to fetch.
# NOTE(review): flags are parsed at import time, so importing this module
# requires a compatible sys.argv.
parser = argparse.ArgumentParser()
parser.add_argument('--tfds_dir', type=str)
parser.add_argument('--dataset_name', type=str)
args = parser.parse_args()

TFDS_DIR = args.tfds_dir
DATASET_NAME = args.dataset_name


def main():
    # Download and prepare the dataset into TFDS_DIR (no-op if already cached).
    builder = tfds.builder(DATASET_NAME, data_dir=TFDS_DIR)
    builder.download_and_prepare()
    return


if __name__ == '__main__':
    main()
|
[
"shuhei.fujiwara@gmail.com"
] |
shuhei.fujiwara@gmail.com
|
a015fd2835d1017c32b4f5d5ad8ec3e72eb99d16
|
a78f0d96c33d8e3399bffa85ffba5c8e598e8492
|
/Array/55_sort_wave.py
|
94cb0b44a844fba62865e284c91e58d6ea58cb23
|
[] |
no_license
|
ANKITPODDER2000/data-structure
|
78203fabf9ea7ef580d41d4d44cbff1e6c9f397d
|
3c1542562e74c0888718273e16206a755b193d4e
|
refs/heads/main
| 2023-02-04T15:40:21.017573
| 2020-12-31T10:45:18
| 2020-12-31T10:45:18
| 325,778,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
from get_array_helper import take_array_user
def sort_wave(arr, n):
    """Rearrange *arr* (length *n*) in place into wave form.

    After the pass every even index holds a local peak:
    arr[0] >= arr[1] <= arr[2] >= arr[3] ...  Returns None (in-place).

    Fix: the original wrote ``arr[i] < arr[i+1] and i < n-1``, evaluating
    ``arr[i+1]`` before the bounds guard, so for odd *n* the last even
    index read ``arr[n]`` and raised IndexError.  (The matching left-hand
    test also read ``arr[-1]`` at i == 0, harmless only thanks to Python's
    negative indexing.)  The bounds checks now short-circuit first.
    """
    for i in range(0, n, 2):
        # Ensure the even-indexed element beats its left neighbour...
        if i > 0 and arr[i] < arr[i - 1]:
            arr[i], arr[i - 1] = arr[i - 1], arr[i]
        # ...and its right neighbour.
        if i < n - 1 and arr[i] < arr[i + 1]:
            arr[i], arr[i + 1] = arr[i + 1], arr[i]
# Demo driver: read an array from the user, wave-sort it in place, print it.
arr , n = take_array_user()
print("Sorting in wave form .....")
sort_wave(arr , n)
print("Sorting done .....")
print("Array after sorting in wave form : ",arr)
|
[
"ankitpodder0211@gmail.com"
] |
ankitpodder0211@gmail.com
|
98cb85d1402933244f795a346bdc4fd0313236fe
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/scrapy_scrapy/scrapy-master/scrapy/commands/startproject.py
|
5941066326a89f8907da69a7681f54c726320d4d
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,863
|
py
|
from __future__ import print_function
import re
import os
import string
from importlib import import_module
from os.path import join, exists, abspath
from shutil import ignore_patterns, move, copy2, copystat
import scrapy
from scrapy.commands import ScrapyCommand
from scrapy.utils.template import render_templatefile, string_camelcase
from scrapy.exceptions import UsageError
# Path components (joined per-platform) of template files rendered for each
# new project; the ${project_name} segment is substituted before joining.
TEMPLATES_TO_RENDER = (
    ('scrapy.cfg',),
    ('${project_name}', 'settings.py.tmpl'),
    ('${project_name}', 'items.py.tmpl'),
    ('${project_name}', 'pipelines.py.tmpl'),
    ('${project_name}', 'middlewares.py.tmpl'),
)

# File patterns never copied from the template tree.
IGNORE = ignore_patterns('*.pyc', '.svn')
class Command(ScrapyCommand):
    """`scrapy startproject`: copy the bundled project template tree into a
    target directory and render its templated files for the new project."""

    requires_project = False
    default_settings = {'LOG_ENABLED': False}

    def syntax(self):
        return "<project_name> [project_dir]"

    def short_desc(self):
        return "Create new project"

    def _is_valid_name(self, project_name):
        # A valid name is a legal Python identifier that does not shadow an
        # importable module; prints the reason and returns False otherwise.
        def _module_exists(module_name):
            try:
                import_module(module_name)
                return True
            except ImportError:
                return False

        if not re.search(r'^[_a-zA-Z]\w*$', project_name):
            print('Error: Project names must begin with a letter and contain'\
                ' only\nletters, numbers and underscores')
        elif _module_exists(project_name):
            print('Error: Module %r already exists' % project_name)
        else:
            return True
        return False

    def _copytree(self, src, dst):
        """
        Since the original function always creates the directory, to resolve
        the issue a new function had to be created. It's a simple copy and
        was reduced for this case.
        More info at:
        https://github.com/scrapy/scrapy/pull/2005
        """
        ignore = IGNORE
        names = os.listdir(src)
        ignored_names = ignore(src, names)
        if not os.path.exists(dst):
            os.makedirs(dst)
        for name in names:
            if name in ignored_names:
                continue
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            if os.path.isdir(srcname):
                self._copytree(srcname, dstname)
            else:
                copy2(srcname, dstname)
        copystat(src, dst)

    def run(self, args, opts):
        # args: [project_name] or [project_name, project_dir].
        if len(args) not in (1, 2):
            raise UsageError()
        project_name = args[0]
        project_dir = args[0]
        if len(args) == 2:
            project_dir = args[1]
        # Refuse to scaffold into a directory that already holds a project.
        if exists(join(project_dir, 'scrapy.cfg')):
            self.exitcode = 1
            print('Error: scrapy.cfg already exists in %s' % abspath(project_dir))
            return
        if not self._is_valid_name(project_name):
            self.exitcode = 1
            return
        # Copy the template tree, rename the generic 'module' package to the
        # project name, then render every templated file in place.
        self._copytree(self.templates_dir, abspath(project_dir))
        move(join(project_dir, 'module'), join(project_dir, project_name))
        for paths in TEMPLATES_TO_RENDER:
            path = join(*paths)
            tplfile = join(project_dir,
                string.Template(path).substitute(project_name=project_name))
            render_templatefile(tplfile, project_name=project_name,
                ProjectName=string_camelcase(project_name))
        print("New Scrapy project %r, using template directory %r, created in:" % \
            (project_name, self.templates_dir))
        print(" %s\n" % abspath(project_dir))
        print("You can start your first spider with:")
        print(" cd %s" % project_dir)
        print(" scrapy genspider example example.com")

    @property
    def templates_dir(self):
        # Honour a user-configured TEMPLATES_DIR; else use scrapy's bundled one.
        _templates_base_dir = self.settings['TEMPLATES_DIR'] or \
            join(scrapy.__path__[0], 'templates')
        return join(_templates_base_dir, 'project')
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
8e3433cc468d8d0c729fe477b522903a60d3acd2
|
e27333261b8e579564016c71d2061cc33972a8b8
|
/.history/api/UnigramLanguageModelImplementation_20210809170904.py
|
6ea53ba81f55833e5414f6e86eea471894cdaf2c
|
[] |
no_license
|
Dustyik/NewsTweet_InformationRetrieval
|
882e63dd20bc9101cbf48afa6c3302febf1989b1
|
d9a6d92b51c288f5bcd21ea1cc54772910fa58f7
|
refs/heads/master
| 2023-07-01T09:12:53.215563
| 2021-08-12T08:28:33
| 2021-08-12T08:28:33
| 382,780,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,153
|
py
|
import math
from IPython.display import display
import sys
from BM25implementation import QueryParsers
ALPHA = 0.75
NORMALIZE_PROBABILITY = True
class UnigramLanguageModel:
def __init__(self, tweets_data): #tweets is a pandas dataframe
self.tweets_data = tweets_data
self.wordsCollectionFrequencyDictionary = self.create_words_frequency_dict(tweets_data)
def create_words_frequency_dict(self, tweets_data, collection = True):
word_frequency_dictionary = {}
if collection:
tweets = tweets_data.clean_text.tolist()
for sentence in tweets:
sentence_list = list(sentence.split(" "))
for word in sentence_list:
if word in word_frequency_dictionary:
word_frequency_dictionary[word] += 1
else:
word_frequency_dictionary[word] = 1
else:
for word in tweets_data:
if word in word_frequency_dictionary:
word_frequency_dictionary[word] += 1
else:
word_frequency_dictionary[word] = 1
return word_frequency_dictionary
def calculate_total_no_of_words(self, wordsCollectionFrequencyDictionary):
values = wordsCollectionFrequencyDictionary.values()
total = sum(values)
return total
def calculate_unigram_probability(self, word: str, wordCollectionFrequencyDictionary):
totalNumberOfWords = self.calculate_total_no_of_words(wordCollectionFrequencyDictionary)
try:
value = wordCollectionFrequencyDictionary[word]/totalNumberOfWords
except KeyError as ke:
value = 1/totalNumberOfWords #add one smoothing for documents
print (word)
print (wordCollectionFrequencyDictionary)
print (value, totalNumberOfWords)
return value
def calculate_interpolated_sentence_probability(self, querySentence:list, document, alpha=ALPHA, normalize_probability=NORMALIZE_PROBABILITY):
total_score = 1
list_of_strings = list(document.split(" "))
print (list_of_strings)
documentWordFrequencyDictionary = self.create_words_frequency_dict(list_of_strings, collection = False)
for word in querySentence:
score_of_word = alpha*(self.calculate_unigram_probability(word, documentWordFrequencyDictionary)) + (1 - alpha)*(self.calculate_unigram_probability(word, self.wordsCollectionFrequencyDictionary))
total_score *= score_of_word
sys.exit()
if normalize_probability == True:
return total_score
else:
return (math.log(total_score)/math.log(2))
def getQueryLikelihoodModelScore(self, querySentence:list):
querySentenceList = QueryParsers(querySentence).query
self.tweets_data["QueryLikelihoodModelScore"] = self.tweets_data.apply(lambda row: self.calculate_interpolated_sentence_probability(querySentenceList, row.clean_text), axis = 1)
#display(self.tweets_data)
return
|
[
"chiayik_tan@mymail.sutd.edu.sg"
] |
chiayik_tan@mymail.sutd.edu.sg
|
c563ebf7c8f48e07c6f75e980fe4f341bf47c19f
|
25ebc03b92df764ff0a6c70c14c2848a49fe1b0b
|
/daily/20180703/example_resumable/05handler_cli.py
|
391b49df85d96e1dc81fa2dd64d1562ecb57edaa
|
[] |
no_license
|
podhmo/individual-sandbox
|
18db414fafd061568d0d5e993b8f8069867dfcfb
|
cafee43b4cf51a321f4e2c3f9949ac53eece4b15
|
refs/heads/master
| 2023-07-23T07:06:57.944539
| 2023-07-09T11:45:53
| 2023-07-09T11:45:53
| 61,940,197
| 6
| 0
| null | 2022-10-19T05:01:17
| 2016-06-25T11:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
import csv
import sys
def main(args):
yield from run(args.input)
def run(itr):
yield ["x", "x*x"]
for x in itr:
x = int(x)
yield {"x": x, "x*x": x * x}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--input", action="append", default=["1", "2", "3", "4", "5"])
args = parser.parse_args()
itr = main(args)
w = csv.DictWriter(sys.stdout, fieldnames=next(itr))
w.writerows(itr)
|
[
"ababjam61+github@gmail.com"
] |
ababjam61+github@gmail.com
|
e771087dda9f75a0335919a1fb638e8c0f758ab6
|
8fd07ea363ba4263bafe25d213c72cc9a93e2b3e
|
/nsd2018-master/nsd1804/python/day05/u2d.py
|
5bab4f12fb972e1d039dbb0c1b0ab2b1eb7c6dc5
|
[] |
no_license
|
ml758392/python_tedu
|
82e12ae014f0fc81230386fab07f901510fc8837
|
9f20798604db0ac8cd7b69d8c7a52ee361ebc7a7
|
refs/heads/master
| 2020-04-12T08:30:42.354663
| 2019-03-29T11:55:30
| 2019-03-29T11:55:30
| 162,386,878
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
import sys
def unix2dos(fname):
dst_fname = fname + '.txt'
with open(fname) as src_fobj:
with open(dst_fname, 'w') as dst_fobj:
for line in src_fobj:
dst_fobj.write(line.rstrip() + '\r\n')
if __name__ == '__main__':
unix2dos(sys.argv[1])
|
[
"yy.tedu.cn"
] |
yy.tedu.cn
|
e2e24e924dd08430e582554e7321d4125ec6c862
|
c40e84f6ca54fd85fc4f91740f6d35b9e693584a
|
/LeetCode/Python/073 Set Matrix Zeroes.py
|
2626b6ad540243c985a9763a59b8dc676a17801a
|
[] |
no_license
|
arif-hanif/Algorithm
|
8b4d7b7e1c32524558f35bcca2f70b6283b16370
|
84b5be24f7b083b6fab6228a49eb279ab764ccda
|
refs/heads/master
| 2021-01-15T16:42:29.079179
| 2016-09-10T11:32:25
| 2016-09-10T11:32:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
# -*- coding: utf-8 -*-
'''
Set Matrix Zeroes
=================
Given a m x n matrix, if an element is 0, set its entire row and column to 0.
Do it in place.
Follow up:
Did you use extra space?
A straight forward solution using O(mn) space is probably a bad idea.
A simple improvement uses O(m + n) space, but still not the best solution.
Could you devise a constant space solution?
'''
class Solution(object):
'''算法思路:
把有 0 的行和列保存起来,然后遍历把相关的列和行设置为 0 即可
'''
def setZeroes(self, matrix):
if not matrix:
return
rows, cols = set(), set()
for i, row in enumerate(matrix):
for j, v in enumerate(row):
if v == 0:
rows.add(i)
cols.add(j)
for r in rows:
for j in xrange(len(matrix[0])):
matrix[r][j] = 0
for c in cols:
for i in xrange(len(matrix)):
matrix[i][c] = 0
|
[
"shiyanhui66@gmail.com"
] |
shiyanhui66@gmail.com
|
d992dc6e406ab8fbad3aebc90fc1b8a3592c3027
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/sdk/servicebus/azure-servicebus/examples/async_examples/example_queue_send_receive_batch_async.py
|
2ae76d4e5a94a9d9b0c3c20ade55f474df3daa07
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
# ------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import asyncio
import conftest
from azure.servicebus.aio import ServiceBusClient, Message
from azure.servicebus.common.constants import ReceiveSettleMode
async def sample_queue_send_receive_batch_async(sb_config, queue):
client = ServiceBusClient(
service_namespace=sb_config['hostname'],
shared_access_key_name=sb_config['key_name'],
shared_access_key_value=sb_config['access_key'],
debug=True)
queue_client = client.get_queue(queue)
async with queue_client.get_sender() as sender:
for i in range(100):
message = Message("Sample message no. {}".format(i))
await sender.send(message)
await sender.send(Message("shutdown"))
async with queue_client.get_receiver(idle_timeout=1, mode=ReceiveSettleMode.PeekLock, prefetch=10) as receiver:
# Receive list of messages as a batch
batch = await receiver.fetch_next(max_batch_size=10)
await asyncio.gather(*[m.complete() for m in batch])
# Receive messages as a continuous generator
async for message in receiver:
print("Message: {}".format(message))
print("Sequence number: {}".format(message.sequence_number))
await message.complete()
if __name__ == '__main__':
live_config = conftest.get_live_servicebus_config()
queue_name = conftest.create_standard_queue(live_config)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(sample_queue_send_receive_batch_async(live_config, queue_name))
finally:
conftest.cleanup_queue(live_config, queue_name)
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
75aa760a5335cac72dbbcde939f818d0c5ecf3ac
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_366/ch27_2019_03_05_20_56_38_513299.py
|
0e3a0ae523a08345d0ba9fc83d035fa90b50cc99
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
qtde_cigarros = int(input('Quantos cigarros você fuma por dia?'))
qtde_anos = float(input('Há quantos anos você fuma?'))
def tempo_perdido(qtde_cigarros, qtde_anos):
y = qtde_cigarros*365*qtde_anos/144
return y
c = tempo_perdido(qtde_cigarros, qtde_anos)
print(c)
|
[
"you@example.com"
] |
you@example.com
|
35d99c94d8fbf0df2eb3e6cc2c0ef0d44c95e3dd
|
6b3e8b4291c67195ad51e356ba46602a15d5fe38
|
/test_v2/core/test_config.py
|
311cc073a68e5459dfd6c8c248fdf2f4f5fda633
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
csaybar/raster-vision
|
4f5bb1125d4fb3ae5c455db603d8fb749221dd74
|
617ca15f64e3b8a391432306a743f7d0dfff352f
|
refs/heads/master
| 2021-02-26T19:02:53.752971
| 2020-02-27T17:25:31
| 2020-02-27T17:25:31
| 245,547,406
| 2
| 1
|
NOASSERTION
| 2020-03-07T01:24:09
| 2020-03-07T01:24:08
| null |
UTF-8
|
Python
| false
| false
| 3,493
|
py
|
from typing import List
import unittest
import copy
from pydantic.error_wrappers import ValidationError
from rastervision2.pipeline.config import (Config, register_config, build_config,
upgrade_config, Upgrader)
class AConfig(Config):
x: str = 'x'
@register_config('asub1')
class ASub1Config(AConfig):
y: str = 'y'
@register_config('asub2')
class ASub2Config(AConfig):
y: str = 'y'
class BConfig(Config):
x: str = 'x'
class UpgradeC1(Upgrader):
def upgrade(self, cfg_dict):
cfg_dict = copy.deepcopy(cfg_dict)
cfg_dict['x'] = cfg_dict['y']
del cfg_dict['y']
return cfg_dict
@register_config('c', version=1, upgraders=[UpgradeC1()])
class CConfig(Config):
al: List[AConfig]
bl: List[BConfig]
a: AConfig
b: BConfig
x: str = 'x'
class TestConfig(unittest.TestCase):
def test_to_from(self):
cfg = CConfig(
al=[AConfig(), ASub1Config(),
ASub2Config()],
bl=[BConfig()],
a=ASub1Config(),
b=BConfig())
exp_dict = {
'type_hint':
'c',
'version':
1,
'a': {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
},
'al': [{
'x': 'x'
}, {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
}, {
'type_hint': 'asub2',
'x': 'x',
'y': 'y'
}],
'b': {
'x': 'x'
},
'bl': [{
'x': 'x'
}],
'x':
'x'
}
self.assertDictEqual(cfg.dict(), exp_dict)
self.assertEqual(build_config(exp_dict), cfg)
def test_no_extras(self):
with self.assertRaises(ValidationError):
BConfig(zz='abc')
def test_upgrade(self):
c_dict_v0 = {
'type_hint':
'c',
'version':
0,
'a': {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
},
'al': [{
'x': 'x'
}, {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
}, {
'type_hint': 'asub2',
'x': 'x',
'y': 'y'
}],
'b': {
'x': 'x'
},
'bl': [{
'x': 'x'
}],
'y':
'x'
}
c_dict_v1 = {
'type_hint':
'c',
'version':
1,
'a': {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
},
'al': [{
'x': 'x'
}, {
'type_hint': 'asub1',
'x': 'x',
'y': 'y'
}, {
'type_hint': 'asub2',
'x': 'x',
'y': 'y'
}],
'b': {
'x': 'x'
},
'bl': [{
'x': 'x'
}],
'x':
'x'
}
upgraded_c_dict = upgrade_config(c_dict_v0)
self.assertDictEqual(upgraded_c_dict, c_dict_v1)
if __name__ == '__main__':
unittest.main()
|
[
"lewfish@gmail.com"
] |
lewfish@gmail.com
|
4825299ada1c314576b5b7d6ef81e6e9a85796e6
|
14c8434f6a4f09b84bc7dae3b6b225e7e13b156d
|
/app/errors.py
|
abd12d1d19f7af2be57ad88f68ab3f628692e411
|
[] |
no_license
|
mingming2513953126/flack
|
07299d5cc62aa4ced0734f2b00db587a24261d69
|
dbc793c0908629ae7fee87250f2e0f4456e76f33
|
refs/heads/master
| 2021-05-10T09:11:24.354831
| 2018-01-25T13:38:02
| 2018-01-25T13:38:02
| 118,917,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
# encoding: utf-8
'''
@author: lileilei
@file: errors.py
@time: 2017/5/22 20:50
'''
from flask import render_template,jsonify,request
|
[
"2513953126@qq.com"
] |
2513953126@qq.com
|
af6f8fa01e3dd3c3a068bcce200fc48515571e7f
|
c237d854f2fc78a7583f2bf0528355c8b14912f8
|
/tests/test_example.py
|
099b0812c779fffcc65bb463803178d1b6192432
|
[
"MIT"
] |
permissive
|
azridev/flask-dashboard-shards
|
da072e7406e9be3b85f31a9dff6167a0d87a7496
|
c6833e6d55c7dd065b4c6e9b677288e9fe9aa344
|
refs/heads/master
| 2021-05-19T09:08:00.079436
| 2020-03-26T19:04:00
| 2020-03-26T19:04:00
| 251,620,836
| 0
| 1
|
MIT
| 2020-03-31T14:04:41
| 2020-03-31T14:04:40
| null |
UTF-8
|
Python
| false
| false
| 584
|
py
|
# -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from tests.test_base import check_pages, check_blueprints
@check_pages('/', '/home/index')
def test_pages(base_client):
# do something
base_client.post('/', data={})
# the pages are tested (GET request: 200) afterwards by the
# @check_pages decorator
@check_blueprints('/forms', '/ui')
def test_blueprints(base_client):
# do something
base_client.post('/', data={})
# the blueprints are tested (GET request: 200) afterwards by the
# @check_blueprints decorator
|
[
"developer@rosoftware.ro"
] |
developer@rosoftware.ro
|
1d29dee387b69c8558912e9c4fd3c2013e88be9a
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/2550.py
|
28c00dfbc78318b375a16931dd8cc2af6d52486d
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
#!/usr/bin/env python
import sys
from math import sqrt
def pal(x):
x = str(x)
return x == x[::-1]
if __name__ == "__main__":
t = int(sys.stdin.readline())
for case in range(1, t+1):
count = 0
i, j = [long(c) for c in sys.stdin.readline().split(" ")]
for n in range(i, j+1):
r = sqrt(n)
if r - int(r) != 0.0:
continue
if pal(n) and pal(int(r)):
count += 1
print "Case #%d: %d" % (case, count)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
d32be4c5c1aa79bae358160228b4b8ad3f289a4f
|
28ef7c65a5cb1291916c768a0c2468a91770bc12
|
/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/mobilenetv2_coco_384x288.py
|
b7b54f086ffee4d0e83d3a2fe04f5cf10f68a7ec
|
[
"Apache-2.0"
] |
permissive
|
bit-scientist/mmpose
|
57464aae1ca87faf5a4669991ae1ea4347e41900
|
9671a12caf63ae5d15a9bebc66a9a2e7a3ce617e
|
refs/heads/master
| 2023-08-03T17:18:27.413286
| 2021-09-29T03:48:37
| 2021-09-29T03:48:37
| 411,549,076
| 0
| 0
|
Apache-2.0
| 2021-09-29T06:01:27
| 2021-09-29T06:01:26
| null |
UTF-8
|
Python
| false
| false
| 4,196
|
py
|
_base_ = ['../../../../_base_/datasets/coco.py']
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', save_best='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
model = dict(
type='TopDown',
pretrained='mmcls://mobilenet_v2',
backbone=dict(type='MobileNetV2', widen_factor=1., out_indices=(7, )),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=1280,
out_channels=channel_cfg['num_output_channels'],
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[288, 384],
heatmap_size=[72, 96],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=3),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline,
dataset_info={{_base_.dataset_info}}),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
)
|
[
"noreply@github.com"
] |
bit-scientist.noreply@github.com
|
f2746e2381a95d740cf3cd4036e8a08a7bb02ad3
|
eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429
|
/data/input/aldryn/django-simple-sso/simple_sso/sso_server/server.py
|
aa568278cfa75a6e2015dc8bf8be3712ae4e86da
|
[] |
no_license
|
bopopescu/pythonanalyzer
|
db839453bde13bf9157b76e54735f11c2262593a
|
8390a0139137574ab237b3ff5fe8ea61e8a0b76b
|
refs/heads/master
| 2022-11-22T02:13:52.949119
| 2019-05-07T18:42:52
| 2019-05-07T18:42:52
| 282,079,884
| 0
| 0
| null | 2020-07-23T23:46:09
| 2020-07-23T23:46:08
| null |
UTF-8
|
Python
| false
| false
| 6,308
|
py
|
# -*- coding: utf-8 -*-
import urlparse
from django.conf.urls import patterns, url
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from django.core.urlresolvers import reverse
from django.http import (HttpResponseForbidden, HttpResponseBadRequest, HttpResponseRedirect, QueryDict)
from django.utils import timezone
from django.views.generic.base import View
from itsdangerous import URLSafeTimedSerializer
from simple_sso.sso_server.models import Token, Consumer
import datetime
import urllib
from webservices.models import Provider
from webservices.sync import provider_for_django
class BaseProvider(Provider):
max_age = 5
def __init__(self, server):
self.server = server
def get_private_key(self, public_key):
try:
self.consumer = Consumer.objects.get(public_key=public_key)
except Consumer.DoesNotExist:
return None
return self.consumer.private_key
class RequestTokenProvider(BaseProvider):
def provide(self, data):
redirect_to = data['redirect_to']
token = Token.objects.create(consumer=self.consumer, redirect_to=redirect_to)
return {'request_token': token.request_token}
class AuthorizeView(View):
"""
The client get's redirected to this view with the `request_token` obtained
by the Request Token Request by the client application beforehand.
This view checks if the user is logged in on the server application and if
that user has the necessary rights.
If the user is not logged in, the user is prompted to log in.
"""
server = None
def get(self, request):
request_token = request.GET.get('token', None)
if not request_token:
return self.missing_token_argument()
try:
self.token = Token.objects.select_related('consumer').get(request_token=request_token)
except Token.DoesNotExist:
return self.token_not_found()
if not self.check_token_timeout():
return self.token_timeout()
self.token.refresh()
if request.user.is_authenticated():
return self.handle_authenticated_user()
else:
return self.handle_unauthenticated_user()
def missing_token_argument(self):
return HttpResponseBadRequest('Token missing')
def token_not_found(self):
return HttpResponseForbidden('Token not found')
def token_timeout(self):
return HttpResponseForbidden('Token timed out')
def check_token_timeout(self):
delta = timezone.now() - self.token.timestamp
if delta > self.server.token_timeout:
self.token.delete()
return False
else:
return True
def handle_authenticated_user(self):
if self.server.has_access(self.request.user, self.token.consumer):
return self.success()
else:
return self.access_denied()
def handle_unauthenticated_user(self):
next = '%s?%s' % (self.request.path, urllib.urlencode([('token', self.token.request_token)]))
url = '%s?%s' % (reverse(self.server.auth_view_name), urllib.urlencode([('next', next)]))
return HttpResponseRedirect(url)
def access_denied(self):
return HttpResponseForbidden("Access denied")
def success(self):
self.token.user = self.request.user
self.token.save()
serializer = URLSafeTimedSerializer(self.token.consumer.private_key)
parse_result = urlparse.urlparse(self.token.redirect_to)
query_dict = QueryDict(parse_result.query, mutable=True)
query_dict['access_token'] = serializer.dumps(self.token.access_token)
url = urlparse.urlunparse((parse_result.scheme, parse_result.netloc, parse_result.path, '', query_dict.urlencode(), ''))
return HttpResponseRedirect(url)
class VerificationProvider(BaseProvider, AuthorizeView):
def provide(self, data):
token = data['access_token']
try:
self.token = Token.objects.select_related('user').get(access_token=token, consumer=self.consumer)
except Token.DoesNotExist:
return self.token_not_found()
if not self.check_token_timeout():
return self.token_timeout()
if not self.token.user:
return self.token_not_bound()
extra_data = data.get('extra_data', None)
return self.server.get_user_data(
self.token.user, self.consumer, extra_data=extra_data)
def token_not_bound(self):
return HttpResponseForbidden("Invalid token")
class ConsumerAdmin(ModelAdmin):
readonly_fields = ['public_key', 'private_key']
class Server(object):
request_token_provider = RequestTokenProvider
authorize_view = AuthorizeView
verification_provider = VerificationProvider
token_timeout = datetime.timedelta(minutes=5)
client_admin = ConsumerAdmin
auth_view_name = 'django.contrib.auth.views.login'
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
self.register_admin()
def register_admin(self):
admin.site.register(Consumer, self.client_admin)
def has_access(self, user, consumer):
return True
def get_user_extra_data(self, user, consumer, extra_data):
raise NotImplementedError()
def get_user_data(self, user, consumer, extra_data=None):
user_data = {
'username': user.username,
'email': user.email,
'first_name': user.first_name,
'last_name': user.last_name,
'is_staff': False,
'is_superuser': False,
'is_active': user.is_active,
}
if extra_data:
user_data['extra_data'] = self.get_user_extra_data(
user, consumer, extra_data)
return user_data
def get_urls(self):
return patterns('',
url(r'^request-token/$', provider_for_django(self.request_token_provider(server=self)), name='simple-sso-request-token'),
url(r'^authorize/$', self.authorize_view.as_view(server=self), name='simple-sso-authorize'),
url(r'^verify/$', provider_for_django(self.verification_provider(server=self)), name='simple-sso-verify'),
)
|
[
"rares.begu@gmail.com"
] |
rares.begu@gmail.com
|
1bc5bfe0093dafca4e694e1f48a3517bedeab02c
|
5cd6a7fa7be3b00ff63e60935bc1be9fa1cfebf4
|
/projects/mid_atlantic/study/plot_FigS3_Distance_v_Depth_By_State.py
|
32b02836b5905138d01abb24b202eb0527cf62b4
|
[
"MIT"
] |
permissive
|
EnergyModels/caes
|
214e1c7cded4498f33670da7eeebccbaa665e930
|
5e994c198657226925161db1980ebfa704d0c90b
|
refs/heads/master
| 2023-08-23T15:05:53.594530
| 2021-11-06T01:00:57
| 2021-11-06T01:00:57
| 261,201,284
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,026
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
df = pd.read_csv('all_analysis.csv')
# f, a = plt.subplots(2,1)
# a = a.ravel()
#
# sns.scatterplot(data=df, x='NEAR_DIST',y='feasible_fr', hue='NEAR_FC', ax=a[0])
#
# sns.scatterplot(data=df, x='NEAR_DIST',y='RASTERVALU', hue='NEAR_FC', ax=a[1])
# conversions and column renaming
df.loc[:, 'Distance to shore (km)'] = df.loc[:, 'NEAR_DIST'] / 1000.0
df.loc[:, 'Water depth (m)'] = df.loc[:, 'RASTERVALU']
df.loc[:, 'Feasibility (%)'] = df.loc[:, 'feasible_fr'] * 100.0
df.loc[:, 'Formation (-)'] = df.loc[:, 'formation']
df.loc[:, 'Nearest State (-)'] = df.loc[:, 'NEAR_FC']
loc_dict = {'VA_shore': 'Virginia', 'MD_shore': 'Maryland', 'NJ_shore': 'New Jersey', 'DE_shore': 'Delaware',
'NY_shore': 'New York', 'MA_shore': 'Massachusetts', 'RI_shore': 'Rhode Island'}
formation_dict = {'LK1': 'Lower Cretaceous', 'MK1-3': 'Middle Cretaceous', 'UJ1': 'Upper Jurassic'}
# rename
for loc in df.loc[:, 'Nearest State (-)'].unique():
ind = df.loc[:, 'Nearest State (-)'] == loc
df.loc[ind, 'Nearest State (-)'] = loc_dict[loc]
# rename
for formation in df.loc[:, 'Formation (-)'].unique():
ind = df.loc[:, 'Formation (-)'] == formation
df.loc[ind, 'Formation (-)'] = formation_dict[formation]
# Filter data with feasibility greater than 0.8
# df = df[df.loc[:,'Feasibility (%)']>=0.8]
# Filter data with mean RTE greater than 0.5
df = df[df.loc[:, 'RTE_mean'] >= 0.5]
# sns.scatterplot(data=df, x='Distance to shore (km)', y='Water depth (m)', hue='Nearest State (-)',
# size='Feasibility (%)', style='Formation (-)')
#
# # a[1].set_ylim(top=0.0,bottom=-100.0)
#
# sns.scatterplot(data=df, x='Distance to shore (km)', y='Water depth (m)', hue='Nearest State (-)',
# size='Feasibility (%)', style='Formation (-)', ax=a[1])
#
# a[1].set_xlim(left=0.0,right=100.0)
# a[1].set_ylim(top=0.0,bottom=-100.0)
# create figure
f, a = plt.subplots(1, 1)
axins = zoomed_inset_axes(a, zoom=2.2, loc='upper center', bbox_to_anchor=(0.5, -0.2), bbox_transform=a.transAxes)
# Main plot
sns.scatterplot(data=df, x='Distance to shore (km)', y='Water depth (m)', hue='Nearest State (-)',
style='Formation (-)', ax=a)
a.set_xlim(left=0.0, right=300.0)
a.set_ylim(top=0, bottom=-400.0)
# a.set_yscale('symlog')
# Inset
x_lims = [0.0, 100.0]
y_lims = [0, -60.0]
rect = plt.Rectangle((x_lims[0] + 1, y_lims[0]), x_lims[1] - x_lims[0] + 1, y_lims[1] - y_lims[0], fill=False,
facecolor="black",
edgecolor='black', linestyle='--')
a.add_patch(rect)
sns.scatterplot(data=df, x='Distance to shore (km)', y='Water depth (m)', hue='Nearest State (-)',
style='Formation (-)', legend=False, ax=axins)
axins.set_xlim(left=x_lims[0], right=x_lims[1])
axins.set_ylim(top=y_lims[0], bottom=y_lims[1])
# axins.set_yscale('symlog')
axins.yaxis.set_major_locator(plt.MaxNLocator(3))
a.legend(bbox_to_anchor=(1.025, 0.0), loc="center left", ncol=1)
a.text(-0.1, 1.0, 'a', horizontalalignment='center', verticalalignment='center',
transform=a.transAxes, fontsize='medium', fontweight='bold')
axins.text(-0.3, 1.0, 'b', horizontalalignment='center', verticalalignment='center',
transform=axins.transAxes, fontsize='medium', fontweight='bold')
# Add rectangle that represents subplot2
# Column width guidelines https://www.elsevier.com/authors/author-schemas/artwork-and-media-instructions/artwork-sizing
# Single column: 90mm = 3.54 in
# 1.5 column: 140 mm = 5.51 in
# 2 column: 190 mm = 7.48 i
width = 7.48 # inches
height = 7.0 # inches
# Set size
f.set_size_inches(width, height)
plt.subplots_adjust(top=0.95,
bottom=0.5,
left=0.12,
right=0.7,
hspace=0.2,
wspace=0.2)
# save
plt.savefig('FigS3_Distance_v_Depth_By_State.png', dpi=300)
|
[
"jab6ft@virginia.edu"
] |
jab6ft@virginia.edu
|
164e5493f6758c339a9e2ad856a3766537c455d0
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=65/params.py
|
1784956005fe6b3ad3c6eecf934f47a007d14984
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.662643',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'RUN',
'trial': 65,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
ac3142959ea8cad01113bded21db613df639e564
|
9da8754002fa402ad8e6f25659978bd269bbcec8
|
/src/326A/test_cdf_326A.py
|
64e36fcaf81aaefde3dcff7e62890268fa2c84a8
|
[
"MIT"
] |
permissive
|
kopok2/CodeforcesSolutionsPython
|
a00f706dbf368ba0846c8ae86d4145b5dd3e1613
|
35bec0dbcff47765b123b5fe60476014376153df
|
refs/heads/master
| 2023-02-02T03:08:22.097651
| 2020-12-17T22:00:50
| 2020-12-17T22:00:50
| 196,035,812
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
import unittest
from unittest.mock import patch
from cdf_326A import CodeforcesTask326ASolution
class TestCDF326A(unittest.TestCase):
if __name__ == "__main__":
unittest.main()
|
[
"oleszek.karol@gmail.com"
] |
oleszek.karol@gmail.com
|
30965eb40de98acf331d58db74af0f8f602f227d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03085/s149035990.py
|
1c1c597445598c94f69ba81db215767cac8dae30
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
# A - Double Helix
# A:アデニン T:チミン G:グアニン C:シトシン
# 対になる組み合わせ A-T G-C
# 標準入力
base = input()
# print(base)
# 条件分岐し、結果を answer に代入
if base == 'A':
# print('T')
answer = 'T'
elif base == 'T':
# print('A')
answer = 'A'
elif base == 'G':
# print('C')
answer = 'C'
elif base == 'C':
# print('G')
answer = 'G'
# 結果の出力
print(answer)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
72385aef0f88fb44670c62fe09108881b5ca1cdd
|
a934a51f68592785a7aed1eeb31e5be45dd087d3
|
/Learning/Network_process_WA/Day1/2020_Jul23/subprocess_old/run_ls01.py
|
ba2d553ef730e9191baf52a2201f2e782ccafa17
|
[] |
no_license
|
nsshayan/Python
|
9bf0dcb9a6890419873428a2dde7a802e715be2b
|
0cf5420eecac3505071326c90b28bd942205ea54
|
refs/heads/master
| 2021-06-03T18:41:06.203334
| 2020-09-28T07:28:48
| 2020-09-28T07:28:48
| 35,269,825
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
from subprocess import Popen

# Run `ls -l /usr`, redirecting its stdout into ls.out, then report the
# child's exit status on our own stdout.
with open("ls.out", "w") as lsout:
    p = Popen(["ls", "-l", "/usr"], stdout=lsout)
    ret = p.wait()

print("ls exited with code =", ret)
|
[
"nsshayan89@gmail.com"
] |
nsshayan89@gmail.com
|
a14c03bc628896e88a3a715353f4b5c93d9778c3
|
98e1716c1c3d071b2fedef0ac029eb410f55762c
|
/part13-introduction-data-visualization/No07-Using-legend.py
|
bd1fef91573279e954aa0db684577e0a61040372
|
[] |
no_license
|
iamashu/Data-Camp-exercise-PythonTrack
|
564531bcf1dff119949cbb75e1fd63d89cb2779f
|
c72a4e806494f0e263ced9594597dc8882c2131c
|
refs/heads/master
| 2020-07-22T00:23:12.024386
| 2019-04-12T09:24:42
| 2019-04-12T09:24:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,461
|
py
|
#Using legend()
'''
Legends are useful for distinguishing between multiple datasets displayed on common axes. The relevant data are created using specific line colors or markers in various plot commands. Using the keyword argument label in the plotting function associates a string to use in a legend.
For example, here, you will plot enrollment of women in the Physical Sciences and in Computer Science over time. You can label each curve by passing a label argument to the plotting call, and request a legend using plt.legend(). Specifying the keyword argument loc determines where the legend will be placed.
Instructions
Modify the plot command provided that draws the enrollment of women in Computer Science over time so that the curve is labelled 'Computer Science' in the legend.
Modify the plot command provided that draws the enrollment of women in the Physical Sciences over time so that the curve is labelled 'Physical Sciences' in the legend.
Add a legend at the lower center (i.e., loc='lower center').
'''
# Code
# NOTE(review): `plt`, `year`, `computer_science`, and `physical_sciences`
# are supplied by the DataCamp exercise environment — this snippet is not
# standalone (there is no `import matplotlib.pyplot as plt` here).
# Specify the label 'Computer Science'
plt.plot(year, computer_science, color='red', label='Computer Science')
# Specify the label 'Physical Sciences'
plt.plot(year, physical_sciences, color='blue', label='Physical Sciences')
# Add a legend at the lower center
plt.legend(loc='lower center')
# Add axis labels and title
plt.xlabel('Year')
plt.ylabel('Enrollment (%)')
plt.title('Undergraduate enrollment of women')
plt.show()
|
[
"beiran@hotmail.com"
] |
beiran@hotmail.com
|
328ecc8c6a133314695a3f5e71fe57df6876cc9c
|
bdb206758815fa598285e05c23d81829f3ad60a9
|
/addons/at2166/controllers/controllers.py
|
4e907a688dc832b0d2a90c58e410c59f80a51a82
|
[] |
no_license
|
kulius/odoo10_test
|
75a9645fbd64ba5fd6901fb441f2e7141f610032
|
5a01107e2337fd0bbe35d87d53a0fe12eff7c59e
|
refs/heads/master
| 2021-07-26T15:05:58.074345
| 2017-11-08T09:04:11
| 2017-11-08T09:04:11
| 109,943,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
# -*- coding: utf-8 -*-
from odoo import http
# class At2166(http.Controller):
# @http.route('/at2166/at2166/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/at2166/at2166/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('at2166.listing', {
# 'root': '/at2166/at2166',
# 'objects': http.request.env['at2166.at2166'].search([]),
# })
# @http.route('/at2166/at2166/objects/<model("at2166.at2166"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('at2166.object', {
# 'object': obj
# })
|
[
"kulius@gmail.com"
] |
kulius@gmail.com
|
99f2714c3fba9228c05928fad3b4c365ac9aa7b1
|
356151747d2a6c65429e48592385166ab48c334c
|
/backend/customer/threads/order_now/th_get_menu.py
|
5e181b84ea83adbffe87077606958d72b475afed
|
[] |
no_license
|
therealrahulsahu/se_project
|
c82b2d9d467decd30a24388f66427c7805c23252
|
c9f9fd5594191ab7dce0504ca0ab3025aa26a0c1
|
refs/heads/master
| 2020-06-25T02:51:30.355677
| 2020-04-20T13:01:36
| 2020-04-20T13:01:36
| 199,175,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,666
|
py
|
from PyQt5.QtCore import QThread, pyqtSignal
class ThreadGetMenu(QThread):
signal = pyqtSignal('PyQt_PyObject')
def __init__(self, parent_class):
super().__init__()
self.parent_class = parent_class
def run(self):
if self.check_for_veg():
food_query = {
'veg': True,
'region': self.check_for_region(),
'type': self.check_for_type(),
'available': True
}
else:
food_query = {
'region': self.check_for_region(),
'type': self.check_for_type(),
'available': True
}
myc = self.parent_class.MW.DB.food
from pymongo.errors import AutoReconnect
from errors import FoodNotFoundError
try:
data_list = list(myc.find(food_query, {'_id': 1, 'name': 1, 'price': 1}))
if data_list:
self.parent_class.searched_food_list = data_list
self.signal.emit(True)
else:
raise FoodNotFoundError
except FoodNotFoundError as ob:
self.parent_class.MW.mess(str(ob))
except AutoReconnect:
self.parent_class.MW.mess('-->> Network Error <<--')
finally:
self.parent_class.curr_wid.bt_get.setEnabled(True)
def check_for_veg(self):
return self.parent_class.curr_wid.rbt_veg.isChecked()
def check_for_region(self):
if self.parent_class.curr_wid.rbt_north_ind.isChecked():
return 'nid'
elif self.parent_class.curr_wid.rbt_italian.isChecked():
return 'ita'
elif self.parent_class.curr_wid.rbt_south_ind.isChecked():
return 'sid'
elif self.parent_class.curr_wid.rbt_conti.isChecked():
return 'conti'
elif self.parent_class.curr_wid.rbt_thai.isChecked():
return 'thi'
elif self.parent_class.curr_wid.rbt_china.isChecked():
return 'chi'
elif self.parent_class.curr_wid.rbt_rajas.isChecked():
return 'raj'
elif self.parent_class.curr_wid.rbt_none.isChecked():
return 'none'
def check_for_type(self):
if self.parent_class.curr_wid.rbt_starter.isChecked():
return 'sta'
elif self.parent_class.curr_wid.rbt_main.isChecked():
return 'mcs'
elif self.parent_class.curr_wid.rbt_refresh.isChecked():
return 'ref'
elif self.parent_class.curr_wid.rbt_dessert.isChecked():
return 'des'
elif self.parent_class.curr_wid.rbt_bread.isChecked():
return 'bre'
|
[
"43601158+therealrahulsahu@users.noreply.github.com"
] |
43601158+therealrahulsahu@users.noreply.github.com
|
e46e39a01e13cb2eea5a6f5add4fb61accae3bf1
|
c99be9a7a55c6dc3dade46147f116ee6729a19d1
|
/tikzplotlib/__about__.py
|
4d3b2067e4529f6a610d20626f3fcbed193b58ca
|
[
"MIT"
] |
permissive
|
theRealSuperMario/tikzplotlib
|
3001cbe11856b1e7d87aa308c0ef99bbd28d1bec
|
3c1e08e78cb87ecf4b475f506244813bf99ac705
|
refs/heads/master
| 2020-12-11T09:36:37.399842
| 2020-11-01T10:27:21
| 2020-11-01T10:27:21
| 233,809,790
| 2
| 0
|
MIT
| 2020-01-14T09:54:53
| 2020-01-14T09:54:53
| null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
try:
    # Python 3.8+: importlib.metadata is in the standard library.
    from importlib import metadata
except ImportError:
    # Older interpreters fall back to the importlib-metadata backport.
    import importlib_metadata as metadata

try:
    __version__ = metadata.version("tikzplotlib")
except Exception:
    # Metadata lookup failed (e.g. running from an uninstalled source tree).
    __version__ = "unknown"
|
[
"nico.schloemer@gmail.com"
] |
nico.schloemer@gmail.com
|
5f5a0e831a68c5ef684d354ca570acf953792cea
|
f75ec2c20c3208350d310038a2cd0a67253b44df
|
/src/swagger_codegen/api/response_deserializer.py
|
5abc7163627b96210e3b445db6500c9493171408
|
[] |
no_license
|
vichooz/swagger_codegen
|
e53f59f3cd2c080157863698f932a606705db4e4
|
8238356075eea4218b2e6a645c7ea2b8826b1044
|
refs/heads/master
| 2022-08-03T04:32:49.291426
| 2020-05-27T06:09:28
| 2020-05-27T06:09:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
import abc
from typing import Any
from typing import Optional
import pydantic
from swagger_codegen.api.types import ResponseType
class ResponseDeserializer(abc.ABC):
    """Strategy interface: convert a raw response body into `deserialize_to`."""

    @abc.abstractmethod
    def deserialize(self, deserialize_to: ResponseType, model_body):
        pass
class DefaultResponseDeserializer(ResponseDeserializer):
    """Validate and coerce a response body through a throwaway pydantic model."""

    def deserialize(self, deserialize_to: ResponseType, model_body) -> Optional[Any]:
        # Nothing to deserialize when either the target type or the body is absent.
        if deserialize_to is None or model_body is None:
            return None

        class Config(pydantic.BaseConfig):
            # Allow non-pydantic classes to appear in the target type.
            arbitrary_types_allowed = True

        validator = pydantic.create_model(
            "PydanticValidatorModel", __root__=(deserialize_to, ...), __config__=Config
        )
        return validator(__root__=model_body).__root__
|
[
"n10101010@gmail.com"
] |
n10101010@gmail.com
|
4a67d8771aca07434a51aa7be4bb84f2c069a433
|
34745a8d54fa7e3d9e4237415eb52e507508ad79
|
/Python Fundamentals/Final exam/02_emoji_detector.py
|
2561bef627873327e10a68ef7de9312ae81415d8
|
[] |
no_license
|
DilyanTsenkov/SoftUni-Software-Engineering
|
50476af0dc88b267d72c56fa87eeb88d841164b2
|
fe446e3a50a00bb2e48d71ab8f783e0a4a406094
|
refs/heads/main
| 2023-08-12T18:18:42.144210
| 2021-09-25T11:10:38
| 2021-09-25T11:10:38
| 317,235,419
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
import re
def threshold(input_string):
    """Return the product of all decimal digits found in *input_string*.

    With no digits present the product is the empty-product value, 1.
    """
    product = 1
    for digit in re.findall(r"[0-9]", input_string):
        product *= int(digit)
    return product
def emoji_checker(input_string, cool):
    """Find emojis like ::Word:: or **Word** and split out the "cool" ones.

    An emoji's coolness is the sum of the ordinals of its letters; it is
    cool when that sum is strictly greater than *cool*. Returns the pair
    (all_emojis, cool_emojis), both in order of appearance.
    """
    emoji_regex = r"(?P<symbols>\:\:|\*\*)(?P<emoji>[A-Z][a-z][a-z]+)(?P=symbols)"
    found, cooler = [], []
    for match in re.finditer(emoji_regex, input_string):
        groups = match.groupdict()
        weight = sum(ord(letter) for letter in groups["emoji"])
        token = groups["symbols"] + groups["emoji"] + groups["symbols"]
        found.append(token)
        if weight > cool:
            cooler.append(token)
    return found, cooler
# Read the text, derive the coolness threshold from its digits, then report
# every emoji found and print the cool ones one per line.
string = input()
cool_threshold = threshold(string)
all_emojis, cool_emojis = emoji_checker(string, cool_threshold)

print(f"Cool threshold: {cool_threshold}")
print(f"{len(all_emojis)} emojis found in the text. The cool ones are:")
for emoji in cool_emojis:
    print(emoji)
|
[
"noreply@github.com"
] |
DilyanTsenkov.noreply@github.com
|
36859f62160f94e4c4d427461f1f1f7aaa00bab4
|
5efc7ab8a298a026bad44596e18de104985a4b71
|
/fn_wiki/tests/test_funct_fn_wiki_create_update.py
|
9aa1fce69272eadbcde79a40b9ddf69ec5e91908
|
[
"MIT"
] |
permissive
|
RomanDerkach/resilient-community-apps
|
4cf0abe443411582e9f57491364ecc2d844ba30d
|
1f60fb100e6a697df7b901d7a4aad707fea3dfee
|
refs/heads/master
| 2023-03-11T21:56:18.307942
| 2021-03-02T16:09:33
| 2021-03-02T16:09:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,815
|
py
|
# -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
PACKAGE_NAME = "fn_wiki"
FUNCTION_NAME = "fn_wiki_create_update"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_fn_wiki_create_update_function(circuits, function_params, timeout=5):
    """Fire 'fn_wiki_create_update' through the circuits test harness.

    Re-raises any exception the FunctionComponent raised; otherwise waits
    for the result event and returns the FunctionResult's value.
    """
    # Create the submitTestFunction event
    evt = SubmitTestFunction("fn_wiki_create_update", function_params)

    # Fire a message to the function
    circuits.manager.fire(evt)

    # circuits will fire an "exception" event if an exception is raised in the FunctionComponent
    # return this exception if it is raised
    exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)

    # watcher.wait returns False on timeout, so `is not False` means an
    # exception event actually arrived.
    if exception_event is not False:
        exception = exception_event.args[1]
        raise exception

    # else return the FunctionComponent's results
    else:
        event = circuits.watcher.wait("fn_wiki_create_update_result", parent=evt, timeout=timeout)
        assert event
        assert isinstance(event.kwargs["result"], FunctionResult)
        pytest.wait_for(event, "complete", True)
        return event.kwargs["result"].value
class TestFnWikiCreateUpdate:
    """ Tests for the fn_wiki_create_update function"""

    def test_function_definition(self):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None

    # Inputs expected to fail: missing path, unknown page, unknown parent page.
    mock_fail_path = {
        "wiki_path": None,
        "wiki_body": "sample text",
        "wiki_create_if_missing": False
    }
    mock_fail_page_not_found = {
        "wiki_path": "not found",
        "wiki_body": "sample text",
        "wiki_create_if_missing": False,
    }
    mock_fail_parent_not_found = {
        "wiki_path": "parent not found/new page",
        "wiki_body": "sample text",
        "wiki_create_if_missing": False
    }

    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_fail_path, None),
        (mock_fail_page_not_found, None),
        (mock_fail_parent_not_found, None),
    ])
    def test_fail_update(self, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        # Each failing input should surface as a ValueError from the function.
        with pytest.raises(ValueError):
            results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
            assert(results['success'] == False)
            assert(results['reason'])

    # Creation inputs (create_if_missing=True); titles include non-ASCII text.
    mock_success_title = {
        "wiki_path": "ΣΤ",
        "wiki_body": "ΣΤ",
        "wiki_create_if_missing": True
    }
    mock_success_w_parent_title = {
        "wiki_path": "ΣΤ3/new3",
        "wiki_body": "new3",
        "wiki_create_if_missing": True
    }

    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_success_title, None),
        (mock_success_w_parent_title, None),
    ])
    def test_create_success(self, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
        assert(results['success'])

    # Update of an existing page (create_if_missing=False).
    mock_success_update_title = {
        "wiki_path": "parent1/json2",
        "wiki_body": "new3 ΣΤ3",
        "wiki_create_if_missing": False
    }

    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_success_update_title, None)
    ])
    def test_update_success(self, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
        assert(results['success'])

    # Creation beneath existing parents, one and two levels deep.
    mock_success_update_parent_title = {
        "wiki_path": "ΣΤ3/ΣΤ4",
        "wiki_body": "ΣΤ4",
        "wiki_create_if_missing": True
    }
    mock_success_update_parent_subparent = {
        "wiki_path": "parent1/json2/ΣΤ5",
        "wiki_body": "ΣΤ5",
        "wiki_create_if_missing": True
    }

    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_success_update_parent_title, None),
        (mock_success_update_parent_subparent, None)
    ])
    def test_update_parent_success(self, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
        assert(results['success'])
|
[
"ihor.husar@ibm.com"
] |
ihor.husar@ibm.com
|
c8a8f30335dc23ab837f1ee123fbde87fd5009b9
|
49900ba50d4f6c979d6d433577828c8007973125
|
/data_utils/ner.py
|
597d37cdb973d60f1d05f4fd35b70b226eb1faac
|
[] |
no_license
|
weizhenzhao/cs224d_nlp_problem_set2
|
9661414965a58b97113f828a47932c5b9d8411df
|
302f0e53cdd88147a5c1727d06f0be18270d8a2a
|
refs/heads/master
| 2021-10-22T18:22:31.063591
| 2019-03-12T14:03:36
| 2019-03-12T14:03:36
| 104,356,708
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
##
# Utility functions for NER assignment
# Assigment 2, part 1 for CS224D
##
from data_utils.utils import invert_dict
from numpy import *
def load_wv(vocabfile, wvfile):
    """Load word vectors and the word<->index maps for the vocabulary file.

    Returns (wv, word_to_num, num_to_word) where wv is the float matrix
    read from *wvfile* and the two dicts map between words and row indices.
    """
    wv = loadtxt(wvfile, dtype=float)
    with open(vocabfile) as handle:
        vocab = [entry.strip() for entry in handle]
    num_to_word = dict(enumerate(vocab))
    word_to_num = invert_dict(num_to_word)
    return wv, word_to_num, num_to_word
def save_predictions(y, filename):
    """Write each prediction of *y* on its own line of *filename*,
    terminating the file with a newline."""
    with open(filename, 'w') as out:
        out.write("\n".join(str(value) for value in y))
        out.write("\n")
|
[
"958904120@qq.com"
] |
958904120@qq.com
|
f225babc8403680d28288ebf49150e7c4d9c2893
|
f8da830331428a8e1bbeadf23345f79f1750bd98
|
/msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/vendored_sdks/usersactions/operations/_user_onenote_notebook_section_group_section_page_parent_notebook_operations.py
|
9cbe41f6e394adb19707b01f0c335867f7b93f53
|
[
"MIT"
] |
permissive
|
ezkemboi/msgraph-cli
|
e023e1b7589461a738e42cbad691d9a0216b0779
|
2ceeb27acabf7cfa219c8a20238d8c7411b9e782
|
refs/heads/main
| 2023-02-12T13:45:03.402672
| 2021-01-07T11:33:54
| 2021-01-07T11:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,768
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UserOnenoteNotebookSectionGroupSectionPageParentNotebookOperations(object):
    """UserOnenoteNotebookSectionGroupSectionPageParentNotebookOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~users_actions.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def copy_notebook(
        self,
        user_id,  # type: str
        notebook_id,  # type: str
        section_group_id,  # type: str
        onenote_section_id,  # type: str
        onenote_page_id,  # type: str
        group_id=None,  # type: Optional[str]
        rename_as=None,  # type: Optional[str]
        notebook_folder=None,  # type: Optional[str]
        site_collection_id=None,  # type: Optional[str]
        site_id=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.MicrosoftGraphOnenoteOperation"
        """Invoke action copyNotebook.

        Invoke action copyNotebook.

        :param user_id: key: id of user.
        :type user_id: str
        :param notebook_id: key: id of notebook.
        :type notebook_id: str
        :param section_group_id: key: id of sectionGroup.
        :type section_group_id: str
        :param onenote_section_id: key: id of onenoteSection.
        :type onenote_section_id: str
        :param onenote_page_id: key: id of onenotePage.
        :type onenote_page_id: str
        :param group_id:
        :type group_id: str
        :param rename_as:
        :type rename_as: str
        :param notebook_folder:
        :type notebook_folder: str
        :param site_collection_id:
        :type site_collection_id: str
        :param site_id:
        :type site_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MicrosoftGraphOnenoteOperation, or the result of cls(response)
        :rtype: ~users_actions.models.MicrosoftGraphOnenoteOperation
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.MicrosoftGraphOnenoteOperation"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Collapse the optional action arguments into the generated request-body model.
        _body = models.PathsFm3Zd0UsersUserIdOnenoteNotebooksNotebookIdSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdParentnotebookMicrosoftGraphCopynotebookPostRequestbodyContentApplicationJsonSchema(group_id=group_id, rename_as=rename_as, notebook_folder=notebook_folder, site_collection_id=site_collection_id, site_id=site_id)
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.copy_notebook.metadata['url']  # type: ignore
        path_format_arguments = {
            'user-id': self._serialize.url("user_id", user_id, 'str'),
            'notebook-id': self._serialize.url("notebook_id", notebook_id, 'str'),
            'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
            'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
            'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # NOTE(review): the next line overwrites the serialized Accept header
        # just set above with the same literal value — one of the two lines is
        # redundant (autogenerated quirk; harmless).
        header_parameters['Accept'] = 'application/json'

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_body, 'PathsFm3Zd0UsersUserIdOnenoteNotebooksNotebookIdSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdParentnotebookMicrosoftGraphCopynotebookPostRequestbodyContentApplicationJsonSchema')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than HTTP 200 is mapped to an error and raised.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('MicrosoftGraphOnenoteOperation', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    copy_notebook.metadata = {'url': '/users/{user-id}/onenote/notebooks/{notebook-id}/sectionGroups/{sectionGroup-id}/sections/{onenoteSection-id}/pages/{onenotePage-id}/parentNotebook/microsoft.graph.copyNotebook'}  # type: ignore
|
[
"japhethobalak@gmail.com"
] |
japhethobalak@gmail.com
|
8cbeef5ea8b3d7e0aa7655c31e01ef7d0da11446
|
8541f4118c6093c84e78d768285e7007ee5f6a6c
|
/apps/tax/migrations/0005_auto_20160306_1353.py
|
8077c34962cafafda6e7398270db44898639bd2a
|
[] |
no_license
|
iraycd/awecounting
|
c81a8ca6b7a4a942e63cf6b7d723f9883e57a107
|
388df4de63146e0a9a211afa522ec50e0f3df443
|
refs/heads/master
| 2021-01-15T23:30:27.439759
| 2016-03-16T10:34:40
| 2016-03-16T10:34:40
| 57,046,467
| 1
| 0
| null | 2016-04-25T14:03:40
| 2016-04-25T14:03:40
| null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-06 08:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: makes PartyTaxPreference.party a OneToOneField with an
    # explicit CASCADE on_delete and the 'tax_preference' reverse accessor.

    dependencies = [
        ('tax', '0004_partytaxpreference'),
    ]

    operations = [
        migrations.AlterField(
            model_name='partytaxpreference',
            name='party',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='tax_preference', to='ledger.Party'),
        ),
    ]
|
[
"roshanshrestha01@gmail.com"
] |
roshanshrestha01@gmail.com
|
31fafff04d22006623a7bc672c81339d13885407
|
22ccc673a522b52f2678b6ac96e3ff2a104864ff
|
/jobs/migrations/0005_auto_20150902_0600.py
|
e6e2f4f6043f8e5e142666aeaf0c677401c0f62a
|
[] |
no_license
|
ivlevdenis/pythondigest
|
07e448da149d92f37b8ce3bd01b645ace1fa0888
|
f8ccc44808a26960fb69a4c4c3491df3e6d3d24e
|
refs/heads/master
| 2021-01-18T02:09:42.121559
| 2016-05-15T22:44:34
| 2016-05-15T22:44:34
| 58,350,368
| 0
| 0
| null | 2016-05-09T05:21:39
| 2016-05-09T05:21:39
| null |
UTF-8
|
Python
| false
| false
| 1,435
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated: drops the salary/url columns from JobItem, adds a free-text
    # description, and relaxes employer_name/place to nullable CharFields.

    dependencies = [
        ('jobs', '0004_jobfeed_is_activated'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='jobitem',
            name='salary_currency',
        ),
        migrations.RemoveField(
            model_name='jobitem',
            name='salary_from',
        ),
        migrations.RemoveField(
            model_name='jobitem',
            name='salary_till',
        ),
        migrations.RemoveField(
            model_name='jobitem',
            name='url_api',
        ),
        migrations.RemoveField(
            model_name='jobitem',
            name='url_logo',
        ),
        migrations.AddField(
            model_name='jobitem',
            name='description',
            field=models.TextField(null=True, blank=True, verbose_name='Описание вакансии'),
        ),
        migrations.AlterField(
            model_name='jobitem',
            name='employer_name',
            field=models.CharField(null=True, max_length=255, blank=True, verbose_name='Работодатель'),
        ),
        migrations.AlterField(
            model_name='jobitem',
            name='place',
            field=models.CharField(null=True, max_length=255, blank=True, verbose_name='Место'),
        ),
    ]
|
[
"sapronov.alexander92@gmail.com"
] |
sapronov.alexander92@gmail.com
|
b923127047254c84445608e989311a4fb0eb0b40
|
4f4776eb69cbea9ee1c87a22732c5d778855c83a
|
/leetcode/Number_Complement.py
|
6f4cc7d18ff977b72e60674c3283b67bee1f0ecb
|
[] |
no_license
|
k4u5h4L/algorithms
|
4a0e694109b8aadd0e3b7a66d4c20692ecdef343
|
b66f43354792b1a6facff90990a7685f5ed36a68
|
refs/heads/main
| 2023-08-19T13:13:14.931456
| 2021-10-05T13:01:58
| 2021-10-05T13:01:58
| 383,174,341
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
'''
Number Complement
Easy
The complement of an integer is the integer you get when you flip all the 0's to 1's and all the 1's to 0's in its binary representation.
For example, The integer 5 is "101" in binary and its complement is "010" which is the integer 2.
Given an integer num, return its complement.
Example 1:
Input: num = 5
Output: 2
Explanation: The binary representation of 5 is 101 (no leading zero bits), and its complement is 010. So you need to output 2.
Example 2:
Input: num = 1
Output: 0
Explanation: The binary representation of 1 is 1 (no leading zero bits), and its complement is 0. So you need to output 0.
'''
class Solution:
    def findComplement(self, num: int) -> int:
        """Return the integer whose binary form flips every bit of *num*.

        Example: 5 is 0b101, its complement is 0b010 == 2.

        Replaces the original build-a-binary-string-and-reparse approach
        with a single XOR against an all-ones mask of the same bit length —
        O(1) arithmetic instead of O(bits) string work, same results.
        """
        # max(..., 1) keeps num == 0 behaving like the string version:
        # the complement of "0" is "1", i.e. 1.
        mask = (1 << max(num.bit_length(), 1)) - 1
        return num ^ mask
|
[
"noreply@github.com"
] |
k4u5h4L.noreply@github.com
|
7bccdd943219008b9ab87f2c0d3a9f60a25927c6
|
9b20743ec6cd28d749a4323dcbadb1a0cffb281b
|
/03_Linear_Algebra_for_Machine_Learning/04/05_vector_division.py
|
7a5cbfcb42a4a5db9dfe2635df84d2ce76f0ddf3
|
[] |
no_license
|
jggrimesdc-zz/MachineLearningExercises
|
6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178
|
ee265f1c6029c91daff172b3e7c1a96177646bc5
|
refs/heads/master
| 2023-03-07T19:30:26.691659
| 2021-02-19T08:00:49
| 2021-02-19T08:00:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
# vector division: NumPy's / operator divides two equal-length vectors
# element by element.
from numpy import array

# the two operand vectors
a = array([1, 2, 3])
print(a)
b = array([1, 2, 3])
print(b)

# element-wise quotient
c = a / b
print(c)
|
[
"jgrimes@jgrimes.tech"
] |
jgrimes@jgrimes.tech
|
47010cc8029ce26f087bc5af210729e2ad8964d0
|
6062dc6c23a4013a879617cd9dd8d60fba582964
|
/day23/machine.py
|
16384f67fcfce888fea5c9cb2095b5fec6a57bfc
|
[] |
no_license
|
grey-area/advent-of-code-2017
|
8134a1213e69460e24a821ff96e38cbc7f83b480
|
87c213277e4535fff0a1dcf7ad26e182e20b8165
|
refs/heads/master
| 2020-04-13T05:38:36.852721
| 2018-12-30T23:31:00
| 2018-12-30T23:31:00
| 162,997,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,514
|
py
|
from collections import defaultdict
from collections import namedtuple
from collections import deque
import operator
from functools import partial
Instruction = namedtuple('Instruction', ['op', 'args'])
class Machine():
    """Interpreter for the day-23 assembly-like language (set/sub/mul/jnz)."""

    def __init__(self, filename):
        # Registers default to 0 on first read.
        self.registers = defaultdict(int)
        self.load_program(filename)
        self.ip = 0
        self.terminated = False
        self.mul_called = 0

    def cast(self, X):
        """Resolve an operand: integer literal, else current register value."""
        try:
            return int(X)
        except ValueError:
            return self.registers[X]

    def sub(self, X, Y):
        self.registers[X] = self.registers[X] - self.cast(Y)

    def mul(self, X, Y):
        self.registers[X] = self.registers[X] * self.cast(Y)
        # Counted for the puzzle's part-one answer.
        self.mul_called += 1

    def jnz(self, X, Y):
        # -1 compensates for the unconditional ip increment in step().
        if self.cast(X) != 0:
            self.ip += self.cast(Y) - 1

    def set(self, X, Y):
        self.registers[X] = self.cast(Y)

    def load_program(self, filename):
        """Parse *filename* into a list of (bound method, operand list) pairs."""
        dispatch = {
            'jnz': self.jnz,
            'set': self.set,
            'sub': self.sub,
            'mul': self.mul,
        }
        self.program = []
        with open(filename) as source:
            for raw_line in source.read().splitlines():
                mnemonic, *operands = raw_line.split(' ')
                self.program.append(Instruction(dispatch[mnemonic], operands))

    def step(self):
        """Execute one instruction; flag termination when ip leaves the program."""
        op, args = self.program[self.ip]
        op(*args)
        self.ip += 1
        if not (0 <= self.ip < len(self.program)):
            self.terminated = True
|
[
"andrew@awebb.info"
] |
andrew@awebb.info
|
7084db3062d66581c38fbdc43d86b9d20a9172c9
|
4926667354fa1f5c8a93336c4d6e2b9f6630836e
|
/1534.py
|
13cc2b14de3bdc53a7c3d07c0f26668d8b35111d
|
[] |
no_license
|
nascarsayan/lintcode
|
343b3f6e7071479f0299dd1dd1d8068cbd7a7d9e
|
4da24b9f5f182964a1bdf4beaa8afc17eb7a70f4
|
refs/heads/master
| 2021-07-13T12:31:45.883179
| 2020-07-20T02:27:53
| 2020-07-20T02:27:53
| 185,825,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
    """
    @param root: root of a tree
    @return: head node of a doubly linked list
    """
    def treeToDoublyList(self, root):
        # Write your code here.
        # In-order rewiring: each node's left/right pointers are reused as
        # prev/next of a circular doubly linked list in sorted order.
        def recurse(root):
            # Returns (head, tail) of the (non-circular) list built from this
            # subtree, or (None, None) for an empty subtree.
            if root is None:
                return (None, None)
            st, fl = root, root  # st: list head so far, fl: list tail so far
            if root.left is not None:
                # Splice the left subtree's list in front of root.
                lst, lfl = recurse(root.left)
                lfl.right = root
                root.left = lfl
                st = lst
            if root.right is not None:
                # Append the right subtree's list after root.
                rst, rfl = recurse(root.right)
                root.right = rst
                rst.left = root
                fl = rfl
            return (st, fl)
        if root is None:
            return None
        hd, tl = recurse(root)
        # Close the cycle: head <- tail and tail -> head.
        hd.left = tl
        tl.right = hd
        return hd
|
[
"nascarsayan@iitkgp.ac.in"
] |
nascarsayan@iitkgp.ac.in
|
e07f624ea0d255df65ac483eff918d2f319b22b5
|
afea9757be324c8def68955a12be11d71ce6ad35
|
/willyanealves/services/migrations/0009_auto_20201209_1404.py
|
b2e2a364f136f17ebd91a275a705f8061d4ef9ea
|
[] |
no_license
|
bergpb/willyane-alves
|
c713cac3ec3a68005f3b8145985693d2477ba706
|
8b2b9922ba35bf2043f2345228f03d80dbd01098
|
refs/heads/master
| 2023-02-10T19:57:50.893172
| 2021-01-11T16:17:14
| 2021-01-11T16:17:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# Generated by Django 3.1.2 on 2020-12-09 17:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: redefines Service.price as a 2-decimal-place DecimalField
    # labelled 'Valor'.

    dependencies = [
        ('services', '0008_auto_20201209_1400'),
    ]

    operations = [
        migrations.AlterField(
            model_name='service',
            name='price',
            field=models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Valor'),
        ),
    ]
|
[
"jocsadm@gmail.com"
] |
jocsadm@gmail.com
|
101feda1a0f140f3e9c0891e6c61e0269a85ac2e
|
dda862418770f3885256d96e9bdb13d0759c5f43
|
/codeforces/div-2/nastya-and-rice.py
|
a2c25afd65589336a3210b2dd8ff1e66d0aefc44
|
[
"MIT"
] |
permissive
|
bellatrixdatacommunity/data-structure-and-algorithms
|
d56ec485ebe7a5117d4922caeb0cd44c5dddc96f
|
d24c4001a797c12347973263a0f4f98939e86900
|
refs/heads/master
| 2022-12-03T00:51:07.944915
| 2020-08-13T20:30:51
| 2020-08-13T20:30:51
| 270,268,375
| 4
| 0
|
MIT
| 2020-08-13T20:30:53
| 2020-06-07T10:19:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,299
|
py
|
"""
[A. Nastya and Rice](https://codeforces.com/contest/1341/problem/A)
time limit per test1 second
memory limit per test256 megabytes
inputstandard input
outputstandard output
Nastya just made a huge mistake and dropped a whole package of rice on the floor. Mom will come soon. If she sees this,
then Nastya will be punished.
In total, Nastya dropped 𝑛 grains. Nastya read that each grain weighs some integer number of grams from 𝑎−𝑏 to 𝑎+𝑏,
inclusive (numbers 𝑎 and 𝑏 are known), and the whole package of 𝑛 grains weighs from 𝑐−𝑑 to 𝑐+𝑑 grams, inclusive
(numbers 𝑐 and 𝑑 are known). The weight of the package is the sum of the weights of all 𝑛 grains in it.
Help Nastya understand if this information can be correct. In other words, check whether each grain can have such a
mass that the 𝑖-th grain weighs some integer number 𝑥𝑖 (𝑎−𝑏≤𝑥𝑖≤𝑎+𝑏), and in total they weigh from 𝑐−𝑑 to 𝑐+𝑑,
inclusive (𝑐−𝑑≤∑𝑖=1𝑛𝑥𝑖≤𝑐+𝑑).
Input
The input consists of multiple test cases. The first line contains a single integer 𝑡 (1≤𝑡≤1000) — the number of test
cases.
The next 𝑡 lines contain descriptions of the test cases, each line contains 5 integers: 𝑛 (1≤𝑛≤1000) — the number of
grains that Nastya counted and 𝑎,𝑏,𝑐,𝑑 (0≤𝑏<𝑎≤1000,0≤𝑑<𝑐≤1000) — numbers that determine the possible weight of
one grain of rice (from 𝑎−𝑏 to 𝑎+𝑏) and the possible total weight of the package (from 𝑐−𝑑 to 𝑐+𝑑).
Output
For each test case given in the input print "Yes", if the information about the weights is not inconsistent, and print
"No" if 𝑛 grains with masses from 𝑎−𝑏 to 𝑎+𝑏 cannot make a package with a total mass from 𝑐−𝑑 to 𝑐+𝑑.
Example
inputCopy
5
7 20 3 101 18
11 11 10 234 2
8 9 7 250 122
19 41 21 321 10
3 10 8 6 1
outputCopy
Yes
No
Yes
No
Yes
Note
In the first test case of the example, we can assume that each grain weighs 17 grams, and a pack 119 grams, then really
Nastya could collect the whole pack.
In the third test case of the example, we can assume that each grain weighs 16 grams, and a pack 128 grams, then really
Nastya could collect the whole pack.
In the fifth test case of the example, we can be assumed that 3 grains of rice weigh 2, 2, and 3 grams, and a pack is 7
grams, then really Nastya could collect the whole pack.
In the second and fourth test cases of the example, we can prove that it is impossible to determine the correct weight
of all grains of rice and the weight of the pack so that the weight of the pack is equal to the total weight of all collected grains.
"""
import sys


def feasible(n, a, b, c, d):
    """Return "Yes" if n grains, each weighing an integer in [a-b, a+b],
    can sum to a total in [c-d, c+d]; "No" otherwise.

    The achievable totals form the interval [n*(a-b), n*(a+b)], so the
    answer is "Yes" exactly when that interval intersects [c-d, c+d].
    """
    # Pure-integer comparison: avoids the float rounding the original
    # division-by-n approach could introduce.
    if c + d < n * (a - b) or c - d > n * (a + b):
        return "No"
    return "Yes"


if __name__ == "__main__":
    # Read every token at once (do not shadow the builtin `input`):
    # first token is the test count t, then 5 integers per case.
    data = list(map(int, sys.stdin.read().split()))
    t = data[0]
    it = 1
    for _ in range(t):
        n, a, b, c, d = data[it:it + 5]
        print(feasible(n, a, b, c, d))
        it += 5
|
[
"adityaraman96@gmail.com"
] |
adityaraman96@gmail.com
|
edfa698a2b59a1f3f4933f667ae163d842cb428d
|
f06ddca5258290a1e7448a18e1d24a9d20226fbd
|
/pytext/common/constants.py
|
3b0c31b01f7bc57811441b3f5a267b920e948602
|
[
"BSD-3-Clause"
] |
permissive
|
mruberry/pytext
|
6d64bc37429e3dd5581e5b3b6bf60bd216b6f445
|
3bba58a048c87d7c93a41830fa7853896c4b3e66
|
refs/heads/master
| 2022-07-16T07:41:47.781126
| 2020-05-14T04:52:35
| 2020-05-14T04:54:33
| 263,892,770
| 2
| 0
|
NOASSERTION
| 2020-05-14T11:11:33
| 2020-05-14T11:11:32
| null |
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from enum import Enum
class DatasetFieldName:
    """String keys naming the fields of a processed dataset batch.

    Other components look up per-example values (features, labels,
    weights, lengths, metadata) in batch dictionaries by these names.
    """

    DOC_LABEL_FIELD = "doc_label"
    WORD_LABEL_FIELD = "word_label"
    UTTERANCE_FIELD = "utterance"
    TEXT_FIELD = "word_feat"
    SEQ_FIELD = "seq_word_feat"
    DICT_FIELD = "dict_feat"
    RAW_DICT_FIELD = "sparsefeat"
    CHAR_FIELD = "char_feat"
    DENSE_FIELD = "dense_feat"
    CONTEXTUAL_TOKEN_EMBEDDING = "contextual_token_embedding"
    DOC_WEIGHT_FIELD = "doc_weight"
    WORD_WEIGHT_FIELD = "word_weight"
    RAW_WORD_LABEL = "raw_word_label"
    TOKEN_INDICES = "token_indices"
    TOKEN_RANGE = "token_range"
    TOKENS = "tokens"
    LANGUAGE_ID_FIELD = "lang"
    SEQ_LENS = "seq_lens"
    TARGET_SEQ_LENS = "target_seq_lens"
    RAW_SEQUENCE = "raw_sequence"
    SOURCE_SEQ_FIELD = "source_sequence"
    TARGET_SEQ_FIELD = "target_sequence"
    NUM_TOKENS = "num_tokens"
class PackageFileName:
    """File names used when packaging pretrained embeddings."""

    SERIALIZED_EMBED = "pretrained_embed_pt_serialized"  # serialized (torch) form
    RAW_EMBED = "pretrained_embed_raw"                   # raw text form
class DFColumn:
    """Column names used when loading raw examples into a dataframe-like
    structure, before they are converted to model features."""

    DOC_LABEL = "doc_label"
    WORD_LABEL = "word_label"
    UTTERANCE = "text"
    ALIGNMENT = "alignment"
    DICT_FEAT = "dict_feat"
    DENSE_FEAT = "dense_feat"
    RAW_FEATS = "raw_feats"
    MODEL_FEATS = "model_feats"
    DOC_WEIGHT = "doc_weight"
    WORD_WEIGHT = "word_weight"
    TOKEN_RANGE = "token_range"
    LANGUAGE_ID = "lang"
    SOURCE_SEQUENCE = "source_sequence"
    CONTEXT_SEQUENCE = "context_sequence"
    TARGET_SEQUENCE = "target_sequence"
    SOURCE_FEATS = "source_feats"
    TARGET_TOKENS = "target_tokens"
    SEQLOGICAL = "seqlogical"
    TARGET_PROBS = "target_probs"
    TARGET_LOGITS = "target_logits"
    TARGET_LABELS = "target_labels"
class Padding:
    """Padding token and indices for label sequences."""

    WORD_LABEL_PAD = "PAD_LABEL"
    WORD_LABEL_PAD_IDX = 0
    # NOTE(review): -1 looks like a loss ignore_index sentinel — confirm
    # against where this constant is consumed.
    DEFAULT_LABEL_PAD_IDX = -1
class VocabMeta:
    """Special vocabulary tokens: unknown/pad markers and sentence- and
    sequence-level delimiters."""

    UNK_TOKEN = "<unk>"
    UNK_NUM_TOKEN = f"{UNK_TOKEN}-NUM"  # unknown token specialized for numbers
    PAD_TOKEN = "<pad>"
    EOS_TOKEN = "</s>"
    INIT_TOKEN = "<s>"
    PAD_SEQ = "<pad_seq>"
    EOS_SEQ = "</s_seq>"
    INIT_SEQ = "<s_seq>"
class BatchContext:
    """Keys for auxiliary per-batch context entries."""

    IGNORE_LOSS = "ignore_loss"
    INDEX = "row_index"
    TASK_NAME = "task_name"
class Stage(Enum):
    """Lifecycle phase a batch or metric belongs to."""

    TRAIN = "Training"
    EVAL = "Evaluation"
    TEST = "Test"
    OTHERS = "Others"
class RawExampleFieldName:
    """Field names attached to raw (unprocessed) examples."""

    ROW_INDEX = "row_index"
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
4fcdf50c43cf0c0a802c7899882d88c66afb5521
|
e70b678712a355a0b51632728c7781b0bdcf29f4
|
/Algorithms/Python/Best-Time-to-Buy-and-Sell-Stock.py
|
aa4097ebb2db64fb2c8d11bb08368e8d97f353a7
|
[] |
no_license
|
keyi/Leetcode_Solutions
|
b3e3c6835ed335d7d4ad53a1b37e59ac15fcf3af
|
69e4e969b435ff2796bd7c4b5dad9284a853ab54
|
refs/heads/master
| 2020-05-21T23:36:20.450053
| 2018-11-11T03:45:28
| 2018-11-11T03:45:28
| 33,714,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
class Solution(object):
    def maxProfit(self, prices):
        """Return the maximum profit from a single buy-then-sell.

        :type prices: List[int]
        :rtype: int  (0 if no profitable transaction exists)
        """
        best = 0
        lowest = float("inf")  # cheapest price seen so far
        for price in prices:
            if price < lowest:
                lowest = price
            elif price - lowest > best:
                best = price - lowest
        return best
|
[
"yike921012@gmail.com"
] |
yike921012@gmail.com
|
cdbd67b1a12b3c7320da2aafaa87a06508e9b4de
|
5ef19fdf04970ed0481ff29234a11b812b55a257
|
/OS/SRT.py
|
89a0d94dba43339334289d424480551cfb9d8b02
|
[] |
no_license
|
priyamshah112/Study
|
636bfadee2384b39399b8b2c03349c9faf8853df
|
2ea4341d8497573b014a5275d548289696fe3662
|
refs/heads/master
| 2021-06-28T15:19:24.002518
| 2020-10-02T07:36:24
| 2020-10-02T07:36:24
| 149,157,682
| 0
| 0
| null | 2020-10-02T07:36:25
| 2018-09-17T16:48:41
|
Java
|
UTF-8
|
Python
| false
| false
| 1,762
|
py
|
class Process:
    """Bookkeeping record for one process in the SRT (Shortest Remaining
    Time) scheduling simulation."""

    def __init__(self, p_no, at, bt, wt, tat, nt, ct, rt):
        self.p_no = p_no  # process number / id
        self.at = at      # arrival time
        self.bt = bt      # burst time
        self.wt = wt      # waiting time (computed after simulation)
        self.tat = tat    # turnaround time (computed after simulation)
        self.nt = nt      # normalized turnaround time (tat / bt)
        self.ct = ct      # completion time
        self.rt = rt      # remaining burst time during simulation
def Shift(alist):
    """Order the ready queue by remaining burst time, shortest first.

    Mutates `alist` in place and returns the same list object.
    """
    alist[:] = sorted(alist, key=lambda proc: proc.rt)
    return alist
def main():
    """Interactively simulate SRT (Shortest Remaining Time) scheduling.

    Reads the process count, then each process's arrival and burst time
    from stdin; runs a unit-quantum preemptive simulation; finally
    prints the Gantt chart, a per-process table, and average waiting,
    turnaround, and normalized turnaround times.
    """
    n = int(input("Enter number of processes : "))
    q = 1        # time quantum: one tick per scheduling decision
    pt = []      # all processes, indexed by process number
    chart = []   # Gantt chart: process id executed at each tick
    queue = []   # ready queue
    time = 0     # simulation clock
    ap = 0  # arrived processes
    rp = 0  # ready processes
    done = 0     # completed processes
    start = 0    # becomes 1 after the first scheduling step
    avgwt = 0
    avgtat = 0
    avgnt = 0
    # Read each process; remaining time starts equal to its burst time.
    for i in range(0, n):
        pt.insert(i, Process(i, int(input("Enter Arrival Time : ")), int(input("Enter Burst Time :")), 0.0, 0.0, 0.0, 0, 0))
        pt[i].rt = pt[i].bt
    while (done < n):
        # Admit any processes that have arrived by the current tick.
        for i in range(ap, n):
            if time >= pt[i].at:
                queue.append(pt[i])
                ap += 1
                rp += 1
        if rp < 1:
            # CPU idle: nothing is ready yet.
            # NOTE(review): idle ticks are charted with process 0's id,
            # which mislabels idle time in the Gantt chart — confirm intent.
            chart.append(pt[0].p_no)
            time += 1
            continue
        if start:
            # Re-sort so the job with the shortest remaining time runs next.
            queue = Shift(queue)
        if queue[0].rt > 0:
            # Run the chosen process for one quantum.
            for g in range(time, time + q):
                chart.append(queue[0].p_no)
            time += q
            queue[0].rt -= q
        else:
            # Burst exhausted on an earlier tick: record completion now.
            pt[queue[0].p_no].ct = time
            queue.pop(0)
            done += 1
            rp -= 1
        start = 1
    print(chart)
    # Derive waiting, turnaround, and normalized turnaround times.
    # NOTE(review): nt divides by bt, so a zero burst time would raise
    # ZeroDivisionError; inputs are presumably positive — confirm.
    for i in range(0, n):
        pt[i].tat = pt[i].ct - pt[i].at
        avgtat += pt[i].tat
        pt[i].wt = pt[i].tat - pt[i].bt
        avgwt += pt[i].wt
        pt[i].nt = pt[i].tat / pt[i].bt
        avgnt += pt[i].nt
    print("Process no.\t AT\t BT\t WT\t TAT\t NT\t CT\t")
    for i in range(0, n):
        print(str(pt[i].p_no)+" \t\t "+str(pt[i].at)+" \t "+str(pt[i].bt)+" \t "+str(round(pt[i].wt,2))+" \t "+str(round(pt[i].tat,2))+" \t "+str(round(pt[i].nt,2))+" \t "+str(pt[i].ct))
    print("Average Waiting time", avgwt/n)
    print("Average TAT", avgtat/n)
    print("Average Normalized Time", avgnt/n)
main()
|
[
"priyamshah112@gmail.com"
] |
priyamshah112@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.