blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d93842ad2b9ac62adc555b2ee2d299216b6633c
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_5/gdljam001/mymath.py
|
16fc26c21b16d5f866005d3e2ae831c8a1999015
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
#get Integer
#James Godlonton
#12 April 2014
def get_integer(choice):
    """Prompt until the user types a non-negative integer and return it.

    choice -- label inserted into the prompt, e.g. "x" -> "Enter x:".
    """
    ans = input("Enter " + choice + ":\n")
    while not ans.isdigit():
        # Re-prompt until the whole input is digits.
        ans = input("Enter " + choice + ":\n")
    # Bug fix: the input is guaranteed to be all digits here, so int() is
    # the correct conversion; eval() would execute arbitrary expressions
    # and is a security hole on user input.
    return int(ans)
def calc_factorial(x):
    """Return x! (the product 1*2*...*x); returns 1 when x <= 0."""
    product = 1
    # Start at 2: multiplying by 1 is a no-op.
    for factor in range(2, x + 1):
        product *= factor
    return product
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
7f8d9c571f666fc5e9028d7c61e5a706929ef742
|
ab5392cc0fc17cdc3feca3eb7b32e79b6be60ef7
|
/hrp/internal/scaffold/templates/testcases/demo_ref_testcase_test.py
|
714030cdab14a9b2a13fba8363f41bb01715bb3d
|
[
"Apache-2.0"
] |
permissive
|
Jason-Fu/HttpRunner
|
3ad5def3e5aa49f073d8cef75e75a4654ac3ec38
|
69495c9eb3e19eaf6f7af438b6a0437050f32315
|
refs/heads/master
| 2022-06-04T07:28:47.570096
| 2022-05-05T13:52:01
| 2022-05-05T13:52:01
| 195,747,138
| 0
| 0
| null | 2019-07-08T06:06:36
| 2019-07-08T06:06:36
| null |
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
# NOTE: Generated By HttpRunner v4.0.0
# FROM: testcases/demo_ref_testcase.yml
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
from httprunner import HttpRunner, Config, Step, RunRequest, RunTestCase
from testcases.demo_requests_test import TestCaseDemoRequests as DemoRequests
class TestCaseDemoRefTestcase(HttpRunner):
    # Auto-generated HttpRunner testcase: runs a referenced testcase first,
    # then a plain request that consumes a variable exported by that step.

    config = (
        Config("request methods testcase: reference testcase")
        .variables(
            **{
                "foo1": "testsuite_config_bar1",
                "expect_foo1": "testsuite_config_bar1",
                "expect_foo2": "config_bar2",
            }
        )
        .base_url("https://postman-echo.com")
        .verify(False)  # skip TLS certificate verification
    )

    teststeps = [
        Step(
            # Run the referenced DemoRequests testcase and export "foo3"
            # into this testcase's session variables.
            RunTestCase("request with functions")
            .with_variables(
                **{"foo1": "testcase_ref_bar1", "expect_foo1": "testcase_ref_bar1"}
            )
            .call(DemoRequests)
            .export(*["foo3"])
        ),
        Step(
            # POST form data; $foo3 comes from the export above.
            RunRequest("post form data")
            .with_variables(**{"foo1": "bar1"})
            .post("/post")
            .with_headers(
                **{
                    "User-Agent": "funplugin/${get_version()}",
                    "Content-Type": "application/x-www-form-urlencoded",
                }
            )
            .with_data("foo1=$foo1&foo2=$foo3")
            .validate()
            .assert_equal("status_code", 200)
            .assert_equal("body.form.foo1", "bar1")
            .assert_equal("body.form.foo2", "bar21")
        ),
    ]


if __name__ == "__main__":
    TestCaseDemoRefTestcase().test_start()
|
[
"mail@debugtalk.com"
] |
mail@debugtalk.com
|
a3422ab27af13b221285949eb7b8f385e80b3318
|
7cc0e0ae806a4f580100a1ae0d120cab37ccddff
|
/Atividades1/At1Q45.py
|
a7ef1b6a4c6ab72526f6383ee1548fc08c44860c
|
[] |
no_license
|
osmarsalesjr/AtividadesProfFabioGomesEmPython3
|
2ac10cebb7887798a39d9029fe205619f3fd481a
|
a8f2536e34ed8897011536135a1937689d6c3144
|
refs/heads/master
| 2021-01-01T06:43:21.301461
| 2017-07-17T16:22:09
| 2017-07-17T16:22:09
| 97,496,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
def main():
    """Prompt for a cash amount in R$ and print its note breakdown."""
    requested = float(input("Qual o Valor a Sacar em R$? "))
    conta_notas(requested)
def conta_notas(valor):
    """Break *valor* (in R$) into the fewest notes of 100/50/20/10/5/2/1.

    Prints the count of each denomination. Any sub-R$1 remainder is
    discarded (the R$1 count comes from int() truncation).
    """
    notas_cem, notas_cinquenta, notas_vinte, notas_dez = 0, 0, 0, 0
    notas_cinco, notas_dois, notas_um = 0, 0, 0
    # Greedy subtraction: repeatedly remove the largest note that fits.
    while valor >= 100:
        valor = valor - 100
        notas_cem = notas_cem + 1
    while valor >= 50:
        valor = valor - 50
        notas_cinquenta = notas_cinquenta + 1
    while valor >= 20:
        valor = valor - 20
        notas_vinte = notas_vinte + 1
    while valor >= 10:
        valor = valor - 10
        notas_dez = notas_dez + 1
    while valor >= 5:
        valor = valor - 5
        notas_cinco = notas_cinco + 1
    while valor >= 2:
        valor = valor - 2
        notas_dois = notas_dois + 1
    notas_um = int(valor)
    print(">> Quantidade de notas a serem recebidas: ")
    print("Notas de R$ 100: %d\nNotas de R$ 50: %d\nNotas de R$ 20: %d"%(notas_cem, notas_cinquenta, notas_vinte))
    # Bug fix: notas_dois was computed but never printed, so R$2 notes
    # silently vanished from the report.
    print("Notas de R$ 10: %d\nNotas de R$ 5: %d\nNotas de R$ 2: %d\nNotas de R$ 1: %d"%(notas_dez, notas_cinco, notas_dois, notas_um))
if __name__ == '__main__':
main()
|
[
"osmarsalesjr@gmail.com"
] |
osmarsalesjr@gmail.com
|
bccdece2cd099fc2dc4805d24691218c61918883
|
55a2a89fe752dc72ca353c091a47628c830e4117
|
/classification/tests/test_classifier.py
|
3a92278b2c73de077df8f2a1e59980057667a367
|
[
"Apache-2.0"
] |
permissive
|
hercules261188/serverless-transformers-on-aws-lambda
|
28df74886154906494f7315298d534000f784b41
|
d48caab0e07ae8326d4b37ab730faf2cffd02f7d
|
refs/heads/master
| 2023-07-09T01:37:49.042169
| 2021-08-20T08:39:46
| 2021-08-20T08:39:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
from src.classifier import Classifier

# Module-level pipeline so the (presumably expensive) model is constructed
# once per test session rather than per test.
pipeline = Classifier()


def test_response(requests, response):
    # `requests` and `response` look like pytest fixtures supplying a request
    # payload and its expected output — confirm against conftest.py.
    assert response == pipeline(requests)
|
[
"noreply@github.com"
] |
hercules261188.noreply@github.com
|
2d0d1a621fa5ff4224c806a23ae0828c5a4408ae
|
999f3f3da1cb70cb5872f99a09d65d7c4df71cf7
|
/src/data/1061.py
|
51dbb3f209cfabf9eaecdccf40b8420c755a3ff3
|
[
"MIT"
] |
permissive
|
NULLCT/LOMC
|
0f0d1f01cce1d5633e239d411565ac7f0c687955
|
79a16474a8f21310e0fb47e536d527dd5dc6d655
|
refs/heads/main
| 2023-07-27T17:03:46.703022
| 2021-09-04T08:58:45
| 2021-09-04T08:58:45
| 396,290,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
import sys
input = sys.stdin.readline
from collections import defaultdict, deque
def II():
    # Read one line as an int.
    return int(input())


def IS():
    # Read one line as a string with the trailing newline stripped.
    return input().rstrip()


def MI():
    # Read one line of whitespace-separated ints (lazy map object).
    return map(int, input().split())


def LI():
    # Read one line of whitespace-separated ints as a list.
    return list(map(int, input().split()))


def MS():
    # Read one line as a list of whitespace-separated tokens.
    return input().rstrip().split()


def LS():
    # Read one line as a list of its characters (newline stripped).
    return list(input().rstrip())
n, Q = MI()
d = defaultdict(list)
for i in range(n - 1):
a, b = MI()
a -= 1
b -= 1
d[a].append(b)
d[b].append(a)
def biper(n: int, d) -> list:
    """2-colour (bipartition) an undirected graph by BFS from node 0.

    n -- number of nodes, labelled 0..n-1
    d -- adjacency mapping: node -> list of neighbour nodes
         (a defaultdict(list) in this script)

    Returns `parity`, where parity[v] is the BFS depth of v mod 2.
    Nodes unreachable from node 0 keep parity 0.

    Fix: the original annotation `d: defaultdict(list)` *called*
    defaultdict(list) at definition time, storing an instance (not a
    type) in the annotation.
    """
    seen = [0] * n
    parity = [0] * n
    q = deque()
    q.append((0, 0))
    while q:
        v, p = q.pop()
        if seen[v]:
            continue  # already coloured
        seen[v] = 1
        parity[v] = p
        for to in d[v]:
            if seen[to] == 0:
                # Neighbour gets the opposite colour.
                q.appendleft((to, 1 - p))
    return parity
l = biper(n, d)
# print(l)
for i in range(Q):
C, D = MI()
C -= 1
D -= 1
if l[C] == l[D]:
print('Town')
else:
print('Road')
|
[
"cockatiel.u10@gmail.com"
] |
cockatiel.u10@gmail.com
|
429d79300bcd0ab017f398e7e54a70ad643630b9
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2_neat/16_0_2_jimmy17_B.py
|
02e02091c0d5cf8bb9357e91027a4f4de051b7df
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 556
|
py
|
# Code Jam "Revenge of the Pancakes"-style solution: count the flips needed
# to make every pancake show '+'. Scans for runs of equal characters; a '+'
# run followed by a change costs 2 flips, a '-' run costs 1 unless a '+' run
# has already been seen.
# NOTE: Python 2 source (raw_input, print statement). Nesting below is
# reconstructed from flattened source — verify against the original repo.
tests = int(raw_input())
for i in range(tests):
    result = 0
    pancakes = raw_input()
    state = pancakes[0]
    has_changed = False
    plus_exists = False
    for ch in pancakes:
        if ch != state:
            has_changed = True
            if state == '+':
                plus_exists = True
                result += 2
            else:
                if not plus_exists:
                    result += 1
        state = ch
    # Uniform stack of '-' never triggers a transition: one flip fixes it.
    if has_changed == False and state == '-':
        result = 1
    print 'Case #'+ str(i+1)+': '+ str(result)
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
feaa101319f2ec0937022c8f51000b88ddd84e02
|
78ca13fcd5a7556227136814703d2536880591b7
|
/list_iteration/py/py3_list_iter_02.py
|
dace7dcba9da6b3a7164190c1d1bebfe56b02e4a
|
[] |
no_license
|
dheerajs0346/PYPL
|
24a7658607eb905e012d49f86a66216b37398918
|
ebfadd75d44ef17e78e4cf1daf8f9e2f66c5ee30
|
refs/heads/master
| 2023-03-10T04:56:37.056033
| 2021-02-17T18:50:14
| 2021-02-17T18:50:14
| 375,725,102
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
## list iteration methods in Py
## author: Vladimir Kulyukin

lst = [1, 2, 3, 4]

## use lazy range from 0 upto the length of lst - 1.
## index into a specific spot in lst.
## (index-based iteration shown deliberately for teaching purposes;
## `for x in lst:` is the idiomatic form.)
for i in range(0, len(lst)):
    print(lst[i])

## the loop variable survives the loop in Python: no new scope is created.
print('i=', i)  # i is bound to its last value in for-loop

## the above for-loop and print produce the following output:
## 1
## 2
## 3
## 4
## i= 3
|
[
"vladimir.kulyukin@gmail.com"
] |
vladimir.kulyukin@gmail.com
|
57a1603b2f96aa4b651f5e650ca820a55bba227b
|
7bdca6fb5f972586efcfb379cded7a5e3891d27c
|
/pymaster/tests/test_masking_flat.py
|
f056e2a287de98f84cf497b0d467039f9c08d1e2
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
LSSTDESC/NaMaster
|
0c31705a5ca5d57d0ad8e7af80dc071811cdfc81
|
b45317f840320855b7e38799fa411782e2886289
|
refs/heads/master
| 2023-08-11T11:57:13.177268
| 2023-08-01T12:11:21
| 2023-08-01T12:11:21
| 142,736,704
| 47
| 25
|
BSD-3-Clause
| 2023-08-01T12:11:23
| 2018-07-29T06:29:34
|
C
|
UTF-8
|
Python
| false
| false
| 2,280
|
py
|
import pytest
import numpy as np
import pymaster as nmt
class MaskingTesterFlat(object):
    """Shared fixture data for the flat-sky mask-apodization tests.

    Builds a 200x200 binary mask over a 10x10 degree patch whose lower
    half of rows is 1 and upper half 0, plus the constants needed to
    evaluate the analytic apodized profiles.
    """

    def __init__(self):
        # Patch geometry: 200x200 pixels spanning 10 degrees per side.
        self.nx = self.ny = 200
        self.lx = self.ly = np.radians(10.)
        # Binary mask: ones in the first ny//2 rows, zeros elsewhere.
        self.msk = np.zeros([self.ny, self.nx])
        self.msk[:self.ny // 2, :] = 1.
        # Apodization scale (degrees) and its inverse in radians.
        self.aposize = 1.
        self.inv_xthr = 1. / np.radians(self.aposize)
        # First row index (from the top) inside the apodization transition.
        self.ioff = self.ny // 2 - int(np.radians(self.aposize) / (self.ly / self.ny))
MT = MaskingTesterFlat()
def test_mask_flat_errors():
    # Invalid inputs must raise: the apodization routine validates its args.
    with pytest.raises(ValueError):  # Badly shaped input (1D, not 2D)
        nmt.mask_apodization_flat(MT.msk[0], MT.lx,
                                  MT.ly, MT.aposize,
                                  apotype="C1")
    with pytest.raises(RuntimeError):  # Negative apodization scale
        nmt.mask_apodization_flat(MT.msk, MT.lx, MT.ly,
                                  -MT.aposize, apotype="C1")
    with pytest.raises(RuntimeError):  # Unknown apodization type
        nmt.mask_apodization_flat(MT.msk, MT.lx, MT.ly,
                                  MT.aposize, apotype="C3")
def test_mask_flat_c1():
    # "C1" apodization: analytic profile f(x) = x - sin(2*pi*x)/(2*pi)
    # inside the transition band.
    msk_apo = nmt.mask_apodization_flat(MT.msk, MT.lx, MT.ly,
                                        MT.aposize, apotype="C1")
    # Below transition: the masked half stays (numerically) zero.
    assert (msk_apo[MT.ny//2:, :] < 1E-10).all()
    # Above transition: far from the edge the mask stays 1.
    assert (np.fabs(msk_apo[:MT.ioff, :]-1.) < 1E-10).all()
    # Within transition: compare against the analytic C1 profile,
    # x being the distance to the mask edge in units of the apodization scale.
    ind_transition = np.arange(MT.ioff, MT.ny//2, dtype=int)
    x = MT.inv_xthr*np.fabs((MT.ny/2.-ind_transition)*MT.ly/MT.ny)
    f = x-np.sin(x*2*np.pi)/(2*np.pi)
    assert (np.fabs(msk_apo[ind_transition, :] - f[:, None])
            < 1E-10).all()
def test_mask_flat_c2():
    # "C2" apodization: analytic profile f(x) = 0.5*(1 - cos(pi*x))
    # inside the transition band.
    msk_apo = nmt.mask_apodization_flat(MT.msk, MT.lx,
                                        MT.ly, MT.aposize,
                                        apotype="C2")
    # Below transition: the masked half stays (numerically) zero.
    assert (msk_apo[MT.ny//2:, :] < 1E-10).all()
    # Above transition: far from the edge the mask stays 1.
    assert (np.fabs(msk_apo[:MT.ioff, :]-1.) < 1E-10).all()
    # Within transition: compare against the analytic C2 profile.
    ind_transition = np.arange(MT.ioff, MT.ny//2, dtype=int)
    x = MT.inv_xthr*np.fabs((MT.ny/2.-ind_transition)*MT.ly/MT.ny)
    f = 0.5*(1-np.cos(x*np.pi))
    assert (np.fabs(msk_apo[ind_transition, :] -
                    f[:, None]) < 1E-10).all()
|
[
"noreply@github.com"
] |
LSSTDESC.noreply@github.com
|
e84af738a00cae662826ffe2c63e25e47325c97c
|
e854337c828f355a1c916c3adffcff56b069e4c2
|
/flights/migrations/0003_passenger.py
|
175a8cd40a837cff8c71430748aedc3938a81ab1
|
[] |
no_license
|
viralsir/djangoProject_evening
|
a6c90e4e4a1d689de53f46d53366db0a7fc410f5
|
1fb05e66df1373c8bf23e7344f388f9ace1ea7cf
|
refs/heads/master
| 2023-04-24T11:54:41.338160
| 2021-05-21T12:15:52
| 2021-05-21T12:15:52
| 361,741,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
# Generated by Django 3.2 on 2021-05-10 12:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the `passenger` model with a
    # many-to-many link back to `flights.flight`.

    dependencies = [
        ('flights', '0002_auto_20210510_1715'),
    ]

    operations = [
        migrations.CreateModel(
            name='passenger',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=40)),
                ('flights', models.ManyToManyField(related_name='passenger', to='flights.flight')),
            ],
        ),
    ]
|
[
"viralsir2018@gmail.com"
] |
viralsir2018@gmail.com
|
23153ae789f6cb8f400fa348bf266dffb1b07264
|
c56ddcc2807151a5c44d3a1d65a1984bc8fd9b84
|
/4 кю/Strip Comments.py
|
ac850d313c2f546cb6f74af406e94c9aa4b26948
|
[] |
no_license
|
kelpasa/Code_Wars_Python
|
2cd18dd404603a6535887e8e6ed2d08da19562ba
|
939ec1dd08ffc7939bb9a139bf42901d6f24fbdd
|
refs/heads/master
| 2022-12-17T02:00:28.319351
| 2020-09-23T09:11:20
| 2020-09-23T09:11:20
| 246,642,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
import re
def solution(s, markers):
    """Strip comments: remove everything from any marker character to the
    end of its line, plus the spaces immediately before the marker.

    s -- input text (newline-separated lines)
    markers -- iterable of single-character comment markers
    """
    if not markers:
        # No markers: only outer whitespace can be stripped.
        return s.strip()
    # re.escape handles every regex metacharacter; the original escaped
    # only '-' and '^' by hand, which breaks for markers such as '\\'.
    pattern = re.compile(
        ' *[{}].*\n'.format(''.join(re.escape(m) for m in markers))
    )
    # Append a sentinel newline so the final line matches too, then drop it.
    return re.sub(pattern, '\n', s + '\n')[:-1]
|
[
"noreply@github.com"
] |
kelpasa.noreply@github.com
|
5b76f8005aa35ad3b95eaf1d9c01c4b33046a647
|
ef198b8a5625626773351ca8de3da6bd7969be25
|
/0x0F-python-object_relational_mapping/1-filter_states.py
|
d90b3afd9dde3c46e9169e77c1a160d564a5f998
|
[] |
no_license
|
fdetun/holbertonschool-higher_level_programming
|
efde2762a55066f9c571a3f6ea4b724af96be6a8
|
3733fc52fddab9df3bc51e6ea2905dad0eefe9ae
|
refs/heads/master
| 2022-12-26T10:28:15.381013
| 2020-09-26T02:12:48
| 2020-09-26T02:12:48
| 259,254,129
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
#!/usr/bin/python3
"""
upper N
"""
import MySQLdb as mdb
import sys
def byfoued():
    """
    function by foued

    Connects to the local MySQL server (localhost:3306) using command-line
    credentials (argv[1]=user, argv[2]=password, argv[3]=database) and
    prints every row of `states` whose name starts with a capital 'N'.
    """
    fdb = mdb.connect(host='localhost',
                      port=3306,
                      user=sys.argv[1],
                      passwd=sys.argv[2],
                      db=sys.argv[3]
                      )
    cursor = fdb.cursor()
    # BINARY forces a case-sensitive LIKE, so 'n...' names are excluded.
    cursor.execute("""SELECT * FROM states WHERE name
                LIKE BINARY 'N%' ORDER BY id ASC""")
    fd = cursor.fetchall()
    for i in fd:
        print(i)
    cursor.close()
    fdb.close()
if __name__ == "__main__":
byfoued()
|
[
"fouedads@gmail.com"
] |
fouedads@gmail.com
|
3203eda17fc7fab819b461afd5e033a342bf2850
|
eb215d9387a8aa006fbb3c1339cf34bdba82ec32
|
/app/controllers/Favorites.py
|
064bbdbc6dc74d744312d9c547e1e99eb9cd98b8
|
[] |
no_license
|
elliotsyoung/crime-dodger
|
ba7d98e593e650c3937565bd47a95fdca7129203
|
268fa82250c98ab1765eed547a515d08c581911a
|
refs/heads/master
| 2021-01-22T05:27:45.296838
| 2016-09-01T22:30:43
| 2016-09-01T22:30:43
| 67,058,461
| 0
| 0
| null | 2016-08-31T17:22:26
| 2016-08-31T17:22:26
| null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
from system.core.controller import *
class Favorites(Controller):
def __init__(self, action):
super(Favorites, self).__init__(action)
self.load_model('Crime')
self.load_model('Favorite')
self.load_model('User')
self.db = self._app.db
def edit(self,id):
favorite=self.models['Favorite'].get_favorite(id)
return self.load_view('/favorites/edit.html',favorite=favorite[0])
|
[
"wapniarski@gmail.com"
] |
wapniarski@gmail.com
|
be4a55087db33754f407e24b68ea1708c532ea30
|
89b45e528f3d495f1dd6f5bcdd1a38ff96870e25
|
/pyneng/exercises/09_functions/task_9_1.py
|
f3fea8eb574faa2713a4238fc517d66e031c07fd
|
[] |
no_license
|
imatyukin/python
|
2ec6e712d4d988335fc815c7f8da049968cc1161
|
58e72e43c835fa96fb2e8e800fe1a370c7328a39
|
refs/heads/master
| 2023-07-21T13:00:31.433336
| 2022-08-24T13:34:32
| 2022-08-24T13:34:32
| 98,356,174
| 2
| 0
| null | 2023-07-16T02:31:48
| 2017-07-25T22:45:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,458
|
py
|
# -*- coding: utf-8 -*-
"""
Задание 9.1
Создать функцию generate_access_config, которая генерирует конфигурацию
для access-портов.
Функция ожидает такие аргументы:
- словарь с соответствием интерфейс-VLAN такого вида:
{'FastEthernet0/12': 10,
'FastEthernet0/14': 11,
'FastEthernet0/16': 17}
- шаблон конфигурации access-портов в виде списка команд (список access_mode_template)
Функция должна возвращать список всех портов в режиме access с конфигурацией
на основе шаблона access_mode_template. В конце строк в списке не должно быть
символа перевода строки.
В этом задании заготовка для функции уже сделана и надо только продолжить писать
само тело функции.
Пример итогового списка (перевод строки после каждого элемента сделан
для удобства чтения):
[
'interface FastEthernet0/12',
'switchport mode access',
'switchport access vlan 10',
'switchport nonegotiate',
'spanning-tree portfast',
'spanning-tree bpduguard enable',
'interface FastEthernet0/17',
'switchport mode access',
'switchport access vlan 150',
'switchport nonegotiate',
'spanning-tree portfast',
'spanning-tree bpduguard enable',
...]
Проверить работу функции на примере словаря access_config
и списка команд access_mode_template.
Если предыдущая проверка прошла успешно, проверить работу функции еще раз на словаре
access_config_2 и убедиться, что в итоговом списке правильные номера интерфейсов
и вланов.
Ограничение: Все задания надо выполнять используя только пройденные темы.
"""
from pprint import pprint
access_mode_template = [
"switchport mode access",
"switchport access vlan",
"switchport nonegotiate",
"spanning-tree portfast",
"spanning-tree bpduguard enable",
]
access_config = {"FastEthernet0/12": 10, "FastEthernet0/14": 11, "FastEthernet0/16": 17}
access_config_2 = {
"FastEthernet0/03": 100,
"FastEthernet0/07": 101,
"FastEthernet0/09": 107,
}
def generate_access_config(intf_vlan_mapping, access_template):
    """Generate access-port configuration from a template.

    intf_vlan_mapping -- dict mapping interface name to its access VLAN,
        e.g. {'FastEthernet0/12': 10, 'FastEthernet0/14': 11}
    access_template -- list of commands for an access-mode port; any
        command ending in 'vlan' gets the interface's VLAN number appended.

    Returns a flat list: for each interface, an 'interface <name>' line
    followed by the filled-in template commands (no trailing newlines).
    """
    result = []
    for interface, vlan_id in intf_vlan_mapping.items():
        result.append("interface " + interface)
        for command in access_template:
            if command.endswith("vlan"):
                result.append(command + " " + str(vlan_id))
            else:
                result.append(command)
    return result
cfg = generate_access_config(access_config, access_mode_template)
pprint(cfg)
|
[
"i.matyukin@gmail.com"
] |
i.matyukin@gmail.com
|
17e86708d5b2a0476756c63ab8d0cd12a77eba92
|
610ac1da64200c109b9ac48d162058fdd85801aa
|
/initmethd1.py
|
0611c42bbb6351ad0642f65bd482c172afe1023f
|
[] |
no_license
|
rajdharmkar/Python2.7
|
3d88e7c76c92bbba7481bce7a224ccc8670b3abb
|
9c6010e8afd756c16e426bf8c3a40ae2cefdadfe
|
refs/heads/master
| 2021-05-03T18:56:36.249812
| 2019-10-08T00:17:46
| 2019-10-08T00:17:46
| 120,418,397
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,676
|
py
|
# NOTE: Python 2 source (print statements without parentheses).
class Human:
    # Teaching example of a class with an initializer and instance methods.

    def __init__(self, name, age, gender):  # `self` is the instance being initialized
        # type: (object, object, object) -> object
        self.name = name  # create and assign instance attributes
        self.age = age
        self.gender = gender

    def speak_name(self):
        # Instance method: reads self.name set in __init__.
        print "my name is %s" % self.name

    def speak(self, text):
        # Without `self` in the signature you get a TypeError: the instance
        # is passed implicitly, so the call would supply one argument too many.
        print text

    def perform_math(self, operation, *args):
        # `operation` is any callable; *args are forwarded to it.
        print "%s performed math and the result was %f" % (self.name, operation(*args))


def add(a, b):  # plain function (not a method) — callable from anywhere
    return a + b


rhea = Human('Rhea', 20, 'female')
bill = Human('William', '24',
             'male')  # new object 'bill', an instance of class Human

print bill.name
print bill.age
print bill.gender
bill.speak_name()
bill.speak("Love")
rhea.perform_math(add, 34,45)
# speak_name is bound to the class and called through the object;
# this is what distinguishes a method from a free function.
|
[
"rajdharmkar@gmail.com"
] |
rajdharmkar@gmail.com
|
f40e095dd0a9ac20f890db612881d524f776ab90
|
6316ad09411bca6d9bdee7d1a2fa1046e753d0d5
|
/celery_app/__init__.py
|
5fc1dcf7e6cb6533ac7b33198d647e22abdf0ece
|
[] |
no_license
|
huazhz/food_master
|
a6da09adb75d303e95733069dec486a3ca7792df
|
f5a444da3fd98d4a17948265cfe863f1ea1f4746
|
refs/heads/master
| 2020-04-08T02:34:56.423886
| 2018-04-02T13:36:43
| 2018-04-02T13:36:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
from celery import Celery

# from celery_app.celeryconfig import broker, backend

# Celery application; broker/backend settings are loaded from the
# celery_app.celeryconfig module.
app = Celery('tasks')
app.config_from_object('celery_app.celeryconfig')

# Shared Redis connection pool for the local instance (default port/db).
pool = redis.ConnectionPool(host='127.0.0.1', port=6379)
r = redis.Redis(connection_pool=pool)
|
[
"huangkwell@163.com"
] |
huangkwell@163.com
|
96c00338f7ff5b13c7eeac22e83d169897ab16aa
|
853d4cec42071b76a80be38c58ffe0fbf9b9dc34
|
/venv/Lib/site-packages/nltk/corpus/reader/crubadan.py
|
70c2e82bdc0cedc69f1d70c5343d24457ed96867
|
[] |
no_license
|
msainTesting/TwitterAnalysis
|
5e1646dbf40badf887a86e125ef30a9edaa622a4
|
b1204346508ba3e3922a52380ead5a8f7079726b
|
refs/heads/main
| 2023-08-28T08:29:28.924620
| 2021-11-04T12:36:30
| 2021-11-04T12:36:30
| 424,242,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,622
|
py
|
# Natural Language Toolkit: An Crubadan N-grams Reader
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Avital Pekker <avital.pekker@utoronto.ca>
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
An NLTK interface for the n-gram statistics gathered from
the corpora for each language using An Crubadan.
There are multiple potential applications for the data but
this reader was created with the goal of using it in the
context of language identification.
For details about An Crubadan, this data, and its potential uses, see:
http://borel.slu.edu/crubadan/index.html
"""
import re
from os import path
from nltk.corpus.reader import CorpusReader
from nltk.data import ZipFilePathPointer
from nltk.probability import FreqDist
class CrubadanCorpusReader(CorpusReader):
    """
    A corpus reader used to access language An Crubadan n-gram files.
    """

    _LANG_MAPPER_FILE = "table.txt"
    # Cache of per-language FreqDists keyed by ISO 639-3 code.
    # NOTE(review): class-level mutable dict, shared across all instances.
    _all_lang_freq = {}

    def __init__(self, root, fileids, encoding="utf8", tagset=None):
        super().__init__(root, fileids, encoding="utf8")
        self._lang_mapping_data = []
        self._load_lang_mapping_data()

    def lang_freq(self, lang):
        """Return n-gram FreqDist for a specific language
        given ISO 639-3 language code"""
        # Lazily load and cache the n-gram file on first request.
        if lang not in self._all_lang_freq:
            self._all_lang_freq[lang] = self._load_lang_ngrams(lang)
        return self._all_lang_freq[lang]

    def langs(self):
        """Return a list of supported languages as ISO 639-3 codes"""
        return [row[1] for row in self._lang_mapping_data]

    def iso_to_crubadan(self, lang):
        """Return internal Crubadan code based on ISO 639-3 code"""
        # Falls through and returns None for an unknown code.
        for i in self._lang_mapping_data:
            if i[1].lower() == lang.lower():
                return i[0]

    def crubadan_to_iso(self, lang):
        """Return ISO 639-3 code given internal Crubadan code"""
        # Falls through and returns None for an unknown code.
        for i in self._lang_mapping_data:
            if i[0].lower() == lang.lower():
                return i[1]

    def _load_lang_mapping_data(self):
        """Load language mappings between codes and description from table.txt"""
        # The corpus cannot be read from inside a zip archive.
        if isinstance(self.root, ZipFilePathPointer):
            raise RuntimeError(
                "Please install the 'crubadan' corpus first, use nltk.download()"
            )
        mapper_file = path.join(self.root, self._LANG_MAPPER_FILE)
        if self._LANG_MAPPER_FILE not in self.fileids():
            raise RuntimeError("Could not find language mapper file: " + mapper_file)
        with open(mapper_file, encoding="utf-8") as raw:
            strip_raw = raw.read().strip()
        # One tab-separated row per line: Crubadan code, then ISO 639-3 code.
        self._lang_mapping_data = [row.split("\t") for row in strip_raw.split("\n")]

    def _load_lang_ngrams(self, lang):
        """Load single n-gram language file given the ISO 639-3 language code
        and return its FreqDist"""
        if lang not in self.langs():
            raise RuntimeError("Unsupported language.")
        crubadan_code = self.iso_to_crubadan(lang)
        ngram_file = path.join(self.root, crubadan_code + "-3grams.txt")
        if not path.isfile(ngram_file):
            raise RuntimeError("No N-gram file found for requested language.")
        counts = FreqDist()
        with open(ngram_file, encoding="utf-8") as f:
            for line in f:
                # Each line: "<count> <ngram>".
                data = line.split(" ")
                ngram = data[1].strip("\n")
                freq = int(data[0])
                counts[ngram] = freq
        return counts
|
[
"msaineti@icloud.com"
] |
msaineti@icloud.com
|
04e2128404e809a729d91ded71472deff19e7274
|
533f86815dcded10183f623b6ddd552fadd9e38c
|
/Lesson_08/hw8/DmitryBirulin_DZ_magic_v2.py
|
3dd1343f0e280ffb969253a832174f2d555acaae
|
[] |
no_license
|
DoctorSad/_Course
|
8ab81db218cd9a0bfefb118094912c53b11256d4
|
da5ba4d6904910be033241e3b68c846e883a24fa
|
refs/heads/main
| 2023-04-15T10:26:27.294706
| 2021-05-06T08:59:38
| 2021-05-06T08:59:38
| 351,796,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,506
|
py
|
"""
Реализуйте игру Magic (hw3/magic.py) с некоторыми дополнениями.
1. При запуске, программа спрашивает имя игрока.
2. В словаре player_data хранить данные игрока и актуализировать их после
каждой сыгранной игры. Оперировать такими данными:
name - имя игрока
games - общее количество сыграных игр
record - рекордное количество попыток (минимальное)
avg_attempts - среднее количество попыток за игру
3. При выходе из программы данные игрока записывать в файл (txt либо json).
**4. При запуске программы, после ввода имени пользователем, читать файл,
если данные об игроке есть в файле то загружать их в player_data.
"""
import random
from pathlib import Path
import json
def main():
    """Entry point: load the named player's stats, loop rounds of magic(),
    update the stats after each round and persist them to JSON on exit."""
    BASE_DIR = Path(__file__).resolve().parent
    FILES_DIR = BASE_DIR / "Files"
    FILES_DIR.mkdir(exist_ok=True)
    file_path = FILES_DIR / "player_data.json"

    player_data = {}
    print('Введите имя игрока: ')
    player_name = input()

    # Bug fix: on the very first run the stats file does not exist yet,
    # which crashed with FileNotFoundError; fall back to an empty list.
    try:
        with open(file_path) as f:
            player_data_in = json.load(f)
    except FileNotFoundError:
        player_data_in = []

    for i in player_data_in:
        if i['name'] == player_name:
            # Known player: continue from the saved stats.
            player_data = i
            new_player = False
            break
    else:
        # for/else: no break -> player not found, start fresh stats.
        player_data = {'name': player_name, 'games': 0, 'record': 9999, 'avg_attempts': float(1)}
        new_player = True

    while True:
        tmp_data = magic()
        player_data_out = []
        player_data['games'] += 1
        if player_data['record'] > tmp_data['record']:
            player_data['record'] = tmp_data['record']
        # Running mean over all games, rounded to 2 decimals.
        player_data['avg_attempts'] = round(((player_data['avg_attempts'] * (player_data['games'] - 1) +
                                              tmp_data['attempts']) / player_data['games']), 2)
        if new_player:
            player_data_in.append(player_data)
            player_data_out = player_data_in
            # Bug fix: mark the player as known after the first append,
            # otherwise every extra round appended a duplicate entry.
            new_player = False
        else:
            # Replace this player's record, keep everyone else's as-is.
            for i in player_data_in:
                if i['name'] != player_name:
                    player_data_out.append(i)
                else:
                    player_data_out.append(player_data)
        out = input(' Continue (Y/n)?: ')
        if out == 'n':
            print('Bye!')
            break

    with open(file_path, "w") as f:
        data = json.dumps(player_data_out, indent=4)
        f.write(data)
def magic() -> dict:
    """Play one round of the number-guessing game.

    Asks for a lower/upper bound, draws a random number in that range and
    loops until the user guesses it.

    Returns {'record': ..., 'attempts': ...}.
    NOTE(review): `record` is reset to 1000000 at the start of every round,
    so the "new record" message prints on every successful game and the
    returned 'record' always equals 'attempts'; the real record is tracked
    by the caller in player_data.
    """
    while True:
        count = 1
        guess = None
        record = 1000000
        print('Введите нижний диапазон: ', end='')
        min_ = input()
        print('Введите верхний диапазон: ', end='')
        max_ = input()
        try:
            min_ = int(min_)
            max_ = int(max_)
        except ValueError:
            # Non-integer bounds: restart the outer loop and re-ask.
            print('Вы не ввели диапазоны с типом <int>')
        else:
            magic_number = random.randint(min_, max_)
            while not guess:
                print('Введите число: ', end='')
                try:
                    number = int(input())
                except ValueError:
                    # Non-integer guess: abandon this round, re-ask bounds.
                    print('Вы не ввели число типа <int>')
                    break
                if number > magic_number:
                    print('Вы ввели число которое больше рандомного')
                    count += 1
                elif number < magic_number:
                    print('Вы ввели число которое меньше рандомного')
                    count += 1
                elif number == magic_number:
                    print('Вы угадали рандомное число <', magic_number, '>. Использовано попыток: ', count)
                    if count < record:
                        print('Вы установили новый рекорд!')
                        record = count
                    guess = 1
                    return {'record': record, 'attempts': count}
if __name__ == "__main__":
main()
|
[
"DoctorSad@gmail.com"
] |
DoctorSad@gmail.com
|
d10f037bb1bef97f1a9452fcdb59b25bcd316a5b
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_35/344.py
|
223d826084a19fea614cdec9902fde6de1a8a235
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,314
|
py
|
#vim: fileencoding=utf-8 :
import sys, string
sinks=[]
alpha, map, matrix=None, None, None
MAX_LATTITUDE=20000
def read_n():
return [int(n) for n in sys.stdin.readline().split()]
def read_map():
h,w=read_n()
map=[]
for j in range(h):
map.append(read_n())
assert len(map)==h
return h,w,map
def main():
for casenum in range(read_n()[0]):
one_case(casenum)
def one_case(casenum):
global alpha, ialpha, matrix, map, W, H
H,W,map=read_map()
ialpha=alpha_iter()
matrix=make_matrix(W,H)
alpha=ialpha.next()
for i in range(H):
for j in range(W):
flow(i, j, [])
print 'Case #%d:' % (casenum+1)
print_matrix()
def flow(i, j, trails):
    # Follow the steepest-descent path from cell (i, j), labelling every
    # visited cell with the current basin letter `alpha`. `trails` collects
    # the cells on the way down so they can be relabelled if the path merges
    # into an already-labelled basin. (Python 2 source: iterator .next().)
    global alpha, ialpha
    try:
        mark(i,j,alpha)
    except ValueError:
        # Hit a cell that is already labelled: this path drains into an
        # existing basin, so copy its label back over the whole trail.
        v=matrix[i][j]
        for row,col in trails:
            matrix[row][col]=v
        return
    smallest=map[i][j]
    dir=None
    # Neighbour order encodes the tie-break priority: N, W, E, S.
    dirs=[
        ('N', (i-1, j)),
        ('W', (i, j-1)),
        ('E', (i, j+1)),
        ('S', (i+1, j)),
    ]
    for d, (row, col) in dirs:
        if (row>=0 and col>=0) and (row<H and col<W):
            altitude=map[row][col]
            if altitude < smallest:
                smallest=altitude
                dir=d
    if dir:
        # A strictly lower neighbour exists: keep flowing downhill.
        row,col=dict(dirs)[dir]
        trails.append((i,j))
        flow(row, col, trails)
    else:
        # Local minimum: this cell is a sink; next basin gets a fresh letter.
        sinks.append((i,j))
        alpha=ialpha.next()
def mark(row, col, alpha):
assert row>=0 and col>=0
#print 'matrix size, row, col', len(matrix), len(matrix[row]), row, col
if matrix[row][col]!=0:
raise ValueError('already marked')
matrix[row][col]=alpha
def make_matrix(w, h):
return [[0]*w for i in xrange(h)]
def alpha_iter():
for c in string.lowercase:
yield c
def print_matrix():
for i in range(len(matrix)):
for j in range(len(matrix[i])):
print matrix[i][j],
print
if __name__=='__main__':
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
25e5950f6d51c06a9351d4a52d1452dd0ff8a617
|
056adbbdfb968486ecc330f913f0de6f51deee33
|
/200-number-of-islands/number-of-islands.py
|
3137ef84af9b6aa64b412c25fb441d919cf4a809
|
[] |
no_license
|
privateHmmmm/leetcode
|
b84453a1a951cdece2dd629c127da59a4715e078
|
cb303e610949e953b689fbed499f5bb0b79c4aea
|
refs/heads/master
| 2021-05-12T06:21:07.727332
| 2018-01-12T08:54:52
| 2018-01-12T08:54:52
| 117,215,642
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,358
|
py
|
# -*- coding:utf-8 -*-
# Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
#
# Example 1:
# 11110110101100000000
# Answer: 1
# Example 2:
# 11000110000010000011
# Answer: 3
#
# Credits:Special thanks to @mithmatt for adding this problem and creating all test cases.
class Solution(object):
    def numIslands(self, grid):
        """
        :type grid: List[List[str]]
        :rtype: int

        Count connected groups of '1' cells (4-directional adjacency).

        DFS flood fill: each unvisited land cell starts a new island and
        every reachable land cell is sunk to '0'. The grid is modified in
        place; recursion depth is bounded by the largest island's size.

        Cleanup: the original kept two alternative implementations (BFS
        flood fill and union-find) as dead triple-quoted string literals
        inside the method; they have been removed.
        """
        m = len(grid)
        if m == 0:
            return 0
        n = len(grid[0])
        if n == 0:
            return 0

        steps = [(-1, 0), (1, 0), (0, -1), (0, 1)]

        def DFS(i, j):
            # Sink this cell, then recurse into any land neighbours.
            grid[i][j] = '0'
            for s in steps:
                newI = i + s[0]
                newJ = j + s[1]
                if 0 <= newI < m and 0 <= newJ < n and grid[newI][newJ] == '1':
                    DFS(newI, newJ)

        res = 0
        for i in range(0, m):
            for j in range(0, n):
                if grid[i][j] == '1':
                    # Unvisited land cell: one new island; sink all of it.
                    res += 1
                    DFS(i, j)
        return res
|
[
"hyan90@ucsc.edu"
] |
hyan90@ucsc.edu
|
0e789989d27f3d2625ed474126c16466417e6a8b
|
91781e25054b55850fe982add76da1dd709e1218
|
/manage.py
|
961816f726429a4d7a1c0b618036c142a2d92248
|
[] |
no_license
|
bloogrox/ssp-cabinet
|
5b026bb3f392aec60c6a70bf2cb3d4d162a4b237
|
7de03ed35fc603305d07446b8ecd97811c1d9bb2
|
refs/heads/master
| 2021-01-01T06:41:00.628110
| 2017-12-14T13:59:41
| 2017-12-14T13:59:41
| 97,483,236
| 1
| 2
| null | 2018-12-18T20:40:25
| 2017-07-17T14:05:01
|
Python
|
UTF-8
|
Python
| false
| false
| 813
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this
    # project's settings module before dispatching the CLI command.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cabinet.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django # noqa
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Run the requested subcommand (runserver, migrate, ...) from sys.argv.
    execute_from_command_line(sys.argv)
|
[
"bloogrox@gmail.com"
] |
bloogrox@gmail.com
|
5ea470a7c5f0251219964dad00a091892d220849
|
7b6930e5bf5af256cc6ddff3d5a6656e3dddce84
|
/generate_data.py
|
d3c386ccde85e652c23b76502d7cd33ee7867a7f
|
[] |
no_license
|
isabella232/de_tech_test_pyspark
|
40a9800a781195fcad36d771f304f39fdac8be7f
|
6a00b38584453f5f933bb18ddc7eaf521d923235
|
refs/heads/main
| 2023-06-26T03:31:29.939619
| 2021-07-08T14:27:28
| 2021-07-08T14:27:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,436
|
py
|
import random
from functools import partial
import csv
from itertools import product
import string
import csv
from datetime import datetime
import os
def generate_data(size=1000000):
    """
    Generates test csvs under the current working directory:

    data/main/<today>/test_data.csv
        - field1..field4: random ints in range(1, 20), with differently
          skewed weight distributions; field1, field2 and val3 have a
          handful of entries replaced by None
        - val1..val3: random floats (val2/val3 correlated with field1/field4)
    data/lookup.csv
        - one random letter per (field1, f2) pair, skipping the value 10;
          silently left as-is if it already exists

    Args:
        size (int, optional): The number of rows required in the
        main test data csv.

    Raises:
        FileExistsError: Raised if a file has already been generated
        with today's date.
    """
    def _randomly_nullify(series, n):
        "Replaces n entires in series with None"
        # NOTE(review): random.choices samples with replacement, so fewer
        # than n distinct positions may end up None — confirm intended.
        indices = random.choices(range(size),k=n)
        return [v if i not in indices else None for i,v in enumerate(series)]
    date = datetime.today().strftime('%Y-%m-%d')
    # Partial application: each call draws `size` ints from 1..19.
    part_choices = partial(random.choices, range(1,20), k=size)
    field1 = _randomly_nullify(
        part_choices(weights=[i**2/2 for i in range(1,20)]), 5
    ) # end weighted
    field2 = _randomly_nullify(
        part_choices(weights=[(20-i)/i for i in range(1,20)]), 30
    ) # start weighted
    field3 = part_choices(weights=[1/(1+abs(i - 10)) for i in range(1,20)]) # mid weighted
    field4 = part_choices() # uniform
    val1 = (random.gauss(1000, 100) for i in range(size)) # normal random
    # None entries of field1 map to 0 here (None is falsy).
    val2 = (random.random()*1000*i if i else 0 for i in field1) # random correlated with field1
    val3 = _randomly_nullify(
        [random.random()*1000*i for i in field4],10
    ) # random correlated with field4
    combined = zip(field1, field2, field3, field4, val1, val2, val3)
    # 'x' mode raises FileExistsError on a second run the same day (see docstring).
    path = os.path.join(os.getcwd(), f'data/main/{date}/test_data.csv')
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'x', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['field1','field2','field3','field4','val1','val2','val3'])
        writer.writerows(combined)
    # lookup csv
    field = [i for i in range(1,20) if i != 10]
    group = product(field, field)
    lookup = list([x, y, random.choice(string.ascii_letters)] for x,y in group)
    try:
        with open('data/lookup.csv', 'x', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['field1','f2','lookup_val'])
            writer.writerows(lookup)
    except FileExistsError:
        # An existing lookup table is deliberately kept (best-effort write).
        pass
|
[
"noreply@github.com"
] |
isabella232.noreply@github.com
|
43ff85cc266c6409377138647e481859ad1febf5
|
fd40d6375ddae5c8613004a411341f0c984e80d5
|
/examples/websites.py
|
f23bd681715e78801378f8fd3435768abb9d6654
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
ieaves/tenzing
|
93c3353e62621c90adefc5a174a2dcde9aacbc46
|
92d39c1c3a5633d8074e0ffe8c2687c465aebbc8
|
refs/heads/master
| 2020-04-25T07:14:31.388737
| 2020-01-07T02:51:13
| 2020-01-07T02:51:13
| 172,608,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
import pandas as pd
from visions.core.implementations import visions_complete_set
from visions.core.functional import type_cast, type_inference
from visions.core.summaries.summary import CompleteSummary
# Load dataset: Berkman Center URL list ("et"); fetched over the network
# at import time, so this script needs connectivity to run.
df = pd.read_csv(
    "https://raw.githubusercontent.com/berkmancenter/url-lists/master/lists/et.csv",
    parse_dates=["date_added"],
)
# Typeset: the complete set of visions types used for inference and casting.
typeset = visions_complete_set()
# Type inference: detect the semantic type of each column.
inferred_types = type_inference(df, typeset)
print(inferred_types)
# Type cast: convert each column to its inferred type.
cast_df, cast_types = type_cast(df, typeset)
print(cast_types)
# Summarization: per-series summaries for the cast frame.
summary = CompleteSummary()
summaries = summary.summarize(cast_df, cast_types)
for key, variable_summary in summaries["series"].items():
    print(key, variable_summary)
|
[
"ian.k.eaves@gmail.com"
] |
ian.k.eaves@gmail.com
|
1667b6219a909476a37dc1d9dc14e719e3cd0678
|
5a7f0462e4b37fffa42840024d5b185bd22d8358
|
/entry_point_inspector/group.py
|
11ff9ecf0593f4b83b2df8739c87a17c5c3f253e
|
[
"Apache-2.0"
] |
permissive
|
dhellmann/entry_point_inspector
|
a9884406e5f50277172a3c67154bfc5170714f33
|
77dc43a76feef9fffe6a2edbc1970593d2afc5ae
|
refs/heads/master
| 2021-06-12T18:36:30.693175
| 2021-04-12T20:30:46
| 2021-04-12T20:30:46
| 11,693,466
| 19
| 9
|
Apache-2.0
| 2021-04-12T20:24:08
| 2013-07-26T19:43:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
import logging
from cliff import lister
import pkg_resources
LOG = logging.getLogger(__name__)
class GroupList(lister.Lister):
    """List every entry-point group offered by installed distributions."""

    def take_action(self, parsed_args):
        # Collect the union of entry-point group names across the working set.
        group_names = set()
        for dist in pkg_resources.working_set:
            LOG.debug('checking distribution "%s"', dist)
            group_names.update(pkg_resources.get_entry_map(dist).keys())
        # cliff Lister contract: (column headers, row iterable).
        return (
            ('Name',),
            ((name,) for name in sorted(group_names)),
        )
class GroupShow(lister.Lister):
    """Show the members of a specific entry-point group."""

    def get_parser(self, prog_name):
        parser = super(GroupShow, self).get_parser(prog_name)
        parser.add_argument(
            'group',
            help='the name of the group to show',
        )
        return parser

    def take_action(self, parsed_args):
        rows = []
        for ep in pkg_resources.iter_entry_points(parsed_args.group):
            # Import each plugin so broken entry points show up in the
            # table's Error column instead of crashing the command.
            try:
                ep.load()
            except Exception as err:
                load_error = str(err)  # unicode?
            else:
                load_error = ''
            rows.append((
                ep.name,
                ep.module_name,
                '.'.join(ep.attrs),
                str(ep.dist),  # unicode?
                load_error,
            ))
        return (
            ('Name', 'Module', 'Member', 'Distribution', 'Error'),
            rows,
        )
|
[
"doug.hellmann@gmail.com"
] |
doug.hellmann@gmail.com
|
d8c3cc3d8f58fa5679b00e83bfc4695e814c6d89
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2928/60717/271819.py
|
69abec6b11d324214d2dc8fd5938d954d5d43f48
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
# Greedy: given budget v and the costs of digits 1..9, print the largest
# number (maximise digit count, then upgrade digits left-to-right), or -1.
v=int(input())
list1=input().split()
# Convert the nine cost tokens to ints.
# NOTE(review): assumes the line holds exactly 9 tokens; extra tokens would
# remain strings and break min() below — confirm the input format.
for i in range(0,9):
    list1[i]=int(list1[i])
if min(list1)>v:
    # Even the cheapest digit is unaffordable.
    print(-1)
else:
    # Maximum digit count = budget // cheapest cost.
    n=int(v/min(list1))
    tmp=min(list1)
    index=list1.index(tmp)
    # Start with n copies of the cheapest digit (digits are 1-based).
    list2=[index+1 for i in range(0,n)]
    index+=1
    # Try every larger digit: upgrade positions while the total stays <= v.
    while index<9:
        summ=0
        for i in range(0,len(list2)):
            summ+=list1[list2[i]-1]
        for i in range(0,len(list2)):
            if summ-list1[list2[i]-1]+list1[index]<=v:
                summ=summ-list1[list2[i]-1]+list1[index]
                list2[i]=index+1
        index+=1
    # Emit the digits as one concatenated string.
    output=''
    for i in range(0,len(list2)):
        output+=str(list2[i])
    print(output)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
4bcfb0710a767c789b9a4f612cf464628ec5fe42
|
26e4bea46942b9afa5a00b9cde9a84f2cc58e3c9
|
/thinkpython/ex4_md5sum.py
|
741e74eb3f7cbcf1f3796406663c928922b3ff7e
|
[] |
no_license
|
MeetLuck/works
|
46da692138cb9741a913d84eff6822f107510dc7
|
ab61175bb7e2ed5c5113bf150e0541ae18eb04c4
|
refs/heads/master
| 2020-04-12T05:40:25.143075
| 2017-08-21T17:01:06
| 2017-08-21T17:01:06
| 62,373,576
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,046
|
py
|
''' Exercise 4 '''
# 1. Write a program that searches a directory and all of its subdirectories,
# recursively, and returns a list of complete paths for all files with a given suffix(like mp3)
# 2. To recognize duplicates, you can use md5sum to compute a 'checksum' for each files.
# If two files have the same checksum, they probably have the same contents.
# 3. To duble-check, you can use the Unix command diff
import os
def walk(dirname):
    ''' Return the full paths of every file in dirname and, recursively,
        in all of its sub-directories.
    '''
    found = []
    for entry in os.listdir(dirname):
        full = os.path.join(dirname, entry)
        if os.path.isfile(full):
            found.append(full)
        else:
            # Not a regular file: treat as a directory and recurse.
            found += walk(full)
    return found
def pipe(cmd):
    ''' Runs a command in a subprocess
        Returns (res,stat), the output of the subprocess and the exit status
    '''
    fp = os.popen(cmd)
    res = fp.read()
    stat = fp.close()
    # popen().close() returns None on success, so any failing command trips
    # this assert instead of reporting the status to the caller — by design
    # in this example code, `stat` is therefore always None when returned.
    assert stat is None
    return res, stat
def compute_checksum(filename):
    ''' Computes the MD5 checksum of the contents of a file '''
    # Shells out to the Unix `md5sum` utility via pipe().
    # NOTE(review): filename is interpolated into a shell command — unsafe
    # for untrusted or space-containing names; confirm inputs are local paths.
    cmd = 'md5sum ' + filename
    return pipe(cmd)
def check_diff(name1,name2):
    ''' Computes the difference between the contents of two files
        name1, name2 : string of filenames
    '''
    # Empty diff output means the files are identical.
    cmd = 'diff %s %s' %(name1,name2)
    return pipe(cmd)
def compute_checksums(dirname, suffix):
    ''' Computes checksums for all files with the given suffix
        dirname: string name of directory to search
        suffix: string suffix to match
        Returns map from checksum to list of files with that checksum
    '''
    by_checksum = {}
    for path in walk(dirname):
        if not path.endswith(suffix):
            continue
        res, stat = compute_checksum(path)
        # md5sum prints "<checksum>  <filename>".
        checksum, _ = res.split()
        by_checksum.setdefault(checksum, []).append(path)
    return by_checksum
def check_pairs(names):
    ''' Checks whether any in a list of files differs from others
        names: [ list of string filenames ]
        Returns True when every pair is identical, False otherwise.
    '''
    for first in names:
        for second in names:
            # Visit each unordered pair exactly once (string ordering).
            if not (first < second):
                continue
            res, stat = check_diff(first, second)
            if res:
                return False
    return True
def print_duplicates(d):
    ''' checks for duplicate files
        Reports any files with the same checksum and checks whether they are, in fact, identical
        d: map from checksum to list of files with that checksum
    '''
    # Python 2 code: dict.iteritems and print statements.
    for key, names in d.iteritems():
        print key, names
        if len(names) > 1:
            print 'The following files have the same checksum:'
            for name in names:
                print name
            # Same checksum only *suggests* equality; diff double-checks.
            if check_pairs(names):
                print 'And they are identical'
if __name__ == '__main__':
    # dirname = os.path.abspath(__file__)
    # Scan the directory containing this script for duplicate .py files.
    dirname = os.path.dirname(__file__)
    print dirname
    d = compute_checksums(dirname=dirname, suffix='.py')
    print_duplicates(d)
|
[
"withpig1994@hanmail.net"
] |
withpig1994@hanmail.net
|
a751ae94d464f1373dde689a9c0e8df70e5cfa1c
|
d7261ed9957df0bb1f832778201f39edc3dffc49
|
/bot_redis/storeges.py
|
a5d9e6aeb7b8d5b7fc32b84cacd2f1d968f4df3c
|
[] |
no_license
|
unitorzero/SearchBot
|
915bcd50803029a2322ed33593079dc93bf379ff
|
396163468b3223894c862d83f46165fd9e26477f
|
refs/heads/master
| 2022-12-11T06:24:33.944342
| 2019-11-26T11:50:01
| 2019-11-26T11:50:01
| 224,178,376
| 0
| 0
| null | 2022-06-21T23:34:32
| 2019-11-26T11:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,102
|
py
|
from db.bot_orm.tables.user import User
import logging
class Users:
    """In-memory cache of User rows, refreshed eagerly after every mutation."""
    # NOTE(review): this binds the logging *module*, not a named logger —
    # it works because logging exposes info()/warning() at module level,
    # but getLogger(__name__) would be conventional.
    log = logging
    def __init__(self):
        self.UserTable = User
        # Cached users/admins plus derived id lists; update() fills them.
        self.users = []
        self.user_ids = []
        self.user_telegram_ids = []
        self.admins = []
        self.admin_ids = []
        self.admin_telegram_ids = []
        self.update()
    def __str__(self):
        return '\r\nusers: %s \r\n\r\nadmins: %s\r\n' % (list(map(str, self.users)), list(map(str, self.admins)))
    def update(self):
        """Reload the full user list from the DB and rebuild derived caches."""
        self.users = self.UserTable.all()
        self.admins = list(filter(lambda x: x.admin, self.users))
        self.user_ids = list(map(lambda x: x.id, self.users))
        self.user_telegram_ids = list(map(lambda x: x.telegram_id, self.users))
        self.admin_ids = list(map(lambda x: x.id, self.admins))
        self.admin_telegram_ids = list(map(lambda x: x.telegram_id, self.admins))
        self.log.info('Storage Users was updated. \r\n%s' % self)
        return self
    def get_user_by_telegram_id(self, _id):
        # Returns the user object, or False when no match exists.
        items = list(filter(lambda x: x.telegram_id == _id, self.users))
        return items[0] if len(items) else False
    def get_user_by_id(self, _id):
        # Casts _id to int before comparing.
        # NOTE(review): get_admin_by_id below compares the raw value —
        # confirm whether the asymmetry is intentional.
        items = list(filter(lambda x: x.id == int(_id), self.users))
        return items[0] if len(items) else False
    def get_admin_by_id(self, _id):
        items = list(filter(lambda x: x.id == _id, self.admins))
        return items[0] if len(items) else False
    def get_admin_by_telegram_id(self, _id):
        items = list(filter(lambda x: x.telegram_id == _id, self.admins))
        return items[0] if len(items) else False
    def delete_user(self, _id):
        User.delete(_id)
        # The cache is still pre-deletion here, so this lookup returns the
        # just-deleted user (which is then logged and returned).
        user = self.get_user_by_id(_id)
        self.log.warning('User |%s| was deleted.' % user)
        self.update()
        return user
    def set_description(self, _id, description):
        User.add_description(_id, description)
        # Refresh first so the returned user carries the new description.
        self.update()
        user = self.get_user_by_id(_id)
        self.log.warning('User |%s| set decription %s.' % (user, description))
        return user
# Module-level singleton shared by the bot.
users = Users()
|
[
"you@example.com"
] |
you@example.com
|
4dd14a31dbec97cf808271e1bfbf5baa9c2bb497
|
62154efb438d7d799cc98eba1f150679bf79f7b9
|
/mbm/string.py
|
d4ae1901fb00f49d05fab8643a4b6232b81e1e45
|
[] |
no_license
|
asmark/mafia-by-mail
|
5e4b30572741a8bae49d99bd5f1cd88c413969bb
|
2beff28a402336723e44a7ca8f31e6711747350b
|
refs/heads/master
| 2020-12-25T11:21:49.665920
| 2016-02-03T21:28:46
| 2016-02-03T21:28:46
| 51,032,830
| 0
| 0
| null | 2016-02-03T21:44:48
| 2016-02-03T21:44:48
| null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
import re
import string
class Template(string.Template):
    """string.Template that records the placeholder names it contains.

    `vars` is a tuple of placeholder names in order of first appearance
    (duplicates are kept).
    """

    def __init__(self, template):
        super().__init__(template)
        self.vars = tuple(self._extract_vars())

    def _extract_vars(self):
        """Yield every placeholder name found in the template.

        Raises ValueError (via string.Template._invalid) when the template
        contains a malformed placeholder such as a lone '$'.
        """
        for match in self.pattern.finditer(self.template):
            if match.group('invalid') is not None:
                # BUG FIX: the original called self._invalid(m) with an
                # undefined name `m`, raising NameError instead of the
                # intended ValueError from string.Template._invalid.
                self._invalid(match)
            if match.group('escaped'):
                # '$$' escapes are not placeholders.
                continue
            yield match.group('braced') or match.group('named')

    def substitute_with_name(self, lookup=lambda name: name):
        """Substitute each placeholder with lookup(name); by default the
        placeholder is replaced with its own name."""
        return self.substitute(**{v: lookup(v) for v in self.vars})
def reformat_lines(s):
    """Collapse a single newline to a space; shrink runs of two or more
    newlines down to one newline (paragraph break)."""
    def _collapse(match):
        return '\n' if len(match.group(0)) > 1 else ' '
    return re.sub(r'\n+', _collapse, s)
|
[
"tony@rfw.name"
] |
tony@rfw.name
|
feccae051f13771d4f3877c8a034cd5bfe92c450
|
7ee11bb1adc0f58fc4a8362be86487258e4c149a
|
/example/Schema.py
|
0968d1abc4a98ace09b1aaac38f4e33f3500e96d
|
[] |
no_license
|
eddiedb6/schema
|
416b41532e7c3726b88a8fb15197c1a61622c1bb
|
72627caa15bdc097b9f80118ffcf9965f5ff5955
|
refs/heads/master
| 2020-05-21T04:33:54.705016
| 2019-02-14T09:11:43
| 2019-02-14T09:11:43
| 65,799,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,963
|
py
|
# Example schema definition: maps each node name to its type and rules.
# NOTE(review): names such as Const, SchemaType and HasKey are not defined
# in this file — it is evaluated inside a namespace prepared by the schema
# loader, so it is not importable on its own.
{
    SchemaConfigRoot: {
        SchemaType: SchemaTypeDict,
        SchemaRule: [
            HasKey(Const.UI, Const.Action)
        ]
    },
    Const.UI: {
        SchemaType: SchemaTypeDict,
        SchemaRule: [
            HasKey(Const.Name, Const.Type),
            CheckAsTypeFromKey(Const.Type)
        ]
    },
    Const.UIRoot: {
        SchemaType: SchemaTypeDict
    },
    Const.UIApp: {
        SchemaType: SchemaTypeDict,
        SchemaRule: [
            HasKey(Const.Path)
        ]
    },
    Const.UIWeb: {
        SchemaInherit: Const.UIApp
    },
    Const.AppRoot: {
        SchemaType: SchemaTypeDict
    },
    Const.AppButton: {
        SchemaInherit: Const.AppRoot
    },
    Const.AppForm: {
        SchemaInherit: Const.AppRoot
    },
    Const.Path: {
        SchemaType: SchemaTypeString,
        SchemaRule: [
            NotEmpty(SchemaTypeString)
        ]
    },
    Const.Name: {
        SchemaType: SchemaTypeString,
        SchemaRule: [
            NotEmpty(SchemaTypeString)
        ]
    },
    Const.Type: {
        SchemaType: SchemaTypeString,
        SchemaRule: [
            ValueIn(Const.UIType)
        ]
    },
    Const.SubUI: {
        SchemaType: SchemaTypeArray,
        SchemaRule: [
            CheckForeachAsType(Const.UI)
        ]
    },
    Const.Caption: {
        SchemaType: SchemaTypeString
    },
    Const.BreakTime: {
        SchemaType: SchemaTypeInteger
    },
    Const.Text: {
        SchemaType: SchemaTypeString
    },
    Const.Script: {
        SchemaType: SchemaTypeString,
        SchemaRule: [
            NotEmpty(SchemaTypeString)
        ]
    },
    Const.Action: {
        SchemaType: SchemaTypeDict
    },
    Const.SubAction: {
        SchemaType: SchemaTypeArray,
        SchemaRule: [
            CheckForeachAsType(Const.Action)
        ]
    },
    Const.Ignore: {
        SchemaType: SchemaTypeDict,
        SchemaRule: [
            IgnoreChildSchema()
        ]
    }
}
|
[
"eddiedb6@gmail.com"
] |
eddiedb6@gmail.com
|
2034adf0328604846a83fc1c284cfefbb7834f5b
|
4f9de774a4a67ea6419bab6fbe3333028ab84fa0
|
/logic/components/velocity.py
|
a131a706cd464efba6168992a99af551707df99e
|
[] |
no_license
|
Zireael07/Flask-roguelike
|
871c7584c4df424af0ce71ad2e548539c54721ae
|
44ed1aafd3e2ff4fdf442d1b31b11cf35cf6f5d0
|
refs/heads/master
| 2020-09-27T13:12:23.227546
| 2020-01-03T14:45:39
| 2020-01-03T14:45:39
| 226,524,533
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
class Velocity():
    """A 2-D velocity component: per-axis deltas applied each step."""

    def __init__(self, dx=0, dy=0):
        # Displacement along each axis (defaults to standing still).
        self.dx = dx
        self.dy = dy

    def __str__(self):
        # Readable representation; same text as the original concatenation.
        return 'Vel(dx={0}, dy={1})'.format(self.dx, self.dy)
|
[
"zi@mail.com"
] |
zi@mail.com
|
d73758a6e4a7b56359dd8ab165025a812fdfe00e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/429/usersdata/309/98523/submittedfiles/jogoDaVelha_BIB.py
|
077f0b29e7ef4040f1afea9f7eb009446dd540c9
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,530
|
py
|
# -*- coding: utf-8 -*-
from datetime import datetime
# autenticação do simbolo para a jogada humano
def solicitaSimboloDoHumano():
    """Ask the human player for a symbol; re-prompt until it is X/x/O/o."""
    # nome=input('Qual seu nome(ou apelido)? ')
    simbH= input("Qual o simbolo que você deseja utilizar no jogo? ")
    while simbH!="X" and simbH!="x" and simbH!="O" and simbH!="o" :
        print ("Ops! Simbolo inválido")
        simbH= input("Informe um simbolo válido que deseja utilizar para a partida: X ou O : ")
    return simbH
#sorteio
def sorteioPrimeiraJogada ():
    """Coin toss on the current clock second: even -> computer starts,
    odd -> human starts. Only prints the winner; returns None."""
    now= datetime.now()
    a=now.second
    if a%2==0:
        print("Vencedor do sorteio para inicio do jogo: Computador")
        # TODO: call the board-printing routine with the computer's move
    else:
        print("Vencedor do sorteio para inicio do jogo: Jogador")
        # TODO: then ask for the desired position and print the board
#Função para printar o tabuleiro:
def mostraTabuleiro():
    """Print the 3x3 board, rows separated by '|'.

    NOTE(review): relies on a module-level `tabuleiro` (3x3 nested list of
    strings) that is not defined in this file — confirm the caller sets it up.
    """
    print (tabuleiro[0][0] +'|'+ tabuleiro[0][1] + '|'+ tabuleiro[0][2])
    print (tabuleiro[1][0] +'|'+ tabuleiro[1][1] + '|'+ tabuleiro[1][2])
    print (tabuleiro[2][0] +'|'+ tabuleiro[2][1] + '|'+ tabuleiro[2][2])
#Função da jogado do humano
def jogadaHumana(nome, simbH, tabuleiro):
    """Read the human's move (a cell number) and record it on the board.

    NOTE(review): list.insert shifts existing entries instead of replacing
    the chosen cell, and treats `tabuleiro` as flat while mostraTabuleiro
    expects a nested 3x3 list — likely a bug to confirm with the author.
    """
    casa=int(input(" Qual a sua jogada, %s ?" %nome))
    tabuleiro.insert(casa, simbH)
#Função para validar uma jogada
#def validarJogada():
#Função da Jogada do computador
#def jogadaComputador():
#Função que verifica o vencedor
#def verificaVencedor():
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
e71adbb986cb66140b07800f36e7086d3e5523cd
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=4/params.py
|
8a3f119b2e020763ecccd1a111e1a000b2370760
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# Parameters for one schedulability experiment run (GSN-EDF scheduler,
# 4 CPUs, harmonic-2 periods). Evaluated as a Python dict literal by the
# experiment tooling; several numeric-looking values are kept as strings.
{'cpus': 4,
 'duration': 30,
 'final_util': '3.524833',
 'max_util': '3.5',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '0.5',
 'res_nmb': '4',
 'res_weight': '0.04',
 'scheduler': 'GSN-EDF',
 'trial': 4,
 'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
b2454946299a8748c66ae48602950df7191e9989
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_1323+042/sdB_pg_1323+042_coadd.py
|
b826cd815baf19301b82379a8ff8b83c57ce8070
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
from gPhoton.gMap import gMap
def main():
    # One-shot driver: build an NUV count movie (30 s steps) and a coadded
    # count map for sdB PG 1323+042 at a fixed sky position with a 2' box.
    # Output paths are hard-coded to the original analysis host.
    gMap(band="NUV", skypos=[201.583167,3.965192], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_1323+042/sdB_pg_1323+042_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_1323+042/sdB_pg_1323+042_count_coadd.fits", overwrite=True, verbose=3)

if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
3b6c319dc0422c1182842c6d3eb219ef9dfa8e9d
|
c2566840fea739e40e8b93ba692fb861c885a82d
|
/setup.py
|
4c2f5008c1cbc9fda6636dcbe78d1f951c9d7b7a
|
[
"MIT"
] |
permissive
|
deeso/fiery-snap
|
e462fa35a2710ff5a356cef8d1bcabb398541f87
|
c95cf4cd6089ea5b15a49440c684c687e8b417e6
|
refs/heads/master
| 2021-05-05T15:09:41.071803
| 2018-03-30T13:44:51
| 2018-03-30T13:44:51
| 117,293,928
| 14
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
# Collect everything under src/config so it ships with the package.
data_files = [(d, [os.path.join(d, f) for f in files])
              for d, folders, files in os.walk(os.path.join('src', 'config'))]

setup(name='fiery-snap',
      version='1.0',
      description='connect together data flows',
      author='Brad Antoniewicz, Adam Pridgen',
      author_email='bantonie@cisco.com, adpridge@cisco.com',
      install_requires=['toml', 'kombu', 'redis', 'validators',
                        'web.py', 'regex', 'python-twitter',
                        'bs4', 'pymongo', 'requests'],
      packages=find_packages('src'),
      package_dir={'': 'src'},
      # BUG FIX: `data_files` was computed above but never passed to
      # setup(), so the config files were silently dropped from installs.
      data_files=data_files,
      )
|
[
"adam.pridgen@thecoverofnight.com"
] |
adam.pridgen@thecoverofnight.com
|
119dcbef5caee3c2e569f4eeecf04414e5af7500
|
ea82f29e5e0c1a1aeac1edf4e9297a4d7d1003c1
|
/backup/Ignorância Zero-backup/Ignorância Zero/059Exercício1.py
|
76d6e417e4daacc77581efb126bfdc6490d9acb1
|
[] |
no_license
|
G-itch/Projetos
|
b66a4c41d8a073bf13dc993e71f5cddfba047f3a
|
69a503a9c62cc887c0785a8d205c653eff402826
|
refs/heads/master
| 2023-04-20T08:26:59.181315
| 2021-05-05T15:33:40
| 2021-05-05T15:33:40
| 341,940,302
| 0
| 0
| null | 2021-05-05T15:33:40
| 2021-02-24T15:13:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 421
|
py
|
class Quadrado(object):
    """A square defined by its side length (`lado`)."""

    def __init__(self, l):
        # Current side length.
        self.lado = l

    def mudar_valor(self, lado):
        """Replace the side length and return the new value."""
        self.lado = lado
        return self.lado

    def retornar_lado(self):
        """Describe the current side length (message text unchanged)."""
        return f"O lado do quadrado é igual a {self.lado}"

    def área(self):
        """Describe the area — side squared (message text unchanged)."""
        quadrado_do_lado = self.lado ** 2
        return f"A área do quadrado é igual a {quadrado_do_lado}"
# Demo: build a square of side 12 and print its side and area.
Q = Quadrado(12)
# Q.mudar_valor(16)
print(Q.retornar_lado())
print(Q.área())
|
[
"enfurtini@gmail.com"
] |
enfurtini@gmail.com
|
4c76c2ebcd10489a9dd46341289ea3e1e1a33e99
|
5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
|
/nodes/VanderPlas17Python/C_Chapter2/I_StructuredData/D_Onto/index.py
|
949fef6f3a6ef9e0fa59ab4d610d0ca3fdb9e857
|
[] |
no_license
|
nimra/module_gen
|
8749c8d29beb700cac57132232861eba4eb82331
|
2e0a4452548af4fefd4cb30ab9d08d7662122cf4
|
refs/heads/master
| 2022-03-04T09:35:12.443651
| 2019-10-26T04:40:49
| 2019-10-26T04:40:49
| 213,980,247
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,201
|
py
|
# Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
    # NOTE(review): every entry below is commented out, so `blocks`
    # evaluates to an empty list and Content (below) gains no child blocks.
    # RecordArrays: Structured Arrays with a Twist
    # NumPy also provides the np.recarray class, which is almost identical to the struc‐
    # tured arrays just described, but with one additional feature: fields can be accessed as
    # attributes rather than as dictionary keys. Recall that we previously accessed the ages
    # by writing:
    # In[15]: data['age']
    # Out[15]: array([25, 45, 37, 19], dtype=int32)
    # If we view our data as a record array instead, we can access this with slightly fewer
    # keystrokes:
    # In[16]: data_rec = data.view(np.recarray)
    #         data_rec.age
    # Out[16]: array([25, 45, 37, 19], dtype=int32)
    # The downside is that for record arrays, there is some extra overhead involved in
    # accessing the fields, even when using the same syntax. We can see this here:
    # In[17]: %timeit data['age']
    #         %timeit data_rec['age']
    #         %timeit data_rec.age
    #         1000000 loops, best of 3: 241 ns per loop
    #         100000 loops, best of 3: 4.61 µs per loop
    #         100000 loops, best of 3: 7.27 µs per loop
    # Whether the more convenient notation is worth the additional overhead will depend
    # on your own application.
    #
    # On to Pandas
    # This section on structured and record arrays is purposely at the end of this chapter,
    # because it leads so well into the next package we will cover: Pandas. Structured arrays
    # like the ones discussed here are good to know about for certain situations, especially
    # in case you’re using NumPy arrays to map onto binary data formats in C, Fortran, or
    # another language. For day-to-day use of structured data, the Pandas package is a
    # much better choice, and we’ll dive into a full discussion of it in the next chapter.
    #
    #
    #
    #
    # 96 | Chapter 2: Introduction to NumPy
    #
    # CHAPTER 3
    # Data Manipulation with Pandas
    #
    #
    #
    #
    # In the previous chapter, we dove into detail on NumPy and its ndarray object, which
    # provides efficient storage and manipulation of dense typed arrays in Python. Here
    # we’ll build on this knowledge by looking in detail at the data structures provided by
    # the Pandas library. Pandas is a newer package built on top of NumPy, and provides an
    # efficient implementation of a DataFrame. DataFrames are essentially multidimen‐
    # sional arrays with attached row and column labels, and often with heterogeneous
    # types and/or missing data. As well as offering a convenient storage interface for
    # labeled data, Pandas implements a number of powerful data operations familiar to
    # users of both database frameworks and spreadsheet programs.
    # As we saw, NumPy’s ndarray data structure provides essential features for the type of
    # clean, well-organized data typically seen in numerical computing tasks. While it
    # serves this purpose very well, its limitations become clear when we need more flexi‐
    # bility (attaching labels to data, working with missing data, etc.) and when attempting
    # operations that do not map well to element-wise broadcasting (groupings, pivots,
    # etc.), each of which is an important piece of analyzing the less structured data avail‐
    # able in many forms in the world around us. Pandas, and in particular its Series and
    # DataFrame objects, builds on the NumPy array structure and provides efficient access
    # to these sorts of “data munging” tasks that occupy much of a data scientist’s time.
    # In this chapter, we will focus on the mechanics of using Series, DataFrame, and
    # related structures effectively. We will use examples drawn from real datasets where
    # appropriate, but these examples are not necessarily the focus.
    #
    # Installing and Using Pandas
    # Installing Pandas on your system requires NumPy to be installed, and if you’re build‐
    # ing the library from source, requires the appropriate tools to compile the C and
    #
    #
    # 97
    #
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
    """Leaf node carrying the (currently empty) block list for the
    'On to Pandas' section."""
    def __init__(self):
        super().__init__(
            "On to Pandas",
            # Stage.REMOVE_EXTRANEOUS,
            # Stage.ORIG_BLOCKS,
            # Stage.CUSTOM_BLOCKS,
            # Stage.ORIG_FIGURES,
            # Stage.CUSTOM_FIGURES,
            # Stage.CUSTOM_EXERCISES,
        )
        # Attach every block defined above (a no-op while `blocks` is empty).
        [self.add(a) for a in blocks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Onto(HierNode):
    """Hierarchy node for the 'On to Pandas' section; wraps Content."""
    def __init__(self):
        super().__init__("On to Pandas")
        self.add(Content())
# eof
|
[
"lawrence.mcafee@gmail.com"
] |
lawrence.mcafee@gmail.com
|
586d70d29746fed538966ba9258cfa7d67de7905
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flashblade/FB_2_7/models/array_connection_key_get_response.py
|
6960a362ef6721c2cf3a867826a91bffea1cb82f
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,277
|
py
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.7, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_7 import models
class ArrayConnectionKeyGetResponse(object):
    """
    Swagger-generated response model for a paged list of ArrayConnectionKey.

    Attributes:
      swagger_types (dict): The key is attribute name
          and the value is attribute type.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
    """
    # python attribute -> declared swagger type (drives to_dict recursion).
    swagger_types = {
        'continuation_token': 'str',
        'total_item_count': 'int',
        'items': 'list[ArrayConnectionKey]'
    }
    # python attribute -> JSON field name in the REST definition.
    attribute_map = {
        'continuation_token': 'continuation_token',
        'total_item_count': 'total_item_count',
        'items': 'items'
    }
    # No constructor arguments are mandatory for this response model.
    required_args = {
    }
    def __init__(
        self,
        continuation_token=None, # type: str
        total_item_count=None, # type: int
        items=None, # type: List[models.ArrayConnectionKey]
    ):
        """
        Keyword args:
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
            total_item_count (int): Total number of items after applying `filter` params.
            items (list[ArrayConnectionKey])
        """
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        # Reject any attribute not declared in attribute_map so typos fail fast.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ArrayConnectionKeyGetResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        # Class-level Property placeholders read as None until a real
        # value has been assigned on the instance.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            return None
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Generated boilerplate: this class derives from object, so the
        # issubclass check below is always False (dead branch, kept as generated).
        if issubclass(ArrayConnectionKeyGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ArrayConnectionKeyGetResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"tlewis@purestorage.com"
] |
tlewis@purestorage.com
|
6bd956775f12f250875477f676345e6b0e234bf8
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_legion.py
|
5090b650106bfa138215037cce9864eaddf0603c
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
#calss header
class _LEGION():
def __init__(self,):
self.name = "LEGION"
self.definitions = [u'very large in number: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
1c07af723bbd25d1d1d976b25d84c6a4956b4734
|
84c9a6fb5e18741f14a55d0d737e2a556383770d
|
/venv/Lib/site-packages/w3af/plugins/audit/phishing_vector.py
|
4b0519748eb2af2346eb67a13bcaf81251014def
|
[] |
no_license
|
AravindChan96/Vulcan
|
638a1db2f84df08bc50dd76c7f142014d529fbec
|
5548a6f36f04108ac1a6ed8e707930f9821f0bd9
|
refs/heads/master
| 2022-11-05T15:05:54.224578
| 2020-06-19T20:44:14
| 2020-06-19T20:44:14
| 273,396,348
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,808
|
py
|
"""
phishing_vector.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from __future__ import with_statement
import w3af.core.controllers.output_manager as om
import w3af.core.data.constants.severity as severity
import w3af.core.data.parsers.parser_cache as parser_cache
from w3af.core.data.fuzzer.fuzzer import create_mutants
from w3af.core.controllers.plugins.audit_plugin import AuditPlugin
from w3af.core.data.kb.vuln import Vuln
class phishing_vector(AuditPlugin):
    """
    Find phishing vectors.

    :author: Andres Riancho (andres.riancho@gmail.com)
    """

    # Tag names whose src attribute can host an injected frame.
    TAGS = ('iframe', 'frame')

    # I test this with different URL handlers because the developer may have
    # blacklisted http:// and https:// but missed ftp://.
    #
    # I also use hTtp instead of http because I want to evade some (stupid)
    # case sensitive filters
    TEST_URLS = ('hTtp://w3af.org/',
                 'htTps://w3af.org/',
                 'fTp://w3af.org/',
                 '//w3af.org')

    def audit(self, freq, orig_response, debugging_id):
        """
        Find those phishing vectors!

        :param freq: A FuzzableRequest
        :param orig_response: The HTTP response associated with the fuzzable request
        :param debugging_id: A unique identifier for this call to audit()
        """
        payload_mutants = create_mutants(freq, self.TEST_URLS)
        self._send_mutants_in_threads(self._uri_opener.send_mutant,
                                      payload_mutants,
                                      self._analyze_result,
                                      debugging_id=debugging_id)
        om.out.debug('Finished audit.phishing_vector (did=%s)' % debugging_id)

    def _contains_payload(self, response):
        """
        Cheap pre-filter. get_tags_by_filter is CPU-intensive (though cached
        when possible), so only run it when the response body actually
        contains one of the payloads we sent.

        :param response: The HTTP response
        :return: True if the response body contains at least one of the payloads
        """
        lowered_body = response.body.lower()
        return any(test_url.lower() in lowered_body
                   for test_url in self.TEST_URLS)

    def _analyze_result(self, mutant, response):
        """
        Analyze results of the _send_mutant method.
        """
        if not response.is_text_or_html():
            return

        if self._has_bug(mutant):
            return

        # Performance improvement to prevent calling the CPU-expensive
        # get_tags_by_filter
        if not self._contains_payload(response):
            return

        for tag in parser_cache.dpc.get_tags_by_filter(response, self.TAGS):
            # pylint: disable=E1101
            src_attr = tag.attrib.get('src', None)
            # pylint: enable=E1101

            if src_attr is None:
                continue

            # Report at most once per tag (mirrors the original break).
            if any(src_attr.startswith(url) for url in self.TEST_URLS):
                # Vuln vuln!
                desc = 'A phishing vector was found at: %s'
                desc %= mutant.found_at()

                v = Vuln.from_mutant('Phishing vector', desc, severity.LOW,
                                     response.id, self.get_name(), mutant)
                v.add_to_highlight(src_attr)

                self.kb_append_uniq(self, 'phishing_vector', v)

        msg = ('Performed HTTP response analysis at audit.phishing_vector URL %s,'
               ' HTTP response ID %s.')
        args = (response.get_uri(), response.id)
        om.out.debug(msg % args)

    def get_long_desc(self):
        """
        :return: A DETAILED description of the plugin functions and features.
        """
        return """
        This plugins identifies phishing vectors in web applications, a bug of
        this type is found if the victim requests the URL
        "http://site.tld/asd.asp?info=http://attacker.tld" and the HTTP response
        contains:

        ...
        <iframe src="http://attacker.tld">
        ...
        """
|
[
"aravindchan.96@gmail.com"
] |
aravindchan.96@gmail.com
|
f60c1eb3c8a6cb9f4f440d9385e77ee379db0e27
|
f594560136416be39c32d5ad24dc976aa2cf3674
|
/mmdet/models/utils/__init__.py
|
e74ba89e8c2101360d921a5f8437da48d0250e9a
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ShiqiYu/libfacedetection.train
|
bd9eb472c2599cbcb2f028fe7b51294e76868432
|
dce01651d44d2880bcbf4e296ad5ef383a5a611e
|
refs/heads/master
| 2023-07-14T02:37:02.517740
| 2023-06-12T07:42:00
| 2023-06-12T07:42:00
| 245,094,849
| 732
| 206
|
Apache-2.0
| 2023-06-12T07:42:01
| 2020-03-05T07:19:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,809
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU',
'get_uncertain_point_coords_with_randomness', 'get_uncertainty'
]
|
[
"noreply@github.com"
] |
ShiqiYu.noreply@github.com
|
52c36b71575238f0e602221a8231091745c3f7e7
|
63e2bed7329c79bf67279f9071194c9cba88a82c
|
/SevOneApi/python-client/test/test_flow_device_mapping_dto.py
|
f5a0fb8a90151ec40ab8e9ae1adadd21f084d03a
|
[] |
no_license
|
jsthomason/LearningPython
|
12422b969dbef89578ed326852dd65f65ab77496
|
2f71223250b6a198f2736bcb1b8681c51aa12c03
|
refs/heads/master
| 2021-01-21T01:05:46.208994
| 2019-06-27T13:40:37
| 2019-06-27T13:40:37
| 63,447,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 955
|
py
|
# coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.flow_device_mapping_dto import FlowDeviceMappingDto # noqa: E501
from swagger_client.rest import ApiException
class TestFlowDeviceMappingDto(unittest.TestCase):
    """Unit test stubs for the FlowDeviceMappingDto model."""

    def setUp(self):
        # No shared fixtures needed for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testFlowDeviceMappingDto(self):
        """Test FlowDeviceMappingDto"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.flow_device_mapping_dto.FlowDeviceMappingDto()  # noqa: E501
        pass
# Allow running this stub module directly with `python test_...py`.
if __name__ == '__main__':
    unittest.main()
|
[
"johnsthomason@gmail.com"
] |
johnsthomason@gmail.com
|
04bf6220e402915f8d1332a869e2c2ba6796980f
|
1dd7fecaa182c1d7a29460dc5385066b68bcf676
|
/Inheritance - example 1/inheritance_ex_1.py
|
c18d95665de1909c9d64948bc046485bd709560e
|
[] |
no_license
|
mainka1f/PythonUtilities
|
f081df31e6ea4311d4973ef7ba6bc0ff6be75fb1
|
f310d088a7a7a5f2c95c27cba3a7985207568d62
|
refs/heads/master
| 2021-12-02T19:21:11.915510
| 2012-05-01T21:43:57
| 2012-05-01T21:43:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,670
|
py
|
# Can a class talk to another class?
#
# DWB 052808
# NOTE(review): Python 2 scratch/demo script (print statements); the original
# indentation was lost, so block structure below is reconstructed. The many
# commented-out lines are the author's experiments and are kept verbatim.

def func1(value):
    # Module-level helper; also callable from a class body (see Class2).
    print " From func1, value is ", value

class Class1():
    # Class-body statements run once, at class-definition time.
    class1Data=5
    print " This is Class1(), class1Data = ", class1Data
    def testPrint1(self):
        print " 888888 This is def testPrint1"

# call_2_from_1=Class2()

class Class2(Class1):
    def __init__(self,print_value):
        self.print_value=print_value
        self.class2Data=15
        self.class3Data=100
        if self.print_value == 1:
            print " print_value = %d " % self.print_value
#        if self.print_value == 2:
#            print " print_value = %d, class2Data = " % (self.print_value, class2Data)
    # Like Class1, these class-body statements execute at definition time,
    # before any instance exists.
    class2Data=10
    func1(3)
#    def testPrint(self):
#        print " *** Inside testPrint, class2Data = ", self.class2Data
#        print ' *** Inside testPrint, class3Data = ', self.class3Data
#        if self.print_value == 2:
#            print " print_value = %d, class2Data = " % (self.print_value, class2Data)
    print " This is Class2(), class2Data =", class2Data
#    call_1_from_2=Class1()
#    call_1_from_2.testPrint1()
# class1Data is not defined within the scope of Class2
#    print " class1Data before instantiate Class2 = ",class1Data
# Proof you can instantiate Class1 from within Class2 even tho Class2 also inherits from Class1
#    testclass=Class1()
#    testclass.testPrint1()
#    print " class1Data after instantiate Class2 = ",class1Data

print " This is the third print statement"
#print " *** class2Data = ", class2Data
# Instantiate twice to show __init__'s print_value branching.
class2=Class2(1)
class22=Class2(2)
#class22.testPrint()
# testPrint1 is inherited from Class1 by both instances.
class2.testPrint1()
class22.testPrint1()
|
[
"dwbarne@gmail.com"
] |
dwbarne@gmail.com
|
2d9fa1e779bb18f4c5a6cdc8952046e41c32841e
|
cabe35a027a4c26a6360f60b00b176235d79c98b
|
/others/13.py
|
89cefbd8b028eab64906e16e7c34a8e66aab9aae
|
[] |
no_license
|
rishikant42/Python-TheHardWay
|
e3ac9c903be5065277095827a7e31662a1d56cbf
|
5c1c7ff6c376627bc6b1abf1fc7a8d7f3ef40176
|
refs/heads/master
| 2022-07-23T15:12:32.572778
| 2022-06-25T10:29:52
| 2022-06-25T10:29:52
| 70,502,885
| 0
| 1
| null | 2017-02-26T12:51:23
| 2016-10-10T15:42:42
|
Python
|
UTF-8
|
Python
| false
| false
| 282
|
py
|
# recursive approach
def fact1(n):
    """Return n! computed recursively.

    Raises:
        ValueError: if n is negative (factorial is undefined there).

    The original only had an ``n == 1`` base case, so ``fact1(0)`` (and any
    negative input) recursed past zero and exhausted the stack; ``n <= 1``
    covers both 0! and 1!.
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative n")
    if n <= 1:
        return 1
    return n * fact1(n - 1)
# iterative approach
def fact2(n):
    """Return n! computed iteratively (returns 1 for n <= 0)."""
    product = 1
    factor = 2  # multiplying by 1 is a no-op, start at 2
    while factor <= n:
        product *= factor
        factor += 1
    return product
# Demo output. NOTE(review): Python 2 print statements; under Python 3 these
# would need print() calls.
print fact1(6)
print fact2(6)

import math
# The standard library provides factorial directly.
print math.factorial(6)
|
[
"rksbtp@gmail.com"
] |
rksbtp@gmail.com
|
9ec48f0b256ba4be2a48b69990488c3634b4b7d3
|
5b1ff6054c4f60e4ae7315db9f20a334bc0b7634
|
/APC20/consts.py
|
a80a1506e7a792c468d852b8ff12a2d980fc556d
|
[] |
no_license
|
maratbakirov/AbletonLive9_RemoteScripts
|
2869122174634c75405a965401aa97a2dae924a1
|
4a1517c206353409542e8276ebab7f36f9bbd4ef
|
refs/heads/master
| 2021-06-05T14:38:27.959025
| 2021-05-09T11:42:10
| 2021-05-09T11:42:10
| 13,348,327
| 3
| 4
| null | 2016-10-16T13:51:11
| 2013-10-05T16:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 184
|
py
|
#Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/APC20/consts.py
# SysEx/mode identifiers used by the APC20 MIDI remote script.
# NOTE(review): 71 (0x47) is presumably the Akai manufacturer id -- confirm
# against the APC20 SysEx documentation.
MANUFACTURER_ID = 71
ABLETON_MODE = 65
NOTE_MODE = 67
|
[
"julien@julienbayle.net"
] |
julien@julienbayle.net
|
66d338740fdb20e03fa286110358ce655f6f26da
|
e7031386a884ae8ed568d8c219b4e5ef1bb06331
|
/ram/serializers.py
|
b87c62ef863a93dcee31c43b9f95f7700fede589
|
[] |
no_license
|
ikbolpm/ultrashop-backend
|
a59c54b8c4d31e009704c3bf0e963085477092cf
|
290fa0ecdad40ec817867a019bff2ce82f08d6fe
|
refs/heads/dev
| 2022-11-30T21:49:17.965273
| 2020-09-24T10:16:12
| 2020-09-24T10:16:12
| 147,561,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
from rest_framework import serializers
from .models import Ram
class RamSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a Ram record's id and generation fields."""
    class Meta:
        model = Ram
        # Only these model fields are serialized / accepted on input.
        fields = [
            'id',
            'generation',
        ]
|
[
"ikbolpm@gmail.com"
] |
ikbolpm@gmail.com
|
9a55141c5b1f4d7d1780699da7390a14e77ea789
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-1/23833417cfe83723f088bea08e63844cae6f1121-<convert_dense_weights_data_format>-fix.py
|
69a055d556bb6f7f95b4123fc14c444fa5fbfd70
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,728
|
py
|
def convert_dense_weights_data_format(dense, previous_feature_map_shape, target_data_format='channels_first'):
    """Utility useful when changing a convnet's `data_format`.

    When porting the weights of a convnet from one data format to the other,
    if the convnet includes a `Flatten` layer (applied to the last
    convolutional feature map) followed by a `Dense` layer, the weights of
    that `Dense` layer should be updated to reflect the new dimension
    ordering.

    # Arguments
        dense: The target `Dense` layer.
        previous_feature_map_shape: A shape tuple of 3 integers,
            e.g. `(512, 7, 7)`. The shape of the convolutional
            feature map right before the `Flatten` layer that
            came before the target `Dense` layer.
        target_data_format: One of "channels_last", "channels_first".
            Set it "channels_last"
            if converting a "channels_first" model to "channels_last",
            or reciprocally.
    """
    # Local import: the original snippet used `np` without importing it.
    import numpy as np

    assert target_data_format in {'channels_last', 'channels_first'}
    kernel, bias = dense.get_weights()
    # Re-permute each output unit's flattened input weights independently.
    for i in range(kernel.shape[1]):
        if target_data_format == 'channels_first':
            # Source layout was channels_last: unflatten as (h, w, c), then
            # move channels to the front.
            c, h, w = previous_feature_map_shape
            original_fm_shape = (h, w, c)
            ki = kernel[:, i].reshape(original_fm_shape)
            ki = np.transpose(ki, (2, 0, 1))  # (h, w, c) -> (c, h, w)
        else:
            # Source layout was channels_first: unflatten as (c, h, w), then
            # move channels to the back.
            h, w, c = previous_feature_map_shape
            original_fm_shape = (c, h, w)
            ki = kernel[:, i].reshape(original_fm_shape)
            ki = np.transpose(ki, (1, 2, 0))  # (c, h, w) -> (h, w, c)
        kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
    dense.set_weights([kernel, bias])
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
c382de0265f1f1ec89213eb1b3fdfd2e350d1205
|
eeb469954b768095f2b8ad2376f1a114a3adb3fa
|
/399.py
|
5b8d40d1b0b9c2f4aa9770d6b7d9621200cdd1c4
|
[
"MIT"
] |
permissive
|
RafaelHuang87/Leet-Code-Practice
|
ef18dda633932e3cce479f7d5411552d43da0259
|
7754dcee38ffda18a5759113ef06d7becf4fe728
|
refs/heads/master
| 2020-07-18T20:09:10.311141
| 2020-02-11T09:56:39
| 2020-02-11T09:56:39
| 206,305,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
class Solution:
    def calcEquation(self, equations: 'List[List[str]]', values: 'List[float]', queries: 'List[List[str]]') -> 'List[float]':
        """Evaluate division queries given known variable ratios.

        Builds a bidirectional weighted graph (edge a->b with weight a/b and
        b->a with weight b/a), then answers each query by a DFS that
        multiplies edge weights along a path. Unknown or unreachable
        variables yield -1; x/x yields 1 for any known x.

        NOTE: the annotations are quoted because this file never imported
        `List` from `typing`; unquoted they raised NameError at class
        definition time.
        """
        import collections

        def dfs(x, y, graph, visited):
            # Either endpoint unknown: no answer possible.
            if x not in graph or y not in graph:
                return -1
            if x == y:
                return 1
            for n in graph[x]:
                if n in visited:
                    continue
                visited.add(n)
                d = dfs(n, y, graph, visited)
                if d != -1:
                    # Chain the ratio along the path.
                    return graph[x][n] * d
            return -1

        graph = collections.defaultdict(dict)
        for (x, y), val in zip(equations, values):
            graph[x][y] = val
            graph[y][x] = 1.0 / val
        # Fresh visited set per query.
        return [dfs(x, y, graph, set()) for x, y in queries]
|
[
"rafaelhuang@163.com"
] |
rafaelhuang@163.com
|
05bf8a2b61905ec7be7a20f74f1fac33bed20718
|
6a3af6fe669b2e17db1fa7d0751cbc4e04948079
|
/fn_utilities/fn_utilities/components/utilities_xml_transformation.py
|
4045c55e0ea7cf1fa36decd6a8d1b5931e341ba4
|
[
"MIT"
] |
permissive
|
jjfallete/resilient-community-apps
|
5f0a728fe0be958acc44d982bf0289959f84aa20
|
2e3c4b6102555517bad22bf87fa4a06341714166
|
refs/heads/master
| 2022-04-17T13:20:36.961976
| 2020-04-13T07:03:54
| 2020-04-13T07:03:54
| 169,295,943
| 1
| 0
|
MIT
| 2020-04-13T07:03:56
| 2019-02-05T19:06:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,313
|
py
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
from lxml import etree
import os
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
class FunctionComponent(ResilientComponent):
    """Component that implements the Resilient function 'utilities_xml_transformation'."""

    # Config option naming the directory that holds the XSLT stylesheets.
    XML_DIR = "xml_stylesheet_dir"

    def __init__(self, opts):
        """constructor provides access to the configuration options"""
        super(FunctionComponent, self).__init__(opts)
        self.options = opts.get("fn_utilities", {})

    @handler("reload")
    def _reload(self, event, opts):
        """Configuration options have changed, save new values"""
        self.options = opts.get("fn_utilities", {})

    @function("utilities_xml_transformation")
    def _utilities_xml_transformation_function(self, event, *args, **kwargs):
        """Function: Perform a transformation of an xml document based on a given stylesheet"""
        try:
            # Get the function parameters:
            xml_source = kwargs.get("xml_source")  # text
            xml_stylesheet = kwargs.get("xml_stylesheet")  # text

            validate_fields(("xml_source", "xml_stylesheet"), kwargs)

            # confirm that our required parameter exists and is a directory
            if not (self.options.get(FunctionComponent.XML_DIR) and os.path.isdir(self.options.get(FunctionComponent.XML_DIR))):
                raise ValueError("missing or incorrectly specified configuration property: {}".format(FunctionComponent.XML_DIR))

            log = logging.getLogger(__name__)
            log.info("xml_source: %s", xml_source)
            log.info("xml_stylesheet: %s", xml_stylesheet)

            # get the stylesheet
            stylesheet = os.path.join(self.options.get(FunctionComponent.XML_DIR), xml_stylesheet)
            if not (os.path.exists(stylesheet) and os.path.isfile(stylesheet)):
                raise ValueError("stylesheet file not found: {}".format(stylesheet))

            yield StatusMessage("starting...")

            # recover=True keeps parsing through minor XML errors.
            parser = etree.XMLParser(ns_clean=True, recover=True, encoding="utf-8")

            # read xsl file -- with a context manager; the original left the
            # file handle open until garbage collection (resource leak).
            with open(stylesheet, mode="rb") as xsl_file:
                xsl = xsl_file.read()
            xsl_root = etree.fromstring(xsl, parser=parser)
            transform = etree.XSLT(xsl_root)

            # read xml
            xml_root = etree.fromstring(xml_source.encode("utf-8"), parser=parser)

            # transform xml with xslt
            transformation_doc = transform(xml_root)

            # return transformation result
            result = etree.tostring(transformation_doc)

            results = {
                "content": result.decode("utf-8")
            }

            # Produce a FunctionResult with the results
            yield FunctionResult(results)
        except Exception:
            yield FunctionError()
def validate_fields(fieldList, kwargs):
    """
    ensure required fields are present. Throw ValueError if not

    :param fieldList: iterable of required field names
    :param kwargs: mapping to validate
    :return: no return
    """
    for required in fieldList:
        # Missing entirely, or present but empty string -> reject.
        if required not in kwargs or kwargs.get(required) == '':
            raise ValueError('Required field is missing or empty: ' + required)
|
[
"hpyle@us.ibm.com"
] |
hpyle@us.ibm.com
|
2f50e3dff496deca069c7a1bdc2f9db071839fe6
|
fc29ccdcf9983a54ae2bbcba3c994a77282ae52e
|
/Leetcode_By_Topic/backtrack-037.py
|
86ec511949455de352be7b227edc243671f39727
|
[] |
no_license
|
linnndachen/coding-practice
|
d0267b197d9789ab4bcfc9eec5fb09b14c24f882
|
5e77c3d7a0632882d16dd064f0aad2667237ef37
|
refs/heads/master
| 2023-09-03T19:26:25.545006
| 2021-10-16T16:29:50
| 2021-10-16T16:29:50
| 299,794,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
from typing import List
class Solution:
    def solveSudoku(self, board: List[List[str]]) -> None:
        """
        Solve the 9x9 sudoku in place; do not return anything.
        """
        self.backtrack(board, 0, 0)

    def backtrack(self, board, r, c):
        """Fill cells from (r, c) onward; return True once the grid is complete."""
        # Advance to the next empty cell in row-major order.
        while board[r][c] != '.':
            c += 1
            if c == 9:
                c, r = 0, r + 1
            if r == 9:  # walked past the last row: solved
                return True
        # Try each candidate digit in this empty cell.
        for digit in map(str, range(1, 10)):
            if not self.isValidSudokuMove(board, r, c, digit):
                continue
            board[r][c] = digit
            if self.backtrack(board, r, c):
                return True
            # Dead end: undo and try the next candidate.
            board[r][c] = '.'
        return False

    def isValidSudokuMove(self, board, r, c, n):
        """Return True if digit n can legally be placed at (r, c)."""
        if any(board[r][j] == n for j in range(9)):
            return False  # row clash
        if any(board[i][c] == n for i in range(9)):
            return False  # column clash
        # 3x3 block containing (r, c).
        top, left = 3 * (r // 3), 3 * (c // 3)
        return not any(
            board[i][j] == n
            for i in range(top, top + 3)
            for j in range(left, left + 3)
        )
|
[
"lchen.msc2019@ivey.ca"
] |
lchen.msc2019@ivey.ca
|
bc91f06774edb26b8793277101e26cf610a1def6
|
d29293cbaef904a8b1cae69b94ff215fe5e52af3
|
/website-sosokan/sosokan/migrations/0056_auto_20161217_1903.py
|
e15639273abd6aff023c399eb51617ef08a2d02c
|
[] |
no_license
|
Sosoking328/BeautyContest
|
e00e22a8b8539846e5f39802fab927804bf81e3e
|
5689640be30d92a81a9f4d50e348fec26705a149
|
refs/heads/master
| 2022-12-11T22:53:31.419871
| 2017-07-21T20:56:39
| 2017-07-21T20:56:39
| 97,427,120
| 1
| 0
| null | 2022-12-07T23:57:43
| 2017-07-17T02:29:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,514
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-12-18 00:03
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: refreshes the frozen timestamp defaults on
    # ad/adimage fields. NOTE(review): the float defaults are the epoch at
    # generation time (2016-12-18), baked in by Django's autodetector.

    dependencies = [
        ('sosokan', '0055_auto_20161215_2006'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ad',
            name='createdAt',
            field=models.FloatField(default=1482019404.335),
        ),
        migrations.AlterField(
            model_name='ad',
            name='created_on',
            field=models.DateTimeField(default=datetime.datetime(2016, 12, 17, 19, 3, 24, 335000)),
        ),
        migrations.AlterField(
            model_name='ad',
            name='descendingTime',
            # Negated timestamp: ascending sort on this field gives newest-first.
            field=models.FloatField(default=-1482019404.335),
        ),
        migrations.AlterField(
            model_name='ad',
            name='updatedAt',
            field=models.FloatField(default=1482019404.335),
        ),
        migrations.AlterField(
            model_name='adimage',
            name='createdAt',
            field=models.FloatField(default=1482019404.336),
        ),
        migrations.AlterField(
            model_name='adimage',
            name='descendingTime',
            field=models.FloatField(default=-1482019404.336),
        ),
        migrations.AlterField(
            model_name='adimage',
            name='updatedAt',
            field=models.FloatField(default=1482019404.336),
        ),
    ]
|
[
"king@sosokanapp.com"
] |
king@sosokanapp.com
|
96b40d4b78c509377f89326c15ed85475f2edc54
|
87fdda531054e0bbbaa0c88fc8bb21426d8466c4
|
/blog/views.py
|
7e0f8857efdcd090ed7284d8c281e5d3206bdb87
|
[] |
no_license
|
fanyang668/mysite
|
ef771586f4ed192844875ff050047960bf63c9cd
|
1d8e02997f90d4f0f4b46590a398d9f12083a210
|
refs/heads/master
| 2021-08-30T00:47:02.019599
| 2017-12-15T12:17:31
| 2017-12-15T12:17:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
from django.shortcuts import render, get_object_or_404
from .models import BlogArticles
# Create your views here.
def blog_title(request):
    """Render the blog index page listing every article."""
    context = {'blogs': BlogArticles.objects.all()}
    return render(request, 'blog/titles.html', context)
def article_title(request, article_id):
    """Render a single article's content page; 404 if the id is unknown."""
    # get_object_or_404 replaces a bare .get() that would raise DoesNotExist.
    article = get_object_or_404(BlogArticles, id=article_id)
    context = {"article": article, "publish": article.publish}
    return render(request, "blog/content.html", context)
|
[
"email@example.com"
] |
email@example.com
|
78e0b151b1426fb3a47c554d33ae1df1193ee67f
|
7f771a20879dab8bb48309d98ffe6f1125204abb
|
/users/views.py
|
bcb4b9ec473582737beabc6e0e500e6d409a4b23
|
[] |
no_license
|
sergiy-chumachenko/all-auth
|
34269aadada2d8d7dbc32c64ec5435ba9c68bbae
|
79ec735c702f535360f8b547b71e9e14d5be6e0c
|
refs/heads/master
| 2022-12-12T00:38:02.378627
| 2020-07-12T09:05:21
| 2020-07-12T09:05:21
| 193,956,902
| 1
| 0
| null | 2022-04-22T21:53:14
| 2019-06-26T18:21:24
|
Python
|
UTF-8
|
Python
| false
| false
| 270
|
py
|
from django.views.generic import CreateView
from django.urls import reverse_lazy
from .forms import CustomUserCreationForm
class SignUpView(CreateView):
    """Signup page: renders CustomUserCreationForm, redirects to login on success."""
    template_name = 'signup.html'
    form_class = CustomUserCreationForm
    # reverse_lazy so the URLconf need not be loaded at import time.
    success_url = reverse_lazy('login')
|
[
"chumachenko.sergiy@gmail.com"
] |
chumachenko.sergiy@gmail.com
|
484e7b413bbeb560929b680ac097f71c1dd5e2d9
|
26fc334777ce27d241c67d97adc1761e9d23bdba
|
/tests/django_tests/tests/staticfiles_tests/cases.py
|
918ec4f99e43c2544f29a71013c544dcea1e3953
|
[
"BSD-3-Clause"
] |
permissive
|
alihoseiny/djongo
|
1434c9e78c77025d7e0b3330c3a40e9ea0029877
|
e2edf099e398573faa90e5b28a32c3d7f1c5f1e9
|
refs/heads/master
| 2020-03-27T23:27:02.530397
| 2018-08-30T14:44:37
| 2018-08-30T14:44:37
| 147,317,771
| 2
| 1
|
BSD-3-Clause
| 2018-09-04T09:00:53
| 2018-09-04T09:00:53
| null |
UTF-8
|
Python
| false
| false
| 4,385
|
py
|
import codecs
import os
import shutil
import tempfile
from django.conf import settings
from django.core.management import call_command
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from .settings import TEST_SETTINGS
class BaseStaticFilesMixin:
    """
    Test case with a couple utility assertions.
    """

    def assertFileContains(self, filepath, text):
        content = self._get_file(filepath)
        self.assertIn(text, content, "'%s' not in '%s'" % (text, filepath))

    def assertFileNotFound(self, filepath):
        with self.assertRaises(IOError):
            self._get_file(filepath)

    def render_template(self, template, **kwargs):
        # Accept either a template source string or a Template instance.
        source = Template(template) if isinstance(template, str) else template
        return source.render(Context(**kwargs)).strip()

    def static_template_snippet(self, path, asvar=False):
        prefix = "{%% load static from static %%}{%% static '%s'" % path
        if asvar:
            return prefix + " as var %}{{ var }}"
        return prefix + " %}"

    def assertStaticRenders(self, path, result, asvar=False, **kwargs):
        snippet = self.static_template_snippet(path, asvar)
        self.assertEqual(self.render_template(snippet, **kwargs), result)

    def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
        # NOTE(review): asvar is accepted but not forwarded (as in the
        # original) -- confirm whether that is intentional.
        with self.assertRaises(exc):
            self.assertStaticRenders(path, result, **kwargs)
@override_settings(**TEST_SETTINGS)
class StaticFilesTestCase(BaseStaticFilesMixin, SimpleTestCase):
    # Mixin assertions + isolated test settings; no behavior of its own.
    pass
@override_settings(**TEST_SETTINGS)
class CollectionTestCase(BaseStaticFilesMixin, SimpleTestCase):
    """
    Tests shared by all file finding features (collectstatic,
    findstatic, and static serve view).

    This relies on the asserts defined in BaseStaticFilesTestCase, but
    is separated because some test cases need those asserts without
    all these tests.
    """

    def setUp(self):
        super().setUp()
        temp_dir = tempfile.mkdtemp()
        # Override the STATIC_ROOT for all tests from setUp to tearDown
        # rather than as a context manager
        self.patched_settings = self.settings(STATIC_ROOT=temp_dir)
        self.patched_settings.enable()
        # collectstatic must run after the STATIC_ROOT patch is enabled so
        # the collected files land in the temp dir.
        self.run_collectstatic()
        # Same comment as in runtests.teardown.
        self.addCleanup(shutil.rmtree, temp_dir)

    def tearDown(self):
        # Disable the settings patch before the base teardown runs.
        self.patched_settings.disable()
        super().tearDown()

    def run_collectstatic(self, *, verbosity=0, **kwargs):
        # Collect quietly, skipping fixture files marked *.ignoreme.
        call_command('collectstatic', interactive=False, verbosity=verbosity,
                     ignore_patterns=['*.ignoreme'], **kwargs)

    def _get_file(self, filepath):
        # Read a collected file from STATIC_ROOT as UTF-8 text.
        assert filepath, 'filepath is empty.'
        filepath = os.path.join(settings.STATIC_ROOT, filepath)
        with codecs.open(filepath, "r", "utf-8") as f:
            return f.read()
class TestDefaults:
    """
    A few standard test cases.

    Mixed into concrete test classes that provide assertFileContains()
    (see BaseStaticFilesMixin); not a TestCase on its own.
    """

    def test_staticfiles_dirs(self):
        """
        Can find a file in a STATICFILES_DIRS directory.
        """
        self.assertFileContains('test.txt', 'Can we find')
        self.assertFileContains(os.path.join('prefix', 'test.txt'), 'Prefix')

    def test_staticfiles_dirs_subdir(self):
        """
        Can find a file in a subdirectory of a STATICFILES_DIRS
        directory.
        """
        self.assertFileContains('subdir/test.txt', 'Can we find')

    def test_staticfiles_dirs_priority(self):
        """
        File in STATICFILES_DIRS has priority over file in app.
        """
        self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')

    def test_app_files(self):
        """
        Can find a file in an app static/ directory.
        """
        self.assertFileContains('test/file1.txt', 'file1 in the app dir')

    def test_nonascii_filenames(self):
        """
        Can find a file with non-ASCII character in an app static/ directory.
        """
        self.assertFileContains('test/⊗.txt', '⊗ in the app dir')

    def test_camelcase_filenames(self):
        """
        Can find a file with capital letters.
        """
        self.assertFileContains('test/camelCase.txt', 'camelCase')

    def test_filename_with_percent_sign(self):
        # Percent signs must survive collection without URL-decoding.
        self.assertFileContains('test/%2F.txt', '%2F content')
|
[
"nesdis@gmail.com"
] |
nesdis@gmail.com
|
a47718f6027b7994870b738ed4ba9c3887c47f3b
|
993ff3e6eb510b5083db5f15f2c0299c05a2c0f6
|
/hr_leave_calculation/models/hr_payslip_leave.py
|
249a42b9c4bfe8dcaf554da7e24cc2ec02be2c22
|
[] |
no_license
|
Raghupathy15/Sigma
|
36b24e76e81bad4ae5435508018f1c960878901d
|
42eed3b211a7be8d8c9b70dc359a432c02be07f1
|
refs/heads/main
| 2023-01-31T18:25:09.712666
| 2020-12-09T09:22:17
| 2020-12-09T09:22:17
| 319,904,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
import base64
import logging
from odoo import api, fields, models
from odoo import tools, _
from odoo.exceptions import ValidationError, AccessError
from odoo.modules.module import get_module_resource
from datetime import datetime
class HrPayslipLeave(models.Model):
    # Leave-summary line attached to a payslip (Odoo model).
    _name='hr.payslip.leave'

    #establishing the one2many relation between models
    leave_payslip_ids = fields.Many2one('hr.payslip')
    # Number of leave days consumed in the payslip period.
    leaves_taken = fields.Float(string="Leaves Taken")
    leave_type_id = fields.Many2one('hr.leave.allocation',string="Leave Type")
    # Related fields mirror the parent payslip; store=True makes them searchable.
    employee_id = fields.Many2one('hr.employee',string="Employee Name",related="leave_payslip_ids.employee_id",store=True)
    # NOTE(review): declared as Many2one('hr.employee') related to employee_id;
    # looks redundant with employee_id -- confirm intent.
    name = fields.Many2one('hr.employee',related="employee_id")
    date_from = fields.Date(string="From Date",related="leave_payslip_ids.date_from",store=True)
    date_to = fields.Date(string="To Date",related="leave_payslip_ids.date_to",store=True)
    current_year = fields.Integer(string="Current Year",compute="check_year")

    @api.multi
    @api.depends('date_from')
    def check_year(self):
        # Derive the calendar year from the payslip's start date.
        # NOTE(review): assumes date_from is set; a falsy date would raise
        # AttributeError here -- confirm upstream guarantees.
        for line in self:
            present_year = line.date_from
            line.current_year = present_year.year

    @api.multi
    def unlink(self):
        # No extra behavior; kept as an explicit override/extension point.
        return super(HrPayslipLeave, self).unlink()
|
[
"raghupathy@emxcelsolutions.com"
] |
raghupathy@emxcelsolutions.com
|
650dd79043efc6560641569792262a2d69200509
|
cbc5e26bb47ae69e80a3649c90275becf25ce404
|
/xlsxwriter/test/styles/test_write_cell_xfs.py
|
3dc4700ec4158cc5e3987704539c462907e5ea23
|
[
"BSD-2-Clause-Views",
"BSD-3-Clause",
"MIT"
] |
permissive
|
mst-solar-car/kicad-bom-generator
|
c3549409c3139f787ad28391372b5cb03791694a
|
2aae905056d06f3d25343a8d784049c141d05640
|
refs/heads/master
| 2021-09-07T14:00:40.759486
| 2018-02-23T23:21:13
| 2018-02-23T23:21:13
| 107,868,801
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...styles import Styles
from ...format import Format
class TestWriteCellXfs(unittest.TestCase):
"""
Test the Styles _write_cell_xfs() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_cell_xfs(self):
"""Test the _write_cell_xfs() method"""
xf_format = Format()
xf_format.has_font = 1
self.styles._set_style_properties([[xf_format], None, 1, 0, 0, 0, [], []])
self.styles._write_cell_xfs()
exp = """<cellXfs count="1"><xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/></cellXfs>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
|
[
"mwrb7d@mst.edu"
] |
mwrb7d@mst.edu
|
3ff9e02698530cd2acb3ebe154d35b9db080f78c
|
2fed297f777ac6a01f21870d74e4259ed0c17dfd
|
/examples/distributions/continuous/comparisons.py
|
f3741b9fc6332da0e64e6626ef4e8762716053cb
|
[
"MIT"
] |
permissive
|
vahndi/probability
|
1bf4e5e7835d5dc57b5a5a43d76ded2b23975ec5
|
ff3f5434d3da0d46b127b02cf733699e5a43c904
|
refs/heads/master
| 2023-05-24T17:04:00.430326
| 2023-05-22T13:53:41
| 2023-05-22T13:53:41
| 231,456,036
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,214
|
py
|
import matplotlib.pyplot as plt
from math import sqrt
from numpy import arange
from examples.colors import ML_APP_DARK_BLUE
from probability.distributions import Normal, Laplace
from probability.distributions.continuous.students_t import StudentsT
x = arange(-4, 4.01, 0.05)
def plot_normal_students_t_laplace():
"""
Machine Learning: A Probabilistic Perspective. Figure 2.7
"""
_, axes = plt.subplots(ncols=2, figsize=(16, 9))
# define distributions
normal = Normal(mu=0, sigma=1)
students_t = StudentsT(nu=1)
laplace = Laplace(mu=0, b=1 / sqrt(2))
# plot pdfs
ax = axes[0]
normal.plot(x=x, ls=':', color='black', ax=ax)
students_t.plot(x=x, ls='--', color=ML_APP_DARK_BLUE, ax=ax)
laplace.plot(x=x, ls='-', color='red', ax=ax)
ax.set_ylim(0, 0.8)
ax.legend(loc='upper right')
# plot log-pdfs
ax = axes[1]
normal.log_pdf().plot(x=x, ls=':', color='black', ax=ax)
students_t.log_pdf().plot(x=x, ls='--', color=ML_APP_DARK_BLUE, ax=ax)
laplace.log_pdf().plot(x=x, ls='-', color='red', ax=ax)
ax.set_ylim(-9, 0)
ax.legend(loc='upper right')
plt.show()
if __name__ == '__main__':
plot_normal_students_t_laplace()
|
[
"vahndi.minah@frogdesign.com"
] |
vahndi.minah@frogdesign.com
|
9f52785f1e128273a432d4fd107afdce23cc2482
|
4142b8c513d87361da196631f7edd82f11465abb
|
/python/globalround17/1610A.py
|
43f737746b3709c50f36fc2d47e6019e07c30e65
|
[] |
no_license
|
npkhanhh/codeforces
|
b52b66780426682ea1a3d72c66aedbe6dc71d7fe
|
107acd623b0e99ef0a635dfce3e87041347e36df
|
refs/heads/master
| 2022-02-08T17:01:01.731524
| 2022-02-07T10:29:52
| 2022-02-07T10:29:52
| 228,027,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
from sys import stdin
for _ in range(int(stdin.readline())):
a, b = list(map(int, stdin.readline().split()))
if a == 1 and b == 1:
print(0)
else:
print(min([a, b, 2]))
|
[
"npkhanh93@gmail.com"
] |
npkhanh93@gmail.com
|
6f61be457dacf2ea995176cc6b5ab19159a4408c
|
0d87906ca32b68965c3aa5b4cb829383276b13c8
|
/tests/extension/thread_/axi_dma_long_wide/thread_axi_dma_long_wide.py
|
b470e2299c027001f75aea514eca00311c865250
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
tanbour/veriloggen
|
301beea3d9419c2d63d1d1159a2ec52ed316ef20
|
858fbc872be78964cfc7e5a23e1491b2c3d5cf52
|
refs/heads/master
| 2020-03-18T20:38:24.653119
| 2018-05-19T04:49:01
| 2018-05-19T04:49:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,275
|
py
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed(memory_datawidth=128):
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
datawidth = 32
addrwidth = 10
myaxi = vthread.AXIM(m, 'myaxi', clk, rst, memory_datawidth)
myram = vthread.RAM(m, 'myram', clk, rst, datawidth, addrwidth)
all_ok = m.TmpReg(initval=0)
def blink(size):
all_ok.value = True
# Test for 4KB boundary check
offset = myaxi.boundary_size - 16
body(size, offset)
if all_ok:
print('ALL OK')
def body(size, offset):
# write
for i in range(size):
wdata = i + 100
myram.write(i, wdata)
laddr = 0
gaddr = offset
myaxi.dma_write(myram, laddr, gaddr, size)
print('dma_write: [%d] -> [%d]' % (laddr, gaddr))
# write
for i in range(size):
wdata = i + 1000
myram.write(i, wdata)
laddr = 0
gaddr = (size + size) * 4 + offset
myaxi.dma_write(myram, laddr, gaddr, size)
print('dma_write: [%d] -> [%d]' % (laddr, gaddr))
# read
laddr = 0
gaddr = offset
myaxi.dma_read(myram, laddr, gaddr, size)
print('dma_read: [%d] <- [%d]' % (laddr, gaddr))
for i in range(size):
rdata = myram.read(i)
if vthread.verilog.NotEql(rdata, i + 100):
print('rdata[%d] = %d' % (i, rdata))
all_ok.value = False
# read
laddr = 0
gaddr = (size + size) * 4 + offset
myaxi.dma_read(myram, laddr, gaddr, size)
print('dma_read: [%d] <- [%d]' % (laddr, gaddr))
for i in range(size):
rdata = myram.read(i)
if vthread.verilog.NotEql(rdata, i + 1000):
print('rdata[%d] = %d' % (i, rdata))
all_ok.value = False
th = vthread.Thread(m, 'th_blink', clk, rst, blink)
fsm = th.start(256 + 256 + 64)
return m
def mkTest(memory_datawidth=128):
m = Module('test')
# target instance
led = mkLed(memory_datawidth)
# copy paras and ports
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memory_datawidth)
memory.connect(ports, 'myaxi')
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000000),
Systask('finish'),
)
return m
if __name__ == '__main__':
test = mkTest()
verilog = test.to_verilog('tmp.v')
print(verilog)
sim = simulation.Simulator(test)
rslt = sim.run()
print(rslt)
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
b179a283949bcad747cb9b49e48b4d422e022aaa
|
4b68243d9db908945ee500174a8a12be27d150f9
|
/pogoprotos/networking/requests/messages/use_item_move_reroll_message_pb2.py
|
273b1820e6a4533e1e3bde4d1ba0975d24b6c332
|
[] |
no_license
|
ykram/pogoprotos-py
|
7285c86498f57dcbbec8e6c947597e82b2518d80
|
a045b0140740625d9a19ded53ece385a16c4ad4a
|
refs/heads/master
| 2020-04-20T10:19:51.628964
| 2019-02-02T02:58:03
| 2019-02-02T02:58:03
| 168,787,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 3,705
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/use_item_move_reroll_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.inventory.item import item_id_pb2 as pogoprotos_dot_inventory_dot_item_dot_item__id__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/use_item_move_reroll_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nJpogoprotos/networking/requests/messages/use_item_move_reroll_message.proto\x12\'pogoprotos.networking.requests.messages\x1a\'pogoprotos/inventory/item/item_id.proto\"\x80\x01\n\x18UseItemMoveRerollMessage\x12\x32\n\x07item_id\x18\x01 \x01(\x0e\x32!.pogoprotos.inventory.item.ItemId\x12\x12\n\npokemon_id\x18\x02 \x01(\x06\x12\x1c\n\x14reroll_unlocked_move\x18\x03 \x01(\x08\x62\x06proto3')
,
dependencies=[pogoprotos_dot_inventory_dot_item_dot_item__id__pb2.DESCRIPTOR,])
_USEITEMMOVEREROLLMESSAGE = _descriptor.Descriptor(
name='UseItemMoveRerollMessage',
full_name='pogoprotos.networking.requests.messages.UseItemMoveRerollMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='item_id', full_name='pogoprotos.networking.requests.messages.UseItemMoveRerollMessage.item_id', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pokemon_id', full_name='pogoprotos.networking.requests.messages.UseItemMoveRerollMessage.pokemon_id', index=1,
number=2, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reroll_unlocked_move', full_name='pogoprotos.networking.requests.messages.UseItemMoveRerollMessage.reroll_unlocked_move', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=161,
serialized_end=289,
)
_USEITEMMOVEREROLLMESSAGE.fields_by_name['item_id'].enum_type = pogoprotos_dot_inventory_dot_item_dot_item__id__pb2._ITEMID
DESCRIPTOR.message_types_by_name['UseItemMoveRerollMessage'] = _USEITEMMOVEREROLLMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UseItemMoveRerollMessage = _reflection.GeneratedProtocolMessageType('UseItemMoveRerollMessage', (_message.Message,), dict(
DESCRIPTOR = _USEITEMMOVEREROLLMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.use_item_move_reroll_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.UseItemMoveRerollMessage)
))
_sym_db.RegisterMessage(UseItemMoveRerollMessage)
# @@protoc_insertion_point(module_scope)
|
[
"mark@noffle.net"
] |
mark@noffle.net
|
e9cb1df974800fca9dbdcdd57dfd6d44af0d781e
|
a74cabbe1b11fc8ef575ea86f2543cd95db78ec9
|
/python_program/q813_Largest_Sum_of_Averages.py
|
ba6895f075efa57a8d1917bfbb87dbf17a1b3760
|
[] |
no_license
|
tszandy/leetcode
|
87e3ccf291b2879637d2d8238935a455b401a78a
|
f1f4361541dcffbb291285663c8820d7ffb37d2f
|
refs/heads/master
| 2023-04-06T15:34:04.847875
| 2023-03-26T12:22:42
| 2023-03-26T12:22:42
| 204,069,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
from typing import List
from collections import Counter,defaultdict,deque
from math import *
from functools import reduce,lru_cache,total_ordering
import numpy as np
from heapq import *
from bisect import bisect_left,bisect_right
from itertools import count,zip_longest
import queue
class Solution:
def largestSumOfAverages(self, nums: List[int], k: int) -> float:
self.n = len(nums)
self.nums = nums
return self.dp(0,k)
@lru_cache(None)
def dp(self,i,k):
if k==1:
return self.average(self.nums[i:])
max_score = 0
for j in range(i+1,self.n-1-(k-3)):
max_score = max(max_score,self.average(self.nums[i:j])+self.dp(j,k-1))
return max_score
def average(self,arr):
return sum(arr)/len(arr)
sol = Solution()
# input
nums = [9,1,2,3,9,1,2,3,4,1,4,21,4,69,2,3,3,2,15,1,32,2,3,1,70,5,2,3,2,2,1,1,5,2,1,45]
k = 20
# output
output = sol.largestSumOfAverages(nums,k)
# answer
answer = 303.53333
print(output, answer, answer == output)
# input
nums = [1,2,3,4,5,6,7]
k = 4
# output
output = sol.largestSumOfAverages(nums,k)
# answer
answer = 20.50000
print(output, answer, answer == output)
# input
nums = [9,1,2,3,9]
k = 3
# output
output = sol.largestSumOfAverages(nums,k)
# answer
answer = 20.50000
print(output, answer, answer == output)
|
[
"444980834@qq.com"
] |
444980834@qq.com
|
675804bc3995971da75d7f7dc54fd7b0cfca0c94
|
9ac35a2327ca9fddcf55077be58a1babffd23bdd
|
/cadence/replay_interceptor.py
|
7a511b6fc7c5bd8161e0861eb3de409232798476
|
[
"MIT"
] |
permissive
|
meetchandan/cadence-python
|
f1eb987c135f620607a62495096a89494216d847
|
cfd7a48e6da7c289c9ae0c29c94d12d2b05986e4
|
refs/heads/master
| 2022-12-14T12:46:32.364375
| 2020-09-16T15:50:55
| 2020-09-16T15:50:55
| 260,763,097
| 1
| 0
|
MIT
| 2020-09-16T15:48:14
| 2020-05-02T19:47:56
|
Python
|
UTF-8
|
Python
| false
| false
| 719
|
py
|
import inspect
from typing import Callable
def get_replay_aware_interceptor(fn: Callable):
def interceptor(*args, **kwargs):
from cadence.decision_loop import ITask
task: ITask = ITask.current()
if not task.decider.decision_context.is_replaying():
return fn(*args, **kwargs)
return interceptor
def make_replay_aware(target: object):
# TODO: Consider using metaclasses instead
if hasattr(target, "_cadence_python_intercepted"):
return target
for name, fn in inspect.getmembers(target):
if inspect.ismethod(fn):
setattr(target, name, get_replay_aware_interceptor(fn))
target._cadence_python_intercepted = True
return target
|
[
"firdaus.halim@gmail.com"
] |
firdaus.halim@gmail.com
|
8ed2c23107f1e3c65b51da65f817da0e32039f3b
|
6b37deabac3116e65bc869035cf8cfa50f22590c
|
/past/past3/c_geometric_progression/main.py
|
5f19e1e8cdefd25049f0dda8767837bd2e333831
|
[] |
no_license
|
hiromichinomata/atcoder
|
92122a2a2a8b9327f4c8dc0e40889e8dc0321079
|
82216622d9040e95239b4a21e973cb12e59d7f6e
|
refs/heads/master
| 2022-10-05T04:00:44.509719
| 2022-08-14T04:46:49
| 2022-08-14T04:46:49
| 176,891,471
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
#!/bin/python3
# pypy3
import sys
input = sys.stdin.readline
def main():
a, r, n = list(map(int, input().strip().split()))
v = a
LIMIT = 10**9
for _ in range(n-1):
v *= r
if v > LIMIT:
print('large')
sys.exit()
print(v)
main()
|
[
"git@hiromichinomata.com"
] |
git@hiromichinomata.com
|
b7ef3750a7d4fe8089d84c6855b84748143367c2
|
5fda498ef0bfc06962ad9b864d229193c45ccb4a
|
/Project1_Analyzing_the_NYC_Subway_Dataset/problem_sets2to5/problem_set2_wrangling_subway_data/2_9_get_hourly_exits.py
|
f4774640992d40f8fab48a745798f5a75ac894b5
|
[] |
no_license
|
prabhurgit/Data_Aanlyst_Nanodegree_projects
|
7934869b63cae57cb2851e22a5023c6cbe3d18ba
|
a7a13d93c632cd1840ba3a00fff80a60a131b7f3
|
refs/heads/master
| 2021-05-31T18:47:48.669414
| 2016-03-30T04:08:39
| 2016-03-30T04:08:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
import pandas
def get_hourly_exits(df):
'''
The data in the MTA Subway Turnstile data reports on the cumulative
number of entries and exits per row. Assume that you have a dataframe
called df that contains only the rows for a particular turnstile machine
(i.e., unique SCP, C/A, and UNIT). This function should change
these cumulative exit numbers to a count of exits since the last reading
(i.e., exits since the last row in the dataframe).
More specifically, you want to do two things:
1) Create a new column called EXITSn_hourly
2) Assign to the column the difference between EXITSn of the current row
and the previous row. If there is any NaN, fill/replace it with 0.
You may find the pandas functions shift() and fillna() to be helpful in this exercise.
Example dataframe below:
Unnamed: 0 C/A UNIT SCP DATEn TIMEn DESCn ENTRIESn EXITSn ENTRIESn_hourly EXITSn_hourly
0 0 A002 R051 02-00-00 05-01-11 00:00:00 REGULAR 3144312 1088151 0 0
1 1 A002 R051 02-00-00 05-01-11 04:00:00 REGULAR 3144335 1088159 23 8
2 2 A002 R051 02-00-00 05-01-11 08:00:00 REGULAR 3144353 1088177 18 18
3 3 A002 R051 02-00-00 05-01-11 12:00:00 REGULAR 3144424 1088231 71 54
4 4 A002 R051 02-00-00 05-01-11 16:00:00 REGULAR 3144594 1088275 170 44
5 5 A002 R051 02-00-00 05-01-11 20:00:00 REGULAR 3144808 1088317 214 42
6 6 A002 R051 02-00-00 05-02-11 00:00:00 REGULAR 3144895 1088328 87 11
7 7 A002 R051 02-00-00 05-02-11 04:00:00 REGULAR 3144905 1088331 10 3
8 8 A002 R051 02-00-00 05-02-11 08:00:00 REGULAR 3144941 1088420 36 89
9 9 A002 R051 02-00-00 05-02-11 12:00:00 REGULAR 3145094 1088753 153 333
'''
#your code here
df['EXITSn_hourly'] = (df['EXITSn'] - df.shift(1)['EXITSn']) #calculate hourly exits
df = df.fillna(0) #fill NA with 0
return df
|
[
"xiewisdom@gmail.com"
] |
xiewisdom@gmail.com
|
d0c266d3816a85e94982006addc47834bed26724
|
beae392dcd51001613e5d3e226587d646d854c1f
|
/ML_Finance/NumpySumRowsColumns.py
|
1aa0c502cc9ad2908e0d0a1bdc501c332b60bb86
|
[] |
no_license
|
ShubraChowdhury/ML
|
2c7a0a51ca657dda47ceb8a19cecbcafa6cd16b0
|
1e631f2ab86bfd35e2c8790501a7effb4e0d106a
|
refs/heads/master
| 2021-05-15T14:44:28.753164
| 2018-11-18T14:06:48
| 2018-11-18T14:06:48
| 106,418,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 26 08:39:38 2016
@author: DevAdmin
"""
import numpy as np
def test_run():
print("Random seed initializing the pseudo-random number generator.\n",
"Each time you run the random variable generator you will \n",
"get the same value untill session is closed")
np.random.seed(693)
a = np.random.randint(0,10, size=(5,4))
print("Array \n", a)
print("\n Sum of Columns use axis =0 \n", a.sum(axis=0))
print("\n Sum of Rows use axis =1 \n", a.sum(axis=1))
print("\n Mean of Columns use axis =0 \n", a.mean(axis=0))
print("\n Mean of Rows use axis =1 \n", a.mean(axis=1))
print("\n Total mean \n", a.mean())
if __name__ == "__main__":
test_run()
|
[
"noreply@github.com"
] |
ShubraChowdhury.noreply@github.com
|
fa38ee2c202a55385c5053c08d36f4cf040a5090
|
7725cafb8259f94cd9b3e2240182eb90d0e5246f
|
/src/scripts/icu_transliterate.py
|
e83fb9ba9c8e6b66c954df8aa0eac69e56227635
|
[] |
no_license
|
rasoolims/zero-shot-mt
|
c95cee19b583b428941932cd93e45025a919f1d8
|
33830dc7e48fa4a06641590cfaa0f4eb52c2e314
|
refs/heads/master
| 2023-08-22T07:07:54.093714
| 2021-10-08T22:36:39
| 2021-10-08T22:36:39
| 364,627,830
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
import os
import sys
import icu
tl = icu.Transliterator.createInstance('Any-Latin; Latin-ASCII')
with open(os.path.abspath(sys.argv[1]), "r") as r, open(os.path.abspath(sys.argv[2]), "w") as w:
for i, line in enumerate(r):
transliteration = tl.transliterate(line.strip())
w.write(transliteration)
w.write("\n")
print(i, end="\r")
print("\n Finished!")
|
[
"rasooli.ms@gmail.com"
] |
rasooli.ms@gmail.com
|
ae148a59eca9f309030041953e95838ba788030e
|
6b9b032a5516c8d7dbb26deeb1b189022f8f9411
|
/LeetCode/arrary/easy/118.杨辉三角.py
|
33fb9a2306f182e155161f1caca7c305cd6bc9c0
|
[] |
no_license
|
mrmenand/Py_transaction
|
84db99a0010ae90f43fba6b737d7035e48af55fb
|
7e82422c84ad699805cc12568b8d3d969f66a419
|
refs/heads/master
| 2021-07-13T21:15:03.714689
| 2020-06-21T11:49:31
| 2020-06-21T11:49:31
| 176,281,954
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
# 118.杨辉三角
class Solution:
def generate(self, numRows):
ret = [[1] * i for i in range(1,numRows+1)]
for i in range(2,numRows):
for j in range(1,i):
ret[i][j] = ret[i-1][j] + ret[i-1][j-1]
return ret
# class Solution:
# def generate(self, numRows):
# # numRows: int) -> List[List[int]]:
# res = []
# for i in range(1,numRows+1):
# if i==1:
# res.append([1])
# else:
# temp = []
# for j in range(i):
#
# if j==0 or j==i-1:
# temp.append(1)
# else:
# temp.append(res[i-2][j-1]+res[i-2][j])
# res.append(temp)
#
# return res
# class Solution:
# def generate(self, numRows):
# """
# :type numRows: int
# :rtype: List[List[int]]
# """
# L = []
# if numRows == 0:
# return L
# for i in range(numRows):
# L.append([1])
# for j in range(1,i+1):
# if j==i:
# L[i].append(1)
# else:
# L[i].append(L[i-1][j]+L[i-1][j-1])
# return L
|
[
"1006024749@qq.com"
] |
1006024749@qq.com
|
44c406f18354a8c9152bbfa3c76ca89805199ac9
|
11dbb589aa305a1f33525f6ead538330aa1ae677
|
/modelling/devices/convertors.py
|
309fd9568f5b7d1b962a7f030d7a8c798b74a0fd
|
[
"MIT"
] |
permissive
|
riahtu/EnergyManagementSystem
|
e70f48d70dc59a3cf6089149ec17b4dac054439e
|
2a48ba3b9bf7ff3003c197ee43ea9efbfbe42baa
|
refs/heads/master
| 2021-09-09T14:27:20.170163
| 2018-03-17T01:51:03
| 2018-03-17T01:51:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
"""
Convertor models for universal energy management system
The models include the following types of convertors.
1) AC 2 DC convertors
2) DC 2 DC convertors
"""
import configuration.configuration_convertors as default_parameters
BIC = \
{
"ID": default_parameters.BIC["AREA"], # Static information
"SMAX": default_parameters.BIC["SMAX"], # Static information
"EFF_AC2DC": default_parameters.BIC["EFF_AC2DC"], # Static information
"EFF_DC2AC": default_parameters.BIC["EFF_DC2AC"], # Static information
"STATUS": default_parameters.BIC["STATUS"], # Measurement information
"P_AC2DC":default_parameters.BIC["P_AC2DC"], # Measurement information
"P_DC2AC":default_parameters.BIC["P_DC2AC"],# Measurement information
"Q_AC":default_parameters.BIC["COMMAND_DC2AC"],# Measurement information
"TIME_GENERATED": default_parameters.BIC["TIME_GENERATED"], # Dynamic information
"TIME_APPLIED": default_parameters.BIC["TIME_APPLIED"], # Dynamic information
"TIME_COMMANDED": default_parameters.BIC["TIME_COMMANDED"], # Dynamic information
"COMMAND_AC2DC":default_parameters.BIC["COMMAND_AC2DC"], # Dynamic information
"COMMAND_DC2AC":default_parameters.BIC["COMMAND_DC2AC"], # Dynamic information
"COMMAND_Q":default_parameters.BIC["COMMAND_DC2AC"],# Dynamic information
}
|
[
"matrixeigs@gmail.com"
] |
matrixeigs@gmail.com
|
3f61d5add3da668acc3f4a002df63ca6c9826407
|
57cb9fef5efac78758f5d151b959ca2216c94083
|
/edx/app/analytics_api/venvs/analytics_api/bin/cwutil
|
d16864d59606f61bb57c2e230c1973db1db7d249
|
[] |
no_license
|
JosiahKennedy/openedx-branded
|
9751d5362088276a87b2e0edca0913568eeb1ac4
|
d16a25b035b2e810b8ab2b0a2ac032b216562e26
|
refs/heads/master
| 2022-12-21T02:39:17.133147
| 2020-03-25T06:03:23
| 2020-03-25T06:03:23
| 249,895,218
| 0
| 1
| null | 2022-12-08T01:23:48
| 2020-03-25T05:33:05
| null |
UTF-8
|
Python
| false
| false
| 5,083
|
#!/edx/app/analytics_api/venvs/analytics_api/bin/python2.7
# Author: Chris Moyer <cmoyer@newstex.com>
# Description: CloudWatch Utility
# For listing stats, creating alarms, and managing
# other CloudWatch aspects
import boto
cw = boto.connect_cloudwatch()
from datetime import datetime, timedelta
def _parse_time(time_string):
"""Internal function to parse a time string"""
def _parse_dict(d_string):
result = {}
if d_string:
for d in d_string.split(","):
d = d.split(":")
result[d[0]] = d[1]
return result
def ls(namespace=None):
"""
List metrics, optionally filtering by a specific namespace
namespace: Optional Namespace to filter on
"""
print "%-10s %-50s %s" % ("Namespace", "Metric Name", "Dimensions")
print "-"*80
for m in cw.list_metrics():
if namespace is None or namespace.upper() in m.namespace:
print "%-10s %-50s %s" % (m.namespace, m.name, m.dimensions)
def stats(namespace, metric_name, dimensions=None, statistics="Average", start_time=None, end_time=None, period=60, unit=None):
"""
Lists the statistics for a specific metric
namespace: The namespace to use, usually "AWS/EC2", "AWS/SQS", etc.
metric_name: The name of the metric to track, pulled from `ls`
dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
statistics: The statistics to measure, defaults to "Average"
'Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount'
start_time: Start time, default to now - 1 day
end_time: End time, default to now
period: Period/interval for counts, default to 60 minutes
unit: Unit to track, default depends on what metric is being tracked
"""
# Parse the dimensions
dimensions = _parse_dict(dimensions)
# Parse the times
if end_time:
end_time = _parse_time(end_time)
else:
end_time = datetime.utcnow()
if start_time:
start_time = _parse_time(start_time)
else:
start_time = datetime.utcnow() - timedelta(days=1)
print "%-30s %s" % ('Timestamp', statistics)
print "-"*50
data = {}
for m in cw.get_metric_statistics(int(period), start_time, end_time, metric_name, namespace, statistics, dimensions, unit):
data[m['Timestamp']] = m[statistics]
keys = data.keys()
keys.sort()
for k in keys:
print "%-30s %s" % (k, data[k])
def put(namespace, metric_name, dimensions=None, value=None, unit=None, statistics=None, timestamp=None):
"""
Publish custom metrics
namespace: The namespace to use; values starting with "AWS/" are reserved
metric_name: The name of the metric to update
dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
value: The value to store, mutually exclusive with `statistics`
statistics: The statistics to store, mutually exclusive with `value`
(must specify all of "Minimum", "Maximum", "Sum", "SampleCount")
timestamp: The timestamp of this measurement, default is current server time
unit: Unit to track, default depends on what metric is being tracked
"""
def simplify(lst):
return lst[0] if len(lst) == 1 else lst
print cw.put_metric_data(namespace, simplify(metric_name.split(';')),
dimensions = simplify(map(_parse_dict, dimensions.split(';'))) if dimensions else None,
value = simplify(value.split(';')) if value else None,
statistics = simplify(map(_parse_dict, statistics.split(';'))) if statistics else None,
timestamp = simplify(timestamp.split(';')) if timestamp else None,
unit = simplify(unit.split(';')) if unit else None)
def help(fnc=None):
"""
Print help message, optionally about a specific function
"""
import inspect
self = sys.modules['__main__']
if fnc:
try:
cmd = getattr(self, fnc)
except:
cmd = None
if not inspect.isfunction(cmd):
print "No function named: %s found" % fnc
sys.exit(2)
(args, varargs, varkw, defaults) = inspect.getargspec(cmd)
print cmd.__doc__
print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args]))
else:
print "Usage: cwutil [command]"
for cname in dir(self):
if not cname.startswith("_") and not cname == "cmd":
cmd = getattr(self, cname)
if inspect.isfunction(cmd):
doc = cmd.__doc__
print "\t%s - %s" % (cname, doc)
sys.exit(1)
if __name__ == "__main__":
import sys
self = sys.modules['__main__']
if len(sys.argv) >= 2:
try:
cmd = getattr(self, sys.argv[1])
except:
cmd = None
args = sys.argv[2:]
else:
cmd = help
args = []
if not cmd:
cmd = help
try:
cmd(*args)
except TypeError as e:
print e
help(cmd.__name__)
|
[
"josiahk@phyziklabs.com"
] |
josiahk@phyziklabs.com
|
|
3365f9a3741b09624b9cb9f33dbbe0772f11f3f0
|
c71d332dd845036c21c9fd8f4f571f9209bf2672
|
/Remove K Digits.py
|
d0debe9c725fa1a85abe458b44127cde95a061cc
|
[] |
no_license
|
diksha12p/DSA_Practice_Problems
|
2884fd9e77094d9662cb8747744dd2ef563e25e4
|
d56e3d07620d51871199f61ae82cff2bd75b4744
|
refs/heads/master
| 2023-01-20T15:31:37.824918
| 2020-11-29T21:37:12
| 2020-11-29T21:37:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,449
|
py
|
"""
Given a non-negative integer num represented as a string, remove k digits from the number so that the new number is the
smallest possible.
Note:
The length of num is less than 10002 and will be ≥ k.
The given num does not contain any leading zero.
Example 1:
Input: num = "1432219", k = 3
Output: "1219"
Explanation: Remove the three digits 4, 3, and 2 to form the new number 1219 which is the smallest.
Example 2:
Input: num = "10200", k = 1
Output: "200"
Explanation: Remove the leading 1 and the number is 200. Note that the output must not contain leading zeroes.
Example 3:
Input: num = "10", k = 2
Output: "0"
Explanation: Remove all the digits from the number and it is left with nothing which is 0.
"""
class Solution:
def removeKdigits(self, num: str, k: int) -> str:
stack = []
# IDEA: Remove the element from L to R if it causes a dip i.e. greater than the next element
for char in num:
while k and stack and stack[-1] > char:
stack.pop()
k -= 1
stack.append(char)
# Num is already in an increasing order -> Stack has the same numbers
while k:
stack.pop()
k -= 1
# Retrieving the number from the entries in stack
# or '0'ensures that something is returned in case stack in empty
return ''.join(stack).lstrip('0') or '0'
sol = Solution()
print(sol.removeKdigits('10', 2))
|
[
"noreply@github.com"
] |
diksha12p.noreply@github.com
|
a3ea6474e50b140da2329e05fcf499abc667ef99
|
ea1af1a564f96fb36974aa094192877598b0c6bf
|
/Chapter5/Exercises/ex5_6.py
|
1f18d6252d311a11cea4128fa003c09bcec92874
|
[] |
no_license
|
GSantos23/Crash_Course
|
63eecd13a60141e520b5ca4351341c21c4782801
|
4a5fc0cb9ce987948a728d43c4f266d34ba49a87
|
refs/heads/master
| 2020-03-20T23:20:43.201255
| 2018-08-21T01:13:06
| 2018-08-21T01:13:06
| 137,841,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
# Exercise 5.6
"""
Stages of Life: Write an if - elif - else chain that determines a person’s
stage of life. Set a value for the variable age , and then:
• If the person is less than 2 years old, print a message that the person
is a baby.
• If the person is at least 2 years old but less than 4, print a message
that the person is a toddler.
• If the person is at least 4 years old but less than 13, print a message
that the person is a kid.
• If the person is at least 13 years old but less than 20, print a message
that the person is a teenager.
• If the person is at least 20 years old but less than 65, print a message
that the person is an adult.
• If the person is age 65 or older, print a message that the person is an
elder.
"""
age = 25
if age < 2:
print("You're a baby")
elif age >= 2 and age < 4:
print("You're a toddler")
elif age >= 4 and age < 13:
print("You're a kid")
elif age >= 13 and age < 20:
print("You're a teenager")
elif age >= 20 and age < 65:
print("You're an adult")
else:
print("You're an elder")
|
[
"santosgerson64@gmail.com"
] |
santosgerson64@gmail.com
|
9d84e3bc55c3151f848a83afe7ea6ccc17c78ccf
|
1450bb467a73b80a3e3f649fd9423679482a235a
|
/test/printEgammaUserData.py
|
5c558ea0fee2f17400d678aad9111dd962eecf89
|
[] |
no_license
|
cms-egamma/EgammaPostRecoTools
|
430d57decd747f16904d06ccb8f61a0556ef2bb2
|
209673a77cd91b36f1fe3f09579b7f7fc4552089
|
refs/heads/master
| 2021-09-25T09:38:01.836119
| 2021-09-17T14:19:49
| 2021-09-17T14:19:49
| 223,762,349
| 0
| 4
| null | 2021-04-04T16:59:18
| 2019-11-24T15:03:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,846
|
py
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DataFormats.FWLite import Events, Handle
import ROOT
import argparse
def convert_to_str(vec_str):
output = ""
for entry in vec_str:
if output != "": output+="\n "
output+=entry
return output
def convertpair_to_str(vec_str):
output = ""
for entry in vec_str:
if output != "": output+="\n "
output+=entry.first
return output
def print_ele_user_data(ele):
    """Print the userFloat names, userInt names and electron-ID names
    stored on a pat::Electron (one name per line)."""
    print("ele userfloats:")
    print("    "+convert_to_str(ele.userFloatNames()))
    print("ele userints:")
    print("    "+convert_to_str(ele.userIntNames()))
    print("ele IDs:")
    # electronIDs() returns (name, value) pairs; only the names are printed
    print("    "+convertpair_to_str(ele.electronIDs()))
def print_pho_user_data(pho):
    """Print the userFloat names, userInt names and photon-ID names
    stored on a pat::Photon (one name per line)."""
    print("pho userfloats:")
    print("    "+convert_to_str(pho.userFloatNames()))
    print("pho userints:")
    print("    "+convert_to_str(pho.userIntNames()))
    print("pho IDs:")
    # photonIDs() returns (name, value) pairs; only the names are printed
    print("    "+convertpair_to_str(pho.photonIDs()))
if __name__ == "__main__":
    """
    prints electron and photon miniAOD user data
    note: it assumes that all electrons and photons have exactly the same userdata so we can just print
    the first one. This is currently true except for low pt electrons and photons hence we put a >20 GeV
    cut on the ele/pho we print
    """
    # Load the FWLite libraries so edm collections can be read outside cmsRun.
    ROOT.gSystem.Load("libFWCoreFWLite.so");
    ROOT.gSystem.Load("libDataFormatsFWLite.so");
    ROOT.FWLiteEnabler.enable()

    parser = argparse.ArgumentParser(description='prints E/gamma pat::Electrons/Photons user data')
    parser.add_argument('filename',help='input filename')
    args = parser.parse_args()

    # Handles + labels for the slimmed electron/photon collections.
    eles, ele_label = Handle("std::vector<pat::Electron>"), "slimmedElectrons"
    phos, pho_label = Handle("std::vector<pat::Photon>"), "slimmedPhotons"

    #we put a minimum et as low et electrons/photons may not have all the variables
    min_pho_et = 20
    min_ele_et = 20
    done_ele = False
    done_pho = False
    events = Events(args.filename)
    for event_nr,event in enumerate(events):
        # Stop as soon as one electron and one photon have been printed.
        if done_ele and done_pho: break
        if not done_pho:
            event.getByLabel(pho_label,phos)
            for pho_nr,pho in enumerate(phos.product()):
                if pho.et()<min_pho_et:
                    continue
                else:
                    print_pho_user_data(pho)
                    done_pho = True
                    break
        if not done_ele:
            event.getByLabel(ele_label,eles)
            for ele_nr,ele in enumerate(eles.product()):
                if ele.et()<min_ele_et:
                    continue
                else:
                    print_ele_user_data(ele)
                    done_ele = True
                    break
|
[
"sam.j.harper@gmail.com"
] |
sam.j.harper@gmail.com
|
32d5735e30c382c4e7768e9ca3cfbc44ac537e6d
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/twilio/tests/integration/numbers/v2/regulatory_compliance/test_supporting_document.py
|
c6d7aab5bb0875c1e174a0bdd4fc75c4d84b9225
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:b7f19e719e96de6cf10ec0f84fda59ffe9a97e89340d069975676ef4fca5b46d
size 8793
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
531654703bc4607c33e930eb411f8c3da0423548
|
072f8bffbfef6e149ad1934ea9183a79864c1acd
|
/venv/Lib/site-packages/ironic_inspector_client/test/test_common_http.py
|
b7a15d5a131c23653b35f8a98bf9f8c3300b7dca
|
[] |
no_license
|
numvc/LuxoftBot
|
77d9bf8f5f63aee63350f1ec82f4b940afe203d2
|
29d7ca8868ab86bc076509d103f7596039333417
|
refs/heads/master
| 2020-09-21T21:37:12.527546
| 2019-12-04T23:24:35
| 2019-12-04T23:24:35
| 224,939,956
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,174
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from keystoneauth1 import exceptions
from keystoneauth1 import session
import mock
from ironic_inspector_client.common import http
class TestCheckVersion(unittest.TestCase):
    """Tests for BaseClient._check_api_version() version normalisation."""

    @mock.patch.object(http.BaseClient, 'server_api_versions',
                       lambda *args, **kwargs: ((1, 0), (1, 99)))
    def _check(self, version):
        # Build a client against a fake server advertising versions
        # 1.0 .. 1.99 and normalise *version* through it.
        cli = http.BaseClient(1, inspector_url='http://127.0.0.1:5050')
        return cli._check_api_version(version)

    def test_tuple(self):
        self.assertEqual((1, 0), self._check((1, 0)))

    def test_small_tuple(self):
        # A 1-tuple is completed with a minor version of 0.
        self.assertEqual((1, 0), self._check((1,)))

    def test_int(self):
        self.assertEqual((1, 0), self._check(1))

    def test_str(self):
        self.assertEqual((1, 0), self._check("1.0"))

    def test_invalid_tuple(self):
        self.assertRaises(TypeError, self._check, (1, "x"))
        self.assertRaises(ValueError, self._check, (1, 2, 3))

    def test_invalid_str(self):
        self.assertRaises(ValueError, self._check, "a.b")
        self.assertRaises(ValueError, self._check, "1.2.3")
        self.assertRaises(ValueError, self._check, "foo")

    def test_unsupported(self):
        # Requesting a version outside the server range must be rejected.
        self.assertRaises(http.VersionNotSupported, self._check, (99, 42))
# Canned response headers: a server advertising API versions 1.0 .. 1.9.
FAKE_HEADERS = {
    http._MIN_VERSION_HEADER: '1.0',
    http._MAX_VERSION_HEADER: '1.9'
}
@mock.patch.object(session.Session, 'get', autospec=True,
                   **{'return_value.status_code': 200,
                      'return_value.headers': FAKE_HEADERS})
class TestServerApiVersions(unittest.TestCase):
    """Tests for BaseClient.server_api_versions() header parsing."""

    def _check(self, current=1):
        # Query the (mocked) server and return its (min, max) version tuple.
        return http.BaseClient(
            api_version=current,
            inspector_url='http://127.0.0.1:5050').server_api_versions()

    def test_no_headers(self, mock_get):
        # No version headers -> default to (1, 0) for both bounds.
        mock_get.return_value.headers = {}
        minv, maxv = self._check()
        self.assertEqual((1, 0), minv)
        self.assertEqual((1, 0), maxv)

    def test_with_headers(self, mock_get):
        mock_get.return_value.headers = {
            'X-OpenStack-Ironic-Inspector-API-Minimum-Version': '1.1',
            'X-OpenStack-Ironic-Inspector-API-Maximum-Version': '1.42',
        }
        minv, maxv = self._check(current=(1, 2))
        self.assertEqual((1, 1), minv)
        self.assertEqual((1, 42), maxv)

    def test_with_404(self, mock_get):
        # A 404 is treated like a pre-versioning server: (1, 0).
        mock_get.return_value.status_code = 404
        mock_get.return_value.headers = {}
        minv, maxv = self._check()
        self.assertEqual((1, 0), minv)
        self.assertEqual((1, 0), maxv)

    def test_with_other_error(self, mock_get):
        # Any other HTTP error surfaces as a ClientError.
        mock_get.return_value.status_code = 500
        mock_get.return_value.headers = {}
        self.assertRaises(http.ClientError, self._check)
class TestRequest(unittest.TestCase):
    """Tests for BaseClient.request(): endpoint discovery, URL building,
    version header injection and error translation."""

    base_url = 'http://127.0.0.1:5050/v1'

    def setUp(self):
        super(TestRequest, self).setUp()
        # Expected version header sent with every request.
        self.headers = {http._VERSION_HEADER: '1.0'}
        self.session = mock.Mock(spec=session.Session)
        self.session.get_endpoint.return_value = self.base_url
        self.req = self.session.request
        self.req.return_value.status_code = 200

    @mock.patch.object(http.BaseClient, 'server_api_versions',
                       lambda self: ((1, 0), (1, 42)))
    def get_client(self, version=1, inspector_url=None, use_session=True):
        # Build a client either with the mocked keystone session or with a
        # bare inspector URL (no auth).
        if use_session:
            return http.BaseClient(version, session=self.session,
                                   inspector_url=inspector_url)
        else:
            return http.BaseClient(version, inspector_url=inspector_url)

    def test_ok(self):
        res = self.get_client().request('get', '/foo/bar')
        self.assertIs(self.req.return_value, res)
        self.req.assert_called_once_with(self.base_url + '/foo/bar', 'get',
                                         raise_exc=False, headers=self.headers)
        self.session.get_endpoint.assert_called_once_with(
            service_type='baremetal-introspection',
            interface=None, region_name=None)

    def test_no_endpoint(self):
        # Catalog lookup returning None must raise EndpointNotFound.
        self.session.get_endpoint.return_value = None
        self.assertRaises(http.EndpointNotFound, self.get_client)
        self.session.get_endpoint.assert_called_once_with(
            service_type='baremetal-introspection',
            interface=None, region_name=None)

    def test_endpoint_not_found(self):
        # keystoneauth's EndpointNotFound is translated to the client's own.
        self.session.get_endpoint.side_effect = exceptions.EndpointNotFound()
        self.assertRaises(http.EndpointNotFound, self.get_client)
        self.session.get_endpoint.assert_called_once_with(
            service_type='baremetal-introspection',
            interface=None, region_name=None)

    @mock.patch.object(session.Session, 'request', autospec=True,
                       **{'return_value.status_code': 200})
    def test_ok_no_auth(self, mock_req):
        # With only an inspector_url a session is created internally.
        res = self.get_client(
            use_session=False,
            inspector_url='http://some/host').request('get', '/foo/bar')
        self.assertIs(mock_req.return_value, res)
        mock_req.assert_called_once_with(mock.ANY,
                                         'http://some/host/v1/foo/bar', 'get',
                                         raise_exc=False, headers=self.headers)

    def test_ok_with_session_and_url(self):
        # An explicit inspector_url takes precedence over catalog lookup.
        res = self.get_client(
            use_session=True,
            inspector_url='http://some/host').request('get', '/foo/bar')
        self.assertIs(self.req.return_value, res)
        self.req.assert_called_once_with('http://some/host/v1/foo/bar', 'get',
                                         raise_exc=False, headers=self.headers)

    def test_explicit_version(self):
        res = self.get_client(version='1.2').request('get', '/foo/bar')
        self.assertIs(self.req.return_value, res)
        # The requested version must be reflected in the version header.
        self.headers[http._VERSION_HEADER] = '1.2'
        self.req.assert_called_once_with(self.base_url + '/foo/bar', 'get',
                                         raise_exc=False, headers=self.headers)

    def test_error(self):
        # JSON error payloads: the nested message becomes the exception text.
        self.req.return_value.status_code = 400
        self.req.return_value.content = json.dumps(
            {'error': {'message': 'boom'}}).encode('utf-8')
        self.assertRaisesRegexp(http.ClientError, 'boom',
                                self.get_client().request, 'get', 'url')

    def test_error_discoverd_style(self):
        # Plain-text (legacy "discoverd") error bodies are used verbatim.
        self.req.return_value.status_code = 400
        self.req.return_value.content = b'boom'
        self.assertRaisesRegexp(http.ClientError, 'boom',
                                self.get_client().request, 'get', 'url')
|
[
"feys-00@mail.ru"
] |
feys-00@mail.ru
|
4ad05ebbeca5160b6ac94c8e898cfc8f3c38295f
|
22b348a0d10519cb1f1da5e886fdf2d3c167cf5a
|
/myweb/api/controllers/v1/schemas/user.py
|
407d624aefe8d1983e695e6d585c34ae1be6d667
|
[] |
no_license
|
liuluyang/openstack_mogan_study
|
dab0a8f918ffd17e0a747715998e81304672b75b
|
8624f765da7f5aa0c210f0fa945fc50cf8a67b9e
|
refs/heads/master
| 2021-01-19T17:03:15.370323
| 2018-04-12T09:50:38
| 2018-04-12T09:50:38
| 101,040,396
| 1
| 1
| null | 2017-11-01T02:17:31
| 2017-08-22T08:30:22
|
Python
|
UTF-8
|
Python
| false
| false
| 505
|
py
|
from myweb.api.validation import parameter_types
# JSON-schema fragment for free-form metadata: keys must match
# ^[a-zA-Z0-9-_:. ]{1,255}$ and values are 1..255-character strings.
metadata = {
    'type': 'object',
    'patternProperties': {
        '^[a-zA-Z0-9-_:. ]{1,255}$': {
            'type': 'string', 'maxLength': 255, 'minLength':1
        }
    },
    'additionalProperties': False
}

# JSON-schema for the "add user" request body: optional name and metadata,
# mandatory policy restricted to "disk" or "one".
user_add = {
    'type': 'object',
    'properties': {
        'name':parameter_types.name,
        'policy':{ "enum": [ "disk" ,'one'] },
        'metadata':metadata
    },
    'required': ['policy'],
    'additionalProperties': False,
}
|
[
"1120773382@qq.com"
] |
1120773382@qq.com
|
6aede4f0a10c3494cb90fe869bdc6fdb62075c3c
|
8520c991dc543f5f4e1efe59ab401824173bb985
|
/565-array-nesting/solution.py
|
078594952952ef661f9c1760257fa4f6bfdf44c3
|
[] |
no_license
|
katryo/leetcode
|
d44f70f2853c4f5ea9a462d022feb0f5436c2236
|
0da45559271d3dba687858b8945b3e361ecc813c
|
refs/heads/master
| 2020-03-24T12:04:53.859047
| 2020-02-18T04:27:55
| 2020-02-18T04:27:55
| 142,703,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 897
|
py
|
class Solution(object):
    def arrayNesting(self, nums):
        """Return the length of the longest cycle in the permutation *nums*.

        Following i -> nums[i] -> nums[nums[i]] -> ... always loops back to
        the start, so the array decomposes into disjoint cycles; each index
        is visited exactly once, giving O(n) time overall.

        :type nums: List[int]
        :rtype: int
        """
        seen = [False] * len(nums)
        best = -1
        for start in range(len(nums)):
            if seen[start]:
                continue
            # Walk the cycle containing `start`, marking every member.
            length = 0
            cur = start
            while not seen[cur]:
                seen[cur] = True
                cur = nums[cur]
                length += 1
            best = max(best, length)
        return best
# s = Solution()
# print(s.arrayNesting([5,4,0,3,1,6,2]))
# print(s.arrayNesting([0]))
# print(s.arrayNesting([1, 0]))
# print(s.arrayNesting([0, 1]))
|
[
"katoryo55@gmail.com"
] |
katoryo55@gmail.com
|
6ef83d142494ed6bb3e2514a5a63d600fac7ecdf
|
c4afc78e2e8ffbcc430b8799e3e1073dac8e972d
|
/src/themester/views.py
|
b862f20737f533596cc82d36a9688be279b8cedd
|
[
"MIT"
] |
permissive
|
pauleveritt/themester-pre-hypermodern
|
5c04197c7d4589b29709c736b3bcd02312e40a61
|
219595956f8ca1886d0c30b33efd86234aaf57ec
|
refs/heads/master
| 2023-08-27T18:53:52.693340
| 2020-11-22T15:04:25
| 2020-11-22T15:04:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
"""
Like a component, but with for_=View only.
"""
from typing import Callable, Optional, Type
from venusian import Scanner, attach
from wired import ServiceContainer, ServiceRegistry
from wired_injector.injector import Injector
from themester.protocols import View
def register_view(
    registry: ServiceRegistry,
    target: Callable = None,
    context: Optional[Type] = None,
    name: Optional[str] = None,
):
    """ Imperative form of the view decorator """

    def view_factory(container: ServiceContainer):
        # Construct the view through the injector so its dependencies are
        # resolved from the container at lookup time.
        injector = Injector(container)
        return injector(target)

    # Only forward `name` when one was given, matching register_factory's
    # default behaviour for unnamed registrations.
    kwargs = {'context': context}
    if name is not None:
        kwargs['name'] = name
    registry.register_factory(view_factory, View, **kwargs)
class view:
    """Decorator that registers the wrapped class as a View when the
    package is scanned by venusian."""

    def __init__(
            self,
            context: Optional[Type] = None,
            name: Optional[str] = None
    ):
        self.context = context
        self.name = name

    def __call__(self, wrapped):
        def callback(scanner: Scanner, name: str, cls):
            # At scan time, pull the registry off the scanner and perform
            # the imperative registration.
            register_view(
                getattr(scanner, 'registry'),
                target=cls,
                context=self.context,
                name=self.name,
            )

        attach(wrapped, callback, category='viewdom_wired')
        return wrapped
|
[
"pauleveritt@me.com"
] |
pauleveritt@me.com
|
222d709d64de1813eca0d9d49af5111d7c124b88
|
0e91030c47071029d978dbfb9e7a30ae6826afe5
|
/venv/Scripts/easy_install-script.py
|
a0923dd8d08599e350611459fd8c7d659ba84dc3
|
[] |
no_license
|
liqi629/python_lemon
|
095983fadda3639b058043b399180d19f899284b
|
bc5e6e6c92561ba9cec2798b7735505b377e9cd6
|
refs/heads/master
| 2023-02-04T00:57:09.447008
| 2020-12-27T14:46:31
| 2020-12-27T14:46:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
#!C:\Users\lipan\PycharmProjects\lemon_python_learning\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
# Auto-generated setuptools launcher: invokes the easy_install entry point.
__requires__ = 'setuptools==40.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py(w)'/'.exe' suffix so argv[0] shows the plain
    # command name in help/error output.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
|
[
"396167189@qq.com"
] |
396167189@qq.com
|
c0d46bae7bb41fb5836e0cddab066566832ec3b3
|
c6759b857e55991fea3ef0b465dbcee53fa38714
|
/tools/nntool/nntool/quantization/verify_quantization.py
|
70b3ea3146bfa1c5422a82a807a5c2ebef945dbe
|
[
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] |
permissive
|
GreenWaves-Technologies/gap_sdk
|
1b343bba97b7a5ce62a24162bd72eef5cc67e269
|
3fea306d52ee33f923f2423c5a75d9eb1c07e904
|
refs/heads/master
| 2023-09-01T14:38:34.270427
| 2023-08-10T09:04:44
| 2023-08-10T09:04:44
| 133,324,605
| 145
| 96
|
Apache-2.0
| 2023-08-27T19:03:52
| 2018-05-14T07:50:29
|
C
|
UTF-8
|
Python
| false
| false
| 3,015
|
py
|
# Copyright (C) 2020, 2021 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
def walk_graph(G, qrecs, node, visited):
    """Recursively validate quantization records reachable from *node*.

    Checks that every node has a quantization record with input and output
    qtypes set, and that each producer output qtype matches the consumer
    input qtype on every edge.

    Args:
        G: the graph (must provide indexed_out_edges()/out_edges()).
        qrecs: mapping of node name -> quantization record.
        node: node to start walking from.
        visited: set of already-checked nodes, shared across the recursion.

    Returns:
        A list of human-readable problem descriptions (empty if consistent).
    """
    problems = []
    if node in visited:
        return problems
    visited.add(node)
    qrec = qrecs.get(node.name)
    if qrec is None:
        problems.append(f"node {node.name} has no quantization set")
    elif qrec.out_qs is None:
        problems.append(f"node {node.name} has no output quantization set")
    elif qrec.in_qs is None:
        problems.append(f"node {node.name} has no input quantization set")
    else:
        for idx, edge_group in enumerate(G.indexed_out_edges(node.name)):
            if len(qrec.out_qs) <= idx:
                problems.append(
                    f"node {node.name} has no output quantization set on output {idx}")
                continue
            qtype = qrec.out_qs[idx]
            if qtype is None and edge_group:
                problems.append(
                    f"node {node.name} quantization on output {idx} is None")
                continue
            for edge in edge_group:
                to_qrec = qrecs.get(edge.to_node.name)
                if to_qrec is None or to_qrec.in_qs is None:
                    # error will be reported when node is visited
                    continue
                if len(to_qrec.in_qs) <= edge.to_idx:
                    problems.append(
                        f"node {edge.to_node.name} has no input quantization set on input {edge.to_idx}")
                    # BUG FIX: without this continue, the subscript below
                    # raises IndexError on the short in_qs list.
                    continue
                if to_qrec.in_qs[edge.to_idx] is None:
                    problems.append(
                        f"node {edge.to_node.name} quantization set on input {edge.to_idx} is None")
                    # BUG FIX: skip the comparison — there is no qtype to
                    # compare against.
                    continue
                if not qtype.quantization_equal(to_qrec.in_qs[edge.to_idx]):
                    problems.append(f"node {edge.to_node.name} quantization set on input {edge.to_idx} "
                                    f"does not match node {node.name} output {idx} {qtype} -> {to_qrec.in_qs[edge.to_idx]}")
    for edge in G.out_edges(node.name):
        problems.extend(walk_graph(G, qrecs, edge.to_node, visited))
    return problems
def verify_quantization(G):
    """Validate the quantization of every node reachable from G's inputs.

    Returns a list of problem descriptions; an empty list means the
    quantization is complete and consistent.
    """
    if G.quantization is None:
        return ["quantization is not set"]
    seen = set()
    issues = []
    for inp in G.inputs():
        issues.extend(walk_graph(G, G.quantization, inp, seen))
    return issues
|
[
"yao.zhang@greenwaves-technologies.com"
] |
yao.zhang@greenwaves-technologies.com
|
1706a5f900e84feda55426b1a048153a06e8fd8c
|
6d05f11c55ea277a08fc375b1c5af5ecc076000b
|
/python/paddle/fluid/tests/unittests/test_fleet_nocvm_1.py
|
2959043482c9a587feffe35476ba2e6f4102938e
|
[
"Apache-2.0"
] |
permissive
|
sfraczek/Paddle
|
8602df1b11937400f93ac5861a366226208a6f05
|
d1e2c61b22b9675adc3c4a52227d2220babaa001
|
refs/heads/develop
| 2023-04-04T22:52:42.629243
| 2023-03-16T12:06:10
| 2023-03-16T12:06:10
| 140,574,617
| 0
| 0
|
Apache-2.0
| 2019-03-26T15:54:00
| 2018-07-11T12:51:20
|
C++
|
UTF-8
|
Python
| false
| false
| 4,102
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test fleet."""
import os
import unittest
import paddle
class TestFleet1(unittest.TestCase):
    """
    Test cases for fleet minimize.
    """

    def setUp(self):
        """Set up, set envs."""
        os.environ["PADDLE_TRAINERS_NUM"] = "2"
        os.environ[
            "PADDLE_PSERVERS_IP_PORT_LIST"
        ] = "127.0.0.1:36001,127.0.0.2:36001"

    def test_pslib_1(self):
        """Test cases for pslib."""
        import paddle.fluid as fluid
        from paddle.incubate.distributed.fleet.parameter_server.pslib import (
            fleet,
        )
        from paddle.incubate.distributed.fleet.role_maker import (
            GeneralRoleMaker,
        )

        # Minimal single-trainer environment for the role maker.
        os.environ["POD_IP"] = "127.0.0.1"
        os.environ["PADDLE_PORT"] = "36001"
        os.environ["TRAINING_ROLE"] = "TRAINER"
        os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001"
        os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36002"
        os.environ["PADDLE_TRAINER_ID"] = "0"
        role_maker = GeneralRoleMaker()
        # role_maker.generate_role()
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        # fleet.init(role_maker)
        train_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(train_program, startup_program):
            # Tiny sparse-embedding network: show -> emb -> fc vs. click.
            show = paddle.static.data(
                name="show",
                shape=[-1, 1],
                dtype="int64",
                lod_level=1,
            )
            emb = fluid.layers.embedding(
                input=show,
                size=[1, 1],
                is_sparse=True,
                is_distributed=True,
                param_attr=fluid.ParamAttr(name="embedding"),
            )
            fc = paddle.static.nn.fc(x=emb, size=1, activation=None)
            label = paddle.static.data(
                name="click",
                shape=[-1, 1],
                dtype="int64",
                lod_level=1,
            )
            label_cast = paddle.cast(label, dtype='float32')
            cost = paddle.nn.functional.log_loss(fc, label_cast)
        # BUG FIX: the bare `except:` clauses below also swallowed
        # SystemExit/KeyboardInterrupt; catch Exception instead while keeping
        # the deliberate best-effort "skip when pslib is unavailable" flow.
        try:
            adam = fluid.optimizer.Adam(learning_rate=0.000005)
            adam = fleet.distributed_optimizer(
                adam,
                strategy={
                    "embedding": {
                        "sparse_accessor_class": "DownpourCtrAccessor"
                    }
                },
            )
            adam.minimize([cost], [scope])
            fleet.run_server()
        except Exception:
            print("do not support pslib test, skip")
            return
        try:
            # worker should call these methods instead of server
            # the following is only for test when with_pslib=off
            def test_func():
                """
                it is only a test function
                """
                return True

            fleet._role_maker.is_first_worker = test_func
            fleet._role_maker._barrier_worker = test_func
            fleet.save_model("./model_000")
            fleet.save_one_table(0, "./model_001")
            fleet.save_one_table(0, "./model_002", prefix="hahaha")
            fleet.load_model("./model_0003")
            fleet.load_one_table(0, "./model_004")
            fleet.confirm()
            fleet.revert()
        except Exception:
            print("do not support pslib test, skip")
            return
if __name__ == "__main__":
    # Run the fleet test case when executed directly.
    unittest.main()
|
[
"noreply@github.com"
] |
sfraczek.noreply@github.com
|
418990d7fc131fafa72d7cf2a4e781b2938d3a2f
|
71327347c4ffe832b656edd40bdcdaf13f123e16
|
/pywolf/migrations/0018_auto_20180722_2259.py
|
8633d5ea8314a6bddcc80c7ee880e3b9c12ae265
|
[
"BSD-3-Clause"
] |
permissive
|
tevawolf/pywolf
|
8b58570deac4a33643c323c1ff9754e0ce2b33ed
|
94e3c26d8c3b279990624f23658e22ab00eead46
|
refs/heads/master
| 2020-04-02T12:17:12.680330
| 2018-11-19T07:34:42
| 2018-11-19T07:34:42
| 154,426,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,844
|
py
|
# Generated by Django 2.0.6 on 2018-07-22 13:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds max_str_length/max_voice_point fields
    to MVoiceSetting and VillageVoiceSetting, and resets defaults on several
    existing VillageVoiceSetting fields."""

    dependencies = [
        ('pywolf', '0017_villageparticipantvoice_voice_order'),
    ]

    operations = [
        migrations.AddField(
            model_name='mvoicesetting',
            name='max_str_length',
            field=models.SmallIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='mvoicesetting',
            name='max_voice_point',
            field=models.SmallIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='villagevoicesetting',
            name='max_str_length',
            field=models.SmallIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='villagevoicesetting',
            name='max_voice_point',
            field=models.SmallIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='villagevoicesetting',
            name='epilogue_limit_off_flg',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='villagevoicesetting',
            name='prologue_limit_off_flg',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='villagevoicesetting',
            name='tomb_limit_off_flg',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='villagevoicesetting',
            name='voice_number',
            field=models.SmallIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='villagevoicesetting',
            name='voice_point',
            field=models.SmallIntegerField(default=0),
        ),
    ]
|
[
"tevawolf@yahoo.co.jp"
] |
tevawolf@yahoo.co.jp
|
edd042f5b2d0fa90c43f9451ea0b8549a7e1d32b
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flashblade/FB_2_10/models/object_store_access_policy_action_response.py
|
2a0764dd2f2a1eec8de689c8e0f360a56fadcca5
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,268
|
py
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.10, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_10 import models
class ObjectStoreAccessPolicyActionResponse(object):
    """
    Swagger-generated response model wrapping a list of
    ObjectStoreAccessPolicyAction items.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'items': 'list[ObjectStoreAccessPolicyAction]'
    }

    attribute_map = {
        'items': 'items'
    }

    required_args = {
    }

    def __init__(
        self,
        items=None,  # type: List[models.ObjectStoreAccessPolicyAction]
    ):
        """
        Keyword args:
            items (list[ObjectStoreAccessPolicyAction])
        """
        if items is not None:
            self.items = items

    def __setattr__(self, key, value):
        # Only attributes declared in attribute_map may be set.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ObjectStoreAccessPolicyActionResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Unset fields are Property placeholders; surface them as None.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            return None
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                # Recursively convert nested models, lists and dicts.
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Generated boilerplate: only taken by dict-based models; this class
        # does not subclass dict, so the branch is presumably never entered.
        if issubclass(ObjectStoreAccessPolicyActionResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ObjectStoreAccessPolicyActionResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"azaman@purestorage.com"
] |
azaman@purestorage.com
|
08bfeae2350ed1c651b3c15cf39558ddead399b8
|
f810836bea801f2fa85418ac7f5f5ffb0f3e0bda
|
/abc/abc237/D - LR insertion.py
|
b87e8c318a6bc75b348e71e58c1c21e407f300c6
|
[] |
no_license
|
cocoinit23/atcoder
|
0afac334233e5f8c75d447f6adf0ddf3942c3b2c
|
39f6f6f4cc893e794d99c514f2e5adc9009ee8ca
|
refs/heads/master
| 2022-08-29T06:01:22.443764
| 2022-07-29T07:20:05
| 2022-07-29T07:20:05
| 226,030,199
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
"""
from collections import deque
n = int(input())
s = input()
ans = deque([n])
for i in range(n - 1, -1, -1):
if s[i] == 'R':
ans.appendleft(i)
else:
ans.append(i)
print(*ans)
"""
n = int(input())
s = input()
l = []
r = []
for i, c in enumerate(s):
if c == 'L':
r.append(i)
else:
l.append(i)
ans = l + [n] + r[::-1]
print(*ans)
|
[
"cocoinit23@gmail.com"
] |
cocoinit23@gmail.com
|
5c7375b81cad282b805c1d33d481cec56fd46b9d
|
4f83471a669772731a7b1781d46be6c4eba7ef33
|
/脚本化爬虫/购物类/Frys/get_Frys_info.py
|
a162ac5428aa36b14676fd4ff1170924dfb7a137
|
[] |
no_license
|
odtu945/LiuFan_Spider
|
19a5eb08ebafc5865931bdf96aea2b9dd436a614
|
848b3aff4754c102491b201684858c3f116ff90b
|
refs/heads/master
| 2022-01-05T10:12:32.085245
| 2019-03-24T02:30:38
| 2019-03-24T02:30:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,779
|
py
|
import codecs
import re
from Tools import get_html
import time
import pytz
import random
from datetime import datetime,timedelta
from multiprocessing import Pool,Lock
# #把list洗乱
# def shuffle_list(list_name):
#
# from random import shuffle
# shuffle(list_name)
# # 返回随机排序后的序列
# return list_name
#
# #传入商品页面的html和商品的id
# def get_info(html):
#
# items_info = []
# print ('------------itemsId------------')
# itemsId_list = re.findall(r'<div id="ProductAttributes">.*?<li style=.*?>(.*?)</li>.*?</div>',html,re.S)
#
# itemsId = str(itemsId_list[0]).split('#')[1]
# print (itemsId)
# # items_info.append(str(itemsId))
#
# # print ' ------------title------------'
# title = itemsId_list[1]
# print (title)
# # items_info.append(str(title))
#
# # print ' ------------Price------------'
# price = re.findall(r'<label id="l_price1_value.*?>(.*?)</label>', html,re.S)
# print (price[0])
# # items_info.append(str(price[0]))
#
# # print '------------Stock------------'
# stock = re.findall(r'<div style="width:90%; float: left;">(.*?)</div>',html,re.S)
#
# items_info.append(str(stock))
#
# # print ' ------------brand------------'
#
# brand = result_response['brand']
# items_info.append(str(brand))
#
#
#
# # ------------Reviews------------
# # review = ''
# # review_count = re.findall(r'<span id="acrCustomerReviewText" class="a-size-base">(.*?)</span>',html,re.S)
# # print review_count+'======'
# # review = str(review_count).split(' ')[0]
# # items_info['reviews'] = review
#
# # print ' ------------image------------'
# image_list = result_response['image_list']
# if image_list == []:
# image_list = ['','','','','']
# while len(image_list) < 5:
# image_list.append("")
#
# # print type(image_list)
# images = image_list[:5] # 最多取5张图片
# items_info += images
#
# #------------details------------
# # 前有\xe2\x9c\x94 \xc2\xa0
# details_Feature = result_response['detail']
# details_list = details_Feature['Feature']
#
# if isinstance(details_list, str):
# details_list = [details_list]
# print details_list
#
# while len(details_list)<4:
# details_list.append("")
#
# details_list = details_list[:4]
# for i in range(0,4):
# details_list[i] = str(details_list[i]).replace('\n','').replace('\xe2\x9c\x94','').replace('\xc2\xa0','').replace('<br>','.').replace('</br>','')
#
# if str(details_list[i]).find("Refurbished") == -1 or str(details_list[i]).find("used") == -1:
# is_Refurbished = False
# else:
# is_Refurbished = True
#
# items_info += details_list
#
# # ------------Specification------------
# Specification = result_response['description']
# if Specification == None or len(Specification)>5000:
# Specification = '\n'
# if len(Specification) <5000:
# Specification = Specification[:1000]
# Specification = ''.join(Specification)
# Specification = Specification.replace('\n','').replace('\xe2\x9c\x94','').replace('\xc2\xa0','').replace('<b>','').replace('</b>','').replace('<br>','').replace('</br>','').replace('<br/>','')
#
# items_info.append(str(Specification))
# if str(Specification).find("Refurbished") == -1 or str(Specification).find("used") == -1:
# is_Refurbished = False
# else:
# is_Refurbished = True
#
# print '=====================END==================================='
# print items_info
#
#
# result_file.write("\t".join(items_info) + "\n")
#
# print '============='
# # f.flush()
# item_file.flush()
# result_file.flush()
#
#
# #把itemsId页面的html传入get_info函数中,把失败的id重新存一个文件
# def handle(itemsurl):
# try:
# #商品详情页
# #获取每一个商品页面的html
# html = get_html.get_PhantomJS_html(itemsurl)
# # print html
# # 获取每一个商品的asin
# print html
#
# if html:
# #调用get_info函数,传入html
# get_info(html)
# else:
# with open('./Result/get_html_fail.txt', 'aw') as h:
# h.write(itemsurl + '\n')
#
# except Exception, e:
# # print itemsurl, ":", e
# with open('./Result/fail_url.txt','aw') as fail_url:
# fail_url.write(itemsurl+'\n')
# # with open('./Result/no_except.txt', 'aw') as f:
# # f.write(itemsurl + '\n')
#
# #把items_last.txt文件中的字段制成表格
# def create_titles(filename, titles):
# f = open(filename, "w")
# f.write("\t".join(titles) + "\n")
# #清除内部缓冲区
# f.flush()
# #关闭文件
# f.close()
#
# #去重后的items的id文件
# def start(items_file,file_name):
#
# global result_file, lock, titles,fr,ferr,item_file
# titles = ['itemsId', 'price', 'stock', 'brand', 'title', 'img1', 'img2', 'img3', 'img4',
# 'img5', 'detail1', 'detail2', 'detail3', 'detail4', 'Specification']
# item_file = open(items_file, 'r')
#
# #调用函数create_titles
# create_titles(file_name, titles)
# result_file = open(file_name, 'aw')
# items_list = item_file.readlines()
# #把获取的url依次传入handle
# items = []
# for item in items_list:
# item = item.split('\n')[0]
# items.append(item)
# lock = Lock()
# pool = Pool(10)
# #调用函数把items的url依次传入handle函数中爬虫
# pool.map(handle, items)
# pool.close()
# pool.join()
#
# item_file.close()
# result_file.close()
def get_asin(base_url, page):
    """Scrape product-page URLs from Frys category listing pages.

    Fetches *page* listing pages built from *base_url* (a template containing
    ``[page]`` and ``[start]`` placeholders, 25 results per page), extracts
    every product link from the ``<td colspan="2">`` cells and appends the
    absolute URLs to ./Result/items_url.txt.

    Args:
        base_url: listing-page URL template with [page]/[start] placeholders.
        page: number of listing pages to fetch.
    """
    # Electronics : Computers & Accessories : Monitors : Prime Eligible : New
    for i in range(0, page):  # page index
        start_num = i * 25  # result offset: 25 items per page
        url = base_url.replace("[page]", str(i)).replace('[start]', str(start_num))
        print(url)
        # time.sleep(2)
        html = get_html.get_html(url)
        url_list_re = re.findall(r'<td colspan="2">(.*?)</td>', html, re.S)
        print(url_list_re)
        url_list = re.findall(r'<A HREF="(.*?)">', str(url_list_re), re.S)
        print(url_list)
        print(len(url_list))
        if url_list:
            # BUG FIX: "aw" is not a valid open() mode in Python 3 (this file
            # uses print() functions, so it targets Python 3) and raised
            # ValueError; use append mode "a". The file is also now opened
            # once per page instead of once per link.
            with open("./Result/items_url.txt", "a") as f:
                for goods_url in url_list:
                    f.write('http://www.frys.com/' + goods_url + "\n")
                    print(goods_url)
print (goods_url)
if __name__ == "__main__":
url = '''http://www.frys.com/search?cat=-68332&pType=pDisplay&resultpage=[page]&start=[start]&rows=25'''
page = 5
file_name = './Result/tablets.xls'
get_asin(url, page)
# start('./Result/items_url.txt',file_name)
|
[
"liufan.dery@gmail.com"
] |
liufan.dery@gmail.com
|
9bde53a4965ad421599d521020416e1db9e0916f
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/amg/azext_amg/vendored_sdks/_dashboard_management_client.py
|
673cde0f190c519ac78730831c000fed3d99e2f4
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 5,405
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from ._configuration import DashboardManagementClientConfiguration
from ._serialization import Deserializer, Serializer
from .operations import (
EnterpriseDetailsOperations,
GrafanaOperations,
Operations,
PrivateEndpointConnectionsOperations,
PrivateLinkResourcesOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class DashboardManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""The Microsoft.Dashboard Rest API spec.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.dashboard.operations.Operations
:ivar grafana: GrafanaOperations operations
:vartype grafana: azure.mgmt.dashboard.operations.GrafanaOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections:
azure.mgmt.dashboard.operations.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.dashboard.operations.PrivateLinkResourcesOperations
:ivar enterprise_details: EnterpriseDetailsOperations operations
:vartype enterprise_details: azure.mgmt.dashboard.operations.EnterpriseDetailsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2022-10-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = DashboardManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.grafana = GrafanaOperations(self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.enterprise_details = EnterpriseDetailsOperations(
self._client, self._config, self._serialize, self._deserialize
)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self) -> None:
self._client.close()
def __enter__(self) -> "DashboardManagementClient":
self._client.__enter__()
return self
def __exit__(self, *exc_details) -> None:
self._client.__exit__(*exc_details)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
6b3b6c712d369e9de58b67fd7edfeb5615cd37e1
|
a5e6ce10ff98539a94a5f29abbc053de9b957cc6
|
/competition/20181125/d.py
|
da13b71ffe3d2ec5a7d23f0f506885ff52d138b9
|
[] |
no_license
|
shimaw28/atcoder_practice
|
5097a8ec636a9c2e9d6c417dda5c6a515f1abd9c
|
808cdc0f2c1519036908118c418c8a6da7ae513e
|
refs/heads/master
| 2020-07-26T10:59:51.927217
| 2020-06-13T11:53:19
| 2020-06-13T11:53:19
| 208,622,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
s = input()
#%%
# s = "codethanksfes"
#%%
a = s[0]
ans = 0
for l in s:
if l<=a:
a = l
ans += 1
print(ans)
#%%
|
[
"shima.w28@gmail.com"
] |
shima.w28@gmail.com
|
f7584add9a4108166a191f7f63b755d5aec39a5a
|
a2ff77ac12c8c8313ebb9d82b0afe0229ac065c6
|
/packages/desktop/gnome/addon/gitg/actions.py
|
a4af4b04586a3f92f2c96f7a1bee230ec0e17431
|
[] |
no_license
|
ademirel/COMAK
|
80966cffc1833c0d41dbe36514ef2480f4b87ead
|
311d8d572c0ed5fe429bb4b2748e509dab7a6785
|
refs/heads/master
| 2020-12-25T17:23:27.553490
| 2012-09-11T08:58:30
| 2012-09-11T08:58:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
autotools.autoreconf("-fiv")
shelltools.system("intltoolize --force --copy --automake")
autotools.configure("--disable-static")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
#autotools.install()
pisitools.dodoc("COPYING", "README", "AUTHORS", "ChangeLog")
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
d77c2989f52c9c5aa6a273abb52368e4bac985f3
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/detection/FSAF_for_Pytorch/mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py
|
c686bcec7c76d3324cacaf69abb51bd7dc5b59f5
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,981
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
0f99ffc01cdb6537622d04736cfb2054589fd8e2
|
306baa2ad596e3962e427d587e7b0d4175a1e48e
|
/configs/eftnet/R2_ttf53_whh_beta01_1lr_log_2x.py
|
74a897aa9dbdab0f29a9de33d0dedb6950f47d88
|
[
"Apache-2.0"
] |
permissive
|
mrsempress/mmdetection
|
9c7ed7ed0c9f1d6200f79a2ab14fc0c8fe32c18a
|
cb650560c97a2fe56a9b369a1abc8ec17e06583a
|
refs/heads/master
| 2022-04-24T04:34:30.959082
| 2020-04-26T07:52:23
| 2020-04-26T07:52:23
| 258,957,856
| 0
| 0
|
Apache-2.0
| 2020-04-26T06:33:32
| 2020-04-26T06:33:32
| null |
UTF-8
|
Python
| false
| false
| 3,060
|
py
|
# model settings
model = dict(
type='CenterNet',
pretrained='./pretrain/darknet53.pth',
backbone=dict(
type='DarknetV3',
layers=[1, 2, 8, 8, 4],
inplanes=[3, 32, 64, 128, 256, 512],
planes=[32, 64, 128, 256, 512, 1024],
norm_cfg=dict(type='BN'),
out_indices=(1, 2, 3, 4),
frozen_stages=1,
norm_eval=False),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(128, 256, 512, 1024),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
wh_agnostic=True,
wh_heatmap=True,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
hm_center_ratio=0.27,
center_ratio=0.1,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.01,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=12,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[18, 22])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=18)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'ttf53_whh_beta01_1lr_log_2x'
load_from = None
resume_from = 'work_dirs/1908/0813_R2_ttf53_WHH_BETA01_1LR/ttf53_whh_beta01_1lr_log_1x_0813_2257/epoch_9.pth'
workflow = [('train', 1)]
|
[
"mrsempress98@gmail.com"
] |
mrsempress98@gmail.com
|
c5fbfda6f64d0654fdb0f07855f20d2db1e8bb6a
|
a86287b997aceb02b836a951a188fff5c98cdca8
|
/train_cnn_multilabel/ckpt_pb.py
|
06334157fbd692e76b7ced2ca00fbe72a9410251
|
[] |
no_license
|
wonqiao/train_arch
|
fbdd9ee59ed67ad2a71e638fbcdaadafedc68759
|
f78aabffdfb65dd1d40ede40dde81de3b04f2144
|
refs/heads/master
| 2023-01-29T20:21:24.977626
| 2020-12-09T03:54:00
| 2020-12-09T03:54:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,402
|
py
|
# coding = utf-8
"""
Created on 2017 10.17
@author: liupeng
wechat: lp9628
blog: http://blog.csdn.net/u014365862/article/details/78422372
"""
import tensorflow as tf
from tensorflow.python.framework import graph_util
from lib.utils.multi_label_utils import get_next_batch_from_path, shuffle_train_data
from lib.utils.multi_label_utils import input_placeholder, build_net_multi_label, cost, train_op, model_mAP
import cv2
import numpy as np
import os
import sys
import config
MODEL_DIR = "model/"
MODEL_NAME = "frozen_model.pb"
if not tf.gfile.Exists(MODEL_DIR): #创建目录
tf.gfile.MakeDirs(MODEL_DIR)
height, width = config.height, config.width
num_classes = config.num_classes
arch_model = config.arch_model
X = tf.placeholder(tf.float32, [None, height, width, 3], name = "inputs_placeholder")
net, net_vis = build_net_multi_label(X, num_classes, 1.0, False, arch_model)
net = tf.nn.sigmoid(net)
predict = tf.reshape(net, [-1, num_classes], name='predictions')
def freeze_graph(model_folder):
#checkpoint = tf.train.get_checkpoint_state(model_folder) #检查目录下ckpt文件状态是否可用
#input_checkpoint = checkpoint.model_checkpoint_path #得ckpt文件路径
input_checkpoint = model_folder
output_graph = os.path.join(MODEL_DIR, MODEL_NAME) #PB模型保存路径
output_node_names = "predictions" #原模型输出操作节点的名字
#saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True) #得到图、clear_devices :Whether or not to clear the device field for an `Operation` or `Tensor` during import.
saver = tf.train.Saver()
graph = tf.get_default_graph() #获得默认的图
input_graph_def = graph.as_graph_def() #返回一个序列化的图代表当前的图
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
saver.restore(sess, input_checkpoint) #恢复图并得到数据
#print "predictions : ", sess.run("predictions:0", feed_dict={"input_holder:0": [10.0]}) # 测试读出来的模型是否正确,注意这里传入的是输出 和输入 节点的 tensor的名字,不是操作节点的名字
output_graph_def = graph_util.convert_variables_to_constants( #模型持久化,将变量值固定
sess,
input_graph_def,
output_node_names.split(",") #如果有多个输出节点,以逗号隔开
)
with tf.gfile.GFile(output_graph, "wb") as f: #保存模型
f.write(output_graph_def.SerializeToString()) #序列化输出
print("%d ops in the final graph." % len(output_graph_def.node)) #得到当前图有几个操作节点
for op in graph.get_operations():
#print(op.name, op.values())
print("name:",op.name)
print ("success!")
#下面是用于测试, 读取pd模型,答应每个变量的名字。
graph = load_graph("model/frozen_model.pb")
for op in graph.get_operations():
#print(op.name, op.values())
print("name111111111111:",op.name)
pred = graph.get_tensor_by_name('prefix/inputs_placeholder:0')
print (pred)
temp = graph.get_tensor_by_name('prefix/predictions:0')
print (temp)
def load_graph(frozen_graph_filename):
# We load the protobuf file from the disk and parse it to retrieve the
# unserialized graph_def
with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# Then, we can use again a convenient built-in function to import a graph_def into the
# current default Graph
with tf.Graph().as_default() as graph:
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
name="prefix",
op_dict=None,
producer_op_list=None
)
return graph
if __name__ == '__main__':
train_dir = 'model'
latest = tf.train.latest_checkpoint(train_dir)
if not latest:
print ("No checkpoint to continue from in", train_dir)
sys.exit(1)
print ("resume", latest)
# saver2.restore(sess, latest)
# model_folder = './model/model.ckpt-0'
model_folder = latest
freeze_graph(model_folder)
|
[
"MachineLP@163.com"
] |
MachineLP@163.com
|
1545b59188024f52bdd1c764d1b8ef97983fa250
|
3365e4d4fc67bbefe4e8c755af289c535437c6f4
|
/.history/src/core/dialogs/waterfall_dialog_20170814145942.py
|
6513d4211f297e41a9975f46c661f2308b482d5c
|
[] |
no_license
|
kiranhegde/OncoPlotter
|
f3ab9cdf193e87c7be78b16501ad295ac8f7d2f1
|
b79ac6aa9c6c2ca8173bc8992ba3230aa3880636
|
refs/heads/master
| 2021-05-21T16:23:45.087035
| 2017-09-07T01:13:16
| 2017-09-07T01:13:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,976
|
py
|
'''
Refs:
Embedding plot: https://sukhbinder.wordpress.com/2013/12/16/simple-pyqt-and-matplotlib-example-with-zoompan/
'''
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import (QApplication, QDialog, QWidget, QPushButton, QVBoxLayout, QTreeWidget, QTreeWidgetItem)
from PyQt5 import QtCore, QtGui
import core.gui.waterfall as waterfall
import numpy as np
from pprint import pprint
class Waterfall(QWidget, waterfall.Ui_Waterfall):
general_settings_signal = QtCore.pyqtSignal(list) #send list of plotting params
updated_rectangles_signal = QtCore.pyqtSignal(list) #send list of updated artists for redrawing
def __init__(self, parent):
super(Waterfall,self).__init__(parent)
self.setupUi(self)
#Button functions
self.btn_apply_general_settings.clicked.connect(self.send_settings)
self.patient_tree = self.create_patient_tree()
self.data_viewer_container.addWidget(self.patient_tree)
def on_waterfall_data_signal(self,signal):
self.waterfall_data = signal['waterfall_data'] #pandas dataframe
def on_generated_rectangles_signal(self,signal):
self.rectangles_received = signal[0]
self.add_items() #display in table
#print(self.rectangles_received)
def send_settings(self,signal):
self.list_general_settings = [
self.plot_title.text(),
self.x_label.text(),
self.y_label.text(),
self.twenty_percent_line.isChecked(),
self.thirty_percent_line.isChecked(),
self.zero_percent_line.isChecked(),
self.display_responses_as_text.isChecked()
]
self.general_settings_signal.emit(self.list_general_settings)
def create_patient_tree(self):
'''
Create QTreeWidget populated with a patient's data for the DataEntry dialog.
Assumes that self.temp_patient is the patient of interest and that the variable belongs to the dialog.
'''
self.tree = QTreeWidget()
self.root = self.tree.invisibleRootItem()
self.headers = [
'Patient #',
'Best response %',
'Overall response',
'Cancer type',
'Color coding key',
'Color'
]
self.headers_item = QTreeWidgetItem(self.headers)
self.tree.setColumnCount(len(self.headers))
self.tree.setHeaderItem(self.headers_item)
self.root.setExpanded(True)
#self.addItems()
#self.tree.header().setResizeMode(QtGui.QHeaderView.ResizeToContents)
#self.tree.header().setStretchLastSection(False)
return self.tree
def add_items(self):
'''
Populate viewing tree
'''
i=0
for rect in self.rectangles_received:
#populate editable tree with rect data
self.rect_item = QTreeWidgetItem(self.root)
self.rect_params = [
self.waterfall_data['Patient number'][i],
rect.get_height(),
self.waterfall_data['Overall response'][i],
self.waterfall_data['Cancer'][i]
]
for col in range(0,4):
self.rect_item.setText(col,str(self.rect_params[col]))
self.rect_item.setTextAlignment(col,4)
self.rect_item.setFlags(self.rect_item.flags() | QtCore.Qt.ItemIsEditable)
i+=1
class WaterfallPlotter(QWidget):
generated_rectangles_signal = QtCore.pyqtSignal(list) #send list of rects for data display in tree
def __init__(self,parent):
super(WaterfallPlotter,self).__init__(parent)
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar(self.canvas,self)
self.btn_plot = QPushButton('Default Plot')
self.btn_plot.clicked.connect(self.default_plot)
self.layout = QVBoxLayout()
self.layout.addWidget(self.toolbar)
self.layout.addWidget(self.canvas)
self.layout.addWidget(self.btn_plot)
self.setLayout(self.layout)
def on_waterfall_data_signal(self,signal):
self.waterfall_data = signal['waterfall_data'] #pandas dataframe
self.btn_plot.setEnabled(True)
def on_general_settings_signal(self,signal):
try:
hasattr(self,'ax')
self.ax.set_title(signal[0])
self.ax.set_xlabel(signal[1])
self.ax.set_ylabel(signal[2])
self.canvas.draw()
except Exception as e:
print(e)
def default_plot(self):
'''
Plot waterfall data
'''
self.figure.clear()
self.rect_locations = np.arange(len(self.waterfall_data['Best response percent change']))
self.ax = self.figure.add_subplot(111)
self.ax.axhline(y=20, linestyle='--', c='k', alpha=0.5, lw=2.0, label='twenty_percent')
self.ax.axhline(y=-30, linestyle='--', c='k', alpha=0.5, lw=2.0, label='thirty_percent')
self.ax.axhline(y=0, c='k', alpha=1, lw=2.0, label='zero_percent')
self.ax.grid(color = 'k', axis = 'y', alpha=0.25)
self.rects = self.ax.bar(self.rect_locations,self.waterfall_data['Best response percent change'])
self.auto_label_responses(self.ax, self.rects, self.waterfall_data)
#self.plot_table()
self.canvas.draw()
self.ax.hold(False) #rewrite the plot when plot() called
self.generated_rectangles_signal.emit([self.rects])
def plot_table(self):
rows = ['%s' % x for x in self.waterfall_data.keys()]
rows = rows[4:] #skip first three, they are the 4 standard headers, rest are table rows
columns = self.waterfall_data['Patient number'] #patient numbers
cell_text = []
for row in rows:
cell_text_temp = []
for col in range(len(columns)):
cell_text_temp.append(self.waterfall_data[row][col])
cell_text.append(cell_text_temp)
the_table = plt.table(cellText=cell_text, rowLabels=rows, colLabels=columns, loc='bottom', cellLoc='center')
plt.subplots_adjust(bottom=0.15,left=0.5)
self.ax.set_xlim(-0.5,len(columns)-0.5)
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off'
) # labels along the bottom edge are off
def update_plot(self):
'''
TODO
'''
pass
def auto_label_responses(self, ax, rects, waterfall_data):
'''Add labels above/below bars'''
i = 0
for rect in rects:
height = rect.get_height()
if height >= 0:
valign = 'bottom'
else:
valign = 'top'
ax.text(rect.get_x() + rect.get_width()/2., height,
'%s' % waterfall_data['Overall response'][i], ha='center', va=valign)
i+=1
|
[
"ngoyal95@terpmail.umd.edu"
] |
ngoyal95@terpmail.umd.edu
|
6deefd1f16785bce2090866dab195b0c52bb9f78
|
e649eaa158a0fb311ac69f8df466097b69f29d8a
|
/tapioca_toggl/tapioca_toggl.py
|
0d8116f1937344e5376c39b6db74915601d32816
|
[
"Python-2.0",
"MIT"
] |
permissive
|
pgniewosz/tapioca-toggl
|
862f1d454e34139e75bcb1596f92387c94a004f5
|
0b789934d18cbbfe1bdcbe6b0905bf653fb9c68f
|
refs/heads/master
| 2020-12-31T02:00:52.044366
| 2015-12-09T21:24:17
| 2015-12-09T21:24:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
# -*- coding: utf-8 -*-
from tapioca import (
JSONAdapterMixin,
TapiocaAdapter,
generate_wrapper_from_adapter,
)
from requests.auth import HTTPBasicAuth
from .resource_mapping import RESOURCE_MAPPING
class TogglClientAdapter(JSONAdapterMixin, TapiocaAdapter):
api_root = 'https://www.toggl.com/api/v8/'
resource_mapping = RESOURCE_MAPPING
def get_request_kwargs(self, api_params, *args, **kwargs):
params = super(TogglClientAdapter, self).get_request_kwargs(
api_params, *args, **kwargs
)
access_token = api_params.get('access_token')
if access_token:
params['auth'] = HTTPBasicAuth(access_token, 'api_token')
else:
params['auth'] = HTTPBasicAuth(
api_params.get('user'),
api_params.get('password')
)
return params
def get_iterator_list(self, response_data):
return response_data
def get_iterator_next_request_kwargs(self, iterator_request_kwargs,
response_data, response):
pass
Toggl = generate_wrapper_from_adapter(TogglClientAdapter)
|
[
"raphael@hackebrot.de"
] |
raphael@hackebrot.de
|
91417fed22a522075bbf7a6fe7c62e14eb481945
|
edbf37a80849468145cdcfca2012d205cdba9b50
|
/csv_to_hdf5.py
|
a1fdc67f0ebeb87b9e3b822575a7a3e38dc3bb06
|
[] |
no_license
|
hsiaoyi0504/cheminfo-final
|
f0f19d67c697b91195aff7fd52044e2f7a9fd434
|
7dbfb9729a443060d45562123768398513d11085
|
refs/heads/master
| 2021-01-12T09:32:26.927273
| 2017-02-14T17:21:26
| 2017-02-14T17:21:26
| 76,188,236
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,469
|
py
|
import sys
import os
from itertools import islice
import h5py
import numpy as np
from sklearn.model_selection import train_test_split
sys.path.append(os.path.abspath("./keras-molecules"))
from molecules.utils import load_dataset, one_hot_array, one_hot_index
import pandas as pd
data, charset = load_dataset('./keras-molecules/data/processed_zinc12_250k.h5', split = False)
charset = list(charset)
del data
smiles = []
solubility = []
with open('./data/total.csv') as f:
for line in islice(f,1,None):
temp = line.rstrip('\r\n')
temp = temp.split(',')
smiles.append(temp[0])
solubility.append(float(temp[1]))
clean_smiles = []
clean_solubility = []
for i in range(len(smiles)):
in_charset = True
if len(smiles[i])>120:
in_charset = False
else:
for char in smiles[i]:
if char not in charset:
in_charset = False
break
if in_charset:
clean_smiles.append(smiles[i])
clean_solubility.append(solubility[i])
h5f = h5py.File('./data/processed_solubility.h5', 'w')
data = pd.DataFrame({'structure':clean_smiles,'logS':clean_solubility})
keys = data['structure'].map(len)<121
data = data[keys]
dt = h5py.special_dtype(vlen=unicode)
h5f.create_dataset('structure',data=data['structure'],dtype=dt)
structures = data['structure'].map(lambda x: list(x.ljust(120)))
one_hot_encoded_fn = lambda row: map(lambda x: one_hot_array(x, len(charset)),one_hot_index(row, charset))
h5f.create_dataset('charset', data = charset)
def chunk_iterator(dataset, chunk_size=100):
chunk_indices = np.array_split(np.arange(len(dataset)),len(dataset)/chunk_size)
for chunk_ixs in chunk_indices:
chunk = dataset[chunk_ixs]
yield (chunk_ixs, chunk)
raise StopIteration
def create_chunk_dataset(h5file, dataset_name, dataset, dataset_shape,chunk_size=100, apply_fn=None):
new_data = h5file.create_dataset(dataset_name, dataset_shape,chunks=tuple([chunk_size]+list(dataset_shape[1:])))
for (chunk_ixs, chunk) in chunk_iterator(dataset):
if not apply_fn:
new_data[chunk_ixs, ...] = chunk
else:
new_data[chunk_ixs, ...] = apply_fn(chunk)
test_idx = structures.index
create_chunk_dataset(h5f, 'data_test', test_idx,(len(test_idx), 120, len(charset)),apply_fn=lambda ch: np.array(map(one_hot_encoded_fn,structures[ch])))
h5f.create_dataset('solubility',data=data['logS'][test_idx])
h5f.close()
|
[
"hsiaoyi0504@gmail.com"
] |
hsiaoyi0504@gmail.com
|
395ae2d504cb4be15aacfe24989028ced7cd5bb6
|
9ecd7568b6e4f0f55af7fc865451ac40038be3c4
|
/tianlikai/shandong/qingdao.py
|
2a7b9ffffeabdc15e72299245459613fca9782da
|
[] |
no_license
|
jasonTLK/scrapy
|
f5ac6e575e902c077a07dc0eb9d228506f1a173f
|
2de8245fbc8731cfd868bbd91168e26271045300
|
refs/heads/master
| 2021-01-20T04:22:23.080864
| 2017-04-28T07:46:29
| 2017-04-28T07:46:29
| 89,681,374
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
# -*- coding: utf-8 -*-
from items.biding import biding_gov
from utils.toDB import *
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
import datetime
# 山东青岛招投标网站
# 其他信息
class LianjiaSpider(CrawlSpider):
name = "qingdao.py"
allowed_domains = ["ggzy.qingdao.gov.cn"]
start_urls = [
"http://ggzy.qingdao.gov.cn/index.html"
]
rules = [
# 匹配正则表达式,处理下一页
Rule(LinkExtractor(allow=('',), deny=(r'.*(n32205482)|(n32205478)|(zbFlag=0).*',), unique=True), follow=True, callback='parse_item')
]
def parse_item(self, response):
print response.url
items = biding_gov()
items["url"] = response.url
items["info"] = ""
items["create_time"] = datetime.datetime.now()
items["update_time"] = datetime.datetime.now()
page_info = "".join(response.body)
try:
items["info"] = "".join(page_info).decode('gbk')
except:
items["info"] = "".join(page_info)
db = MongodbHandle("172.20.3.10 ", 27017, "Bid_Other_info")
db.get_insert(
"bid_shandong_QingDao",
{
"url": items["url"],
"info": items["info"],
"create_time": items["create_time"],
"update_time": items["update_time"]
}
)
|
[
"18723163167@163.com"
] |
18723163167@163.com
|
1c4c926d1547bee952cd3b18817763f0a75c1a1d
|
55123cea6831600a7f94b29df86aa12c9ccd82e4
|
/test/test_gpu_programming.py
|
26bcc6aa254f5b802e5a5dbb514fe0ccd3266ec9
|
[
"MIT"
] |
permissive
|
yngtodd/gpu_programming
|
34915f76d2c39d3a3dfe633d0dd40fee1c077579
|
84ce7cb4280690bfb46cb36fb7ef47863d97a529
|
refs/heads/master
| 2020-04-10T10:50:54.522162
| 2018-12-08T22:54:24
| 2018-12-08T22:54:24
| 160,977,394
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
"""
Tests for `gpu_programming` module.
"""
import pytest
from gpu_programming import gpu_programming
class TestGpu_programming(object):
@classmethod
def setup_class(cls):
pass
def test_something(self):
pass
@classmethod
def teardown_class(cls):
pass
|
[
"young.todd.mk@gmail.com"
] |
young.todd.mk@gmail.com
|
48f1a194c93c3f79e47c8abb291e2846c3dc4d3c
|
24b2f3f5f49ed19cf7fd3dcd433d6b72806e08cf
|
/python/sorting_and_searching/0658_Find_K_Closest_Elements.py
|
05baab2d171b85dcb87d49d361a97f8f74117f59
|
[] |
no_license
|
lizzzcai/leetcode
|
97089e4ca8c3c53b5a4a50de899591be415bac37
|
551cd3b4616c16a6562eb7c577ce671b419f0616
|
refs/heads/master
| 2021-06-23T05:59:56.928042
| 2020-12-07T03:07:58
| 2020-12-07T03:07:58
| 162,840,861
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,752
|
py
|
'''
08/04/2020
658. Find K Closest Elements - Medium
Tag: Binary Search
Given a sorted array, two integers k and x, find the k closest elements to x in the array. The result should also be sorted in ascending order. If there is a tie, the smaller elements are always preferred.
Example 1:
Input: [1,2,3,4,5], k=4, x=3
Output: [1,2,3,4]
Example 2:
Input: [1,2,3,4,5], k=4, x=-1
Output: [1,2,3,4]
Note:
The value k is positive and will always be smaller than the length of the sorted array.
Length of the given array is positive and will not exceed 104
Absolute value of elements in the array and x will not exceed 104
UPDATE (2017/9/19):
The arr parameter had been changed to an array of integers (instead of a list of integers). Please reload the code definition to get the latest changes.
'''
from typing import List
# Solution
class Solution1:
    def findClosestElements(self, arr: List[int], k: int, x: int) -> List[int]:
        '''
        Order the whole array by distance to x (ties broken toward the
        smaller value), keep the k nearest, and return them ascending.
        Time: O(n+nlogn+klogk)
        Space: O(n)
        '''
        by_distance = sorted(arr, key=lambda value: (abs(value - x), value))
        return sorted(by_distance[:k])
class Solution2:
    def findClosestElements(self, arr: List[int], k: int, x: int) -> List[int]:
        '''
        Binary-search the insertion point of x, then shrink a window of at
        most 2k+1 candidates around it down to exactly k elements.
        Time: O(logn + k)
        Space: O(k)
        '''
        def binary_search(l, r, target):
            '''
            find the first element >= than target
            '''
            # Loop ends with l at the first index whose value >= target
            # (or len(arr) if every element is smaller).
            while l <= r:
                mid = (l+r) // 2
                if arr[mid] >= target:
                    r = mid - 1
                else:
                    l = mid + 1
            return l
        # Fast paths: x outside the array range -> answer is a prefix/suffix.
        if x <= arr[0]:
            return arr[:k]
        if x >= arr[-1]:
            return arr[-k:]
        n = len(arr)
        idx = binary_search(0, n-1, x)
        # Candidate window: up to k elements on each side of the insertion point.
        l, r = max(0, idx-k), min(n-1, idx+k)
        # Window holds r-l+1 elements; drop the farther endpoint until
        # exactly k remain (loop exits when l+k > r).
        while l+k <= r:
            # If there is a tie, the smaller elements are always preferred.
            if x - arr[l] <= arr[r] - x:
                r -= 1
            else:# x - arr[l] > arr[r] - x:
                l += 1
        return arr[l:r+1]
# Unit Test
import unittest
class TestCase(unittest.TestCase):
    """Shared unit tests run against both solution variants."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_testCase(self):
        # Each (args, expected) pair is checked against both implementations.
        cases = [
            (([1, 2, 3, 4, 5], 4, 3), [1, 2, 3, 4]),
            (([1, 2, 3, 4, 5], 4, -1), [1, 2, 3, 4]),
            (([0, 0, 1, 2, 3, 3, 4, 7, 7, 8], 3, 5), [3, 3, 4]),
            (([0, 1, 2, 2, 2, 3, 6, 8, 8, 9], 5, 9), [3, 6, 8, 8, 9]),
        ]
        for solution in (Solution1(), Solution2()):
            for args, expected in cases:
                self.assertEqual(solution.findClosestElements(*args), expected)
if __name__ == '__main__':
    unittest.main()
|
[
"lilcolinn@gmail.com"
] |
lilcolinn@gmail.com
|
033a22f493ad32f11f1b533014f8c78daae54fa2
|
29597b67f10d456bdcc90a693ac93f571635ae34
|
/structure/recursion/keypad.py
|
b9565dcadcb6cdd6cf9c359c88e82b56b566a7ad
|
[] |
no_license
|
sh-tatsuno/python-algorithm
|
67d50f24604550c115f957ed74e81483566c560d
|
2800050077562eef50b6f0bd8ba6733068469c4c
|
refs/heads/master
| 2020-05-17T09:46:05.714449
| 2019-07-25T15:43:20
| 2019-07-25T15:43:20
| 183,641,527
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,577
|
py
|
# Digit -> letters on a classic phone keypad; digits outside 2-9 map to "".
_KEYPAD_LETTERS = {
    2: "abc", 3: "def", 4: "ghi", 5: "jkl",
    6: "mno", 7: "pqrs", 8: "tuv", 9: "wxyz",
}
def get_characters(num):
    """Return the letters for keypad digit *num* ('' for unmapped digits)."""
    return _KEYPAD_LETTERS.get(num, "")
def keypad(num):
    """Return every letter combination spelled by the digits of *num*.

    Bug fix: the old base case returned [] for a digit with no letters, so
    keypad(0) produced [] even though the file's own first test expects
    [""] (one empty combination).
    """
    text = str(num)
    if len(text) <= 1:
        chars = get_characters(num)
        # A letterless digit contributes one empty combination, not zero.
        return list(chars) if chars else [""]
    combos = []
    for first in get_characters(int(text[0])):
        combos += [first + rest for rest in keypad(int(text[1:]))]
    return combos
def test_keypad(input, expected_output):
    # Print a pass/fail message comparing sorted keypad output to expectation.
    matched = sorted(keypad(input)) == expected_output
    print("Yay. We got it right." if matched else "Oops! That was incorrect.")
# Exercise keypad() against known digit->letters expansions.
cases = [
    # (input number, expected sorted combination list)
    (0, [""]),  # base case: list with empty string
    (23, sorted(["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"])),
    (32, sorted(["da", "db", "dc", "ea", "eb", "ec", "fa", "fb", "fc"])),
    (8, sorted(["t", "u", "v"])),
    (354, sorted(["djg", "ejg", "fjg", "dkg", "ekg", "fkg", "dlg", "elg", "flg",
                  "djh", "ejh", "fjh", "dkh", "ekh", "fkh", "dlh", "elh", "flh",
                  "dji", "eji", "fji", "dki", "eki", "fki", "dli", "eli", "fli"])),
]
for number, expected in cases:
    test_keypad(number, expected)
|
[
"wasabi.mandarin@gmail.com"
] |
wasabi.mandarin@gmail.com
|
89ab0709a97719352d3faf225b1ef3224e177f24
|
30c820b171447ab772e58f04ac0dc55c4d5ffbdf
|
/transax/setup.py
|
fca863366f71e276dc836f489d4594540c33f89a
|
[] |
no_license
|
TransactiveSCC/TRANSAX
|
3b58cff757fb646a825872dc243e04eea3d0b712
|
13c45a1254cb14607d1bfa86267dbde9e61fd538
|
refs/heads/main
| 2023-05-13T10:50:20.868093
| 2021-05-15T02:45:53
| 2021-05-15T02:45:53
| 316,015,185
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
from setuptools import setup

# Minimal packaging metadata for the transax distribution.
setup(
    name='transax',
    version='1.0',
    description='',
    author='Scott Eisele',
    author_email='scott.r.eisele@vanderbilt.edu',
    # Single package directory, named the same as the distribution.
    packages=['transax'],
    # Third-party runtime dependencies installed alongside the package.
    install_requires=['scipy', 'pycurl'],
)
|
[
"eiselesr@gmail.com"
] |
eiselesr@gmail.com
|
6d7534730773802df64b5660a516a95e01c824f8
|
3953a4cf5dee0667c08e1fe1250a3067090e3f24
|
/mural/config/settings/base.py
|
4b847aae2bbfb7548c7bdbde55ea49cc1d1314f1
|
[] |
no_license
|
DigitalGizmo/msm_mural_project
|
41960242c84050ee578da90afabcb7f9bc1923df
|
5566a2b6f7445dc53d8aaf96cf7d24236fd5ed96
|
refs/heads/master
| 2020-03-07T11:04:49.633149
| 2019-02-13T18:10:44
| 2019-02-13T18:10:44
| 127,447,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,677
|
py
|
"""
Django settings for mural project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
# import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from unipath import Path
BASE_DIR = Path(__file__).ancestor(3)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%-qkyq-id+=w)vd8p3+1#apulkq^@1h%vaq&lk1hsy(ww@h56h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['msm-mural.digitalgizmo.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'panels.apps.PanelsConfig',
'pops.apps.PopsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR.child("templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'msm_mural_db',
'USER': 'msm_mural_user',
'PASSWORD': 'Moser$1872',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = BASE_DIR.ancestor(2).child("msm_mural_static")
STATICFILES_DIRS = (
BASE_DIR.child("local_static"),
)
|
[
"donpublic@digitalgizmo.com"
] |
donpublic@digitalgizmo.com
|
8f1c32b9228c8eae48071a9322737aa0ae75e2ee
|
306afd5282d9c24d58297478a1728a006c29e57e
|
/python3/905_Sort_Array_By_Parity.py
|
3a705fc16ba5f96d63db2ec53acf28e955804135
|
[] |
no_license
|
ytatus94/Leetcode
|
d2c1fe3995c7a065139f772569485dc6184295a9
|
01ee75be4ec9bbb080f170cb747f3fc443eb4d55
|
refs/heads/master
| 2023-06-08T17:32:34.439601
| 2023-05-29T04:33:19
| 2023-05-29T04:33:19
| 171,921,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
class Solution:
    def sortArrayByParity(self, A):
        """
        :type A: List[int]
        :rtype: List[int]
        """
        # Stable partition: evens first, odds after, each in original order.
        evens = [value for value in A if value % 2 == 0]
        odds = [value for value in A if value % 2 != 0]
        return evens + odds
class Solution:
    def sortArrayByParity(self, A):
        # Alternative seen elsewhere: a stable sort on parity puts evens
        # (key 0) before odds (key 1) while preserving relative order.
        return sorted(A, key=lambda value: value % 2)
|
[
"noreply@github.com"
] |
ytatus94.noreply@github.com
|
c559b7dcb34495138c12b520236775a346862bd8
|
9d30dcfcedc98306e60779d25cad83345b4f032c
|
/src/pip/_internal/models/target_python.py
|
7ad5786c4354cb0e520e8f9493c72619c29ebf82
|
[
"MIT"
] |
permissive
|
loke5555/pip
|
1cb04e69eecb9969cf663a2a1bf5095b84cdff55
|
a8510bc5e6b7c4849a0351deab3c1d41fd9a63d1
|
refs/heads/master
| 2020-06-14T08:22:50.373830
| 2019-07-02T09:00:02
| 2019-07-02T09:00:02
| 194,958,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,734
|
py
|
import sys
from pip._internal.pep425tags import get_supported, version_info_to_nodot
from pip._internal.utils.misc import normalize_version_info
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional, Tuple
from pip._internal.pep425tags import Pep425Tag
class TargetPython(object):

    """
    Encapsulates the properties of a Python interpreter one is targeting
    for a package install, download, etc.
    """

    def __init__(
        self,
        platform=None,  # type: Optional[str]
        py_version_info=None,  # type: Optional[Tuple[int, ...]]
        abi=None,  # type: Optional[str]
        implementation=None,  # type: Optional[str]
    ):
        # type: (...) -> None
        """
        :param platform: A string or None. If None, searches for packages
            that are supported by the current system. Otherwise, will find
            packages that can be built on the platform passed in. These
            packages will only be downloaded for distribution: they will
            not be built locally.
        :param py_version_info: An optional tuple of ints representing the
            Python version information to use (e.g. `sys.version_info[:3]`).
            This can have length 1, 2, or 3 when provided.
        :param abi: A string or None. This is passed to pep425tags.py's
            get_supported() function as is.
        :param implementation: A string or None. This is passed to
            pep425tags.py's get_supported() function as is.
        """
        # Keep the caller's original value so get_tags() can tell an
        # explicitly provided version apart from the interpreter default.
        self._given_py_version_info = py_version_info

        if py_version_info is None:
            py_version_info = sys.version_info[:3]
        else:
            py_version_info = normalize_version_info(py_version_info)

        self.abi = abi
        self.implementation = implementation
        self.platform = platform
        # "major.minor" display string derived from the resolved version.
        self.py_version = '.'.join(map(str, py_version_info[:2]))
        self.py_version_info = py_version_info

        # Lazily populated cache for get_tags().
        self._valid_tags = None  # type: Optional[List[Pep425Tag]]

    def format_given(self):
        # type: () -> str
        """
        Format the given, non-None attributes for display.
        """
        display_version = None
        if self._given_py_version_info is not None:
            display_version = '.'.join(
                str(part) for part in self._given_py_version_info
            )

        pairs = (
            ('platform', self.platform),
            ('version_info', display_version),
            ('abi', self.abi),
            ('implementation', self.implementation),
        )
        return ' '.join(
            '{}={!r}'.format(name, value)
            for name, value in pairs
            if value is not None
        )

    def get_tags(self):
        # type: () -> List[Pep425Tag]
        """
        Return the supported tags to check wheel candidates against.
        """
        if self._valid_tags is not None:
            return self._valid_tags

        # versions=None triggers get_supported()'s special default logic,
        # so only build an explicit list when a version was given.
        given = self._given_py_version_info
        versions = None if given is None else [version_info_to_nodot(given)]

        self._valid_tags = get_supported(
            versions=versions,
            platform=self.platform,
            abi=self.abi,
            impl=self.implementation,
        )
        return self._valid_tags
|
[
"chris.jerdonek@gmail.com"
] |
chris.jerdonek@gmail.com
|
3fcfcef8fa826d5b89ab602739c53b107e8c0050
|
14f223f1855215f6cbeaba533bcfe26532161918
|
/basics and advanced/armstrong_pract.py
|
0e16e80f5392370354718a9842eda8c0614e3e04
|
[] |
no_license
|
iiibsceprana/pranavsai
|
1026519a44eac429db8c4a6e3664277839d5dd52
|
ffd8c937c50814676b0ee1eabdfd461087d52b96
|
refs/heads/master
| 2020-03-09T23:36:23.258180
| 2018-04-11T08:53:55
| 2018-04-11T08:53:55
| 129,061,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
# Armstrong check with cubed digits: report the input as Armstrong when it
# equals the sum of the cubes of its decimal digits (e.g. 153 = 1+125+27).
# NOTE(review): cubing matches the classic 3-digit exercise only; a general
# narcissistic check would raise each digit to the digit-count power.
inp = int(input("Enter an Integer:"))
num = inp  # keep the original value; inp is consumed digit by digit below
print("num:", num)
cube_sum = 0  # renamed from `sum` to avoid shadowing the builtin
while inp > 0:
    n = inp % 10  # least-significant digit
    print("n:", n)
    cube_sum += n * n * n
    print("sum:", cube_sum)
    inp = inp // 10  # drop the processed digit
    print("inp:", inp)
if num == cube_sum:
    # Bug fix: success message previously read "Armtrong".
    print("Given number is Armstrong")
else:
    print("Given number is not Armstrong")
|
[
"vineel2006@gmail.com"
] |
vineel2006@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.