blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
233f449c43dd3969ed4906d7bec5daaf2246e1ff | 3d5ad10941251a01afa660de95286e2d56ccef98 | /bot.py | 9d0777ba68625a32390d2645809c10accacd8a9b | [] | no_license | brunoss18/BOTcommentator | fa32def336684afccc87d028ea99b89803c70f0c | 58054ab9a54a2b5578ad71adc5121600af1b0a6a | refs/heads/main | 2023-07-02T05:59:30.997079 | 2021-08-03T05:49:55 | 2021-08-03T05:49:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,809 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
class InstagramBot:
    """Selenium-driven Instagram bot: logs in with the supplied credentials,
    then endlessly comments on giveaway posts, tagging randomly chosen
    accounts to simulate a human participant.
    """

    def __init__(self, username, password):
        # Credentials consumed by login().
        self.username = username
        self.password = password
        # Force pt/pt-BR locale and silence web notifications so the page
        # layout (and therefore the XPaths below) stays predictable.
        firefoxProfile = webdriver.FirefoxProfile()
        firefoxProfile.set_preference("intl.accept_languages", "pt,pt-BR")
        firefoxProfile.set_preference("dom.webnotifications.enabled", False)
        # Expects the geckodriver binary to sit next to this script.
        self.driver = webdriver.Firefox(
            firefox_profile=firefoxProfile, executable_path=r"geckodriver"
        )

    def login(self):
        """Fill the Instagram login form and hand control to the comment loop.

        NOTE(review): this method never returns — the final call loops forever.
        """
        driver = self.driver
        driver.get("https://www.instagram.com")
        time.sleep(2)
        # Old flow kept for reference: the separate "log in" link is no
        # longer needed on the current landing page.
        '''
        login_button = driver.find_element_by_xpath(
            "//a[@href='/accounts/login/?source=auth_switcher']"
        )
        login_button.click()
        '''
        time.sleep(1)
        user_element = driver.find_element_by_xpath(
            "//input[@name='username']")
        user_element.clear()
        user_element.send_keys(self.username)
        time.sleep(1)
        password_element = driver.find_element_by_xpath(
            "//input[@name='password']")
        password_element.clear()
        password_element.send_keys(self.password)
        time.sleep(1)
        # Submit the form with ENTER instead of locating the submit button.
        password_element.send_keys(Keys.RETURN)
        time.sleep(2)
        self.comente_nas_fotos_com_a_hashtag()

    @staticmethod
    def type_like_a_person(sentence, single_input_field):
        """Send *sentence* one character at a time with random 1-4 s pauses,
        simulating human typing. (Docstring translated from Portuguese.)"""
        print("Digitando comentário...")
        for letter in sentence:
            single_input_field.send_keys(letter)
            time.sleep(random.randint(1,4))

    def comente_nas_fotos_com_a_hashtag(self, variavel_com_url_do_post=None):
        """Endless loop: pick a giveaway post URL at random and post a comment
        tagging randomly chosen handles.

        ``variavel_com_url_do_post`` is an optional extra post URL; it is only
        referenced by the commented-out branch below.
        """
        a = 0  # running count of comments posted this session
        while (1):
            ''' Aqui você coloca uma variável e atribui no valor o link do post da promoção. Por exemplo:
            # sorteio_cozinha = "https://www.instagram.com/ ......"
            '''
            sorteio_pesca = 'https://www.instagram.com/p//'
            ##Nessa lista de sorteios, você insere todos as variáveis que você criou acima.
            sorteios = [
                sorteio_pesca
            ]
            '''
            Este random existe para que a cada execução ele pegue um sorteio diferente.
            Para minimizar a sensação que é um robô comentando
            '''
            # Pick one giveaway post at random for this pass.
            sorteio_da_vez = random.choice(sorteios)
            driver = self.driver
            time.sleep(4)
            driver.get(sorteio_da_vez)
            driver.execute_script(
                "window.scrollTo(0, document.body.scrollHeight);")
            try:
                '''
                Dentro dessa lista comments, você insere diversos @, para que a cada comentário ele sorteie algum e nunca repita o comentário.
                Dica: Se você for em algum video do youtube que fale sobre comentar em sorteio, nos comentários terão várias pessoas dizendo: "Pode me marcar, eu não me importo"
                Pegue o @ delas e coloque nessa lista, igual o exemplo abaixo.
                Coloque bastante @, tipo uns 30, 40, 50!!
                '''
                # Pool of handles to tag (placeholders here).
                comments = [
                    "13",
                    "14",
                    "15"
                ]
                # "Ypffh" is an Instagram-internal class name for the comment
                # box — it can change without notice and break this bot.
                driver.find_element_by_class_name("Ypffh").click()
                comment_input_box = driver.find_element_by_class_name("Ypffh")
                time.sleep(random.randint(1,5))
                '''
                Essa lógica abaixo, pessoa_1, pessoa_2 (...) existe pois em cada sorteio as pessoas pedem algo diferente
                Ou precisa marcar 1 pessoa, ou 2, 3, uma palavra qualquer, enfim. Então, dependendo do sorteio da vez, você precisará
                definir qual das variáveis pessoa irá utilizar.
                '''
                pessoa_1 = random.choice(comments)
                pessoa_2 = random.choice(comments)
                pessoa_3 = random.choice(comments)  # NOTE(review): never used
                marcar_2_pessoas = pessoa_1 + " " + pessoa_2
                marcar_1_pessoa = pessoa_1
                '''Isto é o que comentei acima. Se for o sorteio da cozinha por exemplo, então comente utilizando a variável marcar_2_pessoas'''
                if sorteio_da_vez == sorteio_pesca :
                    self.type_like_a_person(marcar_1_pessoa, comment_input_box)
                    print("Comentei: ",marcar_1_pessoa, " no post: ",sorteio_da_vez, "sorteio_pesca")
                ## if sorteio_da_vez == variavel_com_url_do_post:
                ##     self.type_like_a_person(marcar_2_pessoas, comment_input_box)
                ##     print("Comentei: ", marcar_2_pessoas, " no post: ", sorteio_da_vez, "")
                time.sleep(random.randint(1, 5))
                # Submit the comment via the pt-BR "Publicar" button.
                driver.find_element_by_xpath(
                    "//button[contains(text(), 'Publicar')]"
                ).click()
                a = a + 1
                '''Aqui ele te informará quantas vezes já comentou o todo, desde o momento do start do script'''
                print('Vezes- comentadas:')
                print(a)
                # A linha abaixo foi colocada a partir de uma sugestão no Youtube. Ela pode ser removida, caso você queira.
                for i in range(1, 4): driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(60)
                # Sugestão: Mude o trecho acima para time.sleep(60) para fazer um comentário a cada minuto e diminuir a possibilidade de ser bloqueado.
            except Exception as e:
                # NOTE(review): catch-all keeps the loop alive on any scraping
                # failure; errors are only printed, never logged.
                print(e)
                time.sleep(2)
# Example usage: create the bot and start the login/comment loop.
# NOTE(review): credentials are committed in source — move them to
# environment variables or a config file before sharing this script.
BRUNoBot = InstagramBot("@brunxsb", "minha_senha")
BRUNoBot.login()
| [
"noreply@github.com"
] | noreply@github.com |
7ab40d2eabfadbc816c5944cc521cefd12a0cfe9 | 3b19ca40d0dac17891600a99a4377b489f28e84c | /lib/nginx_log_parser.py | 0d5dd9ef784c53a25210a38e06a19c5dfd8ad2d7 | [] | no_license | danshipt/xddos | f65859e949ea098ff7a4562542f33161b925a18c | c61dac6114862fb53935413765d87e27573d4412 | refs/heads/master | 2021-10-08T15:20:24.348491 | 2017-03-22T14:31:08 | 2017-03-22T14:31:08 | 43,751,055 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | from __future__ import unicode_literals
import re
class AccessLogRecord(object):
    """Plain value holder for one parsed nginx access-log line.

    All attributes start as the empty string; the parser fills them in
    (and stores ``http_code`` as an int once a line has been matched).
    """

    _FIELDS = ('date', 'domain', 'ip', 'http_code', 'request_uri')

    def __init__(self):
        # Initialise every field to '' so unparsed records are inert.
        for field in self._FIELDS:
            setattr(self, field, '')
class NginxLogParser(object):
    """Iterate over a line-yielding source of nginx access-log text,
    producing an AccessLogRecord for every line that matches the pattern;
    non-matching lines are silently skipped.
    """

    # Capture groups:
    #   1: client IP            2: datetime (inside [...])
    #   3: request line minus the trailing "HTTP/x.y"
    #   4: a single whitespace (unused artifact of the pattern)
    #   5: HTTP status code     6: referrer
    #   7: user agent (unused)  8: domain (trailing field)
    # NOTE(review): the dots in `\d+.\d+.\d+.\d+` are unescaped, so they
    # match any character — `\.` would be stricter. Only the first string
    # piece is a raw literal; the rest rely on `\d`/`\s` not being Python
    # escapes, which works but emits warnings on newer interpreters.
    pattern = re.compile(r''
        '(\d+.\d+.\d+.\d+)\s-\s-\s'  # IP address
        '\[(.+)\]\s'  # datetime
        '"(.+)\s\w+/.+"(\s)["]*(\d+)["]*\s'  # request and HTTP code
        '\d+\s"(.+)"\s'  # referrer
        '"(.+)"\s+".+"\s+'  # user agent
        '(.+)'  # domain
    )

    def __init__(self, data_provider):
        # data_provider: any iterable of raw log lines (file object, list...).
        assert data_provider
        self.data_provider = data_provider

    def __iter__(self):
        # Lazily parse the provider; only matched lines are yielded.
        for log_line in self.data_provider:
            rec = self._parse_request(log_line)
            if rec:
                yield rec

    def _parse_request(self, log_line):
        """Parse one log line into an AccessLogRecord, or None on mismatch."""
        record = None
        log_line = log_line.strip()
        mt = re.match(self.pattern, log_line)
        if mt:
            record = AccessLogRecord()
            record.date = mt.group(2)
            record.request_uri = mt.group(3)
            record.ip = mt.group(1)
            # Status code stored as an int; domain normalised to lowercase.
            record.http_code = int(mt.group(5))
            record.domain = mt.group(8).lower()
        return record
| [
"bxtgroup@gmail.com"
] | bxtgroup@gmail.com |
ef1eee54f8116f47fc4c4fbdd3d54647d86bdf18 | 097157f85655ae0c847d7fa4caa7fe404c2bbac4 | /run.py | fe3e795fed34d8c80d3877b9178717d1f9980031 | [] | no_license | XiaoFJU/enlightened_flask | d0556a29906fe74c1b7eb76deebe8585bbea2ea6 | 5071b8a2ff7a9098e42fdc7ea408ab4b80900006 | refs/heads/master | 2020-03-27T08:20:59.800394 | 2018-08-27T04:23:46 | 2018-08-27T04:23:46 | 146,246,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | from application import create_application
if __name__ == '__main__':
    # Build the Flask app via the application factory and serve it on all
    # interfaces. Port 80 is privileged — typically run inside a container
    # or behind a reverse proxy.
    app = create_application()
    app.run(host="0.0.0.0", port=80)
| [
"403262631@gapp.fju.edu.tw"
] | 403262631@gapp.fju.edu.tw |
fb85a52144a9c86363881b3524450dd3278eed03 | 64054ffdbde22ac17d2caff30cd53752c29e7bf4 | /courses/migrations/0003_auto_20180225_2023.py | c5d7affdffe11313a0b80a866e33744e8103b25d | [] | no_license | fernandovbs/simplemooc | 76fac35133d9b5acf5ae7b47fa8449a6842206e5 | ca68fbb503fc2cb7c58dd6ec718c8ecf153e957b | refs/heads/master | 2021-04-30T00:08:36.157836 | 2018-03-30T21:57:02 | 2018-03-30T21:57:02 | 121,570,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | # Generated by Django 2.0.1 on 2018-02-25 20:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `courses` app: adds the
    optional `about` text field, sets Portuguese admin verbose names on
    Course, and re-declares `description` as optional."""

    dependencies = [
        ('courses', '0002_auto_20180214_2338'),
    ]

    operations = [
        # Human-readable names shown in the Django admin.
        migrations.AlterModelOptions(
            name='course',
            options={'verbose_name': 'Curso', 'verbose_name_plural': 'Cursos'},
        ),
        # New free-text "about" section; blank=True keeps it optional in forms.
        migrations.AddField(
            model_name='course',
            name='about',
            field=models.TextField(blank=True, verbose_name='Sobre o Curso'),
        ),
        # Same field type as before, with the translated label.
        migrations.AlterField(
            model_name='course',
            name='description',
            field=models.TextField(blank=True, verbose_name='Descrição'),
        ),
    ]
| [
"fernandostorm1@gmail.com"
] | fernandostorm1@gmail.com |
77ede1b1575ce944244f3616ca02e2c565b653b0 | 9dfebb5d0eaee7fea3d3dede8c4f2c6401be8d1d | /sdk/python/pulumi_kubernetes/coordination/v1/LeaseList.py | b3f9eda310279157d9d48bc14b03d353bf822620 | [
"Apache-2.0"
] | permissive | kulado/kulado-kubernetes | b129f1051c6baf28928393af516c27d9896acaac | ecb72f9b25f6dbbae41f00c82388b1ca32329cc7 | refs/heads/master | 2020-06-24T21:23:25.852741 | 2019-07-27T00:13:22 | 2019-07-27T00:13:22 | 199,093,978 | 0 | 1 | Apache-2.0 | 2019-08-20T22:57:47 | 2019-07-26T23:58:50 | TypeScript | UTF-8 | Python | false | false | 2,024 | py | # *** WARNING: this file was generated by the Kulado Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import kulado
import kulado.runtime
import warnings
from ... import tables, version
class LeaseList(kulado.CustomResource):
    """
    LeaseList is a list of Lease objects.

    Auto-generated wrapper around the Kubernetes
    `coordination.k8s.io/v1` LeaseList resource.
    """
    def __init__(self, resource_name, opts=None, items=None, metadata=None, __name__=None, __opts__=None):
        # `__name__` / `__opts__` are deprecated aliases for
        # `resource_name` / `opts`, kept for backward compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        # Validate caller-supplied arguments before registering the resource.
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, kulado.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        # Kubernetes envelope: fixed apiVersion/kind, required items list,
        # optional list metadata.
        __props__ = dict()
        __props__['apiVersion'] = 'coordination.k8s.io/v1'
        __props__['kind'] = 'LeaseList'
        if items is None:
            raise TypeError('Missing required property items')
        __props__['items'] = items
        __props__['metadata'] = metadata

        # Pin the provider version on the options when the caller didn't.
        if opts is None:
            opts = kulado.ResourceOptions()
        if opts.version is None:
            opts.version = version.get_version()

        super(LeaseList, self).__init__(
            "kubernetes:coordination.k8s.io/v1:LeaseList",
            resource_name,
            __props__,
            opts)

    def translate_output_property(self, prop: str) -> str:
        # camelCase -> snake_case via the generated lookup table.
        return tables._CASING_FORWARD_TABLE.get(prop) or prop

    def translate_input_property(self, prop: str) -> str:
        # snake_case -> camelCase via the generated lookup table.
        return tables._CASING_BACKWARD_TABLE.get(prop) or prop
| [
"eric@kulado.com"
] | eric@kulado.com |
cfd8efd73f8b8c8ecd63146360d4fa65c54c9af8 | 842ff0f54a6635ca0daefc00fe45ed8d9fc2bd99 | /SPLC.py | 6be8e495757eee2acd448bd96ed8a496db08cddc | [] | no_license | Munanna/Rosalind-solutions | c8420357ac8301abdd0c0d4ebbed69e586b84bf0 | d364412a5e55436d1901f2b1d61490a729db4156 | refs/heads/master | 2020-06-02T17:04:56.113141 | 2018-02-26T10:24:01 | 2018-02-26T10:24:01 | 94,099,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 12 00:51:51 2017
@author: OMNISLO
"""
# Rosalind SPLC: read a FASTA file whose first record is the pre-mRNA and
# the remaining records are introns, splice the introns out, and translate
# the result to a protein string.
f = open('rosalind_splc.txt', 'r')
seqs = []
l = ''
counter=0
for line in f.readlines():
    if line[0] != '>':
        # Sequence line: extend the record being accumulated.
        l += line.strip('\n')
    else:
        # Header line: flush the record collected so far. The very first
        # header flushes an empty string, which is removed below.
        seqs.append(l)
        l = ''
        counter +=1
seqs.append(l)  # flush the final record
f.close()
seqs.remove('')  # drop the empty entry produced by the first header
full_sequence = seqs[0]
introns = seqs[1:]
sequence = full_sequence
# Splice: delete every intron occurrence from the pre-mRNA.
for i in introns:
    sequence = sequence.replace(i, '')
# DNA codon table: amino-acid letter -> list of codons; ' ' marks stops.
dna_codons = {' ':['TAA','TAG','TGA'], 'F':['TTT','TTC'], 'S':['TCT','TCG','TCC','TCA','AGT','AGC'], 'Y':['TAT','TAC'], 'C':['TGT','TGC'], 'W' : ['TGG'], 'L':['TTA','TTG','CTT','CTA','CTC','CTG'], 'P':['CCT','CCC','CCG','CCA'], 'R':['CGT','CGC','CGG','CGA','AGA','AGG' ], 'H':['CAT', 'CAC'], 'Q':['CAA','CAG'], 'I':['ATT','ATC','ATA'], 'M':['ATG'], 'T':['ACT','ACC','ACA','ACG'], 'N':['AAT','AAC'], 'K':['AAA','AAG'], 'V':['GTT','GTC','GTA','GTG'], 'A':['GCT','GCC','GCG','GCA'], 'D':['GAT','GAC'], 'E':['GAA','GAG'], 'G':['GGT','GGC','GGA','GGG']}
protein = ''
counter = 0
# Translate codon by codon (trailing partial codons match nothing).
# NOTE(review): stop codons are appended as a space and translation keeps
# going past them; Rosalind expects translation to end at the first stop —
# confirm the output is trimmed before submission.
while counter <= len(sequence):
    codon = sequence[counter:counter+3]
    for c in dna_codons:
        if codon in dna_codons[c]:
            protein += c
    counter += 3
print (protein)
"nat15alo@student.lu.se"
] | nat15alo@student.lu.se |
954328033e830e24754e6bdfd16070c83a6e687a | f11be78c01892f7c9dc44178ceeaacc0283f582f | /jsonschema_marshmallow/codegen/__init__.py | e031ea4e64bb66032fcc999588b2c073ec93bcfd | [
"MIT"
] | permissive | gijzelaerr/jsonschema-marshmallow | c6c8e5097f57e609832df30f02513b5d3e4737d0 | e73e523cd32f2235525a9c61e731c741268e4164 | refs/heads/main | 2023-06-24T00:43:45.410868 | 2021-07-20T11:57:54 | 2021-07-20T11:57:54 | 387,666,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | from jsonschema_marshmallow.codegen.cli import codegen
| [
"gijs@pythonic.nl"
] | gijs@pythonic.nl |
7befb3c8b70e9b0355a8ee12802fd32712ce8bce | 668b98e9a4c2c8cc86135a620ead342b4195ecbe | /generate | 102b97c8b233ad329f9de3164d1192eab2460a85 | [] | no_license | treeman/why_cryptocurrencies | 0b4851ad8791ec35a0e5a8d8fa2d7ce2395242f4 | 9b9b771c7a89f4263aa9bcb11d3fc2210c747f33 | refs/heads/master | 2023-06-21T12:47:49.646774 | 2023-06-09T07:14:16 | 2023-06-09T07:14:16 | 179,041,862 | 45 | 11 | null | 2023-02-23T08:31:06 | 2019-04-02T09:17:18 | Perl | UTF-8 | Python | false | false | 2,289 | #!/usr/bin/python3
import glob
import subprocess
import os.path
import shutil
import errno
# Can upload with rsync -a -v _site/ <dest>/
# Could extend this with using a cache of sha256 output files
# to see if we should regenerate or not,
# but just cleaning _site/ is easier before build
# and using raco to update during development.
def main():
    """Render the Pollen site into _site/: render any missing .pm outputs,
    compile the SCSS, wipe the output directory, then copy the published
    assets across. Requires the `raco` and `sassc` binaries on PATH."""
    dirname = os.path.dirname(__file__)
    output_dir = os.path.join(dirname, "_site/")
    print("Generating site to", output_dir)
    # First make sure we've generated all files.
    for f in glob.glob('*.pm'):
        output = os.path.splitext(f)[0]
        # Only render sources whose output doesn't exist yet; incremental
        # updates during development are handled by raco itself.
        if os.path.isfile(output):
            print("Skip", f)
            continue
        subprocess.call(["raco", "pollen", "render", f])
    # sassc sass/main.scss --style compressed > css/main.css
    # NOTE(review): the local name `main` shadows this function inside the
    # with-block — harmless here, but easy to trip over.
    with open('css/main.css', 'w') as main:
        print("Generating css/main.css")
        subprocess.call(["sassc", "sass/main.scss", "--style", "compressed"],
                        stdout=main)
    # Clean destination directory.
    if not os.path.exists(output_dir):
        print("Creating", output_dir)
        os.makedirs(output_dir)
    else:
        print("Cleaning", output_dir)
        for root, dirs, files in os.walk(output_dir):
            for f in files:
                os.unlink(os.path.join(root, f))
            for d in dirs:
                shutil.rmtree(os.path.join(root, d))
    # Then copy all relevant files.
    for match in ["*.html", "*.xml", "css/", "files/",
                  "images/", "fonts/", "favicon*"]:
        for src in glob.glob(match):
            dst = os.path.join(output_dir, src)
            src = os.path.join(dirname, src)
            copy(src, dst)
    # Need to remove files generated from .p extensions.
    # Easier to just do it after than change capture above.
    for pfile in glob.glob("*.p"):
        actual = os.path.join(output_dir, os.path.splitext(pfile)[0])
        if os.path.isfile(actual):
            os.unlink(actual)
    print("Done")
# Copying in python seems... Difficult.
def copy(src, dst):
    """Copy *src* to *dst*, whether *src* is a directory tree or a file."""
    try:
        shutil.copytree(src, dst)
    except NotADirectoryError:
        # src turned out to be a plain file (copytree fails with ENOTDIR),
        # so fall back to a single-file copy. Any other OSError propagates.
        shutil.copy(src, dst)
if __name__ == '__main__':
main()
| [
"mail@jonashietala.se"
] | mail@jonashietala.se | |
f8ec33f3874cbc2a02a7e9af12d1ccae1936b34e | 22ce20964370770564d10d9b96c253511ad4a591 | /Spiders/Spider_xixizhan.py | 205ae6919d24f4b3a1ad69b1ecc882902c4d0bc4 | [] | no_license | rainlow/AwsomeSpider | 46683f265eab62bf36dd36eef34f96cead36b079 | 4665fa20cdade144432f2491f096b37997d47cf7 | refs/heads/master | 2023-05-05T01:45:27.467248 | 2019-10-23T01:59:43 | 2019-10-23T01:59:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,547 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-10-14 00:11:29
# @Author : kangvcar (kangvcar@126.com)
# @Link : http://www.github.com/kangvcar/
# @Version : $Id$
import re
import urllib2
from bs4 import BeautifulSoup
import MySQLdb
import time
class xixiItem(object):
    ''' Value object for one scraped movie post (docstring translated:
    "defines the xixiItem class").

    Fields are created per-instance in __init__ rather than as the
    original class-level attributes, so instances can never accidentally
    share or shadow state.
    '''
    def __init__(self):
        self.mtype = None         # category tag parsed from the post
        self.mtitle = None        # post/movie title
        self.mauthor = None       # forum user who published the post
        self.mtime = None         # publish-time string
        self.downloadlink = None  # resolved download URL (may stay None)
class getMovieInfo(object):
    ''' Crawler for www.xixizhan.com (docstring translated): scrape movie
    posts from one forum board, then print them, write them to a text file
    and insert them into MySQL. Python 2 code (print statements, urllib2,
    MySQLdb). '''
    def __init__(self, url):
        # The constructor drives the whole pipeline:
        # page list -> scrape -> print -> text file -> MySQL.
        self.url = url
        self.filename = self.getFileName(self.url)
        print u'获取文件名成功.' + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        self.urls = self.getUrls(self.url)
        print u'获取urls成功.' + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        self.items = self.spider(self.urls)
        print u'已爬取所有网页.' + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        self.pipeline2print = self.pipeline2print(self.items)
        print u'已全部输出到屏幕' + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        self.pipeline2file = self.pipeline2file(self.items)
        print u'写入文件完成' + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
        self.pipeline2mysql = self.pipeline2mysql(self.items)
        print u'写入数据库成功'
    def getUrls(self, url):
        ''' Build the URL of every page of the board (translated). '''
        urls = []
        html = self.getSourcePage(url)
        # Extract the total page count from the "共N页" marker.
        s = '共(.*?)页'
        dPage = re.compile(s)
        fPage = re.search(dPage, html).group(1)
        # print fPage
        ul= self.url.split('-')
        # print ul
        # for page in range(1, int(fPage)+1):
        # NOTE(review): only the first 10 pages are crawled although the
        # real page count fPage was just extracted above.
        for page in range(1, 11):
            # Rewrite the trailing "-<page>.html" segment of the board URL.
            ul[-1] = str(page) + '.html'
            url = '-'.join(ul)
            urls.append(url)
        # print urls
        return urls
    def spider(self, urls):
        ''' Scrape the fields declared on xixiItem from every page
        (translated). '''
        items = []
        for i,url in enumerate(urls):
            print u'正在爬取第' + str(i+1) + u'页数据...'
            html = self.getSourcePage(url)
            soup = BeautifulSoup(html, 'lxml')
            table = soup.find('table', attrs={'id':'threadlisttableid'})
            # print table
            tbodys = table.find_all('tbody')
            # print len(tbody)
            for tbody in tbodys:
                item = xixiItem()
                try:
                    lineurl = tbody.th.find('a', attrs={'class': 's xst'})['href']
                    # print lineurl
                    item.downloadlink = self.getDownloadlink(lineurl)
                    # print downloadlink
                    # print tbody.em.get_text().strip('[]')
                    item.mtype = tbody.em.get_text().strip('[]')
                    # print tbody.th.find('a', attrs={'class':'s xst'}).get_text().split('][')[1]
                    item.mtitle = tbody.th.find('a', attrs={'class':'s xst'}).get_text().split('][')[1]
                    # print tbody.find('td', attrs={'class':'by'}).cite.get_text().strip()
                    item.mauthor = tbody.find('td', attrs={'class':'by'}).cite.get_text().strip()
                    # print tbody.find('td', attrs={'class':'by'}).em.get_text()
                    item.mtime = tbody.find('td', attrs={'class':'by'}).em.get_text()
                except:
                    # NOTE(review): bare except silently skips any row whose
                    # markup doesn't match — scraping bugs will be invisible.
                    pass
                    continue
                else:
                    items.append(item)
                try:
                    print u'已爬取-->' + item.mtitle
                except:
                    pass
        return items
    def getDownloadlink(self, lineurl):
        ''' Follow a post to resolve the movie's download link
        (translated); returns None on any failure. '''
        try:
            soup = BeautifulSoup(self.getSourcePage(lineurl), 'lxml')
            inlink = soup.find('ignore_js_op').a['href']
            # print inlink
            inlink = 'http://www.xixizhan.com/' + inlink
            # print inlink
            soup1 = BeautifulSoup(self.getSourcePage(inlink), 'lxml')
            downloadlink = soup1.find('div', attrs={'class': 'dxksst'}).div.a['href']
            # print downloadlink
        except:
            downloadlink = None
        finally:
            # NOTE(review): returning from finally also swallows any
            # exception raised above that the bare except missed.
            return downloadlink
    def getFileName(self, url):
        ''' Scrape the board title, used to name the output file
        (translated). '''
        html = self.getSourcePage(url)
        soup = BeautifulSoup(html, 'lxml')
        filename = soup.find('div', attrs={'class':'bm_h cl'}).h1.a.get_text().strip().encode('utf-8')
        return filename
    def pipeline2print(self, items):
        ''' Print every scraped item to the screen (translated). '''
        for item in items:
            print('类型:%s\t片名:%s\n下载链接: %s\n发布者:%s\t发布时间:%s\n' %(item.mtype.encode('utf-8'), item.mtitle.encode('utf-8'), item.downloadlink, item.mauthor.encode('utf-8'), item.mtime.encode('utf-8')))
    def pipeline2file(self, items):
        ''' Write every scraped item to "<board title>.txt" (translated). '''
        filename = self.filename.decode('utf-8') + '.txt'
        # print self.filename
        with open(filename, 'w') as fp:
            for item in items:
                fp.write('类型:%s\t片名:%s\n下载链接: %s\n发布者:%s\t发布时间:%s\n\n' %(item.mtype.encode('utf-8'), item.mtitle.encode('utf-8'), item.downloadlink, item.mauthor.encode('utf-8'), item.mtime.encode('utf-8')))
                try:
                    print u'已写入-->' + item.mtitle + u' 到文件-->' + filename
                except:
                    pass
    def pipeline2mysql(self, items):
        ''' Insert every scraped item into the MySQL table `xixizhan`
        (translated). Uses parameterized INSERTs (no SQL injection), but
        NOTE(review): connection credentials are hard-coded in source. '''
        conn = MySQLdb.connect(
            host='192.168.10.10',
            port=3306,
            user='crawl123',
            passwd='crawl123',
            db='scrapyDB',
            charset='utf8')
        cur = conn.cursor()
        for item in items:
            mtype = item.mtype.encode('utf-8')
            mtitle = item.mtitle.encode('utf-8')
            downloadlink = item.downloadlink
            mauthor = item.mauthor.encode('utf-8')
            mtime = item.mtime.encode('utf-8')
            cur.execute("INSERT INTO xixizhan(mtype, mtitle, mauthor, mtime, downloadlink) values(%s,%s,%s,%s,%s)",
                        (mtype, mtitle, mauthor, mtime, downloadlink))
        cur.close()
        conn.commit()
        conn.close()
    def getSourcePage(self, url):
        ''' Fetch a page and return its raw HTML (translated). '''
        response = urllib2.urlopen(url)
        html = response.read()
        return html
# Kick off the crawl for board 41 (the alternate board 39 is kept below).
test = getMovieInfo('http://www.xixizhan.com/forum-41-1.html')
#http://www.xixizhan.com/forum-39-1.html
"kangvcar@126.com"
] | kangvcar@126.com |
3b53916e8912c613bc14b8d7042442b25b0154cd | 3c2e1395b0af0f0cbde14628f92d0ca075a1f089 | /loop.py | 112932b422e970b4866ef8ee67c65fb388763173 | [] | no_license | tylerweis14/SeniorDesignIIProject | e69ec54c0116305c243e32603fcc66179dd27663 | 0a55b66686e805c405585e35623c04c2c8bffb28 | refs/heads/master | 2022-06-15T03:30:55.908037 | 2020-05-06T15:21:08 | 2020-05-06T15:21:08 | 258,570,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,135 | py | import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import ode
from scipy.integrate import odeint
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
def dydt(t, y, params):
    """Right-hand side of the coupled point-kinetics / two-node thermal model.

    Parameters
    ----------
    t : float
        Time; unused but required by the scipy.integrate.ode callback API.
    y : sequence of 4 floats
        State vector [x, c, z_f, z_c]: normalized neutron-population
        deviation, delayed-precursor deviation, fuel-temperature deviation
        and coolant-temperature deviation.
    params : sequence of 14 floats
        (alpha, lamb, beta, c_pf, c_pc, m_f, m_c, W_ce, T_cine, a_f, n_e,
        alpha_f, alpha_c, h) — see the driver script for meanings/units.

    Returns
    -------
    list of 4 floats
        Time derivatives [dx/dt, dc/dt, dz_f/dt, dz_c/dt].
    """
    x, prec, z_f, z_c = y  # renamed from `y` unpacking to avoid shadowing the argument
    (alpha, lamb, beta, c_pf, c_pc, m_f, m_c,
     W_ce, T_cine, a_f, n_e, alpha_f, alpha_c, h) = params

    # Equilibrium fuel and coolant temperatures.
    T_fe = T_cine + (1 / (2 * W_ce * c_pc) + (1 / h)) * a_f * n_e
    T_ce = T_cine + (a_f * n_e / (2 * W_ce * c_pc))

    # Normalized deviations of inlet temperature and coolant mass flow.
    u = (T_cine - T_cine) / T_cine   # inlet temperature held at nominal -> 0
    w = (1300 - W_ce) / W_ce         # flow perturbation relative to 1300 kg/s

    Power = 1  # demanded power (fraction of nominal)
    p_c = (Power - (x)) * 10         # proportional power-controller reactivity
    # Net reactivity: controller plus fuel/coolant temperature feedback.
    p = p_c + alpha_c * T_ce * z_c + alpha_f * T_fe * z_f

    dxdt = -(beta * x / alpha) + (beta * prec / alpha) + (p / alpha) + (p * x / alpha)
    dprecdt = (x - prec) * lamb
    dz_fdt = ((a_f * n_e * x) / (m_f * c_pf * T_fe)) - (h * z_f / (m_f * c_pf)) \
        + (h * T_ce * z_c / (m_f * c_pf * T_fe))
    # BUG FIX: in the original, the last three terms below started on a new
    # line with no continuation, so Python evaluated them as a discarded
    # standalone expression and they never contributed to dydt4. They are
    # now part of the coolant-temperature derivative as intended.
    dz_cdt = (h * T_fe * z_f / (m_c * c_pc * T_ce)) \
        - ((2 * c_pc * W_ce + h) * z_c / (m_c * c_pc)) \
        + ((2 * W_ce * T_cine * u) / (m_c * T_ce)) \
        - (2 * W_ce * w * (T_ce - T_cine) / (m_c * T_ce)) \
        - (2 * W_ce * w * z_c / m_c) \
        + (2 * W_ce * T_cine * u * w / (m_c * T_ce))
    return [dxdt, dprecdt, dz_fdt, dz_cdt]
def tempoutput(params2):
    """Convert the integrated temperature deviation into a coolant outlet
    temperature and the corresponding thermal power (scaled by 1e7).

    params2: (c_pc, W_ce, T_cine, a_f, h, finaltempchange)

    NOTE(review): this function also reads the module-level globals `n_e`
    (equilibrium neutron population) and `Rf` (fouling factor), which are
    only assigned inside the driver loop below before the first call —
    confirm the ordering is intentional, or pass them in explicitly.
    """
    c_pc,W_ce,T_cine,a_f,h,finaltempchange=params2
    T_fe=T_cine+(1/(2*W_ce*c_pc)+(1/h))*a_f*n_e #equillibrium of fuel temp
    T_ce=T_cine+(a_f*n_e/(2*W_ce*c_pc)) #equillibrium of coolant temp
    Tout = (T_fe-T_ce)/(Rf*W_ce*c_pc) + T_ce + finaltempchange
    power = W_ce*c_pc*(Tout-T_cine)
    return Tout, power/1e7
# Driver: integrate the reactor model for 10 "cycles". Each cycle runs the
# ODE for one hour, converts the result into an outlet temperature, then
# feeds that through a FLiBe/solar-salt heat-exchanger energy balance to
# produce the next cycle's inlet temperature.
finaltemps = []
T_cine = 600
for j in range(10):
    # Kinetics / thermal parameters (SI units where commented).
    alpha=0.001
    lamb=0.1
    beta=7.5*10**-3
    c_pf=717 #specific heat of graphite moderator
    c_pc=2414.7 #specific heat of FliBE
    m_f=470000*(1.5/1000) #mass of u235 in 470,000 pellets
    m_c=90830.8 #mass of coolant
    W_ce=1500 #mass flow rate
    # T_cine = newtemp
    # T_cine=600 #Temperature in
    a_f=7.0e6
    n_e=200.0
    alpha_f=-3.8e-5 #Change in reactivity based on temp of fuel
    alpha_c=-1.8e-5 #Change in reactivity based on temp for moderator
    h=4700*1940 #heat transfer coefficient and total area of fuel
    Rf = .0005 #fouling factor
    params=[alpha,lamb,beta,c_pf,c_pc,m_f,m_c,W_ce,T_cine,a_f,n_e,alpha_f,alpha_c,h]
    # Initial conditions for [neutron pop., precursors, fuel dT, coolant dT].
    x0=0.0 #starting neutron pop
    y0=0.0 #starting precursors
    z_f0=1.0 #starting fuel temp
    z_c0=1.0 #starting moderator temp
    y0=[x0,y0,z_f0,z_c0]
    t0=0
    A=[]
    # Solver
    r = ode(dydt).set_integrator('dopri5', method='nsteps')
    r.set_initial_value(y0, t0).set_f_params(params)
    t1 =3600.0
    dt = 0.1
    T=[]
    # Step the integrator, appending time and state at every step.
    while r.successful() and r.t < t1:
        r.integrate(r.t+dt)
        T=np.append(T,r.t)
        A=np.append(A,r.y)
    #print np.size(A)
    # One row per time step: columns are [x, precursors, z_f, z_c].
    B= A.reshape(np.size(T),4)
    # NOTE(review): summing the coolant-deviation column over all time
    # steps looks like it's meant to be a cumulative temperature change —
    # confirm an integral (sum * dt) or the final value isn't intended.
    finaltempchange = sum(B[:,3])
    params2 = [c_pc,W_ce,T_cine,a_f,h,finaltempchange]
    finaltemps.append(tempoutput(params2)[0])
    T_h_in= finaltemps[j] #ALL TEMPERATURES LISTED IN KELVIN, K
    T_c_in= 300
    T_c_out= 600
    #FLUID PROPERTIES -- FLiBe (shellside hot)
    rho_h= 2518-0.406*(T_h_in+T_c_in)/2 #density in kg/m^3
    mu_h= 0.000116*np.exp(3755/((T_h_in+T_c_in)/2)) #viscosity in Pa*s
    c_ph= 2415.78 #specific heat in J/kg*K
    k_h= 0.629697+0.0005*((T_h_in+T_c_in)/2) #thermal conductivity in W/mK
    #FLUID PROPERTIES -- Solar Salt (tubeside cold)
    rho_c= 1804
    mu_c= 0.00169
    c_pc= 1520
    k_c= 0.530
    #TUBE PROPERTIES
    d_o= 0.02 #outer tube diameter in m
    t_w= 0.001 #tube wall thickness
    d_i= d_o-2*t_w #inner tube diameter
    #GUESSES
    # NOTE(review): U, U_guess, v_tube_guess, d_i and the density/viscosity
    # values are currently unused by the energy balance below.
    U= 100
    U_guess= 200 #Overall HT Coefficient in W/m^2*K
    v_tube_guess= 1.5 #Tube velocity in m/s
    #Energy Balance
    mdot_h= 1500 #mass flow rate in kg/s
    mdot_c= 1550
    Qdot= mdot_c*c_pc*(T_c_out-T_c_in)
    T_h_out= T_h_in-mdot_c*c_pc*(T_c_out-T_c_in)/(mdot_h*c_ph)
    # The hot-side outlet becomes the next cycle's reactor inlet.
    T_cine = T_h_out
# Plot outlet temperature against cycle number.
plt.plot(finaltemps,[1,2,3,4,5,6,7,8,9,10])
plt.xlabel('Temperature')
plt.ylabel('Cycles')
plt.title('Temperature Vs Cycles')
"noreply@github.com"
] | noreply@github.com |
eebf0e423107f15c0c1e58dc38ee919714391509 | 82287f24760966e5d5d99fcd3c4d96bbc4a0eb81 | /First Test.py | 9968a6eacd1a5549d317d38d9ebf082bed005982 | [] | no_license | JHarzenetter/Planetensimulation | 766f42d22e12cd822d2b5c7a99c09286f1beaa31 | 61a591179dcea7485d6d9dc0392a963932bff3ba | refs/heads/master | 2020-04-05T03:15:16.682453 | 2018-11-07T08:08:30 | 2018-11-07T08:08:30 | 156,507,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | import math
print("hallo")


def calc(number):
    """Return *number* squared."""
    return number * number


print(str(calc(8)))
print(str(math.sqrt(calc(6))))
| [
"johannes.harzenetter@gmail.com"
] | johannes.harzenetter@gmail.com |
6a90b6ebc4229778b978fc7673384247a2d2f335 | d60951deacf824b7cd34fef618d42336d8e0d6d3 | /src/laser_slam/src/DataAssociator.py | e4e82d132713a78db6847d695a15db0757ba2dce | [] | no_license | kevinatorchen/Husky | 4ad3663ae4f94d961b6b9091c40d8988f01a75e5 | c76710bb06b0d3ad4b89b2d5626d79b156cad2f2 | refs/heads/master | 2021-05-02T09:30:04.785522 | 2018-04-23T12:01:44 | 2018-04-23T12:01:44 | 120,824,581 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,600 | py | import numpy as np
# Gating thresholds for the JCBB data association below.
MAX_MULTI_DIST = 0.4   # max inter-landmark distance mismatch for a pairing to stay jointly compatible
JOINT_PENALITY = 100   # score penalty per unmatched observation in a hypothesis
MAX_INDIV_DIST = 4     # individual-compatibility gate (L1 distance in the sensor frame)
MAX_RANGE = 10         # landmarks beyond this coordinate magnitude are ignored
MIN_RANGE = 0          # ...and closer than this
class DataAssociator:
    """JCBB (Joint Compatibility Branch and Bound) data association for a
    laser-based SLAM instance.

    Observations Z and landmark estimates X are column matrices in which
    landmark i occupies rows [i, i+2). Candidate (map, observation)
    pairings are gated individually and scored jointly via consistency of
    the pairwise inter-landmark distances (d_x on the map side, d_z on the
    observation side; both are dicts keyed by ordered id pairs).
    """

    def __init__(self, laserSLAM):
        # SLAM back-end supplying the state vector X, the landmark index
        # list `idx`, and the measurement function h().
        self.slamInstance = laserSLAM

    @staticmethod
    def joint_compat_score(hypothesis, Z, X, d_z, d_x):
        """Score a complete hypothesis (matrix rows of [id_x, id_z]).

        Each matched pair contributes its multi-compatibility score; each
        unmatched observation (id_x == -1) contributes JOINT_PENALITY.
        Lower is better.
        """
        res = 0
        for row in hypothesis:
            if row[0, 0] != -1:
                id_x = row[0, 0]
                id_z = row[0, 1]
                res += DataAssociator.multi_compat_score(hypothesis, id_x, id_z, Z, X, d_z, d_x)
            else:
                res += JOINT_PENALITY
        return res

    @staticmethod
    def individual_compatibility_score(id_x, id_z, Z, X):
        """L1 distance between observed landmark id_z and map landmark id_x.

        An unmatched id_x (-1) returns a large constant so a real candidate
        is always preferred when one passes the gate.
        """
        if id_x == -1:
            return 2 * ((2 * MAX_INDIV_DIST) ** 2)
        else:
            return np.sum(
                np.absolute(Z[id_z:id_z + 2, 0] - X[id_x: id_x + 2, 0])
            )

    @staticmethod
    def multi_compat_score(H, id_x, id_z, Z, X, d_z, d_x):
        """Mean squared mismatch between map-side and observation-side
        inter-landmark distances induced by adding (id_x, id_z) to H.

        Returns +inf when there is no hypothesis yet or id_x is unmatched.
        """
        error = 0
        if (H is None) or (id_x == -1):
            return np.inf
        for row in H:
            id_xi = row[0, 0]
            id_zi = row[0, 1]
            if id_xi != -1:
                # BUG FIX: the original computed d_x - (d_z ** 2) because
                # the `** 2` bound only to the d_z lookup (the line
                # continuation put the exponent on its own line). Square
                # the whole distance mismatch instead.
                error += (d_x[min(id_x, id_xi), max(id_x, id_xi)] -
                          d_z[min(id_z, id_zi), max(id_z, id_zi)]) ** 2
        return error / len(H)

    def JCBB_wrapper(self, Z):
        """Associate the observation vector Z with landmark ids in the map.

        Projects every map landmark into the sensor frame, keeps the ones
        inside the usable range window, precomputes pairwise distances on
        both sides, then runs the branch-and-bound search.
        Returns the best hypothesis matrix (rows of [id_x, id_z]).
        """
        idX = []
        idZ = list(range(0, len(Z) - 1, 2))
        X = np.mat(np.zeros_like(self.slamInstance.X))
        h = self.slamInstance.get_h()
        for idx in self.slamInstance.idx:
            landmark = h(self.slamInstance.X[idx:idx+2, 0])
            X[idx:idx+2, 0] = landmark
            # Range gate on both coordinates of the projected landmark.
            if (abs(landmark[0, 0]) < MAX_RANGE) and (abs(landmark[1, 0]) < MAX_RANGE)\
                    and (abs(landmark[0, 0]) > MIN_RANGE) and (abs(landmark[1, 0]) > MIN_RANGE):
                idX.append(idx)
        # Pairwise Euclidean distances between map landmarks...
        d_x = {}
        for i in range(0, len(idX), 1):
            for j in range(i, len(idX), 1):
                diff = (X[idX[i]:idX[i]+2, 0] - X[idX[j]:idX[j]+2, 0])
                d_x[(idX[i], idX[j])] = np.sqrt(np.sum(np.square(diff)))
        # ...and between observed landmarks.
        d_z = {}
        for i in range(0, len(idZ), 1):
            for j in range(i, len(idZ), 1):
                diff = (Z[idZ[i]:idZ[i]+2, 0] - Z[idZ[j]:idZ[j]+2, 0])
                d_z[(idZ[i], idZ[j])] = np.sqrt(np.sum(np.square(diff)))
        (best_H, best_score) = self.JCBB(None, idZ, idX, Z, X, d_z, d_x, np.inf, [])
        return best_H

    def JCBB(self, Hyp, ids_Z, ids_X, Z, X, d_z, d_x, best_score, best_Hyp):
        """Branch-and-bound search for the best joint pairing.

        Hyp        -- current partial hypothesis (None at the root)
        ids_Z      -- observation ids still to be matched
        ids_X      -- map ids still available for matching
        best_score -- best complete-hypothesis score found so far
        Returns (best hypothesis, best score).
        """
        if ids_X is None:
            ids_X = []
        # Leaf node: every observation has been assigned (or left unmatched).
        if len(ids_Z) == 0:
            current_score = self.joint_compat_score(Hyp, Z, X, d_z, d_x)
            if current_score < best_score:
                return Hyp, current_score
            else:
                return best_Hyp, best_score
        else:
            # Depth-first: take the next observation and enumerate its
            # individually- and jointly-compatible map candidates.
            ids_Z = ids_Z[:]
            id_z = ids_Z.pop()
            pot_match_X = ids_X[:]
            # BUG FIX: filter() returns a lazy iterator on Python 3, which
            # breaks the len()/iteration below — materialize with list().
            pot_match_X = list(filter(
                lambda id_x: self.individual_compatibility_score(id_x, id_z, Z, X) < MAX_INDIV_DIST,
                pot_match_X))
            if Hyp is not None:
                # Keep only candidates whose inter-landmark distances agree
                # with every pair already in the hypothesis.
                pot_match_X = list(filter(
                    lambda id_x:
                    all(
                        True if row[0, 0] == -1 else (abs(d_z[min(id_z, row[0, 1]), max(id_z, row[0, 1])] - d_x[min(id_x, row[0, 0]), max(id_x, row[0, 0])]) < MAX_MULTI_DIST)
                        for row in Hyp
                    ),
                    pot_match_X))
            if len(pot_match_X) == 0:
                pot_match_X = [-1]  # no map landmark can explain id_z
            for id_x in pot_match_X:
                if Hyp is None:
                    next_H = np.mat([[id_x, id_z]])
                else:
                    next_H = np.matrix(Hyp, copy=True)
                    next_H = np.vstack((next_H, np.mat([[id_x, id_z]])))
                # Prune: the joint score grows monotonically with depth.
                if self.joint_compat_score(
                        next_H, Z, X, d_z, d_x) <= best_score:
                    if id_x != -1:
                        next_X = ids_X[:]
                        next_X.remove(id_x)  # a map id can be used only once
                    else:
                        next_X = ids_X[:]
                    (new_H, new_best) = self.JCBB(next_H, ids_Z, next_X, Z, X, d_z, d_x, best_score, best_Hyp)
                    if new_best < best_score:
                        best_Hyp = new_H
                        best_score = new_best
            return best_Hyp, best_score
| [
"thibaut@homiwoo.com"
] | thibaut@homiwoo.com |
a3d375e158820a56402b5129f60972b9f1c6a911 | 5dde747548ef6e78e073e8a14d2c4a3ccbab0d39 | /Experiments with pywhatkit/pywhatkit.py | 5ae68a0a685d178ed49a09647523c9583cd73646 | [] | no_license | nandinichhajed/Python-Projects | 9ba9ecae0717791e98a4ff4440be7a0bf2cc99e7 | 07dfa4b99c5cebf3abfd2089f23506debf2e0a8c | refs/heads/main | 2023-08-18T08:34:27.135830 | 2021-10-06T11:23:37 | 2021-10-06T11:23:37 | 382,581,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | import pywhatkit as kit
# Sending whatsApp message
kit.sendwhatmsg_instantly("+919406655668", "Hiiiii")
kit.sendwhatmsg_to_group("group name", "Hiiiii", 22,33)
kit.sendwhats_image("+91**********", img_path="path", caption="caption")
# To search something on web browser
kit.search("python")
# To find information
kit.info("Python programing language")
# To convert text to hand written characters
kit.text_to_handwriting("Hiii Nandini This side", rgb = (0,0,225))
# To convert any image to ASCII art.
kit.image_to_ascii_art(r"Path")
# To search and play a particular video on YouTube by using just the keyword
kit.playonyt("2002 song")
print("done") | [
"nandinichhajed08@gmail.com"
] | nandinichhajed08@gmail.com |
c1207cd99cf050f5920e60b5acdcba64e82686ab | 34dab2e882e304f1b371c7e1dabcc6eacbc67dc9 | /contrib/bitrpc/bitrpc.py | e3b01c117726add9397660c62e7553c5664a2ecf | [
"MIT"
] | permissive | worldgold/test | 6ac526c4e80b94e27fd0751b48b0bddb6134c6da | 9645229c1d2e30e215e9e0424a52121606fefec9 | refs/heads/master | 2021-01-19T18:39:55.932086 | 2017-08-23T09:48:59 | 2017-08-23T09:48:59 | 101,150,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,838 | py | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:10332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:10332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Eb3coin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Eb3coin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| [
"inder@system.(none)"
] | inder@system.(none) |
7b835c930ee026e800a8f2c827ca7cc2dd2b6951 | b4ec04d6a2a4ba57d11b577326086c14d9b9408b | /freshontheboattests/testUserRegistration.py | 48a96a3d9990ad9a29a052739e0cbb4104299f60 | [] | no_license | petergzli/FreshOnTheBoat | 91268d43f91c85da0bacafa268b42e2f1e3dfe6c | 6320bcd798ad23d6ed936fddeb51a040a28853b2 | refs/heads/master | 2021-01-20T10:06:13.318571 | 2015-11-25T18:52:36 | 2015-11-25T18:52:36 | 41,778,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | #The Following Script is a sample POST tester, to see if your entry was successfully added to database.
import requests
# Target endpoint of the locally running Flask app (user-registration route).
url = 'http://127.0.0.1:5000/users/new/'
# Form fields expected by the /users/new/ endpoint.
params = {"firstname": "Carly", "lastname": "Jepsen", "username": "callbaby", "encrypted_password": "baby"}
# POST the form data and echo the JSON response so the insert can be verified.
HTTPresponse = requests.post(url, data = params)
print HTTPresponse.json()
| [
"petergzli@gmail.com"
] | petergzli@gmail.com |
523f716dc6a787ce7d6eb918648030f3ec9e97be | 203f8465075e098f69912a6bbfa3498c36ce2a60 | /sandbox/person_follower/src/follower/msg/_WaitActionGoal.py | 518c8b03a5d36551ca3d0537d1a0fbd58db54de3 | [] | no_license | robcn/personalrobots-pkg | a4899ff2db9aef00a99274d70cb60644124713c9 | 4dcf3ca1142d3c3cb85f6d42f7afa33c59e2240a | refs/heads/master | 2021-06-20T16:28:29.549716 | 2009-09-04T23:56:10 | 2009-09-04T23:56:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,433 | py | # autogenerated by genmsg_py from WaitActionGoal.msg. Do not edit.
import roslib.message
import struct
## \htmlinclude WaitActionGoal.msg.html
class WaitActionGoal(roslib.message.Message):
    # NOTE(review): autogenerated ROS message class (genmsg_py); regeneration
    # overwrites manual edits. Serialization is little-endian ('<' formats).
    _md5sum = "54f5dc6d242ed96aa3e20c82006143e4"  # hash of the message definition (wire-compat check)
    _type = "follower/WaitActionGoal"
    _has_header = False #flag to mark the presence of a Header object
    _full_text = """int32 num_events
string topic_name

"""
    __slots__ = ['num_events','topic_name']
    _slot_types = ['int32','string']

    ## Constructor. Any message fields that are implicitly/explicitly
    ## set to None will be assigned a default value. The recommend
    ## use is keyword arguments as this is more robust to future message
    ## changes. You cannot mix in-order arguments and keyword arguments.
    ##
    ## The available fields are:
    ##   num_events,topic_name
    ##
    ## @param args: complete set of field values, in .msg order
    ## @param kwds: use keyword arguments corresponding to message field names
    ## to set specific fields.
    def __init__(self, *args, **kwds):
        super(WaitActionGoal, self).__init__(*args, **kwds)
        #message fields cannot be None, assign default values for those that are
        if self.num_events is None:
            self.num_events = 0
        if self.topic_name is None:
            self.topic_name = ''

    ## internal API method
    def _get_types(self): return WaitActionGoal._slot_types

    ## serialize message into buffer
    ## @param buff StringIO: buffer
    def serialize(self, buff):
        try:
            # int32 num_events, little-endian
            buff.write(struct.pack('<i', self.num_events))
            length = len(self.topic_name)
            #serialize self.topic_name as a uint32 length prefix followed by the bytes
            buff.write(struct.pack('<I%ss'%length, length, self.topic_name))
        except struct.error, se: self._check_types(se)
        except TypeError, te: self._check_types(te)

    ## unpack serialized message in str into this message instance
    ## @param str str: byte array of serialized message
    def deserialize(self, str):
        try:
            end = 0
            # int32 num_events
            start = end
            end += 4
            (self.num_events,) = struct.unpack('<i',str[start:end])
            # uint32 length prefix for topic_name
            start = end
            end += 4
            (length,) = struct.unpack('<I',str[start:end])
            #deserialize self.topic_name
            pattern = '<%ss'%length
            start = end
            end += struct.calcsize(pattern)
            (self.topic_name,) = struct.unpack(pattern, str[start:end])
            return self
        except struct.error, e:
            raise roslib.message.DeserializationError(e) #most likely buffer underfill
| [
"ethandreyfuss@f5854215-dd47-0410-b2c4-cdd35faa7885"
] | ethandreyfuss@f5854215-dd47-0410-b2c4-cdd35faa7885 |
91129a536dccabd4d97cb50c44fef92adc6b28fc | e8204c87247729a4dd7c302e6949daeb371a9996 | /main.py | 84772714c518e4615e508acbd607fb2162e06f25 | [] | no_license | umarshabir/Turtle-crossing-game-Frogger- | 0080adef09b29cda64bfd50874dda6f677117541 | 720c9a6c1ebdb542d14e8a5fcde509e40a5c89fe | refs/heads/main | 2023-03-18T12:50:06.694265 | 2021-03-11T14:59:09 | 2021-03-11T14:59:09 | 346,738,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | import time
from turtle import Screen
from player import Player
from car_manager import CarManager
from scoreboard import Scoreboard

# Game objects: the crossing turtle, the traffic, and the level display.
player = Player()
car_manager = CarManager()
scoreboard = Scoreboard()

# 600x600 window; tracer(0) disables automatic redraws so the loop below can
# update the screen exactly once per frame for smooth animation.
screen = Screen()
screen.setup(width=600, height=600)
screen.tracer(0)

screen.listen()
screen.onkey(player.move, "Up")  # only control: move the turtle upwards

game_is_on = True
while game_is_on:
    time.sleep(0.1)  # ~10 frames per second
    screen.update()  # manual redraw (tracer is off)
    car_manager.create_cars()
    car_manager.move_cars()

    # Detect collision with a car: within 20 pixels counts as a hit.
    for car in car_manager.all_cars:
        if car.distance(player) < 20:
            game_is_on = False
            scoreboard.game_over()

    # Detect a successful crossing: reset the turtle and speed up the traffic.
    if player.is_at_finnish_line():
        player.go_to_start()
        car_manager.level_up()

screen.exitonclick()
| [
"noreply@github.com"
] | noreply@github.com |
b53d51e90634a68addf27b8fb44bc961f55f096a | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/web/v20190801/web_app_diagnostic_logs_configuration.py | b955573f291c7fb008c4a782dcab13d6ce640e51 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,078 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WebAppDiagnosticLogsConfigurationArgs', 'WebAppDiagnosticLogsConfiguration']
@pulumi.input_type
class WebAppDiagnosticLogsConfigurationArgs:
    # NOTE(review): generated by the Pulumi SDK generator — do not edit by hand;
    # regeneration overwrites manual changes.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 application_logs: Optional[pulumi.Input['ApplicationLogsConfigArgs']] = None,
                 detailed_error_messages: Optional[pulumi.Input['EnabledConfigArgs']] = None,
                 failed_requests_tracing: Optional[pulumi.Input['EnabledConfigArgs']] = None,
                 http_logs: Optional[pulumi.Input['HttpLogsConfigArgs']] = None,
                 kind: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a WebAppDiagnosticLogsConfiguration resource.
        :param pulumi.Input[str] name: Name of the app.
        :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
        :param pulumi.Input['ApplicationLogsConfigArgs'] application_logs: Application logs configuration.
        :param pulumi.Input['EnabledConfigArgs'] detailed_error_messages: Detailed error messages configuration.
        :param pulumi.Input['EnabledConfigArgs'] failed_requests_tracing: Failed requests tracing configuration.
        :param pulumi.Input['HttpLogsConfigArgs'] http_logs: HTTP logs configuration.
        :param pulumi.Input[str] kind: Kind of resource.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional sub-configurations are only stored when supplied, so the
        # provider never receives explicit nulls for omitted settings.
        if application_logs is not None:
            pulumi.set(__self__, "application_logs", application_logs)
        if detailed_error_messages is not None:
            pulumi.set(__self__, "detailed_error_messages", detailed_error_messages)
        if failed_requests_tracing is not None:
            pulumi.set(__self__, "failed_requests_tracing", failed_requests_tracing)
        if http_logs is not None:
            pulumi.set(__self__, "http_logs", http_logs)
        if kind is not None:
            pulumi.set(__self__, "kind", kind)

    # Each property below is a thin pulumi.get/pulumi.set accessor pair over
    # the input-type storage; the getter name maps to the Azure API field.
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of the app.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Name of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="applicationLogs")
    def application_logs(self) -> Optional[pulumi.Input['ApplicationLogsConfigArgs']]:
        """
        Application logs configuration.
        """
        return pulumi.get(self, "application_logs")

    @application_logs.setter
    def application_logs(self, value: Optional[pulumi.Input['ApplicationLogsConfigArgs']]):
        pulumi.set(self, "application_logs", value)

    @property
    @pulumi.getter(name="detailedErrorMessages")
    def detailed_error_messages(self) -> Optional[pulumi.Input['EnabledConfigArgs']]:
        """
        Detailed error messages configuration.
        """
        return pulumi.get(self, "detailed_error_messages")

    @detailed_error_messages.setter
    def detailed_error_messages(self, value: Optional[pulumi.Input['EnabledConfigArgs']]):
        pulumi.set(self, "detailed_error_messages", value)

    @property
    @pulumi.getter(name="failedRequestsTracing")
    def failed_requests_tracing(self) -> Optional[pulumi.Input['EnabledConfigArgs']]:
        """
        Failed requests tracing configuration.
        """
        return pulumi.get(self, "failed_requests_tracing")

    @failed_requests_tracing.setter
    def failed_requests_tracing(self, value: Optional[pulumi.Input['EnabledConfigArgs']]):
        pulumi.set(self, "failed_requests_tracing", value)

    @property
    @pulumi.getter(name="httpLogs")
    def http_logs(self) -> Optional[pulumi.Input['HttpLogsConfigArgs']]:
        """
        HTTP logs configuration.
        """
        return pulumi.get(self, "http_logs")

    @http_logs.setter
    def http_logs(self, value: Optional[pulumi.Input['HttpLogsConfigArgs']]):
        pulumi.set(self, "http_logs", value)

    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)
class WebAppDiagnosticLogsConfiguration(pulumi.CustomResource):
    # NOTE(review): generated by the Pulumi SDK generator — do not edit by hand.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 application_logs: Optional[pulumi.Input[pulumi.InputType['ApplicationLogsConfigArgs']]] = None,
                 detailed_error_messages: Optional[pulumi.Input[pulumi.InputType['EnabledConfigArgs']]] = None,
                 failed_requests_tracing: Optional[pulumi.Input[pulumi.InputType['EnabledConfigArgs']]] = None,
                 http_logs: Optional[pulumi.Input[pulumi.InputType['HttpLogsConfigArgs']]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Configuration of App Service site logs.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['ApplicationLogsConfigArgs']] application_logs: Application logs configuration.
        :param pulumi.Input[pulumi.InputType['EnabledConfigArgs']] detailed_error_messages: Detailed error messages configuration.
        :param pulumi.Input[pulumi.InputType['EnabledConfigArgs']] failed_requests_tracing: Failed requests tracing configuration.
        :param pulumi.Input[pulumi.InputType['HttpLogsConfigArgs']] http_logs: HTTP logs configuration.
        :param pulumi.Input[str] kind: Kind of resource.
        :param pulumi.Input[str] name: Name of the app.
        :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: WebAppDiagnosticLogsConfigurationArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Configuration of App Service site logs.

        :param str resource_name: The name of the resource.
        :param WebAppDiagnosticLogsConfigurationArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two @overload signatures above: decide
        # whether the caller passed an args object or keyword properties,
        # then delegate to _internal_init either way.
        resource_args, opts = _utilities.get_resource_args_opts(WebAppDiagnosticLogsConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 application_logs: Optional[pulumi.Input[pulumi.InputType['ApplicationLogsConfigArgs']]] = None,
                 detailed_error_messages: Optional[pulumi.Input[pulumi.InputType['EnabledConfigArgs']]] = None,
                 failed_requests_tracing: Optional[pulumi.Input[pulumi.InputType['EnabledConfigArgs']]] = None,
                 http_logs: Optional[pulumi.Input[pulumi.InputType['HttpLogsConfigArgs']]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared constructor body: validate options, populate the props bag,
        # register provider-version aliases, then register with the engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to looking up an existing one).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = WebAppDiagnosticLogsConfigurationArgs.__new__(WebAppDiagnosticLogsConfigurationArgs)

            __props__.__dict__["application_logs"] = application_logs
            __props__.__dict__["detailed_error_messages"] = detailed_error_messages
            __props__.__dict__["failed_requests_tracing"] = failed_requests_tracing
            __props__.__dict__["http_logs"] = http_logs
            __props__.__dict__["kind"] = kind
            # 'name' and 'resource_group_name' are required unless resolving by URN.
            if name is None and not opts.urn:
                raise TypeError("Missing required property 'name'")
            __props__.__dict__["name"] = name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["type"] = None
        # Aliases keep states created under older API versions / package names compatible.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20150801:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20201201:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20201201:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-native:web/v20210101:WebAppDiagnosticLogsConfiguration"), pulumi.Alias(type_="azure-nextgen:web/v20210101:WebAppDiagnosticLogsConfiguration")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(WebAppDiagnosticLogsConfiguration, __self__).__init__(
            'azure-native:web/v20190801:WebAppDiagnosticLogsConfiguration',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppDiagnosticLogsConfiguration':
        """
        Get an existing WebAppDiagnosticLogsConfiguration resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All output fields start as None; the engine fills them in from the
        # provider when the lookup resolves.
        __props__ = WebAppDiagnosticLogsConfigurationArgs.__new__(WebAppDiagnosticLogsConfigurationArgs)

        __props__.__dict__["application_logs"] = None
        __props__.__dict__["detailed_error_messages"] = None
        __props__.__dict__["failed_requests_tracing"] = None
        __props__.__dict__["http_logs"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["type"] = None
        return WebAppDiagnosticLogsConfiguration(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="applicationLogs")
    def application_logs(self) -> pulumi.Output[Optional['outputs.ApplicationLogsConfigResponse']]:
        """
        Application logs configuration.
        """
        return pulumi.get(self, "application_logs")

    @property
    @pulumi.getter(name="detailedErrorMessages")
    def detailed_error_messages(self) -> pulumi.Output[Optional['outputs.EnabledConfigResponse']]:
        """
        Detailed error messages configuration.
        """
        return pulumi.get(self, "detailed_error_messages")

    @property
    @pulumi.getter(name="failedRequestsTracing")
    def failed_requests_tracing(self) -> pulumi.Output[Optional['outputs.EnabledConfigResponse']]:
        """
        Failed requests tracing configuration.
        """
        return pulumi.get(self, "failed_requests_tracing")

    @property
    @pulumi.getter(name="httpLogs")
    def http_logs(self) -> pulumi.Output[Optional['outputs.HttpLogsConfigResponse']]:
        """
        HTTP logs configuration.
        """
        return pulumi.get(self, "http_logs")

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[Optional[str]]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | noreply@github.com |
52a5a034b32eedf99db0c7b0b8e8069b46531061 | a54dc0d9ea54cb568f66f2ff24f1971f77eac19c | /code/message/start_face_recognition_message.py | 8bf6faa09233eb803189942dc734e303ef52a4be | [
"MIT"
] | permissive | ITE-5th/skill-image-caption | 25a42b90e9058eb40959fd01c96ee8f0d73616dc | 1a77d27b4fbadd89a6390e8707d4a7975b1edb8d | refs/heads/master | 2020-03-16T23:29:31.414857 | 2018-07-27T18:06:38 | 2018-07-27T18:06:38 | 133,079,676 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | from .message import Message
class StartFaceRecognitionMessage(Message):
    """Marker message telling the server to start face recognition.

    Carries no payload of its own; all behaviour comes from the base
    ``Message`` class.
    """
    pass
| [
"m.zaher.airout@gmail.com"
] | m.zaher.airout@gmail.com |
08aea1c4cf86277a51c4d590dbf843a9e116acea | 3ccd609f68016aad24829b8dd3cdbb535fb0ff6d | /python/bpy/types/FILEBROWSER_UL_dir.py | d242f98ab9b1a289208ea3db9e875d5ed1fb5d58 | [] | no_license | katharostech/blender_externs | 79b2eed064fd927e3555aced3e2eb8a45840508e | fdf7f019a460de0fe7e62375c1c94f7ab0e9f68d | refs/heads/master | 2020-04-11T14:00:29.393478 | 2018-10-01T00:40:51 | 2018-10-01T00:40:51 | 161,838,212 | 1 | 1 | null | 2018-12-14T20:41:32 | 2018-12-14T20:41:32 | null | UTF-8 | Python | false | false | 140 | py | class FILEBROWSER_UL_dir:
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
pass
| [
"troyedwardsjr@gmail.com"
] | troyedwardsjr@gmail.com |
cb3c52836c92de725f4b0b5bc037f530ce63d13a | 656b431bf7ac23d5593ddf4fb69c29c251d744cb | /zen/layer/base/node.py | 91c5f8a19f460b42f4d3cf942d8f853c60c39140 | [] | no_license | knighton/zen-0.14 | 2c8e4f0aa2e6c862d4022eb346a619268250273e | 7936e43a115d00888bf6c523525bf9f3e7a49256 | refs/heads/master | 2021-01-21T05:33:01.494392 | 2018-05-17T15:01:30 | 2018-05-17T15:01:30 | 101,927,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,619 | py | from copy import deepcopy
from ..arch.vee import Vee
class Node(Vee):
    """
    A node of a neural network.

    They consist of input and non-input nodes (Inputs and LayerNodes).
    """

    def __init__(self):
        # Output shape/dtype are assigned by try_to_build(); output data is
        # assigned by the forward pass; out_nodes are the downstream consumers.
        self._out_shape = None
        self._out_dtype = None
        self._out_data = None
        self._out_nodes = []

    def out_shape(self):
        """
        Return the output shape (None until the node is built).
        """
        return self._out_shape

    def out_dtype(self):
        """
        Return the output dtype (None until the node is built).
        """
        return self._out_dtype

    def out_data(self):
        """
        Return the output data of the last forward pass (None before it).
        """
        return self._out_data

    def add_out_node(self, node):
        """
        Register a downstream node that consumes this node's output.
        """
        self._out_nodes.append(node)

    def out_nodes(self):
        """
        Return the list of downstream consumer nodes.
        """
        return self._out_nodes

    def try_to_build(self):
        # Subclasses attempt to resolve their output shape/dtype here.
        raise NotImplementedError

    def is_built(self):
        # Subclasses report whether try_to_build() has succeeded.
        raise NotImplementedError

    def params(self):
        # Subclasses return their trainable parameters.
        raise NotImplementedError
class InteriorNode(Node):
    """
    A non-input node (the normal case): a node fed by one or more upstream nodes.
    """

    def __init__(self):
        super().__init__()
        # _in_nodes is set by subclasses when the node is wired into a graph;
        # _num_ready_in_nodes counts inputs that have produced data this pass.
        self._in_nodes = None
        self._num_ready_in_nodes = 0

    def _gather_shapes_dtypes_for_build(self):
        """
        Collect the output shapes/dtypes of all input nodes.

        Returns (ok, shapes, dtypes); ok is False (with None lists) when any
        input node has not been built yet, so our own build must wait.
        """
        assert self._in_nodes, 'Tried to build an internal node with no inputs.'
        in_shapes = []
        in_dtypes = []
        for node in self._in_nodes:
            shape = node.out_shape()
            if shape is None:
                return False, None, None
            in_shapes.append(shape)
            dtype = node.out_dtype()
            if dtype is None:
                return False, None, None
            in_dtypes.append(dtype)
        return True, in_shapes, in_dtypes

    def in_nodes(self):
        """
        Return the list of upstream (input) nodes, or None if unwired.
        """
        return self._in_nodes

    def to_spec_or_specs(self):
        # Subclasses return the spec(s) this node was constructed from.
        raise NotImplementedError
class LayerNode(InteriorNode):
    """
    Neural network node wrapping a single layer.
    """

    def __init__(self, spec, in_nodes=None):
        super().__init__()
        # Register ourselves as a consumer of each input node.
        if in_nodes:
            for node in in_nodes:
                node.add_out_node(self)
        self._in_nodes = in_nodes
        self._spec = spec
        self._layer = None  # created lazily by try_to_build()

    def __call__(self, *in_nodes):
        """
        Return a copy of ourself that is connected to the given feed nodes.

        This is how graphs are constructed.  The spec is deep-copied so the
        template node can be reused for several connections.
        """
        assert not self._in_nodes
        return LayerNode(deepcopy(self._spec), in_nodes)

    def try_to_build(self):
        """
        Try to construct the internal layer of a node given the shapes and
        dtypes of its input nodes.  Tries to build its output nodes.

        Returns True if this node could be built (output nodes will fail if
        not all of their inputs are built yet during graph building).
        """
        can_build, in_shapes, in_dtypes = self._gather_shapes_dtypes_for_build()
        if not can_build:
            return False
        self._layer, self._out_shape, self._out_dtype = \
            self._spec.build_multi_input(in_shapes, in_dtypes)
        # Cascade the build downstream now that our shape/dtype are known.
        for node in self._out_nodes:
            node.try_to_build()
        return True

    def is_built(self):
        return self._layer is not None

    def params(self):
        """
        Collect the node's trainable parameters for the optimizer.

        The node must already have been built; an unbuilt layer here means
        the graph is missing an input (previous docstring claimed this method
        builds the node — it does not, it asserts the build happened).
        """
        assert self._layer, \
            'Not all input nodes have been built (the graph is missing an ' + \
            'input or inputs).'
        return self._layer.params()

    def in_node_is_ready(self, is_training):
        """
        Receive notification that one of our input nodes has data.  If they
        all do, perform a forward pass and notify the nodes that we feed into.
        """
        assert self._in_nodes, \
            'Called in_node_is_ready() on a node with no inputs.'
        assert self._layer, \
            'Not all input nodes have been built (the graph is missing an ' + \
            'input or inputs).'
        self._num_ready_in_nodes += 1
        if self._num_ready_in_nodes < len(self._in_nodes):
            return
        # All inputs ready: gather their outputs in wiring order.
        xx = []
        for node in self._in_nodes:
            x = node.out_data()
            assert x is not None
            xx.append(x)
        self._out_data = self._layer.forward_multi_input(xx, is_training)
        for node in self._out_nodes:
            node.in_node_is_ready(is_training)
        # Reset the counter so the next forward pass starts fresh.
        self._num_ready_in_nodes = 0

    def to_spec_or_specs(self):
        return self._spec
| [
"iamknighton@gmail.com"
] | iamknighton@gmail.com |
10ef64737d16248a9f71ed4a09aa9ea77ca86f54 | c109c11c93a628086e1181d16906304070c396b6 | /Test04_散点图.py | 6af7672e98029cb137f1729f3e15fc253b7c5a55 | [] | no_license | Monkey-D-Luffy-star/Py_DataAnalyse01 | 24e3aea4fac2b079f7823d2abf164c24a9714bb1 | 3fba96133d675294d61b067d03fbe7932ff098ae | refs/heads/master | 2022-11-30T03:10:51.376333 | 2020-08-08T01:41:03 | 2020-08-08T01:45:02 | 285,835,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | from matplotlib import pyplot as plt
from matplotlib import font_manager
# Daily temperatures (degrees C) for March (y_3) and October (y_10), one value per day.
y_3 = [11, 17, 16, 11, 12, 11, 12, 6, 6, 7, 8, 9, 12, 15, 14, 17, 18, 21, 16, 17, 20, 14, 15, 15, 15, 19, 21, 22, 22,
       22, 23]
y_10 = [26, 26, 28, 19, 21, 17, 16, 19, 18, 20, 20, 19, 22, 23, 17, 20, 21, 20, 22, 15, 11, 15, 5, 13, 17, 10, 11, 13,
        12, 13, 6]
# Use a Chinese-capable font (KaiTi) so the CJK labels render correctly.
myfont = font_manager.FontProperties(fname='C:\WINDOWS\FONTS\SIMKAI.TTF')
# Set the canvas size.
plt.figure(figsize=(20, 8), dpi=80)
x_3 = range(1, 32)
x_10 = range(41, 72) # these numbers control the gap between the two clusters -- why?
# Answer: because scatter uses these x values directly when plotting.
# plt.figure()
plt.scatter(x_3, y_3,color='red',label='3月份')
plt.scatter(x_10, y_10,color='yellow',label='10月份')
# Build the x-axis tick labels (March days followed by October days).
_x = ['3月{}号'.format(i) for i in x_3]
_x += ['10月{}号'.format(i) for i in x_3]
plt.xticks(list(x_3)+list(x_10),_x,fontproperties=myfont,size=12,rotation=45)
# Show the legend.
plt.legend(loc='best',prop=myfont)
plt.title('3月和10月温度信息对比',fontproperties=myfont,size=16)
plt.xlabel('日期',fontproperties=myfont,size=16)
plt.ylabel('温度(℃)',fontproperties=myfont,size=16)
# Render the figure.
plt.show()
| [
"1451955616@qq.com"
] | 1451955616@qq.com |
8bb474e67284156e8874fe976147f794bece629e | 9d767c7df630aa7782264cc51073065e1f5d4c5d | /mlia-examples/src/book/itemsets/fpgrowth.py | 63673ca3cc752ccdb6d86d30bef3666a042b1e84 | [] | no_license | GomesNayagam/workspace | 497e6eaad2785875a02f870cd384516b72501110 | d23e806cbbe0decc8a34bcd61636468a46f439a4 | refs/heads/master | 2016-09-06T17:45:52.800243 | 2014-09-25T13:51:20 | 2014-09-25T13:51:20 | 24,454,554 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,319 | py | from __future__ import division
from numpy import *
class TreeNode:
    """A node of an FP-tree: item name, occurrence count, parent link,
    children dict, and a node-link used by the header table (Python 2)."""
    def __init__(self, nameValue, numOccur, parentNode):
        self.name = nameValue
        self.count = numOccur
        # nodeLink chains together all tree nodes holding the same item.
        self.nodeLink = None
        self.parent = parentNode
        # Maps child item name -> child TreeNode.
        self.children = {}
    def inc(self, numOccur):
        # Add numOccur to this node's occurrence count.
        self.count += numOccur
    def disp(self, ind=1):
        # Print the subtree, indenting one space per depth level.
        print " "* ind, self.name, self.count
        for child in self.children.values():
            child.disp(ind + 1)
def createTree(dataSet, minSupport=1):
    """Build an FP-tree from {frozenset(transaction): count} and return
    (tree root, header table); (None, None) when no item is frequent."""
    # First pass: accumulate the support of each item.
    headerTable = {}
    for trans in dataSet:
        for item in trans:
            headerTable[item] = headerTable.get(item, 0) + dataSet[trans]
    # Keep only items meeting the minimum support.
    headerTable = dict(filter(lambda x: x[1] >= minSupport, headerTable.items()))
    freqItemSet = set(headerTable.keys())
    if len(freqItemSet) == 0:
        return None, None
    # Extend each header entry to [support, head-of-node-link-chain].
    for k in headerTable:
        headerTable[k] = [headerTable[k], None]
    recTree = TreeNode("Null", 1, None)
    # Second pass: insert every transaction with items ordered by support.
    for tranSet, count in dataSet.items():
        localD = {}
        for item in tranSet:
            if item in freqItemSet:
                localD[item] = headerTable[item][0]
        if len(localD) > 0:
            orderedItems = [x[0] for x in sorted(localD.items(),
                            key=lambda p: p[1], reverse=True)]
            updateTree(orderedItems, recTree, headerTable, count)
    return recTree, headerTable
def updateTree(items, inTree, headerTable, count):
    """Insert an ordered transaction into the FP-tree rooted at inTree,
    maintaining the header table's node-link chains."""
    first = items[0]
    child = inTree.children.get(first)
    if child is not None:
        # Item already on this branch: just bump the count.
        child.inc(count)
    else:
        # New branch: create the node and hook it into the node-link chain.
        child = TreeNode(first, count, inTree)
        inTree.children[first] = child
        header = headerTable[first]
        if header[1] is None:
            header[1] = child
        else:
            updateHeader(header[1], child)
    remainder = items[1:]
    if remainder:
        updateTree(remainder, child, headerTable, count)
def updateHeader(nodeToTest, targetNode):
    """Append targetNode at the end of the node-link chain starting at nodeToTest."""
    current = nodeToTest
    while current.nodeLink is not None:
        current = current.nodeLink
    current.nodeLink = targetNode
def loadSimpleData():
    """Return the small hard-coded transaction database used for testing."""
    return [
        ['r', 'z', 'h', 'j', 'p'],
        ['z', 'y', 'x', 'w', 'v', 'u', 't', 's'],
        ['z'],
        ['r', 'x', 'n', 'o', 's'],
        ['y', 'r', 'x', 'z', 'q', 't', 'p'],
        ['y', 'z', 'x', 'e', 'q', 's', 't', 'm'],
    ]
def createInitSet(dataSet):
    """Convert a list of transactions into {frozenset(transaction): count}.

    Fix: duplicate transactions now accumulate their count.  The original
    always stored 1 for every transaction, silently discarding multiplicity
    when the same itemset appeared more than once in the data.
    """
    retDict = {}
    for trans in dataSet:
        key = frozenset(trans)
        retDict[key] = retDict.get(key, 0) + 1
    return retDict
def ascendTree(leafNode, prefixPath):
    """Climb from leafNode toward the root, appending each non-root node's name."""
    node = leafNode
    while node.parent is not None:
        prefixPath.append(node.name)
        node = node.parent
def findPrefixPath(basePath, treeNode):
    """Collect the conditional pattern bases for basePath by following its
    node-link chain; returns {frozenset(prefix): count}."""
    condPats = {}
    node = treeNode
    while node is not None:
        path = []
        ascendTree(node, path)
        # path[0] is the item itself; keep only the true prefix.
        if len(path) > 1:
            condPats[frozenset(path[1:])] = node.count
        node = node.nodeLink
    return condPats
def mineTree(inTree, headerTable, minSup, prefix, freqItemList):
    """Recursively mine frequent itemsets from the FP-tree, appending each
    discovered itemset to freqItemList (Python 2)."""
    # Process header items from least to most frequent.
    bigL = [v[0] for v in sorted(headerTable.items(),
                              key=lambda p: p[1])]
    for basePath in bigL:
        newFreqSet = prefix.copy()
        newFreqSet.add(basePath)
        freqItemList.append(newFreqSet)
        # Build the conditional FP-tree from this item's pattern bases.
        condPathBases = findPrefixPath(basePath, headerTable[basePath][1])
        myCondTree, myHeaderTable = createTree(condPathBases, minSup)
        if myHeaderTable != None:
            print "conditional tree for:", newFreqSet
            myCondTree.disp()
            mineTree(myCondTree, myHeaderTable, minSup, newFreqSet, freqItemList)
def main():
    """Driver: mine frequent itemsets from the kosarak click-stream data
    with a minimum support of 100000 (Python 2)."""
    # ### Tiny data for testing fpgrowth tree
    # root = TreeNode("pyramid", 9, None)
    # root.children["eye"] = TreeNode("eye", 13, None)
    # root.children["phoenix"] = TreeNode("phoenix", 3, None)
    # root.disp()
    # ### slightly larger data for testing functionality of fpgrowth
    # data = loadSimpleData()
    # #print data
    # initSet = createInitSet(data)
    # #print initSet
    # myFPTree, myHeaderTab = createTree(initSet, 3)
    # myFPTree.disp()
    # for name in myHeaderTab.keys():
    #   print "prefix pattern(" + name + ")", findPrefixPath(name, myHeaderTab[name][1])
    # freqItems = []
    # mineTree(myFPTree, myHeaderTab, 3, set([]), freqItems)
    # print "frwquent items=", freqItems
    ### kosarak data (medium)
    # Each line of kosarak.dat is one transaction of space-separated item ids.
    parsedDat = [line.split() for line in open("kosarak.dat").readlines()]
    initSet = createInitSet(parsedDat)
    myFPTree, myHeaderTab = createTree(initSet, 100000)
    myFreqList = []
    mineTree(myFPTree, myHeaderTab, 100000, set([]), myFreqList)
    for itemset in myFreqList:
        print itemset
| [
"gomes.uma@gmail.com"
] | gomes.uma@gmail.com |
5e959bda895dddc623e59dd72adc9c4788f1414b | 59adbf277a13f3a2ba3983827765f5fd390e76de | /Keras_CNN_PPO_StarCraft/ppo.py | 4d81b733f7820dc50266f61004ac78bf8bea082f | [] | no_license | oppa3109/Reinforcement-Learning-2 | dde58b783bbaa7a2780c37b2aa8565eb4e34493c | 2eead1fe012c23b1cf774ffa45cd96e4e8fd321c | refs/heads/master | 2020-04-01T16:40:32.413900 | 2018-10-17T03:31:32 | 2018-10-17T03:31:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,452 | py | import numpy as np
import copy
from keras.layers import Dense, Flatten, Input
from keras.layers.convolutional import Conv2D
from keras import backend as K
from keras.optimizers import Adam
from keras.models import Model
class PPOIQN:
    """PPO agent for a 16x16x2 StarCraft observation grid.

    Maintains a current and an "old" copy of the policy/value networks
    (the old copy provides the fixed probabilities in the PPO ratio) plus
    Keras-backend update functions for the clipped-surrogate policy loss
    and the value loss.
    """
    def __init__(self):
        # Observation is a 16x16 grid with 2 channels; 3 discrete actions.
        self.state_size = (16,16,2)
        self.action_size = 3
        self.learning_rate = 0.001
        self.gamma = 0.95
        # PPO clipping epsilon and loss coefficients (value, entropy).
        self.clip_value = 0.2
        self.c_1 = 1
        self.c_2 = 0.005
        # Current networks and a frozen "old policy" copy for the PPO ratio.
        self.act_probs, self.spatial_probs, self.v_preds = self.build_act_spatial_value()
        self.act_probs_old, self.spatial_probs_old, self.v_preds_old = self.build_act_spatial_value()
        self.act_spatial_updater = self.act_spatial_optimizer()
        self.value_updater = self.value_optimizer()
    def build_act_spatial_value(self):
        """Build the shared CNN trunk plus three heads and return
        (action-policy model, spatial-policy model, value model)."""
        ##### non-spatial action policy, spatial action policy
        input = Input(shape=self.state_size, name='obs')
        conv = Conv2D(filters=16, kernel_size=[5, 5], strides=[1, 1], padding='same', activation='relu')(input)
        conv = Conv2D(filters=32, kernel_size=[3, 3], strides=[1, 1], padding='same', activation='relu')(conv)
        conv = Conv2D(filters=64, kernel_size=[3, 3], strides=[1, 1], padding='same', activation='relu')(conv)
        conv = Conv2D(filters=128, kernel_size=[3, 3], strides=[1, 1], padding='same', activation='relu')(conv)
        conv = Flatten()(conv)
        dense_1 = Dense(units=256, activation=K.relu)(conv)
        vision_model = Model(input, dense_1)
        # Then define the tell-digits-apart model
        digit_a = Input(shape=self.state_size)
        digit_b = Input(shape=self.state_size)
        # The vision model will be shared, weights and all
        out_a = vision_model(digit_a)
        out_b = vision_model(digit_b)
        # Action head: 3-way softmax; spatial head: 16*16 softmax over cells.
        o_act_probs = Dense(units=3, activation=K.softmax)(out_a)
        o_spatial_probs = Dense(units=16 * 16, activation=K.softmax)(out_b)
        act_probs = Model(inputs=digit_a, outputs=o_act_probs)
        spatial_probs = Model(inputs=digit_b, outputs=o_spatial_probs)
        self.act_spatial_probs = Model(inputs=[digit_a, digit_b], outputs=[o_act_probs, o_spatial_probs])
        act_probs.summary()
        spatial_probs.summary()
        ##### value-state
        dense_2 = Dense(units=64, activation=K.relu)(dense_1)
        o_v_preds = Dense(units=1, activation=None, trainable=True, kernel_initializer='glorot_uniform')(dense_2)
        v_preds = Model(inputs=input, outputs=o_v_preds)
        v_preds.summary()
        return act_probs, spatial_probs, v_preds
    def get_action(self, obs):
        """Run the current networks on one observation and return
        (action probabilities, spatial probabilities, value prediction),
        each wrapped in a batch dimension of 1."""
        action_policy = []
        spatial_policy = []
        v_predict = []
        state = np.reshape(obs, (-1, 16, 16, 2))
        o_action_policy = self.act_probs.predict(state)[0]
        action_policy.append([])
        for i in range(len(o_action_policy)):
            action_policy[0].append(o_action_policy[i])
        o_spatial_policy = self.spatial_probs.predict(state)[0]
        spatial_policy.append([])
        for i in range(len(o_spatial_policy)):
            spatial_policy[0].append(o_spatial_policy[i])
        o_v_predict = self.v_preds.predict(state)[0]
        v_predict.append([])
        v_predict[0].append(o_v_predict[0])
        return np.array(action_policy), np.array(spatial_policy), np.array(v_predict)
    def get_gaes(self, rewards, v_preds, v_preds_next):
        """Compute generalized advantage estimates (lambda = 1) from the
        TD residuals; see PPO paper eq. (11)."""
        deltas = [r_t + self.gamma * v_next - v for r_t, v_next, v in zip(rewards, v_preds_next, v_preds)]
        # calculate generative advantage estimator(lambda = 1), see ppo paper eq(11)
        gaes = copy.deepcopy(deltas)
        for t in reversed(range(len(gaes) - 1)):  # is T-1, where T is time step which run policy
            gaes[t] = gaes[t] + self.gamma * gaes[t + 1]
        return gaes
    def act_spatial_optimizer(self):
        """Build the Keras-backend update function for the PPO clipped
        surrogate objective (policy + value + entropy terms).

        The triple-quoted blocks below preserve the original TensorFlow
        implementation for reference; the live code uses keras.backend.
        """
        '''
        self.actions = tf.placeholder(dtype=tf.int32, shape=[None], name='actions')
        self.space = tf.placeholder(dtype=tf.int32, shape=[None], name='space')
        self.rewards = tf.placeholder(dtype=tf.float32, shape=[None], name='rewards')
        self.v_preds_next = tf.placeholder(dtype=tf.float32, shape=[None], name='v_preds_next')
        self.gaes = tf.placeholder(dtype=tf.float32, shape=[None], name='gaes')
        '''
        actions = K.placeholder(dtype='int32', shape=[None], name='actions')
        space = K.placeholder(dtype='int32', shape=[None], name='space')
        rewards = K.placeholder(dtype='float32', shape=[None], name='rewards')
        v_preds_next = K.placeholder(dtype='float32', shape=[None], name='v_preds_next')
        gaes = K.placeholder(dtype='float32', shape=[None], name='gaes')
        '''
        act_probs = self.Policy.act_probs
        spatial_probs = self.Policy.spatial_probs
        act_probs_old = self.Old_Policy.act_probs
        spatial_probs_old = self.Old_Policy.spatial_probs
        act_probs = act_probs * tf.one_hot(indices=self.actions, depth=act_probs.shape[1])
        act_probs = tf.reduce_sum(act_probs, axis=1)
        spatial_probs = spatial_probs * tf.one_hot(indices=self.space, depth=spatial_probs.shape[1])
        spatial_probs = tf.reduce_sum(spatial_probs, axis=1)
        action_probs = tf.clip_by_value(act_probs * spatial_probs, 1e-10, 1.0)
        act_probs_old = act_probs_old * tf.one_hot(indices=self.actions, depth=act_probs_old.shape[1])
        act_probs_old = tf.reduce_sum(act_probs_old, axis=1)
        spatial_probs_old = spatial_probs_old * tf.one_hot(indices=self.space, depth=spatial_probs_old.shape[1])
        spatial_probs_old = tf.reduce_sum(spatial_probs_old, axis=1)
        action_probs_old = tf.clip_by_value(act_probs_old * spatial_probs_old, 1e-10, 1.0)
        '''
        # Probability of the taken (action, spatial) pair under the current policy.
        act_probs = self.act_probs.output * K.one_hot(actions, self.act_probs.output_shape[1])
        act_probs = K.sum(act_probs, axis=1)
        spatial_probs = self.spatial_probs.output * K.one_hot(space, self.spatial_probs.output_shape[1])
        spatial_probs = K.sum(spatial_probs, axis=1)
        action_probs = K.clip(act_probs * spatial_probs, 1e-10, 1.0)
        # Same probability under the frozen "old" policy.
        act_probs_old = self.act_probs_old.output * K.one_hot(actions, self.act_probs_old.output_shape[1])
        act_probs_old = K.sum(act_probs_old, axis=1)
        spatial_probs_old = self.spatial_probs_old.output * K.one_hot(space, self.spatial_probs_old.output_shape[1])
        spatial_probs_old = K.sum(spatial_probs_old, axis=1)
        action_probs_old = K.clip(act_probs_old * spatial_probs_old, 1e-10, 1.0)
        '''
        with tf.variable_scope('loss/clip'):
            spatial_ratios = tf.exp(tf.log(action_probs)-tf.log(action_probs_old))
            clipped_spatial_ratios = tf.clip_by_value(spatial_ratios, clip_value_min=1-clip_value, clip_value_max=1+clip_value)
            loss_spatial_clip = tf.minimum(tf.multiply(self.gaes, spatial_ratios), tf.multiply(self.gaes, clipped_spatial_ratios))
            loss_spatial_clip = tf.reduce_mean(loss_spatial_clip)
            tf.summary.scalar('loss_spatial', loss_spatial_clip)
        '''
        # Clipped surrogate objective: min(r*A, clip(r, 1-eps, 1+eps)*A).
        spatial_ratios = K.exp(K.log(action_probs) - K.log(action_probs_old))
        clipped_spatial_ratios = K.clip(spatial_ratios, 1 - self.clip_value, 1 + self.clip_value)
        loss_spatial_clip = K.minimum(gaes * spatial_ratios,
                                      gaes * clipped_spatial_ratios)
        loss_spatial_clip = K.mean(loss_spatial_clip)
        '''
        with tf.variable_scope('loss/vf'):
            v_preds = self.Policy.v_preds
            loss_vf = tf.squared_difference(self.rewards + self.gamma * self.v_preds_next, v_preds)
            loss_vf = tf.reduce_mean(loss_vf)
            tf.summary.scalar('loss_vf', loss_vf)
        '''
        # Value loss: squared TD error against the bootstrapped target.
        loss_vf_differnece = rewards + self.gamma * v_preds_next - self.v_preds.output
        loss_vf_squared = K.square(loss_vf_differnece)
        loss_vf = K.mean(loss_vf_squared)
        '''
        with tf.variable_scope('loss/entropy'):
            act_probs = self.Policy.act_probs
            spatial_probs = self.Policy.spatial_probs
            act_entropy = -tf.reduce_sum(self.Policy.act_probs * tf.log(tf.clip_by_value(self.Policy.act_probs, 1e-10, 1.0)), axis=1)
            spatial_entropy = -tf.reduce_sum(self.Policy.spatial_probs * tf.log(tf.clip_by_value(self.Policy.spatial_probs, 1e-10, 1.0)), axis=1)
            act_entropy = tf.reduce_mean(act_entropy, axis=0)
            spatial_entropy = tf.reduce_mean(spatial_entropy, axis=0)
            entropy = act_entropy + spatial_entropy
            tf.summary.scalar('entropy', entropy)
        '''
        # Entropy bonus over both heads encourages exploration.
        act_entropy = -K.sum(self.act_probs.output * K.log(K.clip(self.act_probs.output, 1e-10, 1.0)), axis=1)
        spatial_entropy = -K.sum(self.spatial_probs.output * K.log(K.clip(self.spatial_probs.output, 1e-10, 1.0)), axis=1)
        act_entropy = K.mean(act_entropy, axis=0)
        spatial_entropy = K.mean(spatial_entropy, axis=0)
        entropy = act_entropy + spatial_entropy
        '''
        with tf.variable_scope('loss'):
            loss = loss_spatial_clip - c_1 * loss_vf + c_2 * entropy
            loss = -loss  # minimize -loss == maximize loss
            tf.summary.scalar('loss', loss)
        '''
        loss = loss_spatial_clip - self.c_1 * loss_vf + self.c_2 * entropy
        loss = -loss # minimize -loss == maximize loss
        optimizer = Adam(lr=self.learning_rate)
        updates = optimizer.get_updates(self.act_spatial_probs.trainable_weights, [], loss)
        train = K.function([self.act_probs.input, self.spatial_probs.input, self.act_probs_old.input,
                            self.spatial_probs_old.input, self.v_preds.input, space, actions, rewards,
                            v_preds_next, gaes], [loss], updates=updates)
        return train
    def value_optimizer(self):
        """Build the Keras-backend update function that fits the value net
        to the bootstrapped return r + gamma * V(s')."""
        rewards = K.placeholder(shape=[None],dtype='float32', name='vrewards')
        v_preds_next = K.placeholder(shape=[None],dtype='float32', name='vv_preds_next')
        ##### with tf.variable_scope('loss/vf'):
        ##### loss_vf = tf.squared_difference(self.rewards + self.gamma * self.v_preds_next, self.v_preds)
        ##### loss_vf = tf.reduce_mean(loss_vf)
        loss_vf_difference = rewards + self.gamma * v_preds_next - self.v_preds.output
        loss_vf_squared = K.square(loss_vf_difference)
        loss_vf = K.mean(loss_vf_squared)
        optimizer = Adam(lr=self.learning_rate)
        updates = optimizer.get_updates(self.v_preds.trainable_weights, [], loss_vf)
        train = K.function([self.v_preds.input, rewards, v_preds_next], [loss_vf], updates=updates)
        return train
    def assign_policy_parameters(self):
        """Copy the current network weights into the "old policy" copies."""
        self.act_probs_old.set_weights(self.act_probs.get_weights())
        self.spatial_probs_old.set_weights(self.spatial_probs.get_weights())
        self.v_preds_old.set_weights(self.v_preds.get_weights())
    def train(self, obs, spatial, actions, rewards, v_preds_next, gaes):
        """Run one PPO policy update followed by one value update on a batch."""
        state = K.reshape(obs, [-1, 16, 16, 2])
        # NOTE(review): this freezes v_preds (the *current* value net) during
        # the policy step -- confirm v_preds rather than v_preds_old is intended.
        self.act_probs_old.trainable = self.spatial_probs_old.trainable = self.v_preds.trainable = False
        self.act_spatial_updater([state, state, state, state, state, spatial, actions, rewards, v_preds_next, gaes])
        self.act_probs_old.trainable = self.spatial_probs_old.trainable = self.v_preds.trainable = True
        self.value_updater([state, rewards, v_preds_next])
| [
"jangikim@JaeYoons-MacBook-Pro.local"
] | jangikim@JaeYoons-MacBook-Pro.local |
b316ac46f5b5539ea3578aae0e065e9d9d12f5d0 | c7145148ad0d68b2ae6a94c5295a5e2e8189388b | /solutions/1-introduction/starred/11799.py | 8ab72169f18bb00acb4bd46cd027972fb3159a84 | [] | no_license | Ochirgarid/uhunt | 63a94d6ff3474110e596ccb9153e52f96f6a9c09 | 45d9de7c8bb8fe73958fe132e483ae6d52bcd0f7 | refs/heads/master | 2023-02-06T01:07:40.379692 | 2020-12-31T14:52:34 | 2020-12-31T14:52:34 | 324,241,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | if __name__ == "__main__":
t = int(input())
for c in range(t):
h = list(map(int, input().split()[1:]))
print("Case {}: {}".format(c+1, max(h))) | [
"ochir.garid@gmail.com"
] | ochir.garid@gmail.com |
1d6c708e713687a606bcec30490c9970a32b2031 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/94/usersdata/203/55258/submittedfiles/mediaLista.py | 9f8eabcc98947ef4aefb6758c5a6a0a6eab90482 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | # -*- coding: utf-8 -*-
n=int(input('tamanho da lista: '))
l=[]
soma=0
for i in range (1,n+1,1):
l.append(input('elemento da lista: '))
for i in range (0,n-1,1):
soma=soma+l[i]
media=soma/n
print ('%.2f' %l[0])
print ('%.2f' %l[n-1])
print ('%.2f' %media)
print ('%.2f' %l) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ce9aee9d33bc3e4d2e66296e526a530465aed07b | 1b4162fe830f2612d701efaddc62b591fec9c8c6 | /Single_Number.py | 515a51fc0b62e03b7d2b22e9f5df2ed92ba80550 | [] | no_license | Tammon23/30-Day-LeetCoding-Challenge-April-2020 | 018e2bd5f8afeafa561c24dd8e9b134d0c82892a | a34ebc1bee64b9f56aa35f1898a86210816e05dd | refs/heads/master | 2021-05-25T19:47:57.803140 | 2020-04-14T17:16:44 | 2020-04-14T17:16:44 | 253,896,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | class Solution:
def singleNumber(self, nums: List[int]) -> int:
x = 0
for num in nums:
x ^= num
return x
| [
"noreply@github.com"
] | noreply@github.com |
93fdc72b73f69d9440a0a0c7fba0adaaf73f0d90 | e92ef35f88c743597cfabe1a9e584ed659695c4a | /fr_util.py | b07ebf42256fe56194849637e319e54d98b09dff | [] | no_license | yshenkai/face_decetion_rec | 2c1f3d8e06b164d59fdccbcc9eb5e0f438e50f00 | 0cd66665e781e05625c21588e17d60d13202e43b | refs/heads/master | 2020-08-08T11:27:35.655213 | 2019-10-09T04:36:29 | 2019-10-09T04:36:29 | 213,821,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,203 | py | import tensorflow as tf
import numpy as np
import os
from numpy import genfromtxt
from keras.models import Model
from keras.layers import Input,Conv2D,Dense,MaxPooling2D,AveragePooling2D,BatchNormalization,Activation,ZeroPadding2D,concatenate,Flatten,Lambda
import h5py
import matplotlib.pyplot as plt
import cv2
from keras import backend as K
# Default dtype string (name kept as-is; note the "Flaot" typo is load-bearing
# only if referenced elsewhere in the project).
_Flaot="float32"
# Names of every weighted layer in the FaceNet model, in CSV-dump order.
WEIGHTS = [
  'conv1', 'bn1', 'conv2', 'bn2', 'conv3', 'bn3',
  'inception_3a_1x1_conv', 'inception_3a_1x1_bn',
  'inception_3a_pool_conv', 'inception_3a_pool_bn',
  'inception_3a_5x5_conv1', 'inception_3a_5x5_conv2', 'inception_3a_5x5_bn1', 'inception_3a_5x5_bn2',
  'inception_3a_3x3_conv1', 'inception_3a_3x3_conv2', 'inception_3a_3x3_bn1', 'inception_3a_3x3_bn2',
  'inception_3b_3x3_conv1', 'inception_3b_3x3_conv2', 'inception_3b_3x3_bn1', 'inception_3b_3x3_bn2',
  'inception_3b_5x5_conv1', 'inception_3b_5x5_conv2', 'inception_3b_5x5_bn1', 'inception_3b_5x5_bn2',
  'inception_3b_pool_conv', 'inception_3b_pool_bn',
  'inception_3b_1x1_conv', 'inception_3b_1x1_bn',
  'inception_3c_3x3_conv1', 'inception_3c_3x3_conv2', 'inception_3c_3x3_bn1', 'inception_3c_3x3_bn2',
  'inception_3c_5x5_conv1', 'inception_3c_5x5_conv2', 'inception_3c_5x5_bn1', 'inception_3c_5x5_bn2',
  'inception_4a_3x3_conv1', 'inception_4a_3x3_conv2', 'inception_4a_3x3_bn1', 'inception_4a_3x3_bn2',
  'inception_4a_5x5_conv1', 'inception_4a_5x5_conv2', 'inception_4a_5x5_bn1', 'inception_4a_5x5_bn2',
  'inception_4a_pool_conv', 'inception_4a_pool_bn',
  'inception_4a_1x1_conv', 'inception_4a_1x1_bn',
  'inception_4e_3x3_conv1', 'inception_4e_3x3_conv2', 'inception_4e_3x3_bn1', 'inception_4e_3x3_bn2',
  'inception_4e_5x5_conv1', 'inception_4e_5x5_conv2', 'inception_4e_5x5_bn1', 'inception_4e_5x5_bn2',
  'inception_5a_3x3_conv1', 'inception_5a_3x3_conv2', 'inception_5a_3x3_bn1', 'inception_5a_3x3_bn2',
  'inception_5a_pool_conv', 'inception_5a_pool_bn',
  'inception_5a_1x1_conv', 'inception_5a_1x1_bn',
  'inception_5b_3x3_conv1', 'inception_5b_3x3_conv2', 'inception_5b_3x3_bn1', 'inception_5b_3x3_bn2',
  'inception_5b_pool_conv', 'inception_5b_pool_bn',
  'inception_5b_1x1_conv', 'inception_5b_1x1_bn',
  'dense_layer'
]
# Shape of each conv kernel as stored in the CSV dumps:
# (out_channels, in_channels, kernel_h, kernel_w); transposed to Keras order
# when loaded (see load_weight).
conv_shape = {
  'conv1': [64, 3, 7, 7],
  'conv2': [64, 64, 1, 1],
  'conv3': [192, 64, 3, 3],
  'inception_3a_1x1_conv': [64, 192, 1, 1],
  'inception_3a_pool_conv': [32, 192, 1, 1],
  'inception_3a_5x5_conv1': [16, 192, 1, 1],
  'inception_3a_5x5_conv2': [32, 16, 5, 5],
  'inception_3a_3x3_conv1': [96, 192, 1, 1],
  'inception_3a_3x3_conv2': [128, 96, 3, 3],
  'inception_3b_3x3_conv1': [96, 256, 1, 1],
  'inception_3b_3x3_conv2': [128, 96, 3, 3],
  'inception_3b_5x5_conv1': [32, 256, 1, 1],
  'inception_3b_5x5_conv2': [64, 32, 5, 5],
  'inception_3b_pool_conv': [64, 256, 1, 1],
  'inception_3b_1x1_conv': [64, 256, 1, 1],
  'inception_3c_3x3_conv1': [128, 320, 1, 1],
  'inception_3c_3x3_conv2': [256, 128, 3, 3],
  'inception_3c_5x5_conv1': [32, 320, 1, 1],
  'inception_3c_5x5_conv2': [64, 32, 5, 5],
  'inception_4a_3x3_conv1': [96, 640, 1, 1],
  'inception_4a_3x3_conv2': [192, 96, 3, 3],
  'inception_4a_5x5_conv1': [32, 640, 1, 1,],
  'inception_4a_5x5_conv2': [64, 32, 5, 5],
  'inception_4a_pool_conv': [128, 640, 1, 1],
  'inception_4a_1x1_conv': [256, 640, 1, 1],
  'inception_4e_3x3_conv1': [160, 640, 1, 1],
  'inception_4e_3x3_conv2': [256, 160, 3, 3],
  'inception_4e_5x5_conv1': [64, 640, 1, 1],
  'inception_4e_5x5_conv2': [128, 64, 5, 5],
  'inception_5a_3x3_conv1': [96, 1024, 1, 1],
  'inception_5a_3x3_conv2': [384, 96, 3, 3],
  'inception_5a_pool_conv': [96, 1024, 1, 1],
  'inception_5a_1x1_conv': [256, 1024, 1, 1],
  'inception_5b_3x3_conv1': [96, 736, 1, 1],
  'inception_5b_3x3_conv2': [384, 96, 3, 3],
  'inception_5b_pool_conv': [96, 736, 1, 1],
  'inception_5b_1x1_conv': [256, 736, 1, 1],
}
def load_weight():
    """Load the OpenFace CSV weight dumps from ./weights and return a dict
    mapping layer name -> list of weight arrays in Keras set_weights order."""
    weight_path="./weights"
    # Ignore hidden files such as .DS_Store.
    filename=filter(lambda f:not f.startswith("."),os.listdir(weight_path))
    paths={}
    weight_dict={}
    for name in filename:
        paths[name.replace(".csv","")]=weight_path+"/"+name
    #print(paths)
    for name in WEIGHTS:
        if "conv" in name:
            conv_w=genfromtxt(paths[name+"_w"],delimiter=",",dtype=None)
            conv_b=genfromtxt(paths[name+"_b"],delimiter=",",dtype=None)
            # CSV stores (out, in, h, w); transpose to Keras (h, w, in, out).
            conv_w=np.reshape(conv_w,conv_shape[name])
            conv_w=np.transpose(conv_w,[2,3,1,0])
            weight_dict[name]=[conv_w,conv_b]
        elif "bn" in name:
            # BatchNorm weights: gamma, beta, moving mean, moving variance.
            bn_w=genfromtxt(paths[name+"_w"],delimiter=",",dtype=None)
            bn_b=genfromtxt(paths[name+"_b"],delimiter=",",dtype=None)
            bn_m=genfromtxt(paths[name+"_m"],delimiter=",",dtype=None)
            bn_v=genfromtxt(paths[name+"_v"],delimiter=",",dtype=None)
            weight_dict[name]=[bn_w,bn_b,bn_m,bn_v]
        elif "dense" in name:
            # Final embedding layer: stored (128, 736), transposed for Keras.
            dense_w=genfromtxt(weight_path+"/dense_w.csv",delimiter=",",dtype=None)
            dense_b=genfromtxt(weight_path+"/dense_b.csv",delimiter=",",dtype=None)
            dense_w=np.reshape(dense_w,(128,736))
            dense_w=np.transpose(dense_w,[1,0])
            weight_dict[name]=[dense_w,dense_b]
    return weight_dict
def load_weight_to_FaceNet(FRModel):
    """Copy the CSV-loaded pre-trained weights into the matching layers of FRModel."""
    weight_dict = load_weight()
    for layer_name in WEIGHTS:
        layer = FRModel.get_layer(layer_name)
        if layer is not None:
            layer.set_weights(weight_dict[layer_name])
def load_dataset():
    """Load the happy-face HDF5 dataset and return
    (train_x, train_y, test_x, test_y, classes); labels are reshaped to (1, m)."""
    train_dataset = h5py.File('datasets/train_happy.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
    test_dataset = h5py.File('datasets/test_happy.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
    classes = np.array(test_dataset["list_classes"][:]) # the list of classes
    # Reshape labels to row vectors of shape (1, number of examples).
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def image_to_encoding(image_path,model):#
    """Read an image file, convert BGR->RGB, scale to [0, 1], and return the
    model's embedding with a batch dimension of 1."""
    img1=cv2.imread(image_path,1)
    # cv2 loads BGR; reorder channels to RGB.
    img1=img1[:,:,(2,1,0)]
    img=np.around(img1/255.,decimals=12)
    x_train=np.array(img)
    x_train=np.expand_dims(x_train,0)
    #print(x_train.shape)
    encoding=model.predict(x_train)
    return encoding
#we will test the image_to_encoding
#image_to_encoding("images/camera_2.jpg")
def image_to_encoding_image(image, model):
    """Scale an in-memory RGB image array to [0, 1], add a batch axis,
    and return the model's prediction for it."""
    scaled = np.around(image / 255., decimals=12)
    batch = np.expand_dims(np.array(scaled), axis=0)
    return model.predict(batch)
def inception_block_1a(x):
    """Inception 3a block: 3x3, 5x5, max-pool, and 1x1 branches concatenated
    along the channel axis."""
    # 3x3 branch: 1x1 reduce to 96, then 3x3 conv to 128.
    x_3x3=Conv2D(96,(1,1),name="inception_3a_3x3_conv1")(x)
    x_3x3=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3a_3x3_bn1")(x_3x3)
    x_3x3=Activation("relu")(x_3x3)
    x_3x3=ZeroPadding2D(padding=(1,1))(x_3x3)
    x_3x3=Conv2D(128,(3,3),name="inception_3a_3x3_conv2")(x_3x3)
    x_3x3=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3a_3x3_bn2")(x_3x3)
    x_3x3=Activation("relu")(x_3x3)
    # 5x5 branch: 1x1 reduce to 16, then 5x5 conv to 32.
    x_5x5=Conv2D(16,(1,1),name="inception_3a_5x5_conv1")(x)
    x_5x5=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3a_5x5_bn1")(x_5x5)
    x_5x5=Activation("relu")(x_5x5)
    x_5x5=ZeroPadding2D(padding=(2,2))(x_5x5)
    x_5x5=Conv2D(32,(5,5),name="inception_3a_5x5_conv2")(x_5x5)
    x_5x5=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3a_5x5_bn2")(x_5x5)
    x_5x5=Activation("relu")(x_5x5)
    # Pool branch: max-pool, 1x1 conv to 32, asymmetric pad to match spatial size.
    x_pool=MaxPooling2D(pool_size=3,strides=2)(x)
    x_pool=Conv2D(32,(1,1),name="inception_3a_pool_conv")(x_pool)
    x_pool=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3a_pool_bn")(x_pool)
    x_pool=Activation("relu")(x_pool)
    x_pool=ZeroPadding2D(padding=((3,4),(3,4)))(x_pool)
    # 1x1 branch.
    x_1x1=Conv2D(64,(1,1),name="inception_3a_1x1_conv")(x)
    x_1x1=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3a_1x1_bn")(x_1x1)
    x_1x1=Activation("relu")(x_1x1)
    inception=concatenate([x_3x3,x_5x5,x_pool,x_1x1],axis=3)
    return inception
def inception_block_1b(x):
    """Inception 3b block: 3x3, 5x5, average-pool, and 1x1 branches
    concatenated along the channel axis."""
    # 3x3 branch: 1x1 reduce to 96, then 3x3 conv to 128.
    x_3=Conv2D(96,(1,1),name="inception_3b_3x3_conv1")(x)
    x_3=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3b_3x3_bn1")(x_3)
    x_3=Activation("relu")(x_3)
    x_3=ZeroPadding2D(padding=(1,1))(x_3)
    x_3=Conv2D(128,(3,3),name="inception_3b_3x3_conv2")(x_3)
    x_3=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3b_3x3_bn2")(x_3)
    x_3=Activation("relu")(x_3)
    # 5x5 branch: 1x1 reduce to 32, then 5x5 conv to 64.
    x5=Conv2D(32,(1,1),name="inception_3b_5x5_conv1")(x)
    x5=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3b_5x5_bn1")(x5)
    x5=Activation("relu")(x5)
    x5=ZeroPadding2D(padding=(2,2))(x5)
    x5=Conv2D(64,(5,5),name="inception_3b_5x5_conv2")(x5)
    x5=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3b_5x5_bn2")(x5)
    x5=Activation("relu")(x5)
    # Pool branch: average-pool, 1x1 conv to 64, pad to match spatial size.
    xpool=AveragePooling2D(pool_size=(3,3),strides=(3,3))(x)
    xpool=Conv2D(64,(1,1),name="inception_3b_pool_conv")(xpool)
    xpool=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3b_pool_bn")(xpool)
    xpool=Activation("relu")(xpool)
    xpool=ZeroPadding2D(padding=(4,4))(xpool)
    # 1x1 branch.
    x1=Conv2D(64,(1,1),name="inception_3b_1x1_conv")(x)
    x1=BatchNormalization(axis=3,epsilon=0.00001,name="inception_3b_1x1_bn")(x1)
    x1=Activation("relu")(x1)
    inception=concatenate([x_3,x5,xpool,x1],axis=3)
    return inception
def conv2d_and_bn(x, layer=None, cv1_filter=(1,1), cv1_strides=(1,1), cv1_out=None, cv2_filter=(3,3), cv2_strides=(1,1), cv2_out=None, padding=None):
    """Apply conv + batch-norm + relu, optionally followed by zero padding and
    a second conv + batch-norm + relu.

    Layer names are derived from `layer`: the first conv/bn get suffix "1"
    only when a second conv (cv2_out) is present.
    """
    suffix = "1" if cv2_out is not None else ""
    tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides, name=layer + "_conv" + suffix)(x)
    tensor = BatchNormalization(axis=3, epsilon=0.00001, name=layer + "_bn" + suffix)(tensor)
    tensor = Activation("relu")(tensor)
    if padding is None:
        return tensor
    tensor = ZeroPadding2D(padding=padding)(tensor)
    if cv2_out is None:
        return tensor
    tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides, name=layer + "_conv2")(tensor)
    tensor = BatchNormalization(axis=3, epsilon=0.00001, name=layer + "_bn2")(tensor)
    tensor = Activation("relu")(tensor)
    return tensor
def inception_block_1c(x):
    """Inception 3c block: strided 3x3 and 5x5 branches plus a max-pool branch."""
    branch_3x3 = conv2d_and_bn(x, layer="inception_3c_3x3", cv1_out=128, cv1_filter=(1, 1),
                               cv2_out=256, cv2_filter=(3, 3), cv2_strides=(2, 2), padding=(1, 1))
    branch_5x5 = conv2d_and_bn(x, layer="inception_3c_5x5", cv1_out=32, cv1_filter=(1, 1),
                               cv2_out=64, cv2_filter=(5, 5), cv2_strides=(2, 2), padding=(2, 2))
    branch_pool = MaxPooling2D(pool_size=3, strides=2)(x)
    branch_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(branch_pool)
    return concatenate([branch_3x3, branch_5x5, branch_pool], axis=3)
def inception_block_2a(x):
    """Inception 4a block: 3x3, 5x5, average-pool, and 1x1 branches."""
    branch_3x3 = conv2d_and_bn(x, layer="inception_4a_3x3", cv1_filter=(1, 1), cv1_out=96,
                               cv2_out=192, cv2_filter=(3, 3), cv2_strides=(1, 1), padding=(1, 1))
    branch_5x5 = conv2d_and_bn(x, layer="inception_4a_5x5", cv1_out=32, cv1_filter=(1, 1),
                               cv2_out=64, cv2_filter=(5, 5), cv2_strides=(1, 1), padding=(2, 2))
    branch_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(x)
    branch_pool = conv2d_and_bn(branch_pool, layer="inception_4a_pool", cv1_out=128,
                                cv1_filter=(1, 1), padding=(2, 2))
    branch_1x1 = conv2d_and_bn(x, layer="inception_4a_1x1", cv1_out=256, cv1_filter=(1, 1))
    return concatenate([branch_3x3, branch_5x5, branch_pool, branch_1x1], axis=3)
def inception_block_2b(x):
    """Inception 4e block: strided 3x3 and 5x5 branches plus a max-pool branch."""
    branch_3x3 = conv2d_and_bn(x, layer="inception_4e_3x3", cv1_out=160, cv1_filter=(1, 1),
                               cv2_out=256, cv2_filter=(3, 3), cv2_strides=(2, 2), padding=(1, 1))
    branch_5x5 = conv2d_and_bn(x, layer="inception_4e_5x5", cv1_filter=(1, 1), cv1_out=64,
                               cv2_out=128, cv2_filter=(5, 5), cv2_strides=(2, 2), padding=(2, 2))
    branch_pool = MaxPooling2D(pool_size=3, strides=2)(x)
    branch_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(branch_pool)
    return concatenate([branch_3x3, branch_5x5, branch_pool], axis=3)
def inception_block_3a(x):
    """Inception 5a block: 3x3, average-pool, and 1x1 branches."""
    branch_3x3 = conv2d_and_bn(x, layer="inception_5a_3x3", cv1_out=96, cv1_filter=(1, 1),
                               cv2_out=384, cv2_filter=(3, 3), cv2_strides=(1, 1), padding=(1, 1))
    branch_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(x)
    branch_pool = conv2d_and_bn(branch_pool, layer="inception_5a_pool", cv1_out=96,
                                cv1_filter=(1, 1), padding=(1, 1))
    branch_1x1 = conv2d_and_bn(x, layer="inception_5a_1x1", cv1_out=256, cv1_filter=(1, 1))
    return concatenate([branch_3x3, branch_pool, branch_1x1], axis=3)
def inception_block_3b(x):
    """Inception 5b block: 3x3, max-pool, and 1x1 branches."""
    branch_3x3 = conv2d_and_bn(x, layer="inception_5b_3x3", cv1_out=96, cv1_filter=(1, 1),
                               cv2_out=384, cv2_filter=(3, 3), cv2_strides=(1, 1), padding=(1, 1))
    branch_pool = MaxPooling2D(pool_size=3, strides=2)(x)
    branch_pool = conv2d_and_bn(branch_pool, layer="inception_5b_pool", cv1_out=96,
                                cv1_filter=(1, 1))
    branch_pool = ZeroPadding2D(padding=(1, 1))(branch_pool)
    branch_1x1 = conv2d_and_bn(x, layer="inception_5b_1x1", cv1_out=256, cv1_filter=(1, 1))
    return concatenate([branch_3x3, branch_pool, branch_1x1], axis=3)
def get_faceModel(input_shape):
    """Build the FaceNet-style Inception encoder that maps an image of shape
    input_shape to an L2-normalised 128-dimensional embedding."""
    img_input = Input(shape=input_shape)
    # Stem: 7x7/2 conv, max-pool, 1x1 conv, 3x3 conv, max-pool.
    net = ZeroPadding2D((3, 3))(img_input)
    net = Conv2D(64, (7, 7), strides=(2, 2), name="conv1")(net)
    net = BatchNormalization(axis=3, name="bn1")(net)
    net = Activation("relu")(net)
    net = ZeroPadding2D((1, 1))(net)
    net = MaxPooling2D(pool_size=(3, 3), strides=2)(net)
    net = Conv2D(64, (1, 1), strides=(1, 1), name="conv2")(net)
    net = BatchNormalization(axis=3, epsilon=0.00001, name="bn2")(net)
    net = ZeroPadding2D((1, 1))(net)
    net = Conv2D(192, (3, 3), strides=(1, 1), name="conv3")(net)
    net = BatchNormalization(axis=3, epsilon=0.00001, name="bn3")(net)
    net = Activation("relu")(net)
    net = ZeroPadding2D((1, 1))(net)
    net = MaxPooling2D(pool_size=3, strides=2)(net)
    # Inception 1: a/b/c
    net = inception_block_1a(net)
    net = inception_block_1b(net)
    net = inception_block_1c(net)
    # Inception 2: a/b
    net = inception_block_2a(net)
    net = inception_block_2b(net)
    # Inception 3: a/b
    net = inception_block_3a(net)
    net = inception_block_3b(net)
    # Head: pool, flatten, project to 128-d, L2-normalise onto the unit sphere.
    net = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(net)
    net = Flatten()(net)
    net = Dense(128, name="dense_layer")(net)
    net = Lambda(lambda t: K.l2_normalize(t, axis=1))(net)
    model = Model(inputs=img_input, outputs=net, name="faceModel")
    return model
# Build the encoder for 96x96 RGB inputs.
frModel=get_faceModel(input_shape=(96,96,3))
#print("total param",frModel.count_params())
#print(frModel.summary())
#we will define the model's cost function to compile this model
#so we start do this job (triplet loss)
def compute_face_model_cost(y_true, y_pred, alpha=0.2):
    """Triplet loss: max(||a - p||^2 - ||a - n||^2 + alpha, 0) summed over
    the batch, where y_pred is (anchor, positive, negative) encodings."""
    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
    pos_dist = tf.reduce_sum(tf.square(anchor - positive))
    neg_dist = tf.reduce_sum(tf.square(anchor - negative))
    margin_term = pos_dist - neg_dist + alpha
    return tf.reduce_sum(tf.maximum(margin_term, 0.))
#frModel.compile(loss=compute_face_model_cost,optimizer="adam",metrics=["accuracy"])
# Load the pre-trained FaceNet weights into the encoder.
load_weight_to_FaceNet(frModel)
# Reference database: person name -> 128-d embedding of one enrollment photo.
database={}
database["danielle"] = image_to_encoding("images/danielle.png", frModel)
database["younes"] = image_to_encoding("images/younes.jpg", frModel)
database["tian"] = image_to_encoding("images/tian.jpg", frModel)
database["andrew"] = image_to_encoding("images/andrew.jpg", frModel)
database["kian"] = image_to_encoding("images/kian.jpg", frModel)
database["dan"] = image_to_encoding("images/dan.jpg", frModel)
database["sebastiano"] = image_to_encoding("images/sebastiano.jpg", frModel)
database["bertrand"] = image_to_encoding("images/bertrand.jpg", frModel)
database["kevin"] = image_to_encoding("images/kevin.jpg", frModel)
database["felix"] = image_to_encoding("images/felix.jpg", frModel)
database["benoit"] = image_to_encoding("images/benoit.jpg", frModel)
database["arnaud"] = image_to_encoding("images/arnaud.jpg", frModel)
# Below we define a verification function (1:1 identity check).
def verify(img_path, identity, database, model):
    """1:1 verification: check whether the face in `img_path` matches the
    stored embedding for `identity` (threshold 0.7 on L2 distance)."""
    probe_encoding = image_to_encoding(img_path, model)
    # L2 distance between the probe and the enrolled reference embedding.
    cost = np.linalg.norm(probe_encoding - database[identity])
    print(cost)
    if cost < 0.7:
        print("it is " + str(identity) + "!!!!!")
    else:
        print("it is not " + str(identity) + "!!!!")
# Below we define a recognition function (1:N identity search).
def face_recognition(img_path, database, model):
    """1:N recognition: return the closest enrolled identity for the face
    in `img_path`, or "unknow" if the best L2 distance exceeds 0.7.

    Fixes vs. the original: uses the embedding already yielded by
    database.items() instead of re-indexing `database[name]`, and
    initialises `identity` so it can never be referenced while unbound.
    """
    out_encoding = image_to_encoding(img_path, model)
    max_cost = 100  # sentinel: larger than any plausible embedding distance
    identity = None
    for name, enc in database.items():
        cur_cost = np.linalg.norm(out_encoding - enc)
        if cur_cost < max_cost:
            max_cost = cur_cost
            identity = name
    if max_cost > 0.7:
        return "unknow"  # NOTE: typo kept for backward compatibility with callers
    else:
        # Confidence-like score: 1 - distance.
        return identity + " " + str(1 - max_cost)
def face_recognition_image(image, model=frModel, database=database):
    """Same 1:N recognition as face_recognition(), but takes an
    already-loaded image object instead of a file path.

    Fixes vs. the original: uses the embedding already yielded by
    database.items() instead of re-indexing `database[name]`, and
    initialises `identity` so it can never be referenced while unbound.
    """
    out_encoding = image_to_encoding_image(image, model)
    max_cost = 100  # sentinel: larger than any plausible embedding distance
    identity = None
    for name, enc in database.items():
        cur_cost = np.linalg.norm(out_encoding - enc)
        if cur_cost < max_cost:
            max_cost = cur_cost
            identity = name
    if max_cost > 0.7:
        return "unknow"  # NOTE: typo kept for backward compatibility with callers
    else:
        # Confidence-like score: 1 - distance.
        return identity + " " + str(1 - max_cost)
##let me test this verify function
#verify("images/camera_0.jpg", "kian", database,frModel)
# Persist the full model (architecture + weights) for later reuse.
frModel.save("face_model.h5")
#face_recognition("images/camera_3.jpg",database,frModel)
| [
"mlshenkai@163.com"
] | mlshenkai@163.com |
81b8a4f9af2aa380377c9d6d5cced5d7971abdd4 | ead4170bcd27518252592faad66be2e1febf6133 | /prg19.py | aef99b960f5161874142cd1fc3f7c2ce95c0eaea | [] | no_license | peazybabz/Python-Tutorials-for-beginners | 657b562dc3e8ef4e5026ed15adc3ae11b0d4f3e9 | 5df118b2ff99f8b678ce5ebce695896387a40fe4 | refs/heads/master | 2020-04-01T11:12:16.217665 | 2018-10-17T18:24:34 | 2018-10-17T18:24:34 | 153,151,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | #19. Python Program to Check Armstrong Number
def is_armstrong(num):
    """Return True if `num` is an Armstrong (narcissistic) number.

    A number is Armstrong when it equals the sum of its digits, each
    raised to the power of the digit count. This generalizes the
    original 3-digit (cube) check, which misclassified numbers such as
    1634 or 9474. Negative numbers are never Armstrong numbers.
    """
    if num < 0:
        return False
    digits = str(num)
    order = len(digits)
    return num == sum(int(d) ** order for d in digits)


if __name__ == "__main__":
    # take input from the user
    num = int(input("Enter a number: "))

    # display the result
    if is_armstrong(num):
        print(num, "is an Armstrong number")
    else:
        print(num, "is not an Armstrong number")
"npeace81@yahoo.com"
] | npeace81@yahoo.com |
97067240f3603e9eccc91bbf918e2013e744e8ff | 077edc89068e3024904db9973214ab0d7dd14eac | /airflow/plugins/operators/load_fact.py | 1abea7288a489cf3dc9bbb5455102e9b94c07060 | [] | no_license | eflemist/airflowdatapipelines | 2b2aedd996f2ceef31b1d52a04171a37528730f9 | cc675028341f18fa94959ea899c251d5b70f8aa6 | refs/heads/master | 2023-03-27T12:48:18.587203 | 2021-03-25T19:36:55 | 2021-03-25T19:36:55 | 351,539,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadFactOperator(BaseOperator):
    """Airflow operator that appends rows to a Redshift fact table by
    running INSERT INTO <table> followed by a caller-supplied SELECT."""

    # Color of this operator's node in the Airflow UI graph view.
    ui_color = '#F98866'

    # SQL template: first {} -> destination table, second {} -> SELECT statement.
    insert_sql = """
        INSERT INTO {}
        {}
    """

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 table="",
                 sql_insert="",
                 *args, **kwargs):
        """
        :param redshift_conn_id: Airflow connection id for the Redshift cluster
        :param table: destination fact table name
        :param sql_insert: SELECT statement producing the rows to insert
        """
        super(LoadFactOperator, self).__init__(*args, **kwargs)
        # Map params here
        # Example:
        # self.conn_id = conn_id
        self.redshift_conn_id = redshift_conn_id
        self.table = table
        self.sql_insert = sql_insert

    def execute(self, context):
        """Render the INSERT statement and run it against Redshift."""
        self.log.info('LoadFactOperator started...')
        redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        redshift_hook.run(LoadFactOperator.insert_sql.format(self.table,self.sql_insert))
| [
"eflemist@gmail.com"
] | eflemist@gmail.com |
53e8ec09614013b9b3ef59d207b098d21fc40046 | cacae0bef45a3716c64463709cb29804a47a7a36 | /crypto/backtest/multicoinbacktester/HistDataManager.py | 144566d8416be6948e47f3d8e6255be5adbd732d | [] | no_license | prometuse/myquant | d2c89612b41294bb128210469330f083a5c098d0 | 2ff0fa845bc7aa719546c3826d5a01cbd054a203 | refs/heads/master | 2023-07-08T10:39:51.760310 | 2021-08-12T08:12:10 | 2021-08-12T08:12:10 | 383,455,610 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | """
Snapshots the historical net value, annualised return and drawdown information.
There is one instance per back-test period.
"""
class HistDataManager(object):
    """Records per-period snapshots of the strategy's net value alongside a
    BTC buy-and-hold benchmark curve."""

    def __init__(self, init_cash, start_time):
        self.net_array = [init_cash]    # absolute net value per period
        self.end_times = [start_time]   # period end labels
        self.btc_pct_array = [1.0]      # cumulative BTC buy-and-hold curve
        self.btc_price_array = []       # raw BTC prices seen so far
        self.pct_array = [1.0]          # cumulative strategy return curve
        self.cnt = 0                    # number of updates recorded

    def update(self, net, datetime, btc_price):
        """Append one period snapshot for both the strategy and BTC."""
        self.cnt += 1

        # Strategy curve: compound the period-over-period growth.
        prev_net = self.net_array[-1]
        growth = (net - prev_net) / prev_net
        self.pct_array.append(self.pct_array[-1] * (1.0 + growth))
        self.net_array.append(net)
        self.end_times.append(datetime[1:10])  # slice the date out of the label

        # BTC benchmark: seed the price series on the very first update so
        # that its first growth factor is exactly 1.0.
        if not self.btc_price_array:
            self.btc_price_array.append(btc_price)
        prev_price = self.btc_price_array[-1]
        btc_growth = (btc_price - prev_price) / prev_price
        self.btc_pct_array.append(self.btc_pct_array[-1] * (1.0 + btc_growth))
        self.btc_price_array.append(btc_price)
"kebi@xiaohongshu.com"
] | kebi@xiaohongshu.com |
7368f03dd1696be11ed81d20842c9e5a92573891 | 28605e3a678d7b18284842998652df5581f46f90 | /rooms/FluchtVorDemCode.py | 97c0e93a952bb04003c421c6abe0d58b04111ab2 | [] | no_license | TimPaet/EscapeRoom | 4d111c7af7ec4adfc0480415aed523484f592bc7 | 1d324a3d00996df179d1c5c0dc72d1023e9b6f9f | refs/heads/main | 2022-12-30T21:30:54.635582 | 2020-10-21T07:23:55 | 2020-10-21T07:23:55 | 305,713,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,406 | py | from EscapeRoom import EscapeRoom
class FluchtVorDemCode(EscapeRoom):
    """German-language escape room ("Escape From The Code").

    Each create_levelN() bundles the player-facing task texts, the hints and
    the solution-checking callback into the level dict consumed by the
    EscapeRoom base class.
    """

    def __init__(self):
        super().__init__()
        self.set_metadata("Tim", __name__)
        # Register the six levels in play order.
        self.add_level(self.create_level1())
        self.add_level(self.create_level2())
        self.add_level(self.create_level3())
        self.add_level(self.create_level4())
        self.add_level(self.create_level5())
        self.add_level(self.create_level6())

    ### LEVELS ###

    def create_level1(self):
        """Level 1: find the largest and smallest of the shown numbers."""
        numbers = "4 5 29 54 4 0 -214 542 -64 1 -3 6 -6"
        task_messages = [
            "Du triffst auf eine Tür, die mit einem Code verschlossen ist.",
            "Auf dem Eingabefeld für den Code steht: High and Low.",
            "Über der Tür entdeckst du mehrere Zahlen "
            "und nach mehrmaligen Versuchen fällt dir auf, dass du 2 verschiedene Codes eingeben musst.",
            f"Die Zahlen über der Tür sind folgende: <b> {numbers} </b ",
            "Am besten schreibst du die Codes auf eine Liste, um Sie nicht zu vergessen",
            "Dein Input ist -numbers-"
        ]
        hints = [
            "Was könnte High and Low mit den Zahlen zutun haben?",
            "Ist vielleicht die kleinste Zahl ein Code und die größte Zahl auch?",
            "Gib die Codes als Liste wieder"
        ]
        return {"task_messages": task_messages, "hints": hints, "solution_function": self.high_low, "data": numbers}

    def create_level2(self):
        """Level 2: compute the digit sum of 542 to choose the right corridor."""
        num = 542
        task_messages = [
            "Die Tür hat sich geöffnet und einen Gang offenbart. Du gehst den Gang entlang und nach einiger Zeit "
            "kommst du an eine Gabelung mit 2 weiteren Gängen.",
            "Über jedem Gang steht eine Zahl, jedoch gibt es keinen weiteren Hinweis. Vielleicht kann dir ja eine "
            "Zahl von der Liste helfen, die du noch von vorhin hast.",
            "<b> Über dem linken Gang steht die Zahl 11 </b",
            "<b> Über dem mittleren Gang steht die Zahl 8 </b",
            "<b> Über dem rechten Gang steht die Zahl 2 </b",
            "Du musst dich für eine Zahl entscheiden",
            "Dein Input ist -num-"
        ]
        hints = [
            "Die Zahlen, die auf deiner Liste stehen, sind: -214 und 542",
            "Du musst dich auf die höhere Zahl deiner Liste konzentrieren",
            "Es gibt einen Zusammenhang zwischen der 542 und der Zahl von einem Gang",
            "Du musst die Quersumme von 542 berechnen",
            "Gib diese als Integer wieder"
        ]
        return {"task_messages": task_messages, "hints": hints, "solution_function": self.qs, "data": num}

    def create_level3(self):
        """Level 3: find the characters that appear exactly once."""
        chars = "joouqqnmmi"
        task_messages = [
            "Du hast den richtigen Gang gewählt und findest eine antike Steintafel und ""einen Kalender.",
            " Du kannst erkennen, dass verschiedene Buchstaben in diese Steintafel gemeißelt sind.",
            f"Die Buchstaben auf der Steintafel sind folgende: <b> {chars} </b",
            "Dein Input ist -chars-"
        ]
        hints = [
            "Gucke dir an, wie oft es welchen Buchstaben gibt",
            "Gucke dir die Buchstaben an, die sich nicht wiederholen",
            "Gib diese als String wieder"
        ]
        return {"task_messages": task_messages, "hints": hints, "solution_function": self.fnr, "data": chars}

    def create_level4(self):
        """Level 4: list all primes between 2 and the given upper bound."""
        rnglow = 2
        rnghigh = 30
        task_messages = [
            "Du suchst den Monat Juni und siehst, dass auf der Seite in großer roter Schrift,"
            " -Primzahlen- geschrieben steht.",
            "Der Juni hat insgesamt 30 Tage, jedoch ist der 1. Juni durchgestrichen.",
            f"<b> Es sind also nur die Tage von {rnglow} bis {rnghigh} in dem Kalender</b",
            "Du solltest deine Ergebnisse wohl besser wieder auf eine Liste schreiben",
            "<b> Anmerkung:</b",
            "Du bekommst als Input die Obergrenze, (also 30). Das bedeutet, dass du die Untergrenze (also 2) selber "
            "definieren musst ",
            "Dein Input ist -rnghigh-"
        ]
        hints = [
            "Finde die Primzahlen zwischen 2 und 30",
            "Gib die Primzahlen als Liste wieder"
        ]
        return {"task_messages": task_messages, "hints": hints, "solution_function": self.prim, "data": rnghigh}

    def create_level5(self):
        """Level 5: mask all but the first two digits of the safe code."""
        code = 192329
        task_messages = [
            "Du schaust dich in dem Raum noch ein wenig um und mit einmal entdeckst du einen Tresor, welcher auch "
            "mit einem Code gesichert ist.",
            "Du sollst die letzten 3 Primzahlen als Code eingeben, jedoch sollen die"
            "letzten 4 Ziffern maskiert sein.",
            "<b> Beispiel:</b",
            "24####",
            "Dein Input ist -code-"
        ]
        hints = [
            "Die Primzahlen von vorhin waren: <b> 2, 3, 5, 7, 11, 13, 17, 19, 23, 29</b",
            "Gib den Code als String wieder"
        ]
        return {"task_messages": task_messages, "hints": hints, "solution_function": self.mask, "data": code}

    def create_level6(self):
        """Level 6: reverse the sentence while keeping spaces in place."""
        pw = "Vielen Dank, dass du diesen Escape Room gespielt hast, ich hoffe es hat Spaß gemacht"
        task_messages = [
            "Du bist nun vor der letzten Tür und dir fehlt nur noch ein ein Passwort, um zu entkommen.",
            "An einer Wand steht ein Beispiel, nach welchem Schema du auch gleich vorgehen musst.",
            "<b> Du Hast es gleich geschafft! - !t ffah cs eghcie lgsetsaHuD</b",
            "Nach diesem Schema musst du jetzt den Passwortsatz herausfinden. Dein gegebener Satz ist:",
            f"<b> {pw}</b",
            "Dein Input ist -pw-"
        ]
        hints = [
            "Probiere den Satz rückwärts zu lesen",
            "Achte auf die Leerzeichen",
            "Du musst den Satz umdrehen, aber die Leerzeichen müssen an derselben Stelle bleiben",
            "Gib das Passwort als String wieder"
        ]
        return {"task_messages": task_messages, "hints": hints, "solution_function": self.revsp, "data": pw}

    ### SOLUTIONS ###

    def high_low(self, numbers):
        """Return [largest, smallest] of the space-separated ints in `numbers`."""
        x = []
        v = []
        k = []
        y = numbers.split(" ")
        for i in range(0, len(y)):
            v.append(int(y[i]))
        v.sort()
        # After sorting ascending: last element is the max, first is the min.
        x.append(v[len(v) - 1])
        x.append(v[0])
        for i in range(0, len(x)):
            k.append(x[i])
        result = k
        return result

    def qs(self, num):
        """Return the digit sum ("Quersumme") of `num` as an int."""
        liste = []
        final = []
        num = str(num)
        for i in range(0, len(num)):
            liste.append(num[i])
        for i in range(0, len(num)):
            final.append(int(liste[i]))
        result = sum(final)
        return result

    def fnr(self, chars):
        """Return the characters of `chars` that occur exactly once, in order."""
        testlist = []
        charlist = list(chars)
        final = []
        print(len(charlist))
        for i in range(0, len(charlist)):
            print(testlist)
            # Temporarily remove the i-th char; if it no longer appears in the
            # remaining list it is unique, so collect it. Either way it is
            # re-inserted at its original index afterwards.
            testlist.append(charlist[i])
            charlist.pop(i)
            if testlist[0] not in charlist:
                final.append(testlist[0])
                charlist.insert(i, testlist[0])
                testlist = []
            else:
                charlist.insert(i, testlist[0])
                testlist = []
        result = "".join(final)
        return result

    def prim(self, rnghigh):
        """Return the list of primes in [2, rnghigh) by trial division."""
        rnglow = 2
        result = []
        for num in range(rnglow, rnghigh):
            for i in range(2, num):
                if (num % i) == 0:
                    break
            else:
                # No divisor found: num is prime.
                result.append(num)
        return result

    def mask(self, code):
        """Return `code` as a string with all but the first two chars masked."""
        code = str(code)
        result = code[:2] + "#" * (len(code) - 2)
        return result

    def revsp(self, pw):
        """Reverse the non-space characters of `pw` while keeping each space
        at its original index."""
        spaces = [i for i, letter in enumerate(pw) if letter == " "]
        pw = list(pw)
        # Remove the spaces (right to left so the recorded indices stay valid).
        for i in range(len(spaces) - 1, -1, -1):
            pw.pop(spaces[i])
        srev = pw[::-1]
        # Re-insert the spaces at their original positions.
        for i in range(0, len(spaces)):
            srev.insert(spaces[i], " ")
        final = ""
        for z in srev:
            final = final + z
        return final
| [
"noreply@github.com"
] | noreply@github.com |
943ac7709e5f4f17286ef6b651f71be631533082 | ad8566dace0e4ab4b419b1bb5bc055b095adce72 | /ics/migrations/0092_remove_processtype_description.py | acb8d0bee88038f50cc34248f7adb737cbad17e4 | [] | no_license | mayanb/wafflecone | 6c844c4c908f7c9b8e41d0d42faeefbfa8b9573e | fcd45a3b745232e52af3bdffa3fab25f13c7464d | refs/heads/staging | 2022-12-14T03:34:14.618001 | 2018-09-26T21:35:52 | 2018-09-26T21:35:52 | 74,408,398 | 1 | 0 | null | 2022-12-08T00:46:05 | 2016-11-21T21:40:59 | Python | UTF-8 | Python | false | false | 397 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-10 19:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the obsolete `description`
    # field from the ProcessType model.

    dependencies = [
        ('ics', '0091_auto_20180406_1749'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='processtype',
            name='description',
        ),
    ]
| [
"steven@usepolymer.com"
] | steven@usepolymer.com |
cc45df1680460ba8ad0cf37c8e58af522b26b94e | 87b11aedcce596b47775eabc34e4c3c7d1e4c6a7 | /venv/Scripts/easy_install-3.7-script.py | d80ab360d01bbf3cd015cdaa955a421eef6213eb | [] | no_license | EstebanLauKoo/testPython | fa9dc29878fedaa4cafce086b62276cb5cad542f | d9d54b6ab6980f434904ee4a8606199b49b13bc8 | refs/heads/master | 2020-05-09T15:48:19.930899 | 2019-10-17T03:14:55 | 2019-10-17T03:14:55 | 181,246,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | #!C:\Users\esteb\PycharmProjects\testPython\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalize argv[0] (strip the "-script.py"/".exe" suffixes added by the
    # Windows launcher) so setuptools reports a clean program name, then
    # delegate to the console-script entry point declared by setuptools.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"4801459559Ee"
] | 4801459559Ee |
af3d099f71d75651e8da95d4362fc5e824ea06bf | 1886065d10342822b10063cd908a690fccf03d8b | /appengine/findit/waterfall/analyze_build_failure_pipeline.py | df1014f2a3c07efd6b1e1306d39bff06edb9fa1f | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/chromium-infra_A6Y5 | 26af0dee12f89595ebc6a040210c9f62d8ded763 | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | refs/heads/master | 2023-03-16T15:33:31.015840 | 2017-01-31T19:55:59 | 2017-01-31T20:06:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,205 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common import appengine_util
from common.pipeline_wrapper import BasePipeline
from common.pipeline_wrapper import pipeline
from libs import time_util
from model import analysis_status
from model.wf_analysis import WfAnalysis
from waterfall.detect_first_failure_pipeline import DetectFirstFailurePipeline
from waterfall.extract_deps_info_pipeline import ExtractDEPSInfoPipeline
from waterfall.extract_signal_pipeline import ExtractSignalPipeline
from waterfall.flake.trigger_flake_analyses_pipeline import (
TriggerFlakeAnalysesPipeline)
from waterfall.identify_culprit_pipeline import IdentifyCulpritPipeline
from waterfall.pull_changelog_pipeline import PullChangelogPipeline
from waterfall.start_try_job_on_demand_pipeline import (
StartTryJobOnDemandPipeline)
from waterfall.trigger_swarming_tasks_pipeline import (
TriggerSwarmingTasksPipeline)
class AnalyzeBuildFailurePipeline(BasePipeline):
  """Root pipeline that analyzes a waterfall build failure end-to-end:
  heuristic analysis first, followed by swarming-task and try-job based
  verification of the suspected culprits."""

  def __init__(self, master_name, builder_name, build_number, build_completed,
               force_rerun_try_job):
    super(AnalyzeBuildFailurePipeline, self).__init__(
        master_name, builder_name, build_number, build_completed,
        force_rerun_try_job)
    self.master_name = master_name
    self.builder_name = builder_name
    self.build_number = build_number

  def _LogUnexpectedAborting(self, was_aborted):
    """Marks the WfAnalysis status as error, indicating that it was aborted.

    Args:
      was_aborted (bool): True if the pipeline was aborted, otherwise False.
    """
    if not was_aborted:
      return

    analysis = WfAnalysis.Get(
        self.master_name, self.builder_name, self.build_number)
    # Heuristic analysis could have already completed, while triggering the
    # try job kept failing and lead to the abortion.
    if not analysis.completed:
      analysis.status = analysis_status.ERROR
      analysis.result_status = None
      analysis.put()

  def finalized(self):
    # Pipeline-framework hook invoked when the pipeline finishes or aborts.
    self._LogUnexpectedAborting(self.was_aborted)

  def _ResetAnalysis(self, master_name, builder_name, build_number):
    # Re-initialize the stored analysis entity before a (re-)run: mark it
    # RUNNING, clear any previous result and record the current version/time.
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    analysis.pipeline_status_path = self.pipeline_status_path()
    analysis.status = analysis_status.RUNNING
    analysis.result_status = None
    analysis.start_time = time_util.GetUTCNow()
    analysis.version = appengine_util.GetCurrentVersion()
    analysis.end_time = None
    analysis.put()

  # Arguments number differs from overridden method - pylint: disable=W0221
  def run(self, master_name, builder_name, build_number, build_completed,
          force_rerun_try_job):
    """Chains the sub-pipelines of the analysis."""
    self._ResetAnalysis(master_name, builder_name, build_number)

    # The yield statements below return PipelineFutures, which allow subsequent
    # pipelines to refer to previous output values.
    # https://github.com/GoogleCloudPlatform/appengine-pipelines/wiki/Python

    # Heuristic Approach.
    failure_info = yield DetectFirstFailurePipeline(
        master_name, builder_name, build_number)
    change_logs = yield PullChangelogPipeline(failure_info)
    deps_info = yield ExtractDEPSInfoPipeline(failure_info, change_logs)
    signals = yield ExtractSignalPipeline(failure_info)
    heuristic_result = yield IdentifyCulpritPipeline(
        failure_info, change_logs, deps_info, signals, build_completed)

    # Try job approach.
    with pipeline.InOrder():
      # Swarming rerun.
      # Triggers swarming tasks when first time test failure happens.
      # This pipeline will run before build completes.
      yield TriggerSwarmingTasksPipeline(
          master_name, builder_name, build_number, failure_info)

      # Checks if first time failures happen and starts a try job if yes.
      yield StartTryJobOnDemandPipeline(
          master_name, builder_name, build_number, failure_info,
          signals, heuristic_result, build_completed, force_rerun_try_job)

      # Trigger flake analysis on flaky tests, if any.
      yield TriggerFlakeAnalysesPipeline(
          master_name, builder_name, build_number)
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
45c367e2b6e55ebf0655d04ec1a4b52b8f687ab5 | ed79aa13b08d737bdb464867d3014429b536f20b | /Principles of Computing/week7/poc_wrangler_gui.py | 54b0accc8089efa6040e23debf05dc492933736b | [] | no_license | mykhailo-dmytriakha/python | d04a7fdbe5e4aa537f0b1a89fdccf3a959e58759 | 6edffd14164cdb659a64f104dd4c683d33db332c | refs/heads/master | 2023-04-07T08:43:50.055850 | 2015-11-02T09:00:49 | 2015-11-02T09:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,593 | py | """
Word Wrangler GUI
"""
import SimpleGUICS2Pygame.simpleguics2pygame as simplegui
# Global constants
FONT_SIZE = 20                               # point size used for word labels
OFFSET = 4                                   # vertical padding around each word
ROW_HEIGHT = FONT_SIZE + OFFSET              # pixel height of one grid row
COLUMN_WIDTH = 80                            # pixel width of one grid column
GRID_SIZE = [25, 4]                          # [rows, columns] of the word grid
CANVAS_WIDTH = COLUMN_WIDTH * GRID_SIZE[1]   # canvas sized to fit the full grid
CANVAS_HEIGHT = ROW_HEIGHT * GRID_SIZE[0]
def draw_word(canvas, word, pos):
    """Draw a single word plus its bounding box, with `pos` being the
    bottom-left corner of the cell."""
    left, bottom = pos[0], pos[1]
    top = bottom - ROW_HEIGHT
    right = left + COLUMN_WIDTH
    # Closed outline: starts and ends at the bottom-left corner.
    outline = [pos, [left, top], [right, top], [right, bottom], pos]
    canvas.draw_text(word, [left + 2, bottom - 4], FONT_SIZE, "White")
    canvas.draw_polyline(outline, 1, "White")
class WordWranglerGUI:
    """
    Container for interactive content
    """

    def __init__(self, game):
        """
        Create frame and timers, register event handlers
        """
        # `game` provides start_game, enter_guess, peek and get_strings.
        self.game = game
        self.frame = simplegui.create_frame("Word Wrangler",
                                            CANVAS_WIDTH, CANVAS_HEIGHT, 250)
        self.frame.set_canvas_background("Blue")
        self.enter_input = self.frame.add_input("Enter word for new game",
                                                self.enter_start_word, 250)
        labelmsg = "Stars correspond to hidden words formed using letters "
        labelmsg += "from the entered word. Hidden words are listed in alphabetical order"
        self.frame.add_label(labelmsg, 250)
        self.frame.add_label("", 250)
        self.guess_label = self.frame.add_input("Enter a word",
                                                self.enter_guess, 250)
        self.frame.add_label("For a hint, click on a starred word", 250)
        self.frame.set_mouseclick_handler(self.peek)
        self.frame.set_draw_handler(self.draw)
        # Seed the game with a default word.
        self.enter_input.set_text("python")
        self.game.start_game("python")

    def start(self):
        """
        Start frame
        """
        self.frame.start()

    def enter_start_word(self, entered_word):
        """
        Event handler for input field to enter letters for new game
        """
        self.game.start_game(entered_word)

    def enter_guess(self, guess):
        """
        Event handler for input field to enter guess
        """
        self.game.enter_guess(guess)
        # Clear the input field for the next guess.
        self.guess_label.set_text("")

    def peek(self, pos):
        """
        Event handler for mouse click, exposes clicked word
        """
        # Convert pixel coordinates to (row, column) grid indices.
        [index_i, index_j] = [pos[1] // ROW_HEIGHT, pos[0] // COLUMN_WIDTH]
        # Words fill the grid column by column, so the flat index walks rows first.
        peek_idx = index_i + index_j * GRID_SIZE[0]
        if peek_idx < len(self.game.get_strings()):
            self.game.peek(peek_idx)

    def draw(self, canvas):
        """
        Handler for drawing subset words list
        """
        string_list = self.game.get_strings()
        for col in range(GRID_SIZE[1]):
            for row in range(GRID_SIZE[0]):
                pos = [col * COLUMN_WIDTH, (row + 1) * ROW_HEIGHT]
                idx = row + col * GRID_SIZE[0]
                if idx < len(string_list):
                    draw_word(canvas, string_list[idx], pos)

        # if self.winner_flag:
        #     canvas.draw_text("You win!",
        #                      [4 * ROW_HEIGHT, COLUMN_WIDTH],
        #                      2 * FONT_SIZE, "Yellow")
# Start interactive simulation
def run_gui(game):
    """Wrap `game` in a WordWranglerGUI and start its frame."""
    WordWranglerGUI(game).start()
| [
"mdmytiaha@playtika.com.ua"
] | mdmytiaha@playtika.com.ua |
3e6a874a64e7d69cc870d2a47199ffe654c59f9b | 904e75e2ceff81c18a432fe1b951b683e859cbed | /views/console/voucher.py | 809b9bed03144310d4c914e793626e5bbf9acd22 | [] | no_license | PUYUP/plutoborn | a42c65fa360de41a1236af00b5718948dc1b9940 | e6b47b7f183fcff60fa803329e11c2e87de560ef | refs/heads/master | 2022-12-05T17:06:10.049472 | 2020-08-19T09:12:45 | 2020-08-19T09:12:45 | 254,116,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | from django.conf import settings
from django.views import View
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models.functions import Coalesce
from django.db.models import Q, F, Sum, Count, Case, When, Value, Subquery, OuterRef, IntegerField
from utils.pagination import Pagination
from utils.generals import get_model
# Resolve the Voucher model from the 'market' app via the project helper.
# NOTE(review): presumably a wrapper around Django's apps.get_model — confirm.
Voucher = get_model('market', 'Voucher')
@method_decorator(login_required, name='dispatch')
class VoucherListView(View):
    """Console view listing all vouchers ordered by redeem count, paginated.

    Fixes vs. the original: `context` was a mutable class attribute shared
    by every request served by this view class (state could leak between
    requests); it is now built per-request. The page query parameter is
    also parsed defensively instead of raising a 500 on non-numeric input.
    """
    template_name = 'console/voucher/list.html'
    # Kept for backward compatibility; `get` now builds a per-request dict
    # instead of mutating this shared class attribute.
    context = dict()

    def get(self, request):
        context = dict()
        # Vouchers annotated with their total redeem count, most-redeemed first.
        vouchers = Voucher.objects \
            .annotate(
                total_redeem=Coalesce(Count('voucher_redeems'), 0)
            ).order_by('-total_redeem')

        # Paginator: `p` is zero-based in the query string; guard against
        # non-numeric values instead of raising a 500.
        try:
            page_num = int(request.GET.get('p', 0))
        except (TypeError, ValueError):
            page_num = 0
        paginator = Paginator(vouchers, settings.PAGINATION_PER_PAGE)

        try:
            vouchers_pagination = paginator.page(page_num + 1)
        except PageNotAnInteger:
            vouchers_pagination = paginator.page(1)
        except EmptyPage:
            vouchers_pagination = paginator.page(paginator.num_pages)

        pagination = Pagination(request, vouchers, vouchers_pagination, page_num, paginator)

        context['vouchers'] = vouchers
        context['vouchers_total'] = vouchers.count()
        context['vouchers_pagination'] = vouchers_pagination
        context['pagination'] = pagination
        return render(request, self.template_name, context)
| [
"hellopuyup@gmail.com"
] | hellopuyup@gmail.com |
8f001741a050048888b40125820ca38f28f4b9c6 | bf470bcf0910b2d3713d898677e91af1da5a2664 | /release.py | 7c8abaa7ddd3cecad54a91e01d47d19d522aac12 | [] | no_license | JRepoInd/repository.boogie.dist | a8e18ef3b01601ccb4072d48c1e674fc7d9d4266 | c9df894edda98c4786c40a9c3eb02b758117b536 | refs/heads/master | 2021-06-12T04:17:27.213095 | 2015-08-22T22:07:03 | 2015-08-22T22:07:03 | 41,871,615 | 0 | 0 | null | 2021-06-02T15:45:21 | 2015-09-03T16:47:18 | Python | UTF-8 | Python | false | false | 6,187 | py | # -*- coding: utf-8 -*-
import sys
import os
import shutil,stat
import urllib2
import urllib
import md5
import datetime
import time
import re
from distutils.version import StrictVersion, LooseVersion
from xml.dom import minidom
import shlex
from subprocess import Popen, PIPE
def runcmd(cmd, cwd):
    """Run shell-style command string `cmd` in directory `cwd`.

    Returns a (exitcode, stdout, stderr) tuple; the output streams are
    captured rather than echoed.
    """
    argv = shlex.split(cmd)
    process = Popen(argv, stdout=PIPE, stderr=PIPE, cwd=cwd)
    stdout, stderr = process.communicate()
    return process.returncode, stdout, stderr
# Repository root: the directory containing this script.
dirname=os.path.dirname(os.path.realpath(__file__))
username="huseyinbiyik"
# GitHub password/token supplied as the first command-line argument.
password=sys.argv[1]
# Distribution repository that hosts the packaged addon zips.
distrepo={"repo":"repository.boogie.dist",
          "branch":"master"}
# Addon source repositories to scan for new releases, mapped to their branch.
packs={"plugin.program.ump":"master",
       "repository.boogie":"master"}
# Epoch timestamp floor (2000-01-01) for release scanning.
datesince=int(time.mktime(datetime.datetime(2000,1,1).timetuple()))
def remove_readonly(func, path, excinfo):
    """shutil.rmtree `onerror` hook.

    Git checkouts on Windows leave read-only files behind; clear the
    read-only bit and retry the failed operation (e.g. os.remove).
    """
    os.chmod(path, stat.S_IWRITE)
    func(path)
def download_zip(pack, branch):
    """Open the GitHub zipball of `branch` for repository `pack` and return
    the HTTP response object.

    Fixes vs. the original: it referenced the undefined name `uname`
    (NameError at call time) instead of the module-level `username`, and
    discarded the response it opened.
    """
    return urllib2.urlopen(
        "https://github.com/%s/%s/archive/%s.zip" % (username, pack, branch))
def gitcli():
    # Release driver (Python 2): sync the distribution repo, then for each
    # addon repo detect whether new commits landed since the latest tag and,
    # if so, bump/tag/push a release and publish the zipball + metadata.

    # --- Sync the distribution repo itself from upstream ---
    c,o,e=runcmd("git fetch --all",dirname)
    c,o,e=runcmd("git reset --hard origin/master",dirname)
    c,o,e=runcmd("git pull https://%s:%s@github.com/%s/%s.git %s"%(username,password,username,distrepo["repo"],distrepo["branch"]),dirname)
    print "%s: Repo synched from upstream"%distrepo["repo"]
    for pack,branch in packs.iteritems():
        # Fresh checkout of the addon repo into the staging area.
        stage_path=os.path.join(dirname,"staging")
        repo_path= os.path.join(stage_path,pack)
        if os.path.exists(repo_path):
            shutil.rmtree(repo_path,onerror=remove_readonly)
        os.makedirs(repo_path)
        repo_url = 'https://github.com/%s/%s.git'%(username,pack)
        c,o,e=runcmd("git init",repo_path)
        c,o,e=runcmd("git remote add origin %s "%repo_url,repo_path)
        c,o,e=runcmd("git fetch",repo_path)
        c,o,e=runcmd("git tag -l",repo_path)
        c,o,e=runcmd("git show-ref --head",repo_path)
        # Parse `git show-ref --head` output: find the newest version tag
        # (and its commit) plus the head commit of the release branch.
        last_version="0.0.0"
        last_hash=None
        head_hash=None
        for release in o.split("\n"):
            try:
                hash=release.split(" ")[0]
                if "tags/" in release.split(" ")[1]:
                    version=release.split(" ")[1].split("/")[-1]
                elif "/"+branch in release.split(" ")[1]:
                    head_hash=hash
                    continue
                else:
                    continue
                if LooseVersion(version)>LooseVersion(last_version):
                    last_version=version
                    last_hash=hash
            except:
                continue
        if not last_version=="0.0.0":
            # Commit timestamps of the branch head vs. the latest tag.
            c,o,e=runcmd("git log "+head_hash+" -n 1 --format=%at",repo_path)
            head_ts=int(o)
            c,o,e=runcmd("git log "+last_hash+" -n 1 --format=%at",repo_path)
            last_ts=int(o)
        # Release if the head is newer than the latest tag, or nothing was
        # ever tagged.
        if not last_version=="0.0.0" and head_ts>last_ts or last_version=="0.0.0":
            c,o,e=runcmd("git fetch --all",repo_path)
            c,o,e=runcmd("git pull https://%s:%s@github.com/%s/%s.git %s"%(username,password,username,pack,branch),repo_path)
            x=open(os.path.join(repo_path,"addon.xml")).read()
            # Bump the patch component of the last released version.
            new_version=LooseVersion(last_version).version
            new_version[2]= str(int(new_version[2])+1)
            new_version=[str(x) for x in new_version]
            new_version = ".".join(new_version)
            print "%s: Found new version %s since %s"%(pack,new_version,last_version)
            print "Do you want to continue with the release? (y/n)"
            ans=raw_input()
            if not ans.lower()=="y":
                sys.exit()
            # Regenerate changelog.txt from the full git history.
            c,log,e=runcmd('git log --pretty=format:"%ad: %s" --date short',repo_path)
            changelog=open(os.path.join(repo_path,"changelog.txt"),"w")
            changelog.truncate()
            changelog.write(log)
            changelog.close()
            # Write the bumped version into addon.xml.
            addonxml = minidom.parse(os.path.join(repo_path,"addon.xml"))
            addon = addonxml.getElementsByTagName("addon")
            addon[0].attributes["version"].value=new_version
            addonxml.writexml( open(os.path.join(repo_path,"addon.xml"), 'w'),encoding="UTF-8")
            print "%s: New version bumped in addon.xml & changelog"%pack
            # Commit, tag and push the release to the addon repo.
            c,o,e=runcmd("git add -A .",repo_path)
            c,o,e=runcmd("git commit -m '%s Version Release'"%new_version,repo_path)
            c,o,e=runcmd("git tag -a %s -m '%s Version Release'"%(new_version,new_version),repo_path)
            c,o,e=runcmd("git push https://%s:%s@github.com/%s/%s.git %s"%(username,password,username,pack,branch),repo_path)
            c,o,e=runcmd("git push https://%s:%s@github.com/%s/%s.git %s --tags "%(username,password,username,pack,branch),repo_path)
            print "%s: Created new tag on github"%pack
            ##download new packet and update binaries
            pack_path=os.path.join(dirname,pack)
            if os.path.exists(pack_path):
                shutil.rmtree(pack_path,onerror=remove_readonly)
            os.makedirs(pack_path)
            #urllib.urlretrieve("https://github.com/%s/%s/archive/%s.zip"%(username,pack,new_version),os.path.join(pack_path,"%s-%s.zip"%(pack,new_version)))
            # Zip the working tree (without .git) into the distribution dir.
            shutil.rmtree(os.path.join(repo_path,".git"),onerror=remove_readonly)
            shutil.make_archive(os.path.join(pack_path,"%s-%s"%(pack,new_version)), 'zip', stage_path,pack)
            # Copy the addon artwork and a versioned changelog alongside it.
            metas=["icon.png","fanart.jpg","changelog.txt"]
            for meta in metas:
                if os.path.exists(os.path.join(repo_path,meta)):
                    if meta=="changelog.txt":
                        shutil.copy2(os.path.join(repo_path,meta),os.path.join(pack_path,"changelog-%s.txt"%new_version))
                    else:
                        shutil.copy2(os.path.join(repo_path,meta),os.path.join(pack_path,meta))
            print "%s: New zipball created on distribution directory"%pack
            ##update addons.xml
            # Update (or insert) this addon's entry in the repo-wide addons.xml
            # and refresh its md5 companion file.
            create_new=True
            addonsxml=minidom.parse(os.path.join(dirname,"addons.xml"))
            for addontag in addonsxml.getElementsByTagName("addons")[0].getElementsByTagName("addon"):
                if addontag.attributes["id"].value==pack:
                    create_new=False
                    addontag.attributes["version"].value=new_version
            if create_new:
                addonsxml.getElementsByTagName("addons")[0].appendChild(addon[0])
            addonsxml.writexml( open(os.path.join(dirname,"addons.xml"), 'w'),encoding="UTF-8")
            m = md5.new(open(os.path.join(dirname,"addons.xml")).read()).hexdigest()
            open(os.path.join(dirname,"addons.xml.md5"),"wb").write(m)
            print "%s: addons.xml and md5 is updated"%pack
            # Publish the updated metadata to the distribution repo.
            c,o,e=runcmd("git add -A .",dirname)
            c,o,e=runcmd("git commit -m '%s Version Release for %s'"%(new_version,pack),dirname)
            c,o,e=runcmd("git push https://%s:%s@github.com/%s/%s.git %s"%(username,password,username,distrepo["repo"],distrepo["branch"]),dirname)
            print "%s: Distribution repo updated"%pack
        else:
            print "%s: No new commits version:%s. Skipping"%(pack,last_version)
gitcli()
"huseyinbiyik@hotmail.com"
] | huseyinbiyik@hotmail.com |
f16c623f2284f4fcc342ceffbc101ff396686148 | 59b3dce3c770e70b2406cc1dd623a2b1f68b8394 | /python_1/lessons/calculations.py | 9fc441721ed85e47fac26d241c4db2cfd87301c8 | [] | no_license | patrickbeeson/python-classes | 04ed7b54fc4e1152a191eeb35d42adc214b08e39 | b5041e71badd1ca2c013828e3b2910fb02e9728f | refs/heads/master | 2020-05-20T07:17:36.693960 | 2015-01-23T14:41:46 | 2015-01-23T14:41:46 | 29,736,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | print("""---------------
Some Calculations
---------------""")
print(314159e-5)                     # scientific notation: 3.14159
print(10**6, 1j**2)                  # integer exponent; complex square is (-1+0j)
print(3 + 2 * 4, 1 / 3)              # operator precedence; true division
print("-" * 20)                      # string repetition
print((3.14159 * 16) ** 2)           # parentheses force the product to be squared...
print(3.14159 * 16 ** 2)             # ...whereas ** binds tighter than * here
print(20 * "-")
print("------------------\nEnd of Calculations\n--------------")
"patrickbeeson@gmail.com"
] | patrickbeeson@gmail.com |
8196628ce9f57044a85ddc3d821f773a8f4e9e87 | 1df91806ccab863df60c4f386fdfcae3654b5488 | /main.py | 31436318f06f60078d477d6ca797bf99452fcc48 | [] | no_license | Anurag-gg/discord-gateway | 51350e8a0f7a052ed59a6fe62b526c734eedc755 | fd8960502dd23a1bb8ebb064993a4645dd6abcfc | refs/heads/master | 2023-08-25T19:50:45.661426 | 2021-09-24T11:42:16 | 2021-09-24T11:42:16 | 408,313,939 | 2 | 0 | null | 2021-09-24T11:42:17 | 2021-09-20T04:50:38 | Python | UTF-8 | Python | false | false | 3,388 | py | import asyncio
import websockets
import json
from config import BOT_TOKEN
class Connect:
    """Minimal Discord gateway client: connects, identifies, heartbeats and
    consumes dispatch events over the gateway websocket.

    Fixes vs. the original:
    - `resume` wrapped its payload dict in a set literal ({ {...} }), which
      raises TypeError (dicts are unhashable) the moment a resume is needed.
    - `sequence` was initialised to the string "null"; the gateway expects a
      JSON null (Python None) before the first sequenced event.
    - the zombied-connection path called ws.frames.Close(...), which only
      constructs a frame object; the socket is now actually closed.
    - heartbeat-ACK frames may omit the "t" key, so it is read with .get()
      instead of indexing (KeyError in the original).
    """

    def __init__(self, token):
        # Last seen dispatch sequence; JSON null until the first event.
        self.sequence = None
        self.token = token
        self.heartbeat_received = True   # was the previous heartbeat ACKed?
        self.status = 'identity'         # 'identity' = new session, 'resume' = reconnect
        asyncio.run(self.main())

    async def send_json(self, ws, message):
        """Serialize `message` to JSON and send it over the websocket."""
        await ws.send(json.dumps(message))

    async def rec_json(self, ws):
        """Receive one frame and decode it from JSON."""
        response = await ws.recv()
        return json.loads(response)

    async def send_heartbeats(self, ws, interval):
        """Send an opcode-1 heartbeat every `interval` seconds.

        If the previous heartbeat was never acknowledged the connection is
        considered zombied: close with a non-1000 code (so the session stays
        resumable) and reconnect in resume mode.
        """
        while True:
            if self.heartbeat_received:
                jsonPayload = {
                    "op": 1,
                    "d": self.sequence
                }
                await self.send_json(ws, jsonPayload)
                print('HEARTBEAT SENT')
                self.heartbeat_received = False
                await asyncio.sleep(interval)
            else:
                print("no heartbeat_ack received")
                await ws.close(code=1011)
                self.status = "resume"
                await self.main()
                break

    async def identify(self, ws):
        """Send the opcode-2 Identify payload to start a new session."""
        identify_payload = {
            "op": 2,
            "d": {
                "token": self.token,
                "intents": 16383,
                "properties": {
                    "$os": "linux",
                    "$browser": "my_library",
                    "$device": "my_library"
                }
            }
        }
        await self.send_json(ws, identify_payload)

    async def resume(self, ws):
        """Send the opcode-6 Resume payload to continue an existing session."""
        resume_payload = {
            "op": 6,
            "d": {
                "token": self.token,
                "session_id": self.session_id,
                "seq": self.sequence
            }
        }
        await self.send_json(ws, resume_payload)

    async def main(self):
        """Connect to the gateway and run the receive/dispatch loop."""
        async with websockets.connect("wss://gateway.discord.gg/?v=9&encoding=json") as ws:
            while True:
                event = await self.rec_json(ws)
                if event["op"] == 10:
                    # Hello: start heartbeating at the advertised interval,
                    # then identify (fresh session) or resume (reconnect).
                    heartbeat_interval = event["d"]["heartbeat_interval"] / 1000
                    print("successfully connected to gateway")
                    asyncio.create_task(
                        self.send_heartbeats(ws, heartbeat_interval))
                    if self.status == "identity":
                        await self.identify(ws)
                    else:
                        await self.resume(ws)
                elif event.get("t") == 'READY':
                    # Remember the session id for later resumes.
                    self.session_id = event['d']['session_id']
                    print("bot is now ready")
                elif event["op"] == 11:
                    print('HEARTBEAT RECEIVED')
                    self.heartbeat_received = True
                elif event["op"] == 1:
                    # The gateway requested an immediate heartbeat.
                    print("op code 1 received")
                    jsonPayload = {
                        "op": 1,
                        "d": self.sequence
                    }
                    await self.send_json(ws, jsonPayload)
                elif event["op"] == 7:
                    # Reconnect request from the gateway.
                    print("reconnecting")
                    await self.resume(ws)
                else:
                    print(event)
                    self.sequence = event['s']
if __name__ == "__main__":
Connect(BOT_TOKEN)
| [
"dsanurag520@gmail,com"
] | dsanurag520@gmail,com |
7e17abc002edaf1847bf7df750090433bf144d0b | f0bf7911733f0f2af23a49ac8c0da156b62add23 | /Postinstall/ubuntu-13.10-postinstall.py | 09f194a5385fcbf5a64bfed35651cc33840bcb91 | [] | no_license | MicoFive/System | 6b61b93dbf010b55fe81ca997bc2f98017ade771 | 88dee781042b39926f57090d354849195f9b5e2b | refs/heads/master | 2020-05-17T04:49:27.920818 | 2014-03-07T12:34:34 | 2014-03-07T12:34:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,702 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Mon script de post installation Ubuntu 13.10
#
# Syntax: # sudo ./ubuntupostinstall-13.10.sh
#
# Nicolargo (aka) Nicolas Hennion
# http://www.nicolargo.com
# Distributed under the GPL version 3 license
#
"""
Post installation script for Ubuntu 13.10
"""
import os
import sys
import platform
import getopt
import shutil
import logging
import getpass
import ConfigParser
# Global variables
#-----------------------------------------------------------------------------
_VERSION = "0.7.1"
_DEBUG = 1
_LOG_FILE = "/tmp/ubuntu-13.10-postinstall.log"
_CONF_FILE = "https://raw.github.com/MicoFive/System/master/mycfg64.cfg"
# System commands
#-----------------------------------------------------------------------------
_APT_ADD = "add-apt-repository -y"
_APT_INSTALL = "DEBIAN_FRONTEND=noninteractive apt-get -y -f install"
_APT_REMOVE = "DEBIAN_FRONTEND=noninteractive apt-get -y -f remove"
_APT_UPDATE = "DEBIAN_FRONTEND=noninteractive apt-get -y update"
_APT_UPGRADE = "DEBIAN_FRONTEND=noninteractive apt-get -y upgrade"
_APT_KEY = "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys"
_WGET = "wget"
# Classes
#-----------------------------------------------------------------------------
class colors:
RED = '\033[91m'
GREEN = '\033[92m'
BLUE = '\033[94m'
ORANGE = '\033[93m'
NO = '\033[0m'
def disable(self):
self.RED = ''
self.GREEN = ''
self.BLUE = ''
self.ORANGE = ''
self.NO = ''
# Functions
#-----------------------------------------------------------------------------
def init():
"""
Init the script
"""
# Globals variables
global _VERSION
global _DEBUG
# Set the log configuration
logging.basicConfig( \
filename=_LOG_FILE, \
level=logging.DEBUG, \
format='%(asctime)s %(levelname)s - %(message)s', \
datefmt='%d/%m/%Y %H:%M:%S', \
)
def syntax():
"""
Print the script syntax
"""
print "Ubuntu 13.10 post installation script version %s" % _VERSION
print ""
print "Syntax: ubuntu-13.10-postinstall.py [-c cfgfile] [-h] [-v]"
print " -c cfgfile: Use the cfgfile instead of the default one"
print " -h : Print the syntax and exit"
print " -v : Print the version and exit"
print ""
print "Exemples:"
print ""
print " # ubuntu-13.10-postinstall.py"
print " > Run the script with the default configuration file"
print " %s" % _CONF_FILE
print ""
print " # ubuntu-13.10-postinstall.py -c ./myconf.cfg"
print " > Run the script with the ./myconf.cfg file"
print ""
print " # ubuntu-13.10-postinstall.py -c http://mysite.com/myconf.cfg"
print " > Run the script with the http://mysite.com/myconf.cfg configuration file"
print ""
def version():
"""
Print the script version
"""
sys.stdout.write("Script version %s" % _VERSION)
sys.stdout.write(" (running on %s %s)\n" % (platform.system(), platform.machine()))
def isroot():
"""
Check if the user is root
Return TRUE if user is root
"""
return (os.geteuid() == 0)
def showexec(description, command, exitonerror = 0, presskey = 0, waitmessage = ""):
"""
Exec a system command with a pretty status display (Running / Ok / Warning / Error)
By default (exitcode=0), the function did not exit if the command failed
"""
if _DEBUG:
logging.debug("%s" % description)
logging.debug("%s" % command)
# Wait message
if (waitmessage == ""):
waitmessage = description
# Manage very long description
if (len(waitmessage) > 65):
waitmessage = waitmessage[0:65] + "..."
if (len(description) > 65):
description = description[0:65] + "..."
# Display the command
if (presskey == 1):
status = "[ ENTER ]"
else:
status = "[Running]"
statuscolor = colors.BLUE
sys.stdout.write (colors.NO + "%s" % waitmessage + statuscolor + "%s" % status.rjust(79-len(waitmessage)) + colors.NO)
sys.stdout.flush()
# Wait keypressed (optionnal)
if (presskey == 1):
try:
input = raw_input
except:
pass
raw_input()
# Run the command
returncode = os.system ("/bin/sh -c \"%s\" >> /dev/null 2>&1" % command)
# Display the result
if ((returncode == 0) or (returncode == 25600)):
status = "[ OK ]"
statuscolor = colors.GREEN
else:
if exitonerror == 0:
status = "[Warning]"
statuscolor = colors.ORANGE
else:
status = "[ Error ]"
statuscolor = colors.RED
sys.stdout.write (colors.NO + "\r%s" % description + statuscolor + "%s\n" % status.rjust(79-len(description)) + colors.NO)
if _DEBUG:
logging.debug ("Returncode = %d" % returncode)
# Stop the program if returncode and exitonerror != 0
if ((returncode != 0) & (exitonerror != 0)):
if _DEBUG:
logging.debug ("Forced to quit")
exit(exitonerror)
def getpassword(description = ""):
"""
Read password (with confirmation)
"""
if (description != ""):
sys.stdout.write ("%s\n" % description)
password1 = getpass.getpass("Password: ");
password2 = getpass.getpass("Password (confirm): ");
if (password1 == password2):
return password1
else:
sys.stdout.write (colors.ORANGE + "[Warning] Password did not match, please try again" + colors.NO + "\n")
return getpassword()
def getstring(message = "Enter a value: "):
"""
Ask user to enter a value
"""
try:
input = raw_input
except:
pass
return raw_input(message)
def waitenterpressed(message = "Press ENTER to continue..."):
"""
Wait until ENTER is pressed
"""
try:
input = raw_input
except:
pass
raw_input(message)
return 0
def main(argv):
"""
Main function
"""
try:
opts, args = getopt.getopt(argv, "c:hv", ["config", "help", "version"])
except getopt.GetoptError:
syntax()
exit(2)
config_file = ""
config_url = _CONF_FILE
for opt, arg in opts:
if opt in ("-c", "--config"):
if arg.startswith("http://") or \
arg.startswith("https://") or \
arg.startswith("ftp://"):
config_url = arg
else:
config_file = arg
elif opt in ("-h", "--help"):
syntax()
exit()
elif opt in ('-v', "--version"):
version()
exit()
# Are your root ?
if (not isroot()):
showexec ("Script should be run as root", "tpastroot", exitonerror = 1)
# Is it saucy ?
_UBUNTU_VERSION = platform.linux_distribution()[2]
if (_UBUNTU_VERSION != "saucy"):
showexec ("Script only for Ubuntu 13.10", "tpassoussaucy", exitonerror = 1)
# Read the configuration file
if (config_file == ""):
config_file = "/tmp/ubuntu-13.10-postinstall.cfg"
showexec ("Download the configuration file", "rm -f "+config_file+" ; "+_WGET+" -O "+config_file+" "+config_url)
config = ConfigParser.RawConfigParser()
config.read(config_file)
if (config.has_section("gnome3") and config.has_section("unity")):
showexec ("Can not use both Gnome 3 and Unity, please change your .cfg file", "gnome3etunitygrosboulet", exitonerror = 1)
# Parse and exec pre-actions
for action_name, action_cmd in config.items("preactions"):
showexec ("Execute preaction "+action_name.lstrip("action_"), action_cmd)
# Parse and install repositories
pkg_list_others = {}
for item_type, item_value in config.items("repos"):
if (item_type.startswith("ppa_")):
showexec ("Install repository "+item_type.lstrip("ppa_"), _APT_ADD+" "+item_value)
elif (item_type.startswith("url_")):
showexec ("Install repository "+item_type.lstrip("url_"), _APT_ADD+" \\\"deb "+item_value+"\\\"")
elif (item_type.startswith("key_")):
showexec ("Install key for the "+item_type.lstrip("key_")+" repository", _APT_KEY+" "+item_value)
elif (item_type.startswith("pkg_")):
pkg_list_others[item_type] = item_value
# Update repos
showexec ("Update repositories", _APT_UPDATE)
# Upgrade system
showexec ("System upgrade (~20 mins, please be patient...)", _APT_UPGRADE)
# Parse and install packages
for pkg_type, pkg_list in config.items("packages"):
if (pkg_type.startswith("remove_")):
showexec ("Remove packages "+pkg_type.lstrip("remove_"), _APT_REMOVE+" "+pkg_list)
else:
showexec ("Install packages "+pkg_type, _APT_INSTALL+" "+pkg_list)
# Install packages related to repositories
#~ print pkg_list_others
for pkg in pkg_list_others.keys():
showexec ("Install packages "+pkg, _APT_INSTALL+" "+pkg_list_others[pkg])
# Allow user to read DVD (CSS)
showexec ("DVDs CSS encryption reader", "sh /usr/share/doc/libdvdread4/install-css.sh")
# Download and install dotfiles: vimrc, prompt...
if (config.has_section("dotfiles")):
# Create the bashrc.d subfolder
showexec ("Create the ~/.bashrc.d subfolder", "mkdir -p $HOME/.bashrc.d")
if (config.has_option("dotfiles", "bashrc")):
showexec ("Download bash main configuration file", _WGET+" -O $HOME/.bashrc "+config.get("dotfiles", "bashrc"))
if (config.has_option("dotfiles", "bashrc_prompt")):
showexec ("Download bash prompt configuration file", _WGET+" -O $HOME/.bashrc.d/bashrc_prompt "+config.get("dotfiles", "bashrc_prompt"))
if (config.has_option("dotfiles", "bashrc_aliases")):
showexec ("Download bash aliases configuration file", _WGET+" -O $HOME/.bashrc.d/bashrc_aliases "+config.get("dotfiles", "bashrc_aliases"))
showexec ("Install the bash configuration file", "chown -R $USERNAME:$USERNAME $HOME/.bashrc*")
# Vim
if (config.has_option("dotfiles", "vimrc")):
showexec ("Donwload the Vim configuration file", _WGET+" -O $HOME/.vimrc "+config.get("dotfiles", "vimrc"))
showexec ("Install the Vim configuration file", "chown -R $USERNAME:$USERNAME $HOME/.vimrc")
# Htop
if (config.has_option("dotfiles", "htoprc")):
showexec ("Download the Htop configuration file", _WGET+" -O $HOME/.htoprc "+config.get("dotfiles", "htoprc"))
showexec ("Install the Htop configuration file", "chown -R $USERNAME:$USERNAME $HOME/.htoprc")
# Pythonrc
if (config.has_option("dotfiles", "pythonrc")):
showexec ("Download the Pythonrc configuration file", _WGET+" -O $HOME/.pythonrc "+config.get("dotfiles", "pythonrc"))
showexec ("Install the Pythonrc configuration file", "chown -R $USERNAME:$USERNAME $HOME/.pythonrc")
# Gnome 3 configuration
if (config.has_section("gnome3")):
# Set the default theme
if (config.has_option("gnome3", "theme")):
showexec ("Set the default Gnome Shell theme to "+config.get("gnome3", "theme"), "sudo -u $USERNAME gsettings set org.gnome.desktop.interface gtk-theme "+config.get("gnome3", "theme"))
# Set the default icons
if (config.has_option("gnome3", "icons")):
showexec ("Set the default Gnome Shell icons to "+config.get("gnome3", "icons"), "sudo -u $USERNAME gsettings set org.gnome.desktop.interface icon-theme "+config.get("gnome3", "icons"))
# Set the default cursors
if (config.has_option("gnome3", "cursors")):
showexec ("Set the default Gnome Shell cursors to "+config.get("gnome3", "cursors"), "sudo -u $USERNAME gsettings set org.gnome.desktop.interface cursor-theme "+config.get("gnome3", "cursors"))
# Download and install the default Conky configuration
if (config.has_option("gnome3", "conky")):
showexec ("Download the Conky configuration file", _WGET+" -O $HOME/.conkyrc "+config.get("gnome3", "conky"))
showexec ("Install the Conky configuration file", "chown -R $USERNAME:$USERNAME $HOME/.conkyrc")
# Get the minimize/maximize button and ALT-F2 shortcut back
showexec ("Get the minimize and maximize button back in Gnome Shell", "sudo -u $USERNAME gconftool-2 -s -t string /desktop/gnome/shell/windows/button_layout \":minimize,maximize,close\"")
showexec ("Get ALT-F2 back to me", "sudo -u $USERNAME gconftool-2 --recursive-unset /apps/metacity/global_keybindings")
# Gnome Shell is the default UI
showexec ("Gnome Shell is now the default shell", "/usr/lib/lightdm/lightdm-set-defaults -s gnome-shell")
# Unity configuration
if (config.has_section("unity")):
# Set the default theme
if (config.has_option("unity", "theme")):
showexec ("Set the default Unity theme to "+config.get("unity", "theme"), "gsettings set org.gnome.desktop.interface gtk-theme "+config.get("unity", "theme"))
# Set the default icons
if (config.has_option("unity", "icons")):
showexec ("Set the default Unity icons to "+config.get("unity", "icons"), "gsettings set org.gnome.desktop.interface icon-theme "+config.get("unity", "icons"))
# Set the default cursors
if (config.has_option("unity", "cursors")):
showexec ("Set the default Unity cursors to "+config.get("unity", "cursors"), "gsettings set org.gnome.desktop.interface cursor-theme "+config.get("unity", "cursors"))
# Download and install the default Conky configuration
if (config.has_option("unity", "conky")):
showexec ("Install the Conky configuration file", _WGET+" -O $HOME/.conkyrc "+config.get("unity", "conky"))
# Unity is the default UI
showexec ("Unity is now the default shell", "/usr/lib/lightdm/lightdm-set-defaults -s unity-3d")
# Parse and exec post-actions
for action_name, action_cmd in config.items("postactions"):
showexec ("Execute postaction "+action_name.lstrip("action_"), action_cmd)
# End of the script
print("---")
print("End of the script.")
print(" - Cfg file: "+config_file)
print(" - Log file: "+_LOG_FILE)
print("")
print("Please restart your session to complete.")
print("---")
# Main program
#-----------------------------------------------------------------------------
if __name__ == "__main__":
init()
main(sys.argv[1:])
exit()
| [
"merigo4@gmail.com"
] | merigo4@gmail.com |
f45bd2b725edf19a4c9f528650707dc5900d8683 | 83959c80527cd727042bc3467b6e537fca8bef1a | /kbengine_stone_assets/scripts/common/tornado/platform/windows.py | b1d701de4fcc5ac181dde0a8d77764622db74e77 | [] | no_license | shanlihou/ttxiaoyouxi | 696697807cbf9d1fe41fb10fe64f8f29d5bd8864 | bca20863c4e1b5d6f3f835fee17c700292918a6c | refs/heads/master | 2020-04-26T13:12:13.153761 | 2019-03-03T12:36:04 | 2019-03-03T12:36:04 | 173,572,763 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | # NOTE: win32 support is currently experimental, and not recommended
# for production use.
#from __future__ import absolute_import, division, print_function, with_statement
#import ctypes # type: ignore
#import ctypes.wintypes # type: ignore
# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
#SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
#SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
#SetHandleInformation.restype = ctypes.wintypes.BOOL
#HANDLE_FLAG_INHERIT = 0x00000001
def set_close_exec(fd):
# success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0)
# if not success:
# raise ctypes.WinError()
pass | [
"shanlihou@gmail.com"
] | shanlihou@gmail.com |
982f09e06da9b91e11bebb4ecf8d383bc704f702 | a5cf1d2fc478d490df05eb198d1a0fb77fcb0bc9 | /flask_oauthlib/contrib/client/__init__.py | 4b777b4430dcbd3daf2326ea063c4e02dca552ae | [
"BSD-3-Clause"
] | permissive | ageis/flask-oauthlib | 516df1a661441cc46c26ab5e9b07fa328066a5f4 | 9414e002505354e8b5b3aa5f54a0889c836aa732 | refs/heads/master | 2021-01-05T05:11:59.090723 | 2020-04-19T07:20:23 | 2020-04-19T07:20:23 | 240,891,932 | 1 | 0 | BSD-3-Clause | 2020-04-19T07:20:24 | 2020-02-16T12:58:27 | null | UTF-8 | Python | false | false | 3,277 | py | import copy
from flask import current_app
from werkzeug.local import LocalProxy
from .application import OAuth1Application, OAuth2Application
__all__ = ['OAuth', 'OAuth1Application', 'OAuth2Application']
class OAuth(object):
"""The extension to integrate OAuth 1.0a/2.0 to Flask applications.
oauth = OAuth(app)
or::
oauth = OAuth()
oauth.init_app(app)
"""
state_key = 'oauthlib.contrib.client'
def __init__(self, app=None):
self.remote_apps = {}
if app is not None:
self.init_app(app)
def init_app(self, app):
app.extensions = getattr(app, 'extensions', {})
app.extensions[self.state_key] = OAuthState()
def add_remote_app(self, remote_app, name=None, **kwargs):
"""Adds remote application and applies custom attributes on it.
If the application instance's name is different from the argument
provided name, or the keyword arguments is not empty, then the
application instance will not be modified but be copied as a
prototype.
:param remote_app: the remote application instance.
:type remote_app: the subclasses of :class:`BaseApplication`
:params kwargs: the overriding attributes for the application instance.
"""
if name is None:
name = remote_app.name
if name != remote_app.name or kwargs:
remote_app = copy.copy(remote_app)
remote_app.name = name
vars(remote_app).update(kwargs)
if not hasattr(remote_app, 'clients'):
remote_app.clients = cached_clients
self.remote_apps[name] = remote_app
return remote_app
def remote_app(self, name, version=None, **kwargs):
"""Creates and adds new remote application.
:param name: the remote application's name.
:param version: '1' or '2', the version code of OAuth protocol.
:param kwargs: the attributes of remote application.
"""
if version is None:
if 'request_token_url' in kwargs:
version = '1'
else:
version = '2'
if version == '1':
remote_app = OAuth1Application(name, clients=cached_clients)
elif version == '2':
remote_app = OAuth2Application(name, clients=cached_clients)
else:
raise ValueError('unkonwn version %r' % version)
return self.add_remote_app(remote_app, **kwargs)
def __getitem__(self, name):
return self.remote_apps[name]
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
app = self.remote_apps.get(key)
if app:
return app
raise AttributeError('No such app: %s' % key)
class OAuthState(object):
def __init__(self):
self.cached_clients = {}
def get_cached_clients():
"""Gets the cached clients dictionary in current context."""
if OAuth.state_key not in current_app.extensions:
raise RuntimeError('%r is not initialized.' % current_app)
state = current_app.extensions[OAuth.state_key]
return state.cached_clients
cached_clients = LocalProxy(get_cached_clients)
| [
"me@lepture.com"
] | me@lepture.com |
8113e61753b63a1adf848618b5af0bff3890f601 | eecbf2f570b46e5a890847288144f2df8097d988 | /awlsim/core/instructions/insn_zr.py | f279e701766a8124bf9e436cb8dc38b157639018 | [] | no_license | ITI/PLCNet | 8ebb34dc57862abfc3a635fb3cee197601cade71 | 7f2c1a9d3a8a0ca8d8ab9a8027c65bc0ff0db64c | refs/heads/master | 2020-06-10T00:19:14.916423 | 2016-10-01T06:53:38 | 2016-10-01T06:53:38 | 193,533,866 | 2 | 0 | null | 2019-06-24T15:42:51 | 2019-06-24T15:42:50 | null | UTF-8 | Python | false | false | 1,490 | py | # -*- coding: utf-8 -*-
#
# AWL simulator - instructions
#
# Copyright 2012-2014 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
#from awlsim.core.instructions.main cimport * #@cy
class AwlInsn_ZR(AwlInsn): #+cdef
__slots__ = ()
def __init__(self, cpu, rawInsn):
AwlInsn.__init__(self, cpu, AwlInsn.TYPE_ZR, rawInsn)
self.assertOpCount(1)
def staticSanityChecks(self):
self.ops[0].assertType(AwlOperator.MEM_Z)
def run(self):
#@cy cdef S7StatusWord s
s = self.cpu.statusWord
self.cpu.getCounter(self.ops[0].resolve(True).value.byteOffset).run_ZR(s.VKE)
s.OR, s.NER = 0, 0
| [
"vig2208@gmail.com"
] | vig2208@gmail.com |
ddc3a481f63796292ef24894d2303cc7aa6bb7c0 | de996400d54cc2073671e2bab976534e8263bacb | /molecule-design/moldesign/sample/__init__.py | 6a4baed4f892c46a1a9b987c82b38d3e5e6e5344 | [] | no_license | tskluzac/colmena | c865e233d0f4cea20f2d3e14ef73425aee5bf78f | 042ce37e5acc8a240845b8cce11effe832c1c913 | refs/heads/master | 2022-11-28T17:52:19.819967 | 2020-08-06T19:41:49 | 2020-08-06T19:41:49 | 285,658,744 | 0 | 0 | null | 2020-08-06T19:52:04 | 2020-08-06T19:52:03 | null | UTF-8 | Python | false | false | 42 | py | """Functions for sampling new molecules""" | [
"ward.logan.t@gmail.com"
] | ward.logan.t@gmail.com |
0e296f08acd7ff148357f120355d83bb5f2da93e | 655cf8b34ba341b884efe016b475d9f3c9afa0fb | /AWS_deployment/predict.py | 0859de47b7f55cba1f7952235a1ac4ae2598d752 | [] | no_license | ruizhang84/Udacity_Deep_Learning | 9d9f845e9cb05eea3f36370397f4659615a6f4eb | 6768cbd742302869eadf30e155b4f7ffef41c3f0 | refs/heads/main | 2023-02-25T19:53:38.795753 | 2021-01-30T05:02:17 | 2021-01-30T05:02:17 | 333,578,955 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,168 | py | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
from utils import review_to_words, convert_and_pad
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
# Load the store model parameters.
model_path = os.path.join(model_dir, 'model.pth')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# Load the saved word_dict.
word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
with open(word_dict_path, 'rb') as f:
model.word_dict = pickle.load(f)
model.to(device).eval()
print("Done loading model.")
return model
def input_fn(serialized_input_data, content_type):
print('Deserializing the input data.')
if content_type == 'text/plain':
data = serialized_input_data.decode('utf-8')
return data
raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
def output_fn(prediction_output, accept):
print('Serializing the generated output.')
return str(prediction_output)
def predict_fn(input_data, model):
print('Inferring sentiment of input data.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if model.word_dict is None:
raise Exception('Model has not been loaded properly, no word_dict.')
# TODO: Process input_data so that it is ready to be sent to our model.
# You should produce two variables:
# data_X - A sequence of length 500 which represents the converted review
# data_len - The length of the review
x_length = convert_and_pad(model.word_dict, review_to_words(input_data))
data_X = x_length[0]
data_len = x_length[1]
# Using data_X and data_len we construct an appropriate input tensor. Remember
# that our model expects input data of the form 'len, review[500]'.
data_pack = np.hstack((data_len, data_X))
data_pack = data_pack.reshape(1, -1)
data = torch.from_numpy(data_pack)
data = data.to(device)
# Make sure to put the model into evaluation mode
model.eval()
# TODO: Compute the result of applying the model to the input data. The variable `result` should
# be a numpy array which contains a single integer which is either 1 or 0
with torch.no_grad():
output = model(data).detach().cpu().numpy()
result = np.round(output).astype(np.int)
return result
| [
"ruizhang84.mail@gmail.com"
] | ruizhang84.mail@gmail.com |
282003de7549f1fec022667910ea65020374ae23 | 26706446b4d2b0b61fa3caf4b49b121605d22dc7 | /amazon book scraping/content/amazon_scraping/amazon_scraping/items.py | e5452cfbfec12f946210d8967d10939dec33ade4 | [] | no_license | shreenathchavan/amazon_books_scraping | 368ebecf5eea07137198bdb30e089900e99f0815 | 09c07aa846a2dace0396a87030cfcd033114c410 | refs/heads/master | 2020-08-26T11:19:06.715865 | 2019-10-23T08:24:59 | 2019-10-23T08:24:59 | 217,004,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class AmazonScrapingItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
product_name = scrapy.Field()
product_author = scrapy.Field()
product_price = scrapy.Field()
product_imagelink = scrapy.Field()
pass
| [
"noreply@github.com"
] | noreply@github.com |
100085c9ff8dfe960f7623aef842be6bb33faecb | b816109a8d8fc9c14613096179ece4e39caca26c | /demo/mysql.py | 80ccd0d77b8681e9d476676f0a4fca695e956b97 | [] | no_license | yuyiqiushui/WaiStar | 0ef16bd804ec6677c71713a876502283cfda8742 | cefa0fd706526f3785c387c10e343dc16c3d4001 | refs/heads/master | 2020-03-23T21:47:04.488667 | 2018-08-25T06:55:10 | 2018-08-25T06:55:10 | 142,132,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | import pymysql
# 打开数据库连接
db = pymysql.connect("localhost", "root", "", "cyl")
# 使用cursor()方法获取操作游标
cursor = db.cursor()
# SQL 插入语句
sql = "SELECT * FROM EMPLOYEE \
WHERE INCOME > '%d'" % (1000)
try:
# 执行SQL语句
cursor.execute(sql)
# 获取所有记录列表
results = cursor.fetchall()
print(results)
for row in results:
fname = row[0]
lname = row[1]
age = row[2]
sex = row[3]
income = row[4]
# 打印结果
print("fname=%s,lname=%s,age=%d,sex=%s,income=%d" % \
(fname, lname, age, sex, income))
except:
print("Error: unable to fetch data")
# 关闭数据库连接
db.close() | [
"yu_qiushui@163.com"
] | yu_qiushui@163.com |
86762ac129647cb737b6d6c2cca219b9978b9803 | 57845f9780811164e16917345063485db9c2f601 | /Opinionated/Opinionated/urls.py | d61889940a3ef4e14e2b8f782895ec7c64b59bbf | [] | no_license | popsonebz/Django_Projects | 19e2f710c0d6160cff3c441f8e0f502b9a368e52 | e87b4a09d6f759a6108c96cad8e8f83dc9479e3f | refs/heads/main | 2023-06-13T03:08:41.730901 | 2021-06-27T19:26:35 | 2021-06-27T19:26:35 | 382,118,013 | 0 | 0 | null | 2021-07-01T18:06:35 | 2021-07-01T18:06:34 | null | UTF-8 | Python | false | false | 1,074 | py | """Opinionated URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
from django.contrib.auth import views
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('Blog.urls')),
path('accounts/login/', views.LoginView.as_view(template_name='registration/login.html'), name='login'),
path('accounts/logout/', views.LogoutView.as_view(), name='logout', kwargs={'next_page': '/'}),
]
| [
"odedereoluwapelumi13@gmail.com"
] | odedereoluwapelumi13@gmail.com |
f597975a435f3e4d97e6bbccfe4d07f900f11b58 | c36edf21f868315e1a1f63cf8a88398c926b3a8b | /jianzhioffer2/35_复杂链表的复制.py | bcae997330612f3f1daef99281f28dc6d597b8a6 | [] | no_license | kcmao/leetcode_exercise | 85037de7b148b7dc162c52c672510d260dfe3574 | e52e0e18b7cef923a5f0eb000968f4234199978f | refs/heads/master | 2020-04-11T01:57:50.254666 | 2019-08-04T13:02:28 | 2019-08-04T13:02:28 | 161,431,354 | 1 | 0 | null | 2019-05-27T14:35:49 | 2018-12-12T04:10:37 | Python | UTF-8 | Python | false | false | 1,874 | py | # -*- coding: utf-8 -*-
class ComplexNode(object):
def __init__(self, value, next=None, sibling=None):
self.value = value
self.next = next
self.sibling = sibling
def clone_complex_link(head):
insert_next(head)
connect_sibling_nodes(head)
return reconnect_node(head)
#链表后面添加节点
def insert_next(head):
node = head
while node:
clone_node = ComplexNode(node.value+"'")
clone_node.next = node.next
node.next = clone_node
node = clone_node.next
#连接上sibling节点
def connect_sibling_nodes(head):
node = head
while node : #注意
clone_node = node.next
if node.sibling:
clone_node.sibling = node.sibling.next
node = clone_node.next
def reconnect_node(head):
node = head
clone_node = head.next
clone_head = clone_node
node.next = clone_node.next
node = clone_node.next
while node:
clone_node.next = node.next
clone_node = node.next
node.next = clone_node.next
node = clone_node.next
return clone_head
def display(head):
if not isinstance(head, ComplexNode):
return
p = head
while p:
print(p.value, end=" ")
if p.next:
print(p.next.value, end=" ")
if p.sibling:
print(p.sibling.value, end=" ")
print()
p = p.next
if __name__ == "__main__":
node5 = ComplexNode('E')
node4 = ComplexNode('D', next=node5)
node3 = ComplexNode('C', next=node4)
node2 = ComplexNode('B', next=node3)
node1 = ComplexNode('A', next=node2)
node1.sibling = node3
node2.sibling = node5
node4.sibling = node2
head = node1
display(head)
print("haha")
clone_head = clone_complex_link(head)
display(head)
print("haha")
display(clone_head)
| [
"kc_mao@qq.com"
] | kc_mao@qq.com |
563b81ef58b37b716b6b4764e3162f6d22c8d652 | 864713da7d1f08836df68c97605fa5a006973661 | /syncer.py | dfa86b38cfa6ace20786f593ca946d2b5ba6476b | [] | no_license | svl7/mao-orchestrator | dab7151c2663aff228b41c38c9760ef88bce5a78 | 74c07e532a0edc33b19fd99bd9f693e3f0e936e1 | refs/heads/master | 2022-11-09T19:57:28.685028 | 2020-06-18T13:04:26 | 2020-06-18T13:04:26 | 273,239,229 | 0 | 0 | null | 2020-06-18T13:00:01 | 2020-06-18T13:00:00 | null | UTF-8 | Python | false | false | 5,057 | py | import git
import datetime
import configparser
import requests
import json
import schedule
import shutil
import base64
import glob
from datetime import datetime
import audit
import logging
from etcd_client import write, get
# Verbose logging by default for the orchestrator.
logging.basicConfig(level=logging.DEBUG)
config = configparser.ConfigParser()
config.read('config.ini')
importdir = config['WORKING_ENVIRONMENT']['IMPORTDIR']
# Normalise: dataset paths below are always built as importdir + "/" + name.
if importdir.endswith('/'):
    importdir = importdir[:-1]
def list_jobs():
    """Return the scheduler's current job list (thin passthrough)."""
    return schedule.list_jobs()
def remove_job(id):
    """Delete the scheduled job with the given id (thin passthrough).

    Note: the parameter name shadows the ``id`` builtin; kept as-is for
    interface compatibility with existing callers.
    """
    return schedule.delete_job(id)
def sync(data):
    """Register a tool run with the scheduler, cloning its dataset first if needed.

    ``data`` must carry ``name`` and ``cron``; it may also carry ``env``,
    ``command``, ``renku`` (alternate data repo URL) and, for cron jobs,
    ``frequency``. Returns a dict describing what was resolved plus the
    scheduler's reply.
    """
    response = {}
    command = []
    env = {}
    renku = False
    # Tool metadata (image, data repo URL) is stored as JSON in etcd.
    blob = get('tools/{}'.format(data['name']))
    payload = json.loads(blob)
    tool = payload['image']
    print("Tool invoked: " + tool)
    response['tool'] = tool
    dataset = importdir + "/" + data['name']
    print("Data directory: " + dataset)
    response['datadir'] = dataset
    if 'env' in data:
        env = data['env']
    if 'command' in data:
        command = data['command']
    if 'renku' in data:
        renku = True
        payload['data_repo'] = data['renku']
    # Check if dataset has been cloned already
    if not config.has_option('DATA_REPOS', data['name']):
        # Clone dataset
        print("Cloning dataset from: " + payload['data_repo'] + " to: " + dataset)
        response['dataset'] = payload['data_repo']
        try:
            git.Repo.clone_from(payload['data_repo'], dataset)
            print("Updating config")
            if not config.has_section('DATA_REPOS'):
                config.add_section('DATA_REPOS')
            config.set('DATA_REPOS', data['name'], dataset)
            with open('config.ini', 'w') as f:
                config.write(f)
        # Narrowed from a bare except: keep the best-effort behaviour but
        # never swallow SystemExit/KeyboardInterrupt.
        except Exception:
            print("Error cloning data")
    # Build the scheduler message once; 'freq' is only present for cron
    # jobs, and key order is preserved to match the previous output.
    json_out = {
        "container": tool,
        "tool": data['name'],
        "dataset": dataset,
        "cron": data['cron'],
    }
    if data['cron']:
        json_out["freq"] = data['frequency']
    json_out.update({
        "command": command,
        "env": env,
        "renku": renku,
    })
    print("Message to scheduler: " + json.dumps(json_out))
    response['message'] = json.dumps(json_out)
    response['scheduler_output'] = schedule.schedule_run(json_out)
    return response
def list_local():
    """Return {dataset name: local path} for every locally cloned dataset."""
    return {entry: config['DATA_REPOS'][entry] for entry in config['DATA_REPOS']}
def remove_local(name):
    """Delete a locally cloned dataset and drop it from config.ini."""
    local_path = config['DATA_REPOS'][name]
    shutil.rmtree(local_path)
    config.remove_option('DATA_REPOS', name)
    with open('config.ini', 'w') as cfg_file:
        config.write(cfg_file)
    return "Deleted {} from local filesystem".format(name)
def _register_local_repo(name):
    """Record the local clone of *name* in config.ini (idempotent)."""
    if not config.has_option('DATA_REPOS', name):
        print("Updating config")
        logging.info("updating config")
        if not config.has_section('DATA_REPOS'):
            config.add_section('DATA_REPOS')
        config.set('DATA_REPOS', name, importdir + "/" + name)
        with open('config.ini', 'w') as f:
            config.write(f)


def retrieve(name):
    """Clone (or, failing that, pull) the dataset registered under *name*.

    The repository URL is looked up in etcd under ``/data/<name>``.
    Returns a human-readable status string in every case.
    """
    try:
        value = get("/data/" + name)
    # Narrowed from a bare except: any lookup failure means "no entry".
    except Exception:
        print("No such entry")
        return "This name does not correspond to an entry"
    try:
        git.Repo.clone_from(value, importdir + "/" + name)
        _register_local_repo(name)
        return "Cloned: {} to {}".format(value, importdir + "/" + name)
    except Exception:
        # Clone fails when the directory already exists; fall back to pull.
        print("Error cloning data, trying to pull")
        logging.warning("Error cloning data, trying to pull")
        try:
            repo = git.Repo(importdir + "/" + name)
            repo.remotes.origin.pull()
            _register_local_repo(name)
            return "Pulled: {} to {}".format(value, importdir + "/" + name)
        except Exception:
            print("Error pulling data.")
            logging.error("Error retrieving data.")
            return "Error pulling data."
def create_audit(tool):
    """Create an audit entry in etcd for *tool*; submit the data file if present.

    Returns a human-readable status string.
    """
    # Creation of audit entry
    issuer = config['WORKING_ENVIRONMENT']['user']
    timestamp = datetime.now()
    # NOTE(review): the audit id is only the microsecond component of the
    # timestamp, so two audits landing on the same microsecond value (even
    # in different seconds) collide -- confirm uniqueness requirements.
    audit_id = timestamp.microsecond
    write("audit/{}".format(audit_id), '{{"issuer":"{}",\
    "tool":"{}",\
    "timestamp":"{}"}}'.format(issuer, tool, timestamp))
    # Send file if exists
    if config.has_option('DATA_REPOS', tool):
        filename = audit.submit(tool, audit_id, issuer)
        return "Created audit {} and submitted file {}".format(audit_id, filename)
    else:
        return "Created audit {}. No local file to submit.".format(audit_id)
| [
"pang@zhaw.ch"
] | pang@zhaw.ch |
1bc9d03fd65b59a18d155611ec479ee53115593a | dca7588fbabca0d0e52dd4e5a29d5720aa8253a7 | /app_users/views.py | a2ee2e5dd3396feed65bdf85a5197f119b90033f | [] | no_license | raiatul14/cms | 00c93b02774f30ba6d7725527ee83608c9c2d996 | 5597ebac7bdf478865bae5351f8e47df67ed934e | refs/heads/master | 2022-12-19T19:16:17.960711 | 2020-10-13T04:32:52 | 2020-10-13T04:32:52 | 303,295,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | from django.shortcuts import render
# Create your views here.
from .views import * | [
"atul.rai@routemobile.com"
] | atul.rai@routemobile.com |
787c24412b397526436422830135624073c3a5f3 | 6c6b8b967bf1690d815ba8223a15abf3f8862fd7 | /Generation/GAN/DCGAN/Predictor.py | 37067cf92a23c6b09ca365927a39597f0b4d9ce3 | [] | no_license | ihatasi/Learning | 82450c1d05493e83409265bd28ab881dbc035222 | 89c03dcce586d403aee1e0f7c36fadbce1c8c881 | refs/heads/master | 2020-04-29T22:54:23.803216 | 2019-11-27T10:25:40 | 2019-11-27T10:25:40 | 176,462,219 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | #!/usr/bin/python3
import chainer, argparse, os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from chainer import Variable
# CLI: latent size, dataset (selects the network module) and RNG seed.
parser = argparse.ArgumentParser(description="DCGAN")
parser.add_argument("--n_dimz", "-z", type=int, default=100)
parser.add_argument("--dataset", "-ds", type=str, default=100)
parser.add_argument("--seed", type=int, default=0)
args = parser.parse_args()
if args.dataset == "cifar10":
    import Network.cifar10_net as Network
else:
    import Network.mnist_net as Network
# Restore the trained generator weights for CPU inference.
gen = Network.Generator(n_hidden=args.n_dimz)
gen.to_cpu()
load_path = 'result/{}/gen_epoch_100.npz'.format(args.dataset)
chainer.serializers.load_npz(load_path, gen)
# Fixed seed -> reproducible latent vector z of shape (1, n_dimz, 1, 1).
np.random.seed(args.seed)
dtype = chainer.get_dtype()
hidden = np.random.uniform(-1, 1, (1, args.n_dimz, 1, 1)).astype(dtype)
z = Variable(np.asarray(hidden))
with chainer.using_config('train', False):
    x = gen(z)
np.random.seed()  # re-randomise the global RNG after the deterministic draw
# Scale to [0, 255] and quantise to uint8 (assumes the generator emits
# values in ~[0, 1] -- confirm against the network definition).
x = np.asarray(np.clip(x.data * 255, 0.0, 255.0), dtype=np.uint8)
_, _, H, W = x.shape
# Rearrange NCHW -> HW (grayscale) or HWC (colour) for saving/plotting.
if args.dataset == "mnist":
    x = x.reshape((1, 1, 1, H, W))
    x = x.transpose(0, 3, 1, 4, 2)
    x = x.reshape((1 * H, 1 * W))
else:
    x = x.reshape((1, 1, 3, H, W))
    x = x.transpose(0, 3, 1, 4, 2)
    x = x.reshape((1 * H, 1 * W, 3))
preview_dir = './'
preview_path = preview_dir +\
    'test.png'
if not os.path.exists(preview_dir):
    os.makedirs(preview_dir)
Image.fromarray(x).save(preview_path)
plt.imshow(x)
plt.show()
| [
"ihatasi@gmail.com"
] | ihatasi@gmail.com |
28f96418834a90cd0b5b509bc776c9f3b0b639c5 | c49193f4ceca8d3a98ceda70549beed3ad23c87e | /dir.py | 3ac16b286eadcf03d898e7091b50677f787c2d1a | [] | no_license | myousefnezhad/easyfmri-old | bec95d2879513f55d663d9afe0bf73fe4c832c46 | 1ef75d84e57649797faad13b9f2a2770a33ac120 | refs/heads/master | 2022-05-28T08:09:17.542375 | 2018-04-10T10:09:53 | 2018-04-10T10:09:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | import os
def getDIR():
    """Return the absolute path of the directory containing this module."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
"myousefnezhad@outlook.com"
] | myousefnezhad@outlook.com |
b344b015fd8b1fc9a5d0dde7daab89f194665ebf | bb828b46527bf15011315b7d67d8b20b05f8858c | /innovate/migrations/0003_auto_20210211_0122.py | 4ceb135c5353ec0162fc90772b8faacbc2a0c8d4 | [] | no_license | Rushilwiz/launchx | 7b334da60eab38ade5db8a9868caf62f68e207f5 | 85821a6fcd91c09c1579787f31a388119ae237c3 | refs/heads/master | 2023-03-08T19:55:15.080057 | 2021-02-21T01:27:25 | 2021-02-21T01:27:25 | 334,251,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # Generated by Django 3.1.6 on 2021-02-11 01:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('innovate', '0002_auto_20210208_1549'),
]
operations = [
migrations.AddField(
model_name='competitor',
name='county',
field=models.CharField(blank=True, default='', max_length=20),
),
migrations.AddField(
model_name='competitor',
name='school',
field=models.CharField(blank=True, default='', max_length=20),
),
]
| [
"rushilwiz@gmail.com"
] | rushilwiz@gmail.com |
a4c56e977fcf8aa0aa8b1d5700eac711f0e99616 | e1ffebca6a0f185663c779462e3ca27866f557b8 | /GROUP_project/project/api/migrations/0002_auto_20191204_0429.py | 9dcf91bc2afb5abac10e0bf7a31e18ff8156c88e | [] | no_license | asselyer/Backend2019 | d8d85d7850261880fe4aeef9092b0a8c7b1b6767 | ec5931e2bd22ec62e68592a4199c00184f4dacc3 | refs/heads/master | 2020-07-24T13:38:21.246351 | 2019-12-04T03:16:27 | 2019-12-04T03:16:27 | 207,944,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,680 | py | # Generated by Django 2.2.3 on 2019-12-03 22:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='postfile',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='postcomment',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='favoritepost',
name='users',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='blog',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='blogs', to='api.BlogCategory'),
),
migrations.AddField(
model_name='blog',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_blogs', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='postfile',
name='posts',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='post_documents', to='api.Post'),
),
migrations.AddField(
model_name='postcomment',
name='posts',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='post_comments', to='api.Post'),
),
migrations.AddField(
model_name='post',
name='blog',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='posts', to='api.Blog'),
),
migrations.AddField(
model_name='post',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_posts', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='favoritepost',
name='posts',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='api.Post'),
),
]
| [
"asel.yer98@gmail.com"
] | asel.yer98@gmail.com |
d50db3f0895bfcfe6b6a9eb5f62c99302983871e | a29c6e83ae4f9010941d15c8fd4cfc67680bb054 | /pandas/pandas_sample.py | 11ec8f3f3066928a773948c21c7d305883c6a906 | [] | no_license | ym0179/bit_seoul | f1ff5faf4ae20fbc8c0e2ed10a005f8bd4b2c2b8 | 14d1fb2752312790c39898fc53a45c1cf427a4d1 | refs/heads/master | 2023-02-27T19:52:23.577540 | 2021-02-08T00:30:16 | 2021-02-08T00:30:16 | 311,265,926 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,632 | py | #Day9
#2020-11-19
import pandas as pd
import numpy as np
from numpy.random import randn
np.random.seed(100)
data = randn(5,4) #5행 4열
print(data)
df = pd.DataFrame(data, index='A B C D E'.split(),
columns='가 나 다 라'.split())
print(df)
data2 = [[1,2,3,4,], [5,6,7,8], [9,10,11,12],
[13,14,15,16], [17,18,19,20]] #list
df2 = pd.DataFrame(data2, index=['A','B','C','D','E'],
columns=['가','나','다','라'])
print(df2)
# 가 나 다 라
# A 1 2 3 4
# B 5 6 7 8
# C 9 10 11 12
# D 13 14 15 16
# E 17 18 19 20
df3 = pd.DataFrame(np.array([[1,2,3],[4,5,6]]))
print(df3)
print("df2['나'] :\n",df2['나']) #2,6,10,14,18
print("df2['나','라'] :\n",df2[['나','라']]) #2,6,10,14,18
#4,8,12,16,20
# print("df2[0] : ", df2[0]) #에러, 컬럼명으로 해줘야 에러 안남
# print("df2.loc['나'] : \n", df2.loc['나']) #에러, loc 행에서만 사용 가능 (행과 함께 사용)
print("df2.iloc[:,2] : \n", df2.iloc[:, 2]) #3,7,11,15,19
# print("df2[:,2] : \n", df2[:, 2]) #에러
#행
print("df2.loc['A'] : \n", df2.loc['A']) #A행 출력
print("df2.loc['A','C'] : \n", df2.loc[['A','C']]) #A, C행 출력
print("df2.iloc[0] : \n", df2.iloc[0]) #A행 출력
print("df2.iloc[0,1] : \n", df2.iloc[[0,2]]) #A, C행 출력
#행렬
print("df2.loc[['A','B'], ['나','다']] : \n",df2.loc[['A','B'], ['나','다']])
#한개의 값만 확인
print("df2.loc['E','다'] : \n",df2.loc['E','다']) #19
print("df2.iloc[4,2] : \n",df2.iloc[4,2]) #19
print("df2.iloc[4][2] : \n",df2.iloc[4][2]) #19
| [
"ym4766@gmail.com"
] | ym4766@gmail.com |
bf56ed2037a8d92ae1cd83b1ca14a15536c85df2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2385/60677/251112.py | a199b651fd4db5634ac382da13966aee61e6f9bc | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | times=int(input())
for i in range(times):
n=int(input())
k=1
answer=1
while n-2*k+2>0:
answer+=n-2*k+2
k+=1
if n==4:
answer=8
print((answer)%(10**9+7)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
98cd32da38e634d818253e5c656b496ead30b1f9 | 9fa1e85560401833143ce6e6f5867964e96cf070 | /fastapi_websocket/main.py | 0135ce530487b61eb699425ff410068cec8c2c60 | [] | no_license | red-fox-yj/Clock-In-Server | 50c11e6737d7fd0974e4fd39ec0c3556a801cc4d | 80dc82e6d3f85d3d98f34047ae3544866aa2dab1 | refs/heads/master | 2023-03-05T15:00:56.811226 | 2021-02-21T03:18:14 | 2021-02-21T03:18:14 | 334,340,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,388 | py | from fastapi import FastAPI, WebSocket
from fastapi.responses import HTMLResponse
import uvicorn
import ast
from distribution import _search, _modify, _read
app = FastAPI()
html = """
<!DOCTYPE html>
<html>
<head>
<title>Chat</title>
</head>
<body>
<h1>WebSocket Chat</h1>
<form action="" onsubmit="sendMessage(event)">
<input type="text" id="messageText" autocomplete="off"/>
<button>Send</button>
</form>
<ul id='messages'>
</ul>
<script>
var ws = new WebSocket("ws://192.168.0.74:8000/ws");
ws.onmessage = function(event) {
var messages = document.getElementById('messages')
var message = document.createElement('li')
var content = document.createTextNode(event.data)
message.appendChild(content)
messages.appendChild(message)
};
function sendMessage(event) {
var input = document.getElementById("messageText")
ws.send(input.value)
input.value = ''
event.preventDefault()
}
</script>
</body>
</html>
"""
@app.get("/")
async def get():
    # NOTE(review): the module-level ``html`` page is never returned here --
    # confirm whether this endpoint was meant to serve it instead of "welcome".
    return HTMLResponse("welcome")
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    """Accept a websocket client and answer every text frame via the dispatcher."""
    await websocket.accept()
    while True:
        # receive() yields a message dict; the payload sits under "text".
        data = await websocket.receive()
        await websocket.send_json(_resolve_response(data["text"]))
def _resolve_response(response: str):
    """Dispatch a raw websocket message to the matching data handler.

    The client sends a Python-literal dict (hence ``ast.literal_eval``
    rather than ``json.loads``) with a ``type`` of ``search``, ``read``
    or anything else (treated as a modify request). Returns the reply
    dict to send back.
    """
    # Strip embedded newlines before parsing (replaces the old
    # list()/while/remove dance with the equivalent one-liner).
    response = response.replace("\n", "")
    # String -> dict
    response = ast.literal_eval(response)
    if response["type"] == "search":
        temp = {"type": "search", "data": _search(response["data"])}
    elif response["type"] == "read":
        temp = {"type": "read", "data": _read()}
    else:
        _modify(response["data"])
        temp = {"type": "modify", "data": "修改成功"}
    return temp
uvicorn.run(app, host="0.0.0.0", port=8000)
| [
"redfox@chancefocus.com"
] | redfox@chancefocus.com |
163bceb89be6c5814429028d9689ebf667748722 | 169776e1b0ce571a4083b650dde4637d11767084 | /challenge_3/outings.py | 8dbe59d89ab7f7e9ee3d249267e82fbe90c8d6f5 | [] | no_license | Nesken-weche/Gold_challenges | 1a97c0e20b87bde5f6057f82df5c50289ed65410 | 155bad1560db0a0fe9536338f8c09e9fdd704324 | refs/heads/master | 2020-04-24T16:32:24.664326 | 2019-02-22T18:37:31 | 2019-02-22T18:37:31 | 172,111,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py |
class Outing:
    """Value object describing a planned outing and its costs."""

    def __init__(self, eventType, attendees, date, total_cost_per, total_cost_event):
        """Store the outing details exactly as supplied."""
        self.eventType = eventType
        self.attendees = attendees
        self.date = date
        self.total_cost_per = total_cost_per
        self.total_cost_event = total_cost_event

    def __repr__(self):
        """Render every field joined by '- ', matching the original layout."""
        fields = (self.eventType, self.attendees, self.date,
                  self.total_cost_per, self.total_cost_event)
        return "- ".join(str(field) for field in fields)
if __name__ == "__main__":
    # Quick demo: build one record and print it via __repr__.
    name = Outing('Golf', '150', 'month', '$15', '$2000')
    print(name)
| [
"wnesken@gmail.com"
] | wnesken@gmail.com |
2539ab608f51a1b8084f54a8df8cc8edca59a28f | d684ac6e0adfd85851c05bb9dac484bfb4d50eb9 | /lab9/DNS_AdrianArdizza_2006524896_C_lab9__/AdrianArdizza_C_fantasi_terakhir.py | cb0c8182697c1bb2668d332007778f25bedb8b67 | [] | no_license | Meta502/lab-ddp-1 | 743dc8f8f72ae52a505788c80fe91d28c74c681d | fc04cfc1a7e85b32eee36f1158d94fce9c9a0251 | refs/heads/master | 2023-02-08T13:23:33.128796 | 2020-12-30T19:03:10 | 2020-12-30T19:03:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,999 | py | class Hero():
def __init__(self, name, hp, attack):
"""
Constructor untuk class Hero, akan dipanggil saat inisiasi objek dan saat dipanggil dengan super().__init__
"""
self.__name = name
self.__attack = int(attack)
self.__hp = int(hp)
# Getter
def get_name(self) -> str:
"""
Getter untuk atribut name, akan return atribut nama dari Hero.
"""
return self.__name
def get_hp(self) -> int:
"""
Getter untuk atribut hp, akan return atribut HP dari Hero
"""
return self.__hp
def get_attack(self) -> int:
"""
Getter untuk atribut attack, akan return atribut attack dari HP hero
"""
return self.__attack
# Setter
def set_hp(self, hp) -> int:
"""
Setter untuk atribut hp, apabila parameter hp positif maka dia akan set atribut hp menjadi parameter, kalau tidak negatif atau 0
akan set atribut hp menjadi 0.
"""
if hp > 0:
self.__hp = hp
else:
self.__hp = 0
def attack(self, other) -> None:
"""
Method untuk menyerang lawan, base attack function memanggil method damaged di class other.
"""
other.damaged(self.get_attack())
def damaged(self, attack) -> None:
"""
Method untuk mengubah status apabila diserang lawan, set hp menjadi value hp sekarang dikurang attack yang dialami.
"""
self.set_hp(self.get_hp() - attack)
def is_alive(self) -> bool:
"""
Method untuk mengecek apakah karakter masih hidup atau sudah mati.
"""
if self.get_hp() == 0:
return False
else:
return True
def __str__(self) -> str:
"""
Method untuk mengembalikan representasi String
"""
return f"{self.get_name():20s}| {'HERO':10s}| {self.get_hp():5d}"
class Support(Hero):
    """Hero subclass that can restore other heroes' hit points."""

    def __init__(self, name, hp, attack, atribut_khusus=20):
        super().__init__(name, hp, attack)  # common fields via the Hero constructor
        self.__heal_amount = int(atribut_khusus)

    def get_heal_amount(self) -> int:
        """Return how many hit points a single heal restores."""
        return self.__heal_amount

    def heal(self, other) -> None:
        """Add this Support's heal amount to *other*'s hp (never to itself)."""
        if other != self:
            other.set_hp(other.get_hp() + self.get_heal_amount())

    def __str__(self) -> str:
        """One-line table row: name | type | hp."""
        return f"{self.get_name():20s}| {'Support':10s}| {self.get_hp():5d}"

    def __add__(self, other):
        """Merge two Supports into one combined Support.

        CONSISTENCY FIX: guarded by the same type check Warrior.__add__
        and Tank.__add__ already use, so mismatched operands yield None
        instead of raising AttributeError on get_heal_amount().
        """
        if type(self) == type(other):
            return Support(f"{self.get_name()}_{other.get_name()}",
                           self.get_hp() + other.get_hp(),
                           self.get_attack() + other.get_attack(),
                           self.get_heal_amount() + other.get_heal_amount())
class Warrior(Hero):
    """Hero subclass whose attacks carry a bonus damage value."""

    def __init__(self, name, hp, attack, atribut_khusus=20):
        # Common fields go through the Hero constructor (default bonus: 20).
        super().__init__(name, hp, attack)
        self.__extra_attack = atribut_khusus

    def get_extra_attack(self) -> int:
        """Return the bonus damage added to every attack."""
        return self.__extra_attack

    def attack(self, other) -> None:
        """Hit *other* with base attack plus the warrior's bonus."""
        total_damage = self.get_attack() + self.get_extra_attack()
        other.damaged(total_damage)

    def __str__(self) -> str:
        """One-line table row: name | type | hp."""
        return f"{self.get_name():20s}| {'Warrior':10s}| {self.get_hp():5d}"

    def __add__(self, other):
        """Fuse two Warriors; yields None when the operand types differ."""
        if type(self) == type(other):
            merged_name = f"{self.get_name()}_{other.get_name()}"
            return Warrior(merged_name,
                           self.get_hp() + other.get_hp(),
                           self.get_attack() + other.get_attack(),
                           self.get_extra_attack() + other.get_extra_attack())
class Tank(Hero):
    """Hero subclass with a shield that absorbs damage before hp."""

    def __init__(self, name, hp, attack, atribut_khusus=20):
        # Common fields go through the Hero constructor (default shield: 20).
        super().__init__(name, hp, attack)
        self.__shield = int(atribut_khusus)

    def get_shield(self) -> int:
        """Return the remaining shield points."""
        return self.__shield

    def set_shield(self, shield) -> None:
        """Set the shield value (coerced to int)."""
        self.__shield = int(shield)

    def damaged(self, attack) -> None:
        """Soak incoming damage with the shield first; overflow hits hp."""
        overflow = attack - self.get_shield()
        if overflow <= 0:
            self.set_shield(self.get_shield() - attack)
        else:
            self.set_hp(self.get_hp() - overflow)
            self.set_shield(0)

    def __str__(self) -> str:
        """One-line table row: name | type | hp."""
        return f"{self.get_name():20s}| {'Tank':10s}| {self.get_hp():5d}"

    def __add__(self, other):
        """Fuse two Tanks; yields None when the operand types differ."""
        if type(self) == type(other):
            return Tank(f"{self.get_name()}_{other.get_name()}",
                        self.get_hp() + other.get_hp(),
                        self.get_attack() + other.get_attack(),
                        self.get_shield() + other.get_shield())
# NOTE: main() & get_hero() do not need to be changed
def get_hero(name, list_hero):
    """Return the hero whose name matches *name*, or None if absent."""
    return next((hero for hero in list_hero if hero.get_name() == name), None)
def main():
    """Interactive game loop.

    Reads hero definitions from stdin ("TYPE NAME HP ATTACK EXTRA", where
    EXTRA may be the literal DEFAULT), then processes ATTACK / HEAL /
    GABUNGKAN (merge) commands until EXIT, finally printing the survivors.
    """
    list_hero = []
    banyak_hero = int(input("Masukkan jumlah hero : "))
    for i in range(banyak_hero):
        input_hero = input("Masukkan detail hero : ")
        detail_hero = input_hero.split()
        tipe = detail_hero[0]
        nama = detail_hero[1]
        hp = int(detail_hero[2])
        attack = int(detail_hero[3])
        atribut_tambahan = detail_hero[4]
        # "DEFAULT" keeps each class's default special attribute.
        if tipe == "SUPPORT":
            if atribut_tambahan != "DEFAULT":
                list_hero.append(
                    Support(nama, hp, attack, int(atribut_tambahan)))
            else:
                list_hero.append(Support(nama, hp, attack))
        elif tipe == "WARRIOR":
            if atribut_tambahan != "DEFAULT":
                list_hero.append(
                    Warrior(nama, hp, attack, int(atribut_tambahan)))
            else:
                list_hero.append(Warrior(nama, hp, attack))
        elif tipe == "TANK":
            if atribut_tambahan != "DEFAULT":
                list_hero.append(Tank(nama, hp, attack, int(atribut_tambahan)))
            else:
                list_hero.append(Tank(nama, hp, attack))
    perintah = input("Masukkan perintah : ")
    list_perintah = perintah.split()
    while list_perintah[0] != "EXIT":
        if list_perintah[0] == "ATTACK":
            karakter1 = get_hero(list_perintah[1], list_hero)
            karakter2 = get_hero(list_perintah[2], list_hero)
            if (karakter1 != None and karakter2 != None):
                karakter1.attack(karakter2)
                # Dead heroes are removed but the status lines still print.
                if not karakter2.is_alive():
                    list_hero.remove(karakter2)
                print(f"{karakter1.get_name()} berhasil menyerang {karakter2.get_name()}")
                print(f"Nyawa {karakter2.get_name()} tersisa {karakter2.get_hp()}")
            else:
                print("Karakter tidak ditemukan")
        elif list_perintah[0] == "HEAL":
            karakter1 = get_hero(list_perintah[1], list_hero)
            karakter2 = get_hero(list_perintah[2], list_hero)
            if (karakter1 != None and karakter2 != None):
                # Only Supports may heal, and never themselves.
                if isinstance(karakter1, Support):
                    if karakter1 != karakter2:
                        karakter1.heal(karakter2)
                        print(f"{karakter1.get_name()} berhasil meng-heal {karakter2.get_name()}")
                        print(f"Nyawa {karakter2.get_name()} menjadi {karakter2.get_hp()}")
                    else:
                        print(f"{karakter1.get_name()} tidak dapat meng-heal dirinya sendiri")
                else:
                    print(f"{karakter1.get_name()} bukan merupakan Support")
            else:
                print("Karakter tidak ditemukan")
        elif list_perintah[0] == "GABUNGKAN":
            karakter1 = get_hero(list_perintah[1], list_hero)
            karakter2 = get_hero(list_perintah[2], list_hero)
            # Merging uses the classes' __add__ overloads (same type only).
            if type(karakter1) == type(karakter2):
                if (karakter1 != None and karakter2 != None):
                    combined_hero = karakter1 + karakter2
                    print(f"{karakter1.get_name()} berhasil bergabung dengan {karakter2.get_name()}", end=" ")
                    print(f"menjadi {combined_hero.get_name()}")
                    list_hero.remove(karakter1)
                    list_hero.remove(karakter2)
                    list_hero.append(combined_hero)
                else:
                    print("Karakter tidak ditemukan")
            else:
                print("Gagal menggabungkan karena tipe kedua karakter berbeda")
        perintah = input("Masukkan perintah : ")
        list_perintah = perintah.split()
    print("\nKarakter yang masih hidup:")
    print("-"*40)
    print("Nama                | Tipe      | HP   ")
    print("-"*40)
    for hero in list_hero:
        print(hero)
if __name__ == "__main__":
main()
| [
"adrian.ardizza@gmail.com"
] | adrian.ardizza@gmail.com |
2e67dafe7fac1cbbc5927705e346ad37a6ed6c89 | fcde32709c62b8ee86da459bb7c8eee52c848118 | /爬虫1903/day09/Baidu/Baidu/settings.py | 6b94193878c3f25ccff9e68ecba1f7857d9f4e73 | [] | no_license | klaus2015/py_base | 6b92d362c3d7dc0e09205a037f4d580381dac94d | ec32c731c1c2f6a0dab87f1d167397e4fa86b8de | refs/heads/master | 2022-07-28T15:49:30.383648 | 2020-05-11T15:31:43 | 2020-05-11T15:31:43 | 261,777,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,181 | py | # -*- coding: utf-8 -*-
# Scrapy settings for Baidu project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Baidu'
SPIDER_MODULES = ['Baidu.spiders']
NEWSPIDER_MODULE = 'Baidu.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Baidu (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 20
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'Baidu.middlewares.BaiduSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'Baidu.middlewares.BaiduDownloaderMiddleware': 543,
'Baidu.middlewares.RandomUserAgentDownloaderMiddleware':200,
'Baidu.middlewares.RandomProxyDownloaderMiddleware':250,
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'Baidu.pipelines.BaiduPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"598467866@qq.com"
] | 598467866@qq.com |
2f33946708e96988142b72cc588f156227951dad | 82aef51947f319caaae43f963139c5c73c80ad3c | /venv/bin/pip | eaf2a486ba5d3ba7b3148524cbdfcbe3888a7809 | [] | no_license | 15PFIEV-Mobile/Back_End | fe61c0fec2da03c2e986200dfa7a27661cad9030 | b6079d835c2ade86af4451c25693607cad01f70a | refs/heads/master | 2020-09-03T09:32:18.265657 | 2019-11-04T07:21:01 | 2019-11-04T07:21:01 | 219,436,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | #!/mnt/c/Work/MobileProgramProject/Back_End/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.main import main
# Console-script shim generated by the installer.
if __name__ == '__main__':
    # Strip launcher suffixes from argv[0] so pip reports its own name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"tranhuuminhdung97@gmail.com"
] | tranhuuminhdung97@gmail.com | |
16db3ba118bc263f264b8f33598c0d0ecd59ba1a | b2805a484e5af57aaee093be1c22895621cefcce | /appsrv/xchgb/aes.py | a14400c549450725e5d23aee856e16e7eeb79d51 | [
"Unlicense"
] | permissive | jzvelc/xchgb | 2cb30a9dd9eb09f34e3ec1781af417e0bf0bac94 | 5a2e7ef7ef7686c4ba06127fa8022beb9fb54a34 | refs/heads/master | 2021-05-26T18:10:00.159785 | 2011-09-22T07:05:20 | 2011-09-22T07:05:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | import base64, random, string
from django.conf import settings
from Crypto.Cipher import AES
# the block size for the cipher object; must be 16, 24, or 32 for AES
BLOCK_SIZE = 32
# the character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE
PADDING = '{'
# one-liner to sufficiently pad the text to be encrypted
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
# one-liners to encrypt/encode and decrypt/decode a string
# encrypt with AES, encode with base64
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
def encrypt(s, k=settings.SECRET_KEY):
    """Encrypt *s* with AES-CFB; return (iv, base64-encoded ciphertext).

    The 16-character alphanumeric IV must be stored alongside the
    ciphertext and passed back to decrypt().
    """
    # SECURITY FIX: random.sample() uses the non-cryptographic Mersenne
    # Twister and never repeats characters, shrinking the IV space.
    # SystemRandom draws from os.urandom and allows repeats.
    rng = random.SystemRandom()
    iv = "".join(rng.choice(string.letters + string.digits) for _ in range(16))
    cipher = AES.new(k[:BLOCK_SIZE], AES.MODE_CFB, iv)
    return iv, EncodeAES(cipher, s)
def decrypt(s, iv, k=settings.SECRET_KEY):
    """Decrypt a base64 ciphertext produced by encrypt() with the same iv/key."""
    cipher = AES.new(k[:BLOCK_SIZE], AES.MODE_CFB, iv)
    return DecodeAES(cipher, s)
"greg@ghughes.com"
] | greg@ghughes.com |
8edf96788c2caa2a1890ea500b31a820d27944dc | baf9b3674cedea6ebf75f5b0f3618528bf385bb3 | /basket/migrations/0002_auto_20210722_1001.py | 45fd523d60e3e91b853eacea86fd7423613b58a6 | [] | no_license | ElfKnell/spiridonis | 3377f73a23802017e3f97f4601bc3f8541c5cb0b | 95cb29f02ec9d0745b68520e520f80a45a618dca | refs/heads/master | 2023-07-06T15:39:09.985267 | 2021-08-17T11:59:12 | 2021-08-17T11:59:12 | 385,142,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | # Generated by Django 3.2.4 on 2021-07-22 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drops Basket.summa and re-declares count (default 1)
    and is_confirmation (default False)."""

    dependencies = [
        ('basket', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='basket',
            name='summa',
        ),
        migrations.AlterField(
            model_name='basket',
            name='count',
            field=models.IntegerField(default=1, verbose_name='Count'),
        ),
        migrations.AlterField(
            model_name='basket',
            name='is_confirmation',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"akyrych84@gmail.com"
] | akyrych84@gmail.com |
91079f8f88ab0a1167f775522a0138301831fe3b | 7ee7c0c67ffbae807005ed6b9f20f3f829125ae7 | /bg.py | 8c6caa1044d80a10e670364654faec627ffb78d3 | [] | no_license | bradenrupp/background | 86998c435e9702ea4fec72016ea01131aa2d878d | cbf7b3b8f88089aa215d8296742824033a2ba8bf | refs/heads/master | 2020-09-27T03:56:33.269880 | 2019-12-06T23:41:47 | 2019-12-06T23:41:47 | 226,423,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | #!/usr/bin/env python3
import ctypes
import os
import time
def change_bg():
pathToBmp = os.path.normpath("C:/Users/Public/.bg.bmp")
SPI_SETDESKTOPWALLPAPER = 20
ctypes.windll.user32.SystemParametersInfoW(
SPI_SETDESKTOPWALLPAPER, 0, pathToBmp, 0)
if __name__ == '__main__':
while True:
time.sleep(5)
change_bg()
time.sleep(5)
| [
"Rupp.Braden@principal.com"
] | Rupp.Braden@principal.com |
96156b6d36634e172c1e69fe6a700c58280d6896 | 311b5a3603fa3e321b2a0dd0d1dbf99922cf1fcd | /src/GUI/main.py | a9c2397ecccb5209c85de8946073792baf7d4f28 | [] | no_license | grmahs/sphero-project | 012215b45d757e5ef3d61734ba4624339e9ac1ba | 2779ff8d8452093b04e8914f62cff7b37f32350c | refs/heads/master | 2020-06-01T13:20:00.720139 | 2019-07-16T21:01:49 | 2019-07-16T21:01:49 | 190,792,447 | 0 | 0 | null | 2019-06-07T18:39:14 | 2019-06-07T18:39:14 | null | UTF-8 | Python | false | false | 1,372 | py | import contextlib
with contextlib.redirect_stdout(None):
import pygame
import r2d2
import server
import sys
pygame.init()
serv = server.Server(None)
serv.start()
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
PURPLE = (255, 0, 255)
size = s_w, s_h = (500, 500)
screen = pygame.display.set_mode(size)
pygame.display.set_caption('R2D2 GUI')
all_sprites_list = pygame.sprite.Group()
r2 = r2d2.R2D2()
r2.set_center_position(x=r2.rect.width/2, y=s_h-r2.rect.height/2)
r2.set_socket_delegate(serv)
all_sprites_list.add(r2)
carry_on = True # continue until explicit exit
clock = pygame.time.Clock()
def keep_in_bounds(sprite):
if sprite.my_x < 0:
sprite.set_center_position(x=0)
elif sprite.my_x > s_w:
sprite.set_center_position(x=s_w)
if sprite.my_y < 0:
sprite.set_center_position(y=0)
elif sprite.my_y > s_h:
sprite.set_center_position(y=s_h)
while carry_on:
# main event loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
carry_on = False
r2.receive_and_handle_data()
# game logic
r2.update()
keep_in_bounds(r2)
all_sprites_list.update()
# redraw code
screen.fill(WHITE) # clear screen
all_sprites_list.draw(screen)
pygame.display.flip() # update the screen
clock.tick(60)
pygame.quit()
| [
"josephcappadona27@gmail.com"
] | josephcappadona27@gmail.com |
727bfe5706d30425d6dc1953d30b21f36aeb2901 | 74eafe55252eff97fd9a2e1e6564ecf243f7c058 | /oop/squares_gen.py | ab286f9884b1857157fe12a048b69b5aa72e1f91 | [] | no_license | srikanthpragada/demo_24_june_2019 | c5ddef71eb721367d656924d312e9ca7ac80c34a | fa7aca273d1ffe6ded34795a639910ab91ce66a0 | refs/heads/master | 2020-06-11T10:19:22.384096 | 2019-08-01T15:28:26 | 2019-08-01T15:28:26 | 193,929,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | def squares(num):
for i in range(1, num + 1):
yield i * i
for n in squares(5):
print(n)
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
93320c27e48d82cb9a176e9aed8825a5e95f31a2 | cee3e57aaae9eaeb16f696e3cdad5f32c3af6861 | /evennia/server/portal/mssp.py | 5ff0a7b319a0e8139ee16b4a057e3b81c4e8bf0c | [
"BSD-3-Clause"
] | permissive | Sa1tC/evennia | 8ef7fae9cbeb2d46bd6cdf5c5482331f9e0846ff | 1248428d132fde1b975678b53e22c1ca68a73a43 | refs/heads/master | 2021-01-23T12:32:03.594263 | 2017-05-22T06:21:25 | 2017-05-22T06:21:25 | 93,164,000 | 0 | 1 | null | 2017-06-02T12:36:55 | 2017-06-02T12:36:55 | null | UTF-8 | Python | false | false | 6,861 | py | """
MSSP - Mud Server Status Protocol
This implements the MSSP telnet protocol as per
http://tintin.sourceforge.net/mssp/. MSSP allows web portals and
listings to have their crawlers find the mud and automatically
extract relevant information about it, such as genre, how many
active players and so on.
"""
from builtins import object
from django.conf import settings
from evennia.utils import utils
MSSP = chr(70)
MSSP_VAR = chr(1)
MSSP_VAL = chr(2)
# try to get the customized mssp info, if it exists.
MSSPTable_CUSTOM = utils.variable_from_module(settings.MSSP_META_MODULE, "MSSPTable", default={})
class Mssp(object):
"""
Implements the MSSP protocol. Add this to a variable on the telnet
protocol to set it up.
"""
def __init__(self, protocol):
"""
initialize MSSP by storing protocol on ourselves and calling
the client to see if it supports MSSP.
Args:
protocol (Protocol): The active protocol instance.
"""
self.protocol = protocol
self.protocol.will(MSSP).addCallbacks(self.do_mssp, self.no_mssp)
def get_player_count(self):
"""
Get number of logged-in players.
Returns:
count (int): The number of players in the MUD.
"""
return str(self.protocol.sessionhandler.count_loggedin())
def get_uptime(self):
"""
Get how long the portal has been online (reloads are not counted).
Returns:
uptime (int): Number of seconds of uptime.
"""
return str(self.protocol.sessionhandler.uptime)
def no_mssp(self, option):
"""
Called when mssp is not requested. This is the normal
operation.
Args:
option (Option): Not used.
"""
self.protocol.handshake_done()
def do_mssp(self, option):
"""
Negotiate all the information.
Args:
option (Option): Not used.
"""
self.mssp_table = {
# Required fields
"NAME": "Evennia",
"PLAYERS": self.get_player_count,
"UPTIME" : self.get_uptime,
# Generic
"CRAWL DELAY": "-1",
"HOSTNAME": "", # current or new hostname
"PORT": ["4000"], # most important port should be last in list
"CODEBASE": "Evennia",
"CONTACT": "", # email for contacting the mud
"CREATED": "", # year MUD was created
"ICON": "", # url to icon 32x32 or larger; <32kb.
"IP": "", # current or new IP address
"LANGUAGE": "", # name of language used, e.g. English
"LOCATION": "", # full English name of server country
"MINIMUM AGE": "0", # set to 0 if not applicable
"WEBSITE": "www.evennia.com",
# Categorisation
"FAMILY": "Custom", # evennia goes under 'Custom'
"GENRE": "None", # Adult, Fantasy, Historical, Horror, Modern, None, or Science Fiction
"GAMEPLAY": "None", # Adventure, Educational, Hack and Slash, None,
# Player versus Player, Player versus Environment,
# Roleplaying, Simulation, Social or Strategy
"STATUS": "Open Beta", # Alpha, Closed Beta, Open Beta, Live
"GAMESYSTEM": "Custom", # D&D, d20 System, World of Darkness, etc. Use Custom if homebrew
"SUBGENRE": "None", # LASG, Medieval Fantasy, World War II, Frankenstein,
# Cyberpunk, Dragonlance, etc. Or None if not available.
# World
"AREAS": "0",
"HELPFILES": "0",
"MOBILES": "0",
"OBJECTS": "0",
"ROOMS": "0", # use 0 if room-less
"CLASSES": "0", # use 0 if class-less
"LEVELS": "0", # use 0 if level-less
"RACES": "0", # use 0 if race-less
"SKILLS": "0", # use 0 if skill-less
# Protocols set to 1 or 0)
"ANSI": "1",
"GMCP": "0",
"ATCP": "0",
"MCCP": "0",
"MCP": "0",
"MSDP": "0",
"MSP": "0",
"MXP": "0",
"PUEBLO": "0",
"SSL": "1",
"UTF-8": "1",
"ZMP": "0",
"VT100": "0",
"XTERM 256 COLORS": "0",
# Commercial set to 1 or 0)
"PAY TO PLAY": "0",
"PAY FOR PERKS": "0",
# Hiring set to 1 or 0)
"HIRING BUILDERS": "0",
"HIRING CODERS": "0",
# Extended variables
# World
"DBSIZE": "0",
"EXITS": "0",
"EXTRA DESCRIPTIONS": "0",
"MUDPROGS": "0",
"MUDTRIGS": "0",
"RESETS": "0",
# Game (set to 1, 0 or one of the given alternatives)
"ADULT MATERIAL": "0",
"MULTICLASSING": "0",
"NEWBIE FRIENDLY": "0",
"PLAYER CITIES": "0",
"PLAYER CLANS": "0",
"PLAYER CRAFTING": "0",
"PLAYER GUILDS": "0",
"EQUIPMENT SYSTEM": "None", # "None", "Level", "Skill", "Both"
"MULTIPLAYING": "None", # "None", "Restricted", "Full"
"PLAYERKILLING": "None", # "None", "Restricted", "Full"
"QUEST SYSTEM": "None", # "None", "Immortal Run", "Automated", "Integrated"
"ROLEPLAYING": "None", # "None", "Accepted", "Encouraged", "Enforced"
"TRAINING SYSTEM": "None", # "None", "Level", "Skill", "Both"
"WORLD ORIGINALITY": "None", # "All Stock", "Mostly Stock", "Mostly Original", "All Original"
}
# update the static table with the custom one
if MSSPTable_CUSTOM:
self.mssp_table.update(MSSPTable_CUSTOM)
varlist = ''
for variable, value in self.mssp_table.items():
if callable(value):
value = value()
if utils.is_iter(value):
for partval in value:
varlist += MSSP_VAR + str(variable) + MSSP_VAL + str(partval)
else:
varlist += MSSP_VAR + str(variable) + MSSP_VAL + str(value)
# send to crawler by subnegotiation
self.protocol.requestNegotiation(MSSP, varlist)
self.protocol.handshake_done()
| [
"griatch@gmail.com"
] | griatch@gmail.com |
081db6e86bf88d1f2fc3d81b85d4d5b8b30fe946 | c98672a6ea8cf91130dc5acbcc1b4bba4f7d2045 | /workTool.py | 2e59ff0c5edc8b87d70571fede4dd48b674f3fa2 | [] | no_license | kexin-do/python-book-down-biquge | f8e335b3450d5a9825e445aa5a178656426d1417 | 45489a7e0a499b12758ec278f75310fc4037e7d9 | refs/heads/master | 2020-07-19T13:10:36.726886 | 2019-09-05T02:21:05 | 2019-09-05T02:21:05 | 206,455,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,092 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 12 15:13:31 2018
@author: root
"""
from bs4 import BeautifulSoup
from enum import Enum
import requests,time,sys
selectorType = Enum('selectorType',('book','menu','text'))
contents = {}
class WorkTool:
def __init__(self,target,charset='utf-8'):
self.server = 'http://www.biquge.com.tw/'
self.target = target#爬取目标地址
self.menu_selector = 'list'#目录id
self.book_selector = 'info'#书名称获取id
self.text_selector = 'content'#内容的id
self.markup_type = 'html5lib'#html解析方式
self.book_name = ''#书名
self.chapter_nums = 0#该书的章节数
self.urls = {}#书的url地址
self.names = {}#书的章节名称
#self.contents = []#书的章节内容
self.charset = charset
def _get_html_(self, url):
request = requests.get(url,timeout=5)
request.encoding = self.charset
return request.text
def print_something(*words):
print(time.strftime('%Y-%m-%d %H:%M:%S ',time.localtime()), words[1])
def _get_current_soup_(self,html,selector_type):
#解析获取到的网页
current_soup = BeautifulSoup(html,self.markup_type)
current_selector = ''
if selector_type == selectorType.book:
current_selector = self.book_selector
elif selector_type == selectorType.menu:
current_selector = self.menu_selector
elif selector_type == selectorType.text:
current_selector = self.text_selector
return current_soup.find_all(id=current_selector)
def _set_book_name_(self,html):
self.book_name = BeautifulSoup(str(self._get_current_soup_(html,selectorType.book)),self.markup_type).find_all('h1')[0].string
self.print_something('当前爬取的书名称为:《'+self.book_name+'》')
def _set_book_menu_(self,html):
target_a_s = BeautifulSoup(str(self._get_current_soup_(html,selectorType.menu)),self.markup_type).find_all('a')
self.chapter_nums = len(target_a_s);
self.print_something(' 《'+self.book_name+'》的目录正在加载中...')
i = 0
for target_a in target_a_s:
self.names[i] = target_a.string
self.urls[i] = self.server+target_a.get('href')
i = i + 1
self.print_something(' 《'+self.book_name+'》的目录加载完毕!共'+str(self.chapter_nums)+'章')
def get_content(self, url):
texts = self._get_current_soup_(self._get_html_(url), selectorType.text)
return texts[0].text.replace('\xa0'*8,'\n\n')
def set_book_text(self,key,text):
#self.print_something(' 《'+self.book_name+'》的内容正在加载中...')
#for i in range(self.chapter_nums):
#self.print_something('正在加载 《'+self.book_name+'》 '+self.names[i]);
#self.contents.append(self.get_content(self.urls[i]))
contents[key] = text;
#self.print_something(' 《'+self.book_name+'》的内容加载完毕...')
def get_down_url(self):
html = self._get_html_(self.target)
self._set_book_name_(html)
self._set_book_menu_(html)
#self._set_book_text_()
def down(self):
with open(self.book_name+'.txt','a',encoding='utf-8',errors='ignore') as book:
for textNum in range(self.chapter_nums):
name = self.names[textNum]
self.print_something('正在下载 《'+self.book_name+'》 '+name);
book.write(name+'\n')
book.writelines(contents[textNum])
book.write('\n\n')
sys.stdout.write(' \r已下载 %.1f%%' % float(((textNum+1)/self.chapter_nums)*100) )
sys.stdout.flush();
book.close();
| [
"kexin_do@163.com"
] | kexin_do@163.com |
350dcd30a907105662e6bda717ac24f31ad8370f | 2136701f48ad131084b331039d864f85988cf451 | /spider/.svn/pristine/35/350dcd30a907105662e6bda717ac24f31ad8370f.svn-base | c6842a58e1a64674b74afbe2cc40404b328236bd | [] | no_license | cuiyulin77/other | 9d374a47d482f1c3f9ef0f3ac4429487643b04b9 | c00cafaf7607452966fa523c4d0b04edb7f153e6 | refs/heads/master | 2020-05-18T04:24:26.095929 | 2019-04-30T06:37:53 | 2019-04-30T06:37:53 | 184,169,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymysql import *
import re
class WbUserPipeline(object):
def process_item(self, item, spider):
return item
class DBPipeline(object):
def __init__(self):
# 连接数据库
self.connect = connect(
# host='47.92.77.18',
host='192.168.3.15',
# host='127.0.0.1',
db='spider',
user='root',
# password='admin8152', # 生产服务器
password='root',
port=3306,
charset='utf8'
)
self.cursor = self.connect.cursor()
def process_item(self, item, spider):
# 获取情感分类
try:
# 插入数据
print("*"*100)
user_id = re.match(r'https\:\/\/m\.weibo\.cn\/u\/(\d+)\?uid.*', item['user_url']).group(1)
print('user_id',user_id)
self.cursor.execute(
"INSERT INTO weibo_user(id,summary,user_name,user_id,user_url,fans,followers,get_time) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
('0', str(item['summary']),str(item['user_name']),
str(user_id),
str(item['user_url']),
str(item['fans']),
str(item['followers']),
str(item['get_time'])),
)
self.connect.commit()
print('mysql一条数据插入成功')
except Exception as e:
# 出现错误时打印错误日志
print(e)
return item
| [
"494658565@qq.com"
] | 494658565@qq.com | |
8b515c6c433d4a5d55c450239b53ce47683b707d | 31c8e8cd5e36c62f22ae39a26bc9cf73d22f62ea | /tasks/tasks/webapp/models.py | 4cb1f81a7dfea6dd6e5e2510ff3c0dfc619dd269 | [] | no_license | kadyrkulove/lab_work39 | 52ee57529dedc12e116312cbc2693df4dae3c108 | 1ca9a4217f3f68a79021b7287512d7e61890faae | refs/heads/master | 2020-04-10T17:18:43.235739 | 2018-12-10T13:08:09 | 2018-12-10T13:08:09 | 161,170,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | from django.db import models
# Create your models here.
class Task(models.Model):
title = models.CharField(max_length=50, null=False, blank=False, verbose_name='Задача')
description = models.TextField(max_length=1000, null=True, blank=True, verbose_name='Описание')
status = models.CharField(max_length=20, null=False, blank=False, default='new', verbose_name='Статус')
def __str__(self):
return f'{self.pk}. self.name (self.status)'
class Meta:
verbose_name = 'Задача'
verbose_name_plural = 'Задачи' | [
"kadyrkulove@gmail.com"
] | kadyrkulove@gmail.com |
60a0fbaf539bce85c0bf31da706243dab58d5230 | ea5ff0005d6409872dd42981cf22eb601e2f827c | /histogram.py | d295fd4c0c7eaa4ef21fc0b124ee79330e1ba2d7 | [] | no_license | muhamuttaqien/corn-soil-identifier | ccd0ed9cc91a935155287e26687d82a02e5409d4 | a235d96811bcc560dc1d7c4cb267ce5d2b2b5b70 | refs/heads/master | 2021-09-02T22:56:18.019044 | 2018-01-04T01:17:27 | 2018-01-04T01:17:27 | 115,904,668 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | #!/usr/bin/env python
import cv2
import math
import numpy as np
from matplotlib import pyplot as plt
def analyze_histogram(img):
"""
find and analyze histogram
"""
hist = cv2.calcHist([img],[0],None,[256],[0,256])
return hist
def plot_histogram(img):
"""
plot and analyze histogram for gray image
"""
hist = cv2.calcHist([img],[0],None,[256],[0,256])
plt.hist(hist,facecolor='green')
plt.title('Histogram'), plt.xlabel("Scale"), plt.ylabel("Quantity")
plt.grid(True)
plt.show()
def analyze_color_histogram(img):
"""
find and analyze histogram for color image
"""
color = ('b', 'g', 'r')
hist = []
for i, col in enumerate(color):
hist.append(cv2.calcHist([img],[i],None,[256],[0,256]))
blue = hist[0]
green = hist[1]
red = hist[2]
return blue, green, red
def plot_color_histogram(img):
"""
plot and analyze histogram for color image
"""
color = ('b', 'g', 'r')
for i, col in enumerate(color):
hist = cv2.calcHist([img],[i],None,[256],[0,256])
plt.plot(hist, color = col)
plt.xlim([0,256])
plt.title('Color Histogram'), plt.xlabel("Scale"), plt.ylabel("Quantity")
plt.grid(True)
plt.show() | [
"angga.muttaqien@ruma.co.id"
] | angga.muttaqien@ruma.co.id |
8065cd7502032e1ffa597e2ed1a2c51f7334439f | 91f9d18a82b106bdadeaccc19dd6bab658ef6ea4 | /datastructures/LinkedList.py | 8f75d58c86a7c71592719932be3b00d45f2f5f7f | [] | no_license | elianajn/CP307 | 6ae518ee449c5d9230b37ce17566e226da101625 | dd132429299e0fe570fd6f5e1e706c31fd4a16da | refs/heads/main | 2023-01-15T18:07:52.215792 | 2020-11-11T18:40:22 | 2020-11-11T18:40:22 | 305,578,531 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,512 | py | class ListNode:
def __init__(self, data):
self.data = data
self.prev = None
self.next = None
def __repr__(self):
return str(self.data)
class LinkedList:
def __init__(self):
self.first = None
self.last = None
self.current_size = 0
def size(self):
return self.current_size
#Add new item to end of list
def add(self, data):
new_node = ListNode(data)
if self.size() == 0:
self.first = new_node
self.last = new_node
elif self.size() == 1:
self.last = new_node
new_node.prev = self.first
self.first.next = new_node
else:
self.last.next = new_node
new_node.prev = self.last
self.last = new_node
self.current_size += 1
#Return item at target index
def __getitem__(self, target_index):
curr_index = 0
curr_node = self.first
while curr_node != None and curr_index < target_index:
curr_node = curr_node.next
curr_index += 1
if curr_node != None:
return curr_node.data
else:
print("ERROR: ILLEGAL INDEX")
indexErr = "Index %d out of bounds for list size: %d" % (target_index, self.size())
raise IndexError(indexErr)
def delete(self, target_index):
if target_index == 0 and self.size() > 1:
self.first.next.prev = None
self.first = self.first.next
elif target_index == 0 and self.size() == 1:
self.first = None
self.last = None
elif target_index == self.size() - 1 and self.size() > 1:
self.last.prev.next = None
self.last = self.last.prev
else:
curr_index = 0
curr_node = self.first
while curr_node != None and curr_index < target_index:
curr_node = curr_node.next
curr_index += 1
if curr_node != None:
curr_node.prev.next = curr_node.next
curr_node.next.prev = curr_node.prev
else:
raise IndexError("Index %d out of bounds for list size: %d" % (target_index, self.size()))
self.current_size -= 1
def __repr__(self):
result = ""
curr_node = self.first
while curr_node != None:
result += str(curr_node) + ", "
curr_node = curr_node.next
return result
| [
"elianajneu@gmail.com"
] | elianajneu@gmail.com |
f7d45287bca0ec55ea32d9ed5da25480ec9a3285 | e608c9525e88ba3589cb4a2fd47f6a2e0442bfb2 | /pycorrector/seq2seq/corrector_model.py | 02bd512d259867d51f01a8b89dbf7780a9222e09 | [
"Apache-2.0"
] | permissive | YC-wind/pycorrector | 9f5c14d2cc0cf6f53ff253c6035cf816e1f334d8 | c68ea194a95949f6d6ffb7cb33dfc6679e1bbc9e | refs/heads/master | 2020-03-16T21:56:53.394037 | 2018-05-02T04:05:17 | 2018-05-02T04:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,328 | py | # -*- coding: utf-8 -*-
# Author: XuMing <xuming624@qq.com>
# Brief:
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import seq2seq
from reader import PAD_ID, GO_ID
class CorrectorModel(object):
"""Sequence-to-sequence model used to correct grammatical errors in text.
NOTE: mostly copied from TensorFlow's seq2seq_model.py; only modifications
are:
- the introduction of RMSProp as an optional optimization algorithm
- the introduction of a "projection bias" that biases decoding towards
selecting tokens that appeared in the input
"""
def __init__(self, source_vocab_size, target_vocab_size, buckets, size,
num_layers, max_gradient_norm, batch_size, learning_rate,
learning_rate_decay_factor, use_lstm=False,
num_samples=512, forward_only=False, config=None,
corrective_tokens_mask=None):
"""Create the model.
Args:
source_vocab_size: size of the source vocabulary.
target_vocab_size: size of the target vocabulary.
buckets: a list of pairs (I, O), where I specifies maximum input
length that will be processed in that bucket, and O specifies
maximum output length. Training instances that have longer than I
or outputs longer than O will be pushed to the next bucket and
padded accordingly. We assume that the list is sorted, e.g., [(2,
4), (8, 16)].
size: number of units in each layer of the model.
num_layers: number of layers in the model.
max_gradient_norm: gradients will be clipped to maximally this norm.
batch_size: the size of the batches used during training;
the model construction is independent of batch_size, so it can be
changed after initialization if this is convenient, e.g.,
for decoding.
learning_rate: learning rate to start with.
learning_rate_decay_factor: decay learning rate by this much when
needed.
use_lstm: if true, we use LSTM cells instead of GRU cells.
num_samples: number of samples for sampled softmax.
forward_only: if set, we do not construct the backward pass in the
model.
"""
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.buckets = buckets
self.batch_size = batch_size
self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
self.learning_rate_decay_op = self.learning_rate.assign(
self.learning_rate * learning_rate_decay_factor)
self.global_step = tf.Variable(0, trainable=False)
self.config = config
# Feeds for inputs.
self.encoder_inputs = []
self.decoder_inputs = []
self.target_weights = []
for i in range(buckets[-1][0]): # Last bucket is the biggest one.
self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="encoder{0}".format(
i)))
for i in range(buckets[-1][1] + 1):
self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
name="decoder{0}".format(
i)))
self.target_weights.append(tf.placeholder(tf.float32, shape=[None],
name="weight{0}".format(
i)))
# One hot encoding of corrective tokens.
corrective_tokens_tensor = tf.constant(corrective_tokens_mask if
corrective_tokens_mask else
np.zeros(self.target_vocab_size),
shape=[self.target_vocab_size],
dtype=tf.float32)
batched_corrective_tokens = tf.stack(
[corrective_tokens_tensor] * self.batch_size)
self.batch_corrective_tokens_mask = batch_corrective_tokens_mask = \
tf.placeholder(
tf.float32,
shape=[None, None],
name="corrective_tokens")
# Our targets are decoder inputs shifted by one.
targets = [self.decoder_inputs[i + 1]
for i in range(len(self.decoder_inputs) - 1)]
# If we use sampled softmax, we need an output projection.
output_projection = None
softmax_loss_function = None
# Sampled softmax only makes sense if we sample less than vocabulary
# size.
if num_samples > 0 and num_samples < self.target_vocab_size:
w = tf.get_variable("proj_w", [size, self.target_vocab_size])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [self.target_vocab_size])
output_projection = (w, b)
def sampled_loss(labels, logits):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(w_t, b, labels, logits,
num_samples,
self.target_vocab_size)
softmax_loss_function = sampled_loss
# Create the internal multi-layer cell for our RNN.
single_cell = tf.nn.rnn_cell.GRUCell(size)
if use_lstm:
single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
cell = single_cell
if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)
# The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
"""
:param encoder_inputs: list of length equal to the input bucket
length of 1-D tensors (of length equal to the batch size) whose
elements consist of the token index of each sample in the batch
at a given index in the input.
:param decoder_inputs:
:param do_decode:
:return:
"""
if do_decode:
# Modify bias here to bias the model towards selecting words
# present in the input sentence.
input_bias = self.build_input_bias(encoder_inputs,
batch_corrective_tokens_mask)
# Redefined seq2seq to allow for the injection of a special
# decoding function that
return seq2seq.embedding_attention_seq2seq(
encoder_inputs, decoder_inputs, cell,
num_encoder_symbols=source_vocab_size,
num_decoder_symbols=target_vocab_size,
embedding_size=size,
output_projection=output_projection,
feed_previous=do_decode,
loop_fn_factory=
apply_input_bias_and_extract_argmax_fn_factory(input_bias))
else:
return seq2seq.embedding_attention_seq2seq(
encoder_inputs, decoder_inputs, cell,
num_encoder_symbols=source_vocab_size,
num_decoder_symbols=target_vocab_size,
embedding_size=size,
output_projection=output_projection,
feed_previous=do_decode)
# Training outputs and losses.
if forward_only:
self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, True),
softmax_loss_function=softmax_loss_function)
if output_projection is not None:
for b in range(len(buckets)):
# We need to apply the same input bias used during model
# evaluation when decoding.
input_bias = self.build_input_bias(
self.encoder_inputs[:buckets[b][0]],
batch_corrective_tokens_mask)
self.outputs[b] = [
project_and_apply_input_bias(output, output_projection,
input_bias)
for output in self.outputs[b]]
else:
self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, False),
softmax_loss_function=softmax_loss_function)
# Gradients and SGD update operation for training the model.
params = tf.trainable_variables()
if not forward_only:
self.gradient_norms = []
self.updates = []
opt = tf.train.RMSPropOptimizer(0.001) if self.config.use_rms_prop \
else tf.train.GradientDescentOptimizer(self.learning_rate)
# opt = tf.train.AdamOptimizer()
for b in range(len(buckets)):
gradients = tf.gradients(self.losses[b], params)
clipped_gradients, norm = tf.clip_by_global_norm(
gradients, max_gradient_norm)
self.gradient_norms.append(norm)
self.updates.append(opt.apply_gradients(
zip(clipped_gradients, params),
global_step=self.global_step))
self.saver = tf.train.Saver(tf.global_variables())
def build_input_bias(self, encoder_inputs, batch_corrective_tokens_mask):
packed_one_hot_inputs = tf.one_hot(indices=tf.stack(
encoder_inputs, axis=1), depth=self.target_vocab_size)
return tf.maximum(batch_corrective_tokens_mask,
tf.reduce_max(packed_one_hot_inputs,
reduction_indices=1))
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
bucket_id, forward_only, corrective_tokens=None):
"""Run a step of the model feeding the given inputs.
Args:
session: tensorflow session to use.
encoder_inputs: list of numpy int vectors to feed as encoder inputs.
decoder_inputs: list of numpy int vectors to feed as decoder inputs.
target_weights: list of numpy float vectors to feed as target weights.
bucket_id: which bucket of the model to use.
forward_only: whether to do the backward step or only forward.
Returns:
A triple consisting of gradient norm (or None if we did not do
backward), average perplexity, and the outputs.
Raises:
ValueError: if length of encoder_inputs, decoder_inputs, or
target_weights disagrees with bucket size for the specified
bucket_id.
"""
# Check if the sizes match.
encoder_size, decoder_size = self.buckets[bucket_id]
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(target_weights) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(target_weights), decoder_size))
# Input feed: encoder inputs, decoder inputs, target_weights,
# as provided.
input_feed = {}
for l in range(encoder_size):
input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
for l in range(decoder_size):
input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
input_feed[self.target_weights[l].name] = target_weights[l]
corrective_tokens_vector = (corrective_tokens if
corrective_tokens is not None else
np.zeros(self.target_vocab_size))
batch_corrective_tokens = np.repeat([corrective_tokens_vector],
self.batch_size, axis=0)
input_feed[self.batch_corrective_tokens_mask.name] = (
batch_corrective_tokens)
# Since our targets are decoder inputs shifted by one, we need one more.
last_target = self.decoder_inputs[decoder_size].name
input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
# Output feed: depends on whether we do a backward step or not.
if not forward_only:
output_feed = [self.updates[bucket_id], # Update Op that does SGD.
self.gradient_norms[bucket_id], # Gradient norm.
self.losses[bucket_id]] # Loss for this batch.
else:
output_feed = [self.losses[bucket_id]] # Loss for this batch.
for l in range(decoder_size): # Output logits.
output_feed.append(self.outputs[bucket_id][l])
outputs = session.run(output_feed, input_feed)
if not forward_only:
# Gradient norm, loss, no outputs.
return outputs[1], outputs[2], None
else:
# No gradient norm, loss, outputs.
return None, outputs[0], outputs[1:]
def get_batch(self, data, bucket_id):
"""Get a random batch of data from the specified bucket, prepare for
step.
To feed data in step(..) it must be a list of batch-major vectors, while
data here contains single length-major cases. So the main logic of this
function is to re-index data cases to be in the proper format for
feeding.
Args:
data: a tuple of size len(self.buckets) in which each element contains
lists of pairs of input and output data that we use to create a
batch.
bucket_id: integer, which bucket to get the batch for.
Returns:
The triple (encoder_inputs, decoder_inputs, target_weights) for
the constructed batch that has the proper format to call step(...)
later.
"""
encoder_size, decoder_size = self.buckets[bucket_id]
encoder_inputs, decoder_inputs = [], []
# Get a random batch of encoder and decoder inputs from data,
# pad them if needed, reverse encoder inputs and add GO to decoder.
for _ in range(self.batch_size):
encoder_input, decoder_input = random.choice(data[bucket_id])
# Encoder inputs are padded and then reversed.
encoder_pad = [PAD_ID] * (
encoder_size - len(encoder_input))
encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
# Decoder inputs get an extra "GO" symbol, and are padded then.
decoder_pad_size = decoder_size - len(decoder_input) - 1
decoder_inputs.append([GO_ID] + decoder_input +
[PAD_ID] * decoder_pad_size)
# Now we create batch-major vectors from the data selected above.
batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
# Batch encoder inputs are just re-indexed encoder_inputs.
for length_idx in range(encoder_size):
batch_encoder_inputs.append(
np.array([encoder_inputs[batch_idx][length_idx]
for batch_idx in range(self.batch_size)],
dtype=np.int32))
# Batch decoder inputs are re-indexed decoder_inputs, we create weights.
for length_idx in range(decoder_size):
batch_decoder_inputs.append(
np.array([decoder_inputs[batch_idx][length_idx]
for batch_idx in range(self.batch_size)],
dtype=np.int32))
# Create target_weights to be 0 for targets that are padding.
batch_weight = np.ones(self.batch_size, dtype=np.float32)
for batch_idx in range(self.batch_size):
# We set weight to 0 if the corresponding target is a PAD
# symbol. The corresponding target is decoder_input shifted by 1
# forward.
if length_idx < decoder_size - 1:
target = decoder_inputs[batch_idx][length_idx + 1]
if length_idx == decoder_size - 1 or target == PAD_ID:
batch_weight[batch_idx] = 0.0
batch_weights.append(batch_weight)
return batch_encoder_inputs, batch_decoder_inputs, batch_weights
def project_and_apply_input_bias(logits, output_projection, input_bias):
  """Project decoder outputs to vocabulary logits and mask with an input bias.

  The optional projection maps RNN outputs to vocabulary-sized logits; a
  softmax then makes every token score strictly positive, so multiplying by
  the ``input_bias`` mask (shape [batch, vocab len], 1.0 for input tokens
  plus all "corrective" tokens) zeroes out every disallowed token.
  """
  projected = logits
  if output_projection is not None:
    proj_w, proj_b = output_projection
    projected = nn_ops.xw_plus_b(projected, proj_w, proj_b)
  # Softmax first so masking cannot leave negative scores behind.
  return tf.multiply(tf.nn.softmax(projected), input_bias)
def apply_input_bias_and_extract_argmax_fn_factory(input_bias):
  """Build a loop-function factory whose decoder feedback is input-biased.

  :param input_bias: mask tensor of shape [batch, vocab len] with 1.0 for
      every token allowed in the output (tokens from the input plus all
      "corrective" tokens) and 0.0 elsewhere; it is applied to the decoder
      distribution before the argmax is taken.
  :return: a ``fn_factory(embedding, output_projection, update_embedding)``
      callable producing the loop function described below.
  """
  def fn_factory(embedding, output_projection=None, update_embedding=True):
    """Get a loop_function that extracts the previous symbol and embeds it.
    Args:
      embedding: embedding tensor for symbols.
      output_projection: None or a pair (W, B). If provided, each fed previous
        output will first be multiplied by W and added B.
      update_embedding: Boolean; if False, the gradients will not propagate
        through the embeddings.
    Returns:
      A loop function.
    """
    def loop_function(prev, _):
      # Mask the previous step's distribution so only permitted tokens can
      # ever be chosen as the next decoder input.
      prev = project_and_apply_input_bias(prev, output_projection,
                                          input_bias)
      prev_symbol = math_ops.argmax(prev, 1)
      # Note that gradients will not propagate through the second
      # parameter of embedding_lookup.
      emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
      if not update_embedding:
        emb_prev = array_ops.stop_gradient(emb_prev)
      return emb_prev, prev_symbol
    return loop_function
  return fn_factory
| [
"507153809@qq.com"
] | 507153809@qq.com |
12cf3bfd6a7899ebfae769750b1a5a17ec2edcfe | 1b9fb0b96327d155b7d076daaae749000b9a12b0 | /cs214/django/mysite/west/views.py | c969fb0097c738485fc518b28576626fa514ac09 | [] | no_license | wangjiezhe/shiyanlou | c929d7ae81ccec4d019e7eaee9cc33b42632c84e | 55f1665814c8a7ce34811fd61126811a9f91e057 | refs/heads/master | 2020-04-15T19:54:02.864363 | 2016-04-27T03:03:05 | 2016-05-10T12:11:49 | 39,290,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
from west.models import Character
from django.core.context_processors import csrf
from django import forms
# Create your views here.
class CharacterForm(forms.Form):
    # Single-field form used by the `investigate` view to collect a name.
    name = forms.CharField(max_length = 200)
def first_page(request):
    """Render the static landing text for the west app."""
    body = "<p>西餐</p>"
    return HttpResponse(body)
def staff(request):
    """List every Character record via the templay.html template.

    (Removed several lines of commented-out experiments that rendered the
    records as a joined string.)
    """
    staff_list = Character.objects.all()
    return render(request, 'templay.html', {'staffs': staff_list})
def templay(request):
    """Render templay.html with a static greeting label."""
    return render(request, 'templay.html', {'label': 'Hello World!'})
def form(request):
    """Show the bare HTML form page."""
    template_name = 'form.html'
    return render(request, template_name)
def investigate(request):
    """Handle the name survey: persist POSTed names, then list all records.

    On POST the submitted name is validated and saved; in every case the
    page is re-rendered with a fresh unbound form, a CSRF token and the
    full record list.
    NOTE(review): a bound form with validation errors is discarded below,
    so the user never sees why an invalid submission failed — confirm that
    this is intended.
    """
    if request.POST:
        form = CharacterForm(request.POST)
        if form.is_valid():
            submitted = form.cleaned_data['name']
            new_record = Character(name = submitted)
            new_record.save()
    # Always fall through to an empty form (no redirect-after-POST here).
    form = CharacterForm()
    ctx = {}
    ctx.update(csrf(request))  # manual CSRF token for the template
    all_records = Character.objects.all()
    ctx['staff'] = all_records
    ctx['form'] = form
    return render(request, 'investigate.html', ctx)
| [
"wangjiezhe@gmail.com"
] | wangjiezhe@gmail.com |
aef3c3624058a9104e4a84e3fdb7e33668a84b8c | 90d4b790f9a7198760fdbcfad6abd2da851f2f4e | /0x0F-python-object_relational_mapping/3-my_safe_filter_states.py | b2b27fdbdcc3fbb2c02bd4aa205bc8225158b438 | [] | no_license | Murega14/holbertonschool-higher_level_programming | 2817a532d7d6739ed046e350903e394ed1bae0a3 | f29a4c4e74c01798cb51bfe5160432569a1ca833 | refs/heads/master | 2023-03-15T08:22:06.926537 | 2018-09-09T20:46:33 | 2018-09-09T20:46:33 | 572,548,803 | 1 | 0 | null | 2022-11-30T14:11:06 | 2022-11-30T14:10:32 | null | UTF-8 | Python | false | false | 643 | py | #!/usr/bin/python3
# gets all states via python yee boi with your own state SAFE
def main(args):
    """Print all states whose name matches args[4], via a safe query.

    ``args`` mirrors sys.argv: [script, mysql_user, mysql_password,
    db_name, state_name].  The state name is passed through the DB-API
    placeholder (%s) rather than string formatting, which is what makes
    this version SQL-injection safe.

    Raises:
        Exception: if fewer/more than 4 arguments were supplied.
    """
    if len(args) != 5:
        raise Exception("need 4 arguments!")
    db = MySQLdb.connect(host='localhost',
                         user=args[1],
                         passwd=args[2],
                         db=args[3])
    cur = db.cursor()
    # Parameterized LIKE: the driver escapes args[4] for us.
    cur.execute(
        "SELECT * FROM states WHERE name LIKE %s ORDER BY id ASC",
        (args[4],))
    states = cur.fetchall()
    for state in states:
        print(state)
if __name__ == "__main__":
    # Imports are deferred into the guard so the module can be imported
    # (e.g. by a checker) without MySQLdb installed.
    import sys
    import MySQLdb
    main(sys.argv)
| [
"Dkazemian@gmail.com"
] | Dkazemian@gmail.com |
658bfe4a687bf49d9390ea8b857434f7f6b6ae1b | fe8a8101f3b028161cc5a191680b0608b48baa6e | /python-study/dataType/dataType5.py | 2c3685831c4e7c8eb30ac5234a041009e5e369db | [] | no_license | prkhyo/coding-test | b61953829b7536d4901ba386e05d64459190718f | 3938902e8d768d6da63fa338758bb92723f663a8 | refs/heads/master | 2023-08-31T21:07:08.044407 | 2021-09-26T17:05:09 | 2021-09-26T17:05:09 | 388,933,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,586 | py |
# Set data type
# Set characteristics: duplicates are NOT allowed, elements have NO order
# A set can be initialized from a list or a string
# Initialization methods
# (1) use the set() function
# (2) list the elements, separated by commas, inside curly braces {}
# Lookup and update both run in O(1) time

# Set initialization, method 1
data = set([1, 1, 2, 3, 4, 5, 5])
print(data) # {1, 2, 3, 4, 5} -> duplicate elements removed

# Set initialization, method 2
data = {1, 2, 4, 5, 5, 5, 6, 7}
print(data) # {1, 2, 4, 5, 6, 7} -> duplicate elements removed

# Set operations
# The basic set operations are union, intersection and difference
# Union: elements belonging to set A or to set B (A U B)
# Intersection: elements belonging to both set A and set B (A ∩ B)
# Difference: elements of set A that do not belong to set B (A - B)

a = set([1, 2, 3, 4, 5])
b = set([3, 4, 5, 6, 7])

# Union
print(a | b) # {1, 2, 3, 4, 5, 6, 7}
# Intersection
print(a & b) # {3, 4, 5}
# Difference
print(a - b) # {1, 2}

# Set-related functions (constant time each)

# add(num)
# add one new element
data = set([1, 2, 3])
print(data) # {1, 2, 3}
data.add(4)
print(data) # {1, 2, 3, 4}

# update([num1, num2])
# add several new elements at once
data = set([1, 2, 3])
print(data) # {1, 2, 3}
data.update([4, 5, 6])
print(data) # {1, 2, 3, 4, 5, 6}

# remove(num)
# remove the element with the given value
data = set([1, 2, 3])
print(data) # {1, 2, 3}
data.remove(3)
print(data) # {1, 2}

# Characteristics of dict and set types
# Lists and tuples are ordered, so values can be fetched by indexing
# Dicts and sets are unordered, so indexing cannot be used
# A dict is queried by key and a set by element, both in O(1) time
# Keys and set elements must be immutable objects such as strings or tuples

# Type comparison summary
#             List(List)      Tuple(Tuple)     Dictionary(Dict)       Set(Set)
# Feature   manages items    same as a list,  links a name (key)   holds set elements;
#           as a sequence    but cannot be    to a content (value) duplicate values
#                            changed                               are not allowed
# Syntax    list=            tuple=           dict=                set=
#           [e1, e2,...]     (e1, e2,...)     {key1:val1, ...}     {e1, e2,..}
# Ordered       o                o                  x                  x
#         indexing/slicing  indexing/slicing
# Mutable       o                x                  o                  o
# Duplicates    o                o                  x                  x

# + String: an immutable object, just like a tuple
| [
""
] | |
ac684f0dba65c38917b56bb8e3c8b63747f120b8 | d5588bc2feccee17ed8f40d524acd66f47bdf228 | /src/Problem/Selectors/value_selector.py | 811f08df2a98b6890744d66270d54f33bd2b1788 | [] | no_license | AltiTorie/CSP | cb477edbb755686095bf72af357889cadf193fa7 | c2ad8086c1f77e66a1c85b30bb448e5647386620 | refs/heads/master | 2023-03-28T21:33:29.255717 | 2021-04-19T22:00:48 | 2021-04-19T22:00:48 | 357,532,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | from abc import ABC, abstractmethod
from src.Problem.problem import Problem
class ValueSelector(ABC):
    """Strategy interface for ordering a variable's domain values
    during CSP search."""

    def __init__(self, csp: Problem):
        # The constraint-satisfaction problem this selector operates on.
        self.csp = csp

    @abstractmethod
    def select_values_order(self, assignment, variable, domains=None):
        ...
class TakeOriginalValueSelector(ValueSelector):
    """Yields the variable's domain values in their original order."""

    def select_values_order(self, assignment, variable, domains=None):
        # Fall back to the CSP's own domains when no override is given.
        source = domains if domains else self.csp.domains
        return source[variable]
class LeastConstrainingValueSelector(ValueSelector):
    """Least-constraining-value heuristic: order values so those leaving
    the most consistent options open for neighbouring unassigned
    variables come first."""

    def select_values_order(self, assignment, variable, domains=None):
        """Return {value: remaining-options count}, most permissive first."""
        if not domains:
            domains = self.csp.domains
        # value -> number of consistent options left for the neighbours
        others_values_count = {}
        for value in domains[variable]:
            temp_assignment = assignment.copy()
            temp_assignment[variable] = value
            count = self.__get_count_of_possible_values(temp_assignment, variable, domains)
            others_values_count[value] = count
        # Highest counts first == least constraining first.
        return dict(sorted(others_values_count.items(), key=lambda item: item[1], reverse=True))

    def __get_count_of_possible_values(self, assignment, checked_variable, domains):
        # Count every (neighbour, value) pair still consistent with the
        # tentative assignment, over all constraints on checked_variable.
        counter = 0
        for constraint in self.csp.constraints[checked_variable]:
            for variable in constraint.variables:
                if variable not in assignment:
                    for value in domains[variable]:
                        temp_assignment = assignment.copy()
                        temp_assignment[variable] = value
                        if self.csp.is_consistent(variable, temp_assignment):
                            counter += 1
        return counter
| [
"Arek.ste99@wp.pl"
] | Arek.ste99@wp.pl |
6968bced836ffb47e8b4d896ae663bc05f94bc97 | 3f6f4c7360898b1761efca0f868970c7521839d7 | /install.py | 1c693aee56c974c70ac7b59ce652d41adf951f1a | [] | no_license | westporch/ipmitool_time_sync | dc1859dda0222aa2a5904d5386ea70e8998faab3 | 3ec23da4da11c604f3a04e8e515e0d71afad7fc2 | refs/heads/master | 2021-01-10T02:17:25.875101 | 2015-12-02T03:00:28 | 2015-12-02T03:00:28 | 47,167,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | #!/usr/bin/python
# Hyeongwan Seo
"""Register ipmitool_time_sync.sh in /etc/rc.d/rc.local so it runs at boot."""
import os

# Give the sync script permissions rc.local can use.
# NOTE(review): mode 655 (not 755) is what the original used -- confirm intended.
os.system("chmod 655 ipmitool_time_sync.sh")

ABS_PATH = os.getcwd() + "/./ipmitool_time_sync.sh"

# Append the script's absolute path to rc.local so it is executed at boot
# (context manager guarantees the file is closed even on write failure).
with open("/etc/rc.d/rc.local", "a") as f:
    f.write('\n' + ABS_PATH)
| [
"westporch@gmail.com"
] | westporch@gmail.com |
e8602b17915bca9838d8ff2d73861d1902c2a90a | 4acdda2a7e17c8cfbaa8fe2cb6c39db964dc25d2 | /tests/test_blog.py | 3d745bac59ddce2a126679d8c6ad7ae0acc1e9b2 | [
"MIT"
] | permissive | Vitalis-Kirui/Personal-Blog | c5dafb0aac197effac92d191ca39a25495108140 | 49af71b70f32ff4a588df26cd38091a6d80eb805 | refs/heads/master | 2023-07-31T00:49:04.553066 | 2021-09-26T19:47:50 | 2021-09-26T19:47:50 | 409,132,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | import unittest
from app.models import Blog, User
class BlogTest(unittest.TestCase):
    """Unit tests for the Blog model."""

    def setUp(self):
        # Fresh user and blog fixtures for every test.
        self.user_id = User(username='vitalis', password='computer', email='vitalis@gmail.com')
        self.new_blog = Blog(blog_title='My Birthday', posted_at='14/3/1998', blog_content='The date I took my first breath', user_id=self.user_id.id)

    def test_check_instance_variables(self):
        # assertEqual: assertEquals is a deprecated alias since Python 3.2.
        self.assertEqual(self.new_blog.blog_title, 'My Birthday')
        self.assertEqual(self.new_blog.blog_content, 'The date I took my first breath')
        self.assertEqual(self.new_blog.user_id, self.user_id.id)

    def test_save_blog(self):
        self.new_blog.save_blog()
        self.assertTrue(len(Blog.query.all()) > 0)

    def test_get_blog(self):
        self.new_blog.save_blog()
        # Was `assertTrue(self)`, which is vacuously true for any instance;
        # assert the record was actually persisted instead.
        self.assertTrue(len(Blog.query.all()) > 0)
"vitalis.kirui@student.moringaschool.com"
] | vitalis.kirui@student.moringaschool.com |
289b20a0b72d988bd1e32bbce7bf486152eca97b | b62bd0e4c83ce06ee0fbe1e99131fdaf79908cec | /test/integration/ggrc/models/mixins/test_with_action.py | d5b7ecd0556678d5992cb460340bf0d0d8820893 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ch-t/ggrc-core | 8cbacf674ed4a319da7f370f5d40bbfdb70791cd | c1ff8c0319dc3bad59f35525227e2b4c9a505841 | refs/heads/dev | 2021-01-19T17:05:31.961848 | 2017-08-22T05:28:39 | 2017-08-22T05:28:39 | 101,045,324 | 0 | 0 | null | 2017-08-22T09:23:09 | 2017-08-22T09:23:09 | null | UTF-8 | Python | false | false | 19,989 | py | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration test for WithAction mixin"""
import copy
from ggrc.models import all_models
from integration.ggrc import api_helper
from integration.ggrc import TestCase
from integration.ggrc.generator import ObjectGenerator
from integration.ggrc.models import factories
from integration.ggrc.query_helper import WithQueryApi
class TestDocumentWithActionMixin(TestCase, WithQueryApi):
  """Test case for WithAction mixin and Document actions.

  Exercises add_related/remove_related actions for Document objects
  (URLs and evidence attachments) applied to an Assessment via PUT.
  """

  def setUp(self):
    super(TestDocumentWithActionMixin, self).setUp()
    self.client.get("/login")
    self.api = api_helper.Api()

  def test_add_url(self):
    """Test add url action."""
    assessment = factories.AssessmentFactory()
    context = factories.ContextFactory(related_object=assessment)
    assessment.context = context
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": None,
            "type": "Document",
            "link": "google.com",
            "title": "google.com",
            "document_type": all_models.Document.URL,
        }
    ]}})
    self.assert200(response)
    # A relationship to the freshly created document must now exist.
    rel_id = response.json["assessment"]["related_destinations"][0]["id"]
    relationship = all_models.Relationship.query.get(rel_id)
    self.assertIsNotNone(relationship)
    document = all_models.Document.query.get(relationship.destination_id)
    self.assertEqual(document.link, "google.com")
    self.assertEqual(document.document_type, all_models.Document.URL)
    # The new document inherits the assessment's context.
    self.assertEqual(document.context_id, assessment.context_id)

  def test_map_document(self):
    """Test map document action """
    assessment = factories.AssessmentFactory()
    document = factories.DocumentFactory()
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": document.id,
            "type": "Document",
        }
    ]}})
    self.assert200(response)
    rel_id = response.json["assessment"]["related_destinations"][0]["id"]
    relationship = all_models.Relationship.query.get(rel_id)
    self.assertEqual(relationship.destination_id, document.id)
    self.assertEqual(relationship.source_id, assessment.id)

  def test_wrong_add_url(self):
    """Test wrong add url action."""
    assessment = factories.AssessmentFactory()
    # Each incremental variant is still missing a required field -> 400.
    wrong_params = {
        "id": None,
        "type": "Document",
    }
    response = self.api.put(assessment, {"actions": {"add_related": [
        wrong_params
    ]}})
    self.assert400(response)
    wrong_params["document_type"] = "URL"
    response = self.api.put(assessment, {"actions": {"add_related": [
        wrong_params
    ]}})
    self.assert400(response)
    wrong_params["link"] = "google.com"
    response = self.api.put(assessment, {"actions": {"add_related": [
        wrong_params
    ]}})
    self.assert400(response)

  def test_wrong_add_action(self):
    """Test wrong add action."""
    assessment = factories.AssessmentFactory()
    response = self.api.put(assessment, {"actions": {"add_related": []}})
    self.assert400(response)
    response = self.api.put(assessment, {"actions": {"add_related": [{}]}})
    self.assert400(response)
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "type": "Document",
        }
    ]}})
    self.assert400(response)
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": None,
        }
    ]}})
    self.assert400(response)

  def test_unmap_document_as_dst(self):
    """Test unmapping of documents set as relationship destination."""
    assessment = factories.AssessmentFactory()
    document = factories.DocumentFactory()
    rel_id = factories.RelationshipFactory(source=assessment,
                                           destination=document).id
    response = self.api.put(assessment, {"actions": {"remove_related": [
        {
            "id": document.id,
            "type": "Document",
        }
    ]}})
    self.assert200(response)
    relationship = all_models.Relationship.query.get(rel_id)
    self.assertIsNone(relationship)

  def test_unmap_document_as_src(self):
    """Test unmapping of documents set as relationship source."""
    assessment = factories.AssessmentFactory()
    document = factories.DocumentFactory()
    rel_id = factories.RelationshipFactory(destination=assessment,
                                           source=document).id
    response = self.api.put(assessment, {"actions": {"remove_related": [
        {
            "id": document.id,
            "type": "Document",
        }
    ]}})
    self.assert200(response)
    relationship = all_models.Relationship.query.get(rel_id)
    self.assertIsNone(relationship)

  def test_wrong_remove_action(self):
    """Test wrong remove action."""
    assessment = factories.AssessmentFactory()
    document_id = factories.DocumentFactory().id
    response = self.api.put(assessment, {"actions": {"remove_related": []}})
    self.assert400(response)
    response = self.api.put(assessment, {"actions": {"remove_related": [{}]}})
    self.assert400(response)
    response = self.api.put(assessment, {"actions": {"remove_related": [
        {
            "id": document_id,
        }
    ]}})
    self.assert400(response)
    response = self.api.put(assessment, {"actions": {"remove_related": [
        {
            "type": "Document",
        }
    ]}})
    self.assert400(response)
    response = self.api.put(assessment, {"actions": {"remove_related": [
        {
            "id": None,
            "type": "Document",
        }
    ]}})
    self.assert400(response)

  def test_unmap_nonexistent_url(self):
    """Test unmap nonexistent url action."""
    assessment = factories.AssessmentFactory()
    response = self.api.put(assessment, {"actions": {"remove_related": [
        {
            "id": 0,
            "type": "Document",
        }
    ]}})
    self.assert400(response)

  def test_wrong_unmap_url(self):
    """Test wrong unmap url action."""
    assessment = factories.AssessmentFactory()
    response = self.api.put(assessment, {"actions": {"remove_related": [
        {
            "type": "Document",
        }
    ]}})
    self.assert400(response)

  def test_add_evidence(self):
    """Test add evidence action."""
    assessment = factories.AssessmentFactory()
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": None,
            "type": "Document",
            "document_type": "EVIDENCE",
            "title": "evidence1",
            "link": "google.com",
        }
    ]}})
    self.assert200(response)
    rel_id = response.json["assessment"]["related_destinations"][0]["id"]
    relationship = all_models.Relationship.query.get(rel_id)
    self.assertIsNotNone(relationship)
    document = all_models.Document.query.get(relationship.destination_id)
    self.assertEqual(document.link, "google.com")
    self.assertEqual(document.title, "evidence1")
    self.assertEqual(document.document_type, all_models.Document.ATTACHMENT)

  def test_wrong_add_evidence(self):
    """Test wrong add evidence action."""
    assessment = factories.AssessmentFactory()
    proper_values = {
        "id": None,
        "type": "Document",
        "document_type": "EVIDENCE",
        "title": "evidence1",
        "link": "google.com",
    }
    # Dropping any required field (or mistyping document_type) -> 400.
    wrong_values = copy.copy(proper_values)
    del wrong_values["title"]
    response = self.api.put(assessment, {"actions": {"add_related":
                                                     [wrong_values]}})
    self.assert400(response)
    wrong_values = copy.copy(proper_values)
    del wrong_values["link"]
    response = self.api.put(assessment, {"actions": {"add_related":
                                                     [wrong_values]}})
    self.assert400(response)
    wrong_values = copy.copy(proper_values)
    wrong_values["document_type"] = "EVDNCE"
    response = self.api.put(assessment, {"actions": {"add_related":
                                                     [wrong_values]}})
    self.assert400(response)

  def test_status_change_document(self):
    """Test auto status change after add document action"""
    # Adding a document to a finalized assessment reopens it (In Progress).
    assessment = factories.AssessmentFactory(
        status=all_models.Assessment.FINAL_STATE)
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": None,
            "type": "Document",
            "link": "google.com",
            "title": "google.com",
            "document_type": "URL",
        }
    ]}})
    self.assert200(response)
    self.assertEqual(response.json["assessment"]["status"],
                     all_models.Assessment.PROGRESS_STATE)

  def test_put_without_actions(self):
    """Test assessment put without actions"""
    assessment = factories.AssessmentFactory()
    response = self.api.put(assessment, {"description": "test"})
    self.assert200(response)

  def test_document_indexing(self):
    """Test document_indexing"""
    assessment = factories.AssessmentFactory()
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": None,
            "type": "Document",
            "link": "google.com",
            "title": "google.com",
            "document_type": "URL",
        }
    ]}})
    self.assert200(response)
    # The new URL must be searchable through the full-text index...
    assessments_by_url = self.simple_query(
        "Assessment",
        expression=["url", "~", "google.com"]
    )
    self.assertEqual(len(assessments_by_url), 1)
    rel_id = response.json["assessment"]["related_destinations"][0]["id"]
    relationship = all_models.Relationship.query.get(rel_id)
    response = self.api.put(assessment, {"actions": {"remove_related": [
        {
            "id": relationship.destination_id,
            "type": "Document",
        }
    ]}})
    self.assert200(response)
    # ...and disappear from the index after the unmap.
    assessments_by_url = self.simple_query(
        "Assessment",
        expression=["url", "~", "google.com"]
    )
    self.assertFalse(assessments_by_url)
class TestCommentWithActionMixin(TestCase):
  """Test case for WithAction mixin and Comment actions.

  Covers mapping/unmapping Comment objects (plain and custom-attribute
  bound) to an Assessment through the actions API.
  """

  def setUp(self):
    super(TestCommentWithActionMixin, self).setUp()
    self.client.get("/login")
    self.api = api_helper.Api()

  def test_add_comment(self):
    """Test add comment action."""
    generator = ObjectGenerator()
    # Act as a Reader who is also an assessment assignee so the created
    # comment inherits the assignee roles.
    _, reader = generator.generate_person(user_role="Reader")
    self.api.set_user(reader)
    assessment = factories.AssessmentFactory()
    context = factories.ContextFactory(related_object=assessment)
    assessment.context = context
    object_person_rel = factories.RelationshipFactory(
        source=assessment,
        destination=reader
    )
    factories.RelationshipAttrFactory(
        relationship_id=object_person_rel.id,
        attr_name="AssigneeType",
        attr_value="Creator,Assessor"
    )
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": None,
            "type": "Comment",
            "description": "comment",
            "custom_attribute_definition_id": None,
        }
    ]}})
    self.assert200(response)
    rel_id = response.json["assessment"]["related_destinations"][0]["id"]
    relationship = all_models.Relationship.query.get(rel_id)
    self.assertIsNotNone(relationship)
    comment = all_models.Comment.query.get(relationship.destination_id)
    self.assertEqual(comment.description, "comment")
    # Assignee roles of the acting user are copied onto the comment.
    self.assertEqual(comment.assignee_type, "Creator,Assessor")
    self.assertEqual(comment.context_id, assessment.context_id)

  def test_add_custom_comment(self):
    """Test add custom attribute comment action."""
    assessment = factories.AssessmentFactory()
    ca_def = factories.CustomAttributeDefinitionFactory(
        title="def1",
        definition_type="assessment",
        attribute_type="Dropdown",
        multi_choice_options="no,yes",
    )
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": None,
            "type": "Comment",
            "description": "comment",
            "custom_attribute_definition_id": ca_def.id,
        }
    ]}})
    self.assert200(response)
    rel_id = response.json["assessment"]["related_destinations"][0]["id"]
    relationship = all_models.Relationship.query.get(rel_id)
    self.assertIsNotNone(relationship)
    comment = all_models.Comment.query.get(relationship.destination_id)
    self.assertEqual(comment.description, "comment")
    # The comment stays linked to the CA definition it was left for.
    self.assertEqual(comment.custom_attribute_definition_id, ca_def.id)

  def test_wrong_add_comment(self):
    """Test wrong add comment action."""
    # A comment without a description is rejected.
    assessment = factories.AssessmentFactory()
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": None,
            "type": "Comment",
        }
    ]}})
    self.assert400(response)

  def test_remove_comment(self):
    """Test remove comment action."""
    assessment = factories.AssessmentFactory()
    comment = factories.CommentFactory(description="123")
    rel_id = factories.RelationshipFactory(source=assessment,
                                           destination=comment).id
    response = self.api.put(assessment, {"actions": {"remove_related": [
        {
            "id": comment.id,
            "type": "Comment",
        }
    ]}})
    self.assert200(response)
    relationship = all_models.Relationship.query.get(rel_id)
    self.assertIsNone(relationship)

  def test_status_unchanged(self):
    """Test auto status isn't change after add comment action"""
    # Unlike documents/snapshots, comments must not reopen an assessment.
    assessment = factories.AssessmentFactory()
    comment = factories.CommentFactory()
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": comment.id,
            "type": "Comment",
        }
    ]}})
    self.assert200(response)
    self.assertEqual(response.json["assessment"]["status"],
                     all_models.Assessment.START_STATE)
def _create_snapshot():
  """Create snapshot for test.

  Builds an audit with a mapped assessment, plus a snapshot of a control's
  latest revision under that audit, and returns (assessment, snapshot).
  """
  audit = factories.AuditFactory()
  assessment = factories.AssessmentFactory(audit=audit)
  context = factories.ContextFactory(related_object=assessment)
  assessment.context = context
  factories.RelationshipFactory(source=audit, destination=assessment)
  control = factories.ControlFactory(description='control-9')
  # Snapshots reference a concrete revision: take the control's newest one.
  revision = all_models.Revision.query.filter(
      all_models.Revision.resource_id == control.id,
      all_models.Revision.resource_type == control.__class__.__name__
  ).order_by(
      all_models.Revision.id.desc()
  ).first()
  snapshot = factories.SnapshotFactory(
      parent=audit,
      child_id=control.id,
      child_type=control.__class__.__name__,
      revision_id=revision.id
  )
  return assessment, snapshot
class TestSnapshotWithActionMixin(TestCase, WithQueryApi):
  """Test case for WithAction mixin and Snapshot actions."""

  def setUp(self):
    super(TestSnapshotWithActionMixin, self).setUp()
    self.client.get("/login")
    self.api = api_helper.Api()

  def test_add_snapshot(self):
    """Test add snapshot action."""
    assessment, snapshot = _create_snapshot()
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": snapshot.id,
            "type": "Snapshot",
        }
    ]}})
    self.assert200(response)
    rel_id = response.json["assessment"]["related_destinations"][0]["id"]
    relationship = all_models.Relationship.query.get(rel_id)
    self.assertIsNotNone(relationship)
    self.assertEqual(relationship.destination_id, snapshot.id)
    self.assertEqual(relationship.destination_type, "Snapshot")
    self.assertEqual(relationship.context_id, assessment.context_id)
    # The snapshotted control's description must not leak into the
    # audit's own full-text index.
    audits = self.simple_query('Audit',
                               expression=["description", "~", "'control-9'"])
    self.assertFalse(audits)

  def test_wrong_add_snapshot(self):
    """Test wrong add snapshot action."""
    # A snapshot action without an id is rejected.
    assessment = factories.AssessmentFactory()
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": None,
            "type": "Snapshot",
        }
    ]}})
    self.assert400(response)

  def test_remove_snapshot(self):
    """Test remove snapshot action."""
    assessment, snapshot = _create_snapshot()
    rel_id = factories.RelationshipFactory(source=assessment,
                                           destination=snapshot).id
    response = self.api.put(assessment, {"actions": {"remove_related": [
        {
            "id": snapshot.id,
            "type": "Snapshot",
        }
    ]}})
    self.assert200(response)
    snapshot = all_models.Relationship.query.get(rel_id)
    self.assertIsNone(snapshot)

  def test_status_change_snapshot(self):
    """Test auto status change after add snapshot action"""
    assessment, snapshot = _create_snapshot()
    response = self.api.put(assessment, {"actions": {"add_related": [
        {
            "id": snapshot.id,
            "type": "Snapshot",
        }
    ]}})
    self.assert200(response)
    self.assertEqual(response.json["assessment"]["status"],
                     all_models.Assessment.PROGRESS_STATE)
class TestMultiplyActions(TestCase, WithQueryApi):
  """Test case for WithAction mixin with multiply actions.

  Verifies that a single PUT can combine custom-attribute updates with
  several add_related/remove_related actions at once.
  """

  def setUp(self):
    super(TestMultiplyActions, self).setUp()
    self.client.get("/login")
    self.api = api_helper.Api()

  def test_multiply_actions(self):
    """Test multiply actions"""
    assessment = factories.AssessmentFactory()
    doc_map = factories.DocumentFactory(link="google1.com")
    doc_del = factories.DocumentFactory(link="google2.com")
    factories.RelationshipFactory(source=assessment,
                                  destination=doc_del)
    # Dropdown CA whose "yes" answer (bitmask 3) requires comment+evidence.
    ca_def = factories.CustomAttributeDefinitionFactory(
        title="def1",
        definition_type="assessment",
        definition_id=assessment.id,
        attribute_type="Dropdown",
        multi_choice_options="no,yes",
        multi_choice_mandatory="0,3"
    )
    ca_val = factories.CustomAttributeValueFactory(
        custom_attribute=ca_def,
        attributable=assessment,
        attribute_value="no"
    )
    # One request: flip the CA to "yes", add an evidence, map doc_map,
    # leave a CA comment, and unmap doc_del.
    response = self.api.put(assessment, {
        "custom_attribute_values": [
            {
                "id": ca_val.id,
                "custom_attribute_id": ca_def.id,
                "attribute_value": "yes",
                "type": "CustomAttributeValue",
            }],
        "actions": {"add_related": [
            {
                "id": None,
                "type": "Document",
                "document_type": "EVIDENCE",
                "title": "evidence1",
                "link": "google3.com",
            },
            {
                "id": doc_map.id,
                "type": "Document",
            },
            {
                "id": None,
                "type": "Comment",
                "description": "comment1",
                "custom_attribute_definition_id": ca_def.id,
            }
        ], "remove_related": [
            {
                "id": doc_del.id,
                "type": "Document",
            }]}})
    self.assert200(response)
    # "yes" requires more than one piece of evidence/comment -> preconditions
    # are still reported as failed.
    preconditions_failed = response.json["assessment"]["preconditions_failed"]
    self.assertIs(preconditions_failed, True)
    # Index reflects the final mapping state: mapped URL present...
    assessment_by_url = self.simple_query(
        "Assessment",
        expression=["url", "~", "google1.com"]
    )
    self.assertEqual(len(assessment_by_url), 1)
    # ...unmapped URL gone...
    assessment_by_url = self.simple_query(
        "Assessment",
        expression=["url", "~", "google2.com"]
    )
    self.assertFalse(assessment_by_url)
    # ...new evidence and comment searchable.
    assessment_by_evidence = self.simple_query(
        "Assessment",
        expression=["evidence", "~", "google3.com"]
    )
    self.assertEqual(len(assessment_by_evidence), 1)
    assessment_by_comment = self.simple_query(
        "Assessment",
        expression=["comment", "~", "comment1"]
    )
    self.assertEqual(len(assessment_by_comment), 1)
| [
"alena.sheshko@gmail.com"
] | alena.sheshko@gmail.com |
6ca84c2dba4135fe731d4c342f84ee3cb1aca083 | 3cfc01eab152e5b97554a0965e79fc1221b71321 | /pitches.py | 0248bae1ee649a43672db25dff4671911742178f | [
"MIT"
] | permissive | ZhouLihua/leetcode | 5ebe8bd9dcd04bf2c822cfb3c338d2d25231bba5 | 7a711e450756fb7b5648e938879d690e583f5957 | refs/heads/master | 2021-11-18T17:26:53.492232 | 2021-09-17T07:58:25 | 2021-09-17T07:58:25 | 127,875,412 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | import sys
import math
def total_Hours(speed, pitches):
    """Return the total hours needed to finish all piles at `speed` per hour.

    Each pile of size ``pitch`` takes ceil(pitch / speed) whole hours.
    The arithmetic ceiling ``-(-pitch // speed)`` is used instead of
    ``math.ceil(pitch / speed)`` because this file targets Python 2
    (``print`` statements, ``sys.maxint``), where ``pitch / speed`` floors
    first and math.ceil could never round up.  It also always returns an
    int, on both Python 2 and 3.
    """
    total_hours = 0
    for pitch in pitches:
        total_hours += -(-pitch // speed)  # integer ceiling division
    return total_hours
if __name__ == "__main__":
try:
line = sys.stdin.readline().strip()
if not line:
raise Exception("negative")
values = map(int, line.split())
if len(values) < 2:
raise Exception("negative")
pitches = values[:-1]
hour = values[-1]
for item in pitches:
if item <= 0:
raise Exception("negative")
if hour < len(pitches):
raise Exception("negative")
total = sum(pitches)
min_speed = math.ceil(total / hour) if total >= hour else 1
hours = sys.maxint
import pdb
pdb.set_trace()
while hours > hour:
hours = total_Hours(min_speed, pitches)
min_speed += 1
print int(min_speed - 1)
except Exception:
print -1
| [
"jerry.zhou@dell,com"
] | jerry.zhou@dell,com |
d9d4fc280b8d97cbb947ddebab7ab59ca5aa1ae2 | 512267fc9295a9296dc9da3f0947e324a2f37984 | /startingout/tests.py | 39beac1031a362881763120ddc9bc09c60cfd163 | [] | no_license | joroson/startingout | b55bd8c45c4b70ae5147f0a98a6adf6888d2ed51 | 92091ae7f73f246c6cc700d5effe4e9da91f8bef | refs/heads/master | 2023-03-02T18:01:54.362286 | 2021-02-01T10:18:10 | 2021-02-01T10:18:10 | 317,985,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
    """Unit tests for view callables using a stubbed Pyramid registry."""

    def setUp(self):
        # Push a testing configuration so views run without a real app.
        self.config = testing.setUp()

    def tearDown(self):
        testing.tearDown()

    def test_my_view(self):
        from .views.default import my_view
        request = testing.DummyRequest()
        info = my_view(request)
        self.assertEqual(info['project'], 'StartingOut')
class FunctionalTests(unittest.TestCase):
    """End-to-end tests against the fully assembled WSGI application."""

    def setUp(self):
        # Build the real app and wrap it in WebTest's request helper.
        from startingout import main
        app = main({})
        from webtest import TestApp
        self.testapp = TestApp(app)

    def test_root(self):
        res = self.testapp.get('/', status=200)
        # Response body is bytes, hence the b'' literal.
        self.assertTrue(b'Pyramid' in res.body)
| [
"joe@10kinds.tech"
] | joe@10kinds.tech |
7b1992faec0c1b2d12b7ad49f4d454f2fe93c9e7 | ac042704660f07263a9b7918c9d19e8027e2c01b | /qn 13.py | 0a8a84e201e1017ecb24dbbf67cab6fd0d002f36 | [] | no_license | Prashant414/python | 23387f2d205ceb36f141e4b4529ff9c3e80d2679 | f5ff2b280b4bf29df2723b9d1d16690e65aaf62f | refs/heads/main | 2023-03-21T10:23:32.119726 | 2021-03-09T17:59:45 | 2021-03-09T17:59:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | # Print First 10 natural numbers using while
i=0
while i<=10:
print(i)
i=i+1
| [
"prashantshyam09@gmail.com"
] | prashantshyam09@gmail.com |
9d6439a6b2d66e84bc71bc05fe37f2fd23593da6 | fc23456b470ed67b2052faa8d8b40af57243c66e | /core/mysite/submodels/image.py | f8b8dbcf44ff277c61fc933c5b5d144c3864fb99 | [] | no_license | hamedsalim1999/doprax-django-with-docker | 3b0918fe974dbf7ba8a5e5337dbcb1621be44c99 | d0ebe6268bb7d36f39934db21ab45ec479aea51f | refs/heads/master | 2023-07-18T05:15:13.816184 | 2021-09-08T17:39:04 | 2021-09-08T17:39:04 | 404,428,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | from django.db import models
from django.core.files import File
import secrets
import os
from django.urls import reverse,reverse_lazy
from django.conf import settings
from .base import Publish,Timestamp
from django.utils.text import slugify
def _generate_sku():
    """Return a fresh URL-safe SKU.

    Passed to the field as a callable so Django evaluates it once per new
    row, not once at class-definition time.
    """
    return secrets.token_urlsafe(16)


class Image(Timestamp):
    """Image record keyed by a random SKU, with an auto-generated slug."""

    title = models.CharField(max_length=128, null=True)
    # Callable default: the original `default=secrets.token_urlsafe(16)` was
    # evaluated once at import time, so every new row shared the same SKU.
    sku = models.CharField(max_length=128, primary_key=True,
                           default=_generate_sku, editable=False, unique=True)
    slug = models.SlugField(max_length=128, db_index=True, unique=True)
    description = models.CharField(max_length=512, blank=True, null=True)
    image = models.ImageField(max_length=128, upload_to='./info',
                              height_field='height_field', width_field='width_field')
    height_field = models.PositiveIntegerField(default='480', blank=True, null=True)
    width_field = models.PositiveIntegerField(default='720', blank=True, null=True)

    def __str__(self):
        return self.title

    def save(self, *args, **kwargs):
        # Random suffix keeps the unique-slug constraint satisfied even for
        # duplicate titles.
        self.slug = slugify(f'{self.title}{secrets.token_urlsafe(4)}', allow_unicode=True)
        # Only assign a SKU when missing: regenerating it on every save()
        # changed the primary key and inserted a duplicate row per update.
        if not self.sku:
            self.sku = _generate_sku()
        super(Image, self).save(*args, **kwargs)
| [
"hamediran1999@gmail.com"
] | hamediran1999@gmail.com |
c1ee39b1b2a7ca3e916a559da292bc53bfdc5b74 | 017f62ebc7357dc665723a5b4fa75294f31fda8f | /lib/jnpr/eznc/resrc/srx/nat/nat_proxy_arp.py | 10fe27280370f10dad027ec9771f769faed67709 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | cro/py-junos-eznc | c2588d9fde7b65ec523c558d741716f3a19621c7 | 4c111476cc8eb7599462379ddf55743ae30bbf5c | refs/heads/master | 2021-01-20T16:36:20.034788 | 2013-11-19T19:17:32 | 2013-11-19T19:17:32 | 14,535,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,235 | py | # 3rd-party modules
from lxml.builder import E
# module packages
from ...resource import Resource
from .... import jxml as JXML
class NatProxyArp(Resource):
    """
    [edit security nat proxy-arp interface <if_name> address <ip_prefix>]

    Resource namevar:
      tuple(if_name, ip_prefix)

    Description:
      Manages proxy-arp entries for NAT. There are no managed properties,
      so entries are simply created or removed by their
      tuple(if_name, ip_prefix) name.

      Select an entry directly:
        entry = NatProxyArp(jdev, ('reth0.213','198.18.11.5'))

      Or via the bind mechanism:
        jdev.bind(parp=NatProxyArp)
        entry = jdev.parp[('reth0.213', '198.18.11.5')]

      Because there are no properties, creation requires the 'touch'
      option on write():
        if not entry.exists:
            entry.write(touch=True)

      Removal:
        entry.delete()
    """

    def _xml_at_top(self):
        # Build the config stub down to the named interface/address pair.
        if_name, ip_prefix = self._name
        address = E.address(E.name(ip_prefix))
        interface = E.interface(E.name(if_name), address)
        return E.security(E.nat(E('proxy-arp', interface)))

    ### -------------------------------------------------------------------
    ### OVERLOADS
    ### -------------------------------------------------------------------

    def rename(self, name):
        """ UNSUPPORTED """
        raise RuntimeError("Unsupported for Resource: %s" % self.__class__.__name__)

    ### -------------------------------------------------------------------
    ### XML read
    ### -------------------------------------------------------------------

    def _xml_at_res(self, xml):
        # Locate this resource's node in a config response.
        return xml.find('.//proxy-arp/interface')

    def _xml_to_py(self, as_xml, to_py):
        # Only the exists/active status is extracted; no other properties.
        Resource._r_has_xml_status(as_xml, to_py)

    ### -------------------------------------------------------------------
    ### Resource List, Catalog -- only executed by 'manager' resources
    ### -------------------------------------------------------------------

    def _r_list(self):
        raise RuntimeError("@@@ NEED TO IMPLEMENT!")

    def _r_catalog(self):
        raise RuntimeError("@@@ NEED TO IMPLEMENT!")
| [
"jschulman@juniper.net"
] | jschulman@juniper.net |
2d43f4dfb1eaf4ad3fad458a1b6c1adda9da703d | e941fed78264cbb32df8909d881c64721f75b295 | /home/urls.py | faf95fcd176062a101b39b0173ae70922090e0a6 | [] | no_license | PelaNisha/third_django-user-auth- | 3f5e7418438cef9416d54de290a75e7632965876 | d6eef69e00909b4489e6258151e9345fce0cf31b | refs/heads/master | 2023-05-27T22:57:43.864612 | 2021-06-05T15:45:12 | 2021-06-05T15:45:12 | 371,736,249 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from django.contrib import admin
from django.urls import path, include
from home import views
# URL routes for the ``home`` app: landing page plus login/logout views.
urlpatterns = [
    path('', views.index, name = 'home'),  # landing page
    path('login', views.loginUser, name = 'login'),  # login form / handler
    path('logout', views.logoutuser, name = 'logout'),  # ends the session
]
"nishapela777@gmail.com"
] | nishapela777@gmail.com |
de0b7d4aab3481f7eee89ede7eb41a802bda201e | 1ac173361f4de3e0a2fed0b3454d57b6bbf77d0c | /rapi/principal/migrations/0030_auto_20160622_1713.py | 7c2757888796db93a9945d408b45416073603930 | [] | no_license | Alpha004/Raph2 | c75274d826ef2de0ac69d37e771cb11084f1d7ae | c4e0c608ea4eb8f2bcdd2c34ded48340532a05c2 | refs/heads/master | 2021-08-28T19:58:04.125143 | 2016-07-27T05:01:29 | 2016-07-27T05:01:29 | 63,810,009 | 0 | 0 | null | 2021-07-30T01:01:52 | 2016-07-20T19:52:28 | HTML | UTF-8 | Python | false | false | 607 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-06-22 17:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header) — do not hand-edit logic.
    # Makes Atencion.Nombre_U a nullable FK to the project user model,
    # cascading on user deletion.

    dependencies = [
        ('principal', '0029_auto_20160608_2138'),
    ]

    operations = [
        migrations.AlterField(
            model_name='atencion',
            name='Nombre_U',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"jesusalren93@gmail.com"
] | jesusalren93@gmail.com |
24589718db31ecf9fa0256f8e90ded54357ef870 | 21b4410b96d7f56ce8b27dc1015680e926eb5fce | /ModuloString/aula135.py | 742f365aa3ddafc5bedff1760f647bcb804c7b2f | [
"MIT"
] | permissive | pinheirogus/Curso-Python-Udemy | ddef17ae861279ec8e6cc44183b91bbe11bbd100 | d6d52320426172e924081b9df619490baa8c6016 | refs/heads/main | 2023-07-17T12:01:23.731329 | 2021-09-01T01:49:21 | 2021-09-01T01:49:21 | 401,896,087 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py |
from string import Template
from datetime import datetime
# Fill the $-placeholders of an HTML template using string.Template.
with open('template.html', 'r', encoding='utf8') as html:
    # The file is read here, passing a plain string to the Template class.
    template = Template(html.read())
# Current date formatted as dd/mm/yyyy.
data_atual = datetime.now().strftime('%d/%m/%Y')
# safe_substitute leaves unknown placeholders untouched instead of raising.
corpo_msg = template.safe_substitute(nome = 'Luiz Otávio', data = data_atual )
print(corpo_msg)
"pinheirogustavo@gmail.com"
] | pinheirogustavo@gmail.com |
7d28b9591a02b80200b7b5c4108b1294bb6c007c | 273eab596c6c92e23b41b85b358e8e6deb463511 | /lambda_function.py | 824e62f6ff2502b04c351a0f1d25c741970611dd | [] | no_license | khanarbaj/openCV-app | 5f516b6124d6c3a98ad7caa0699e64f8a3af0075 | 9a85419d1e5e3374a679eaf33554e25eae30679c | refs/heads/master | 2022-12-13T00:45:40.491607 | 2020-09-12T19:02:02 | 2020-09-12T19:02:02 | 295,004,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | from tkinter import *
# Fixed-size 500x500 window whose background color changes per mouse button.
window = Tk()
window.geometry("500x500")
window.resizable(0, 0)
# Handler functions (one per mouse button); each recolors the window.
def show1(e): return window.configure(background="red")
def show2(e): return window.configure(background="green")
def show3(e): return window.configure(background="blue")
bt1 = Button(window, text="click", font=("Arial", 20))
bt1.pack()
# Binding the button events to the handlers above.
bt1.bind("<Button-1>", show1)  # for left click
bt1.bind("<Button-2>", show2)  # for middle click
bt1.bind("<Button-3>", show3)  # for right click
window.mainloop()
| [
"khanarbaj@gmail.com"
] | khanarbaj@gmail.com |
1a442203491aa25653aae444c840e22b780f5b94 | 35c98eaa78de7bec7f985d62febb62f17a2177fc | /nlde/engine/eddynetwork.py | eef6a1c1c01f51fac5239869918e7fcaa182346c | [] | no_license | maribelacosta/nlde | a24499e8115c62352a6e16746ef89ca06252ed40 | df6d850b23ba617ae6872798c3165f01de8c5e68 | refs/heads/master | 2022-04-25T17:54:45.593633 | 2020-04-23T10:12:25 | 2020-04-23T10:12:25 | 258,157,557 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,963 | py | """
Created on Mar 23, 2015
@author: Maribel Acosta
"""
from eddyoperator import EddyOperator
from nlde.util.sparqlparser import parse
from nlde.util.explainplan import explain_plan
from nlde.planner.optimizer import IndependentOperator, create_plan
from multiprocessing import Process, Queue
class EddyNetwork(object):
    """Adaptive query-execution network of eddy operators.

    Parses a SPARQL query, builds an operator plan, then spawns ``n_eddy``
    eddy processes that route tuples between operators via queues.
    """

    def __init__(self, query, policy, source="", n_eddy=2, explain=False):
        # query: SPARQL query string; policy: tuple-routing policy object.
        self.query = query
        self.policy = policy
        self.source = source
        self.n_eddy = n_eddy
        self.explain = explain
        self.eofs = 0
        self.independent_operators = []
        self.join_operators = []
        self.eddy_operators = []
        # One inter-process queue per eddy (filled in execute()).
        self.eddies_queues = []
        # operator id -> list of its input queues (one per independent input).
        self.operators_input_queues = {}
        self.operators_left_queues = []
        self.operators_right_queues = []
        # Queue of spawned eddy process ids (used for teardown downstream).
        self.p_list = Queue()
        # Plan metadata, populated by create_plan() in execute().
        self.tree = None
        self.operators_desc = None
        self.sources_desc = None
        self.eofs_operators_desc = None

    def execute(self, outputqueue):
        """Plan the query, start the eddy processes, and run the plan.

        Results are delivered through ``outputqueue``.
        """
        # Parse SPARQL query.
        queryparsed = parse(self.query)
        # Create plan.
        (self.tree, tree_height, self.operators_desc, self.sources_desc,
         plan_order, operators_vars, independent_sources, self.eofs_operators_desc,
         operators_sym, operators) = create_plan(queryparsed, self.n_eddy, self.source)
        if self.explain:
            explain_plan(self.tree)
            #print "Plan"
            #print str(self.tree)
        # Number of EOF markers to expect equals the independent sources.
        self.eofs = independent_sources
        self.policy.initialize_priorities(plan_order)
        # Create eddies queues.
        for i in range(0, self.n_eddy+1):
            self.eddies_queues.append(Queue())
        # Create operators queues (left and right).
        for op in operators:
            self.operators_input_queues.update({op.id_operator: []})
            for i in range(0, op.independent_inputs):
                self.operators_input_queues[op.id_operator].append(Queue())
        #for i in range(0, len(self.operators_desc)):
        #for i in self.operators_desc.keys():
        #    self.operators_input_queues.update({i: []})
        #    for j in self.operators_desc[i].keys():
        #self.operators_input_queues[i].update(j:{})#append(Queue())
        #self.operators_right_queues.append(Queue())
        # Create eddy operators and execute them (ids start at 1).
        for i in range(1, self.n_eddy+1):
            eddy = EddyOperator(i, self.policy, self.eddies_queues, self.operators_desc, self.operators_input_queues,
                                operators_vars, outputqueue, independent_sources, self.eofs_operators_desc,
                                operators_sym, operators)
            p = Process(target=eddy.execute)
            p.start()
            self.p_list.put(p.pid)
        # Drive the plan tree; it feeds the operator/eddy queues.
        self.tree.execute(self.operators_input_queues, self.eddies_queues, self.p_list, self.operators_desc)
| [
"maribel.acosta@kit.edu"
] | maribel.acosta@kit.edu |
0f3fc8831e53c6a2ac259688095352d63dd2b74e | 3f893fef9b7b22bd6fd801b9bcb956e7912f302c | /aliu/packages.py | 9ff4a1b654df8f45e44eb4ca90b90335ed7d0325 | [
"MIT"
] | permissive | a2liu/pytill | 63eb54a9bf8fb3554c8f5052232662cd58f2e949 | f05587707495471bc7403f9582cc89ad455a6a5f | refs/heads/master | 2020-04-14T19:12:45.197035 | 2019-01-05T01:13:06 | 2019-01-05T01:13:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | import sys
# Root directory that holds locally checked-out packages.
PACKAGE_DIR = '/Users/aliu/code/python/packages/'


def add_local(*packages):
    """Prepend each named package directory under PACKAGE_DIR to sys.path.

    Every entry is inserted at position 0, so later arguments end up
    earlier on sys.path.
    """
    for name in packages:
        full_path = "%s%s" % (PACKAGE_DIR, name)
        sys.path.insert(0, full_path)
| [
"albertymliu@gmail.com"
] | albertymliu@gmail.com |
689f7241d4dc56a641bc73e4a10d491e1b16ae55 | a86864b0ca6bc1d4dbdd22c26257340b8131e859 | /forms/contract_award.py | 24bb6668c834f1225f8d923038e604378fd92b82 | [
"MIT"
] | permissive | pudo-attic/ted-xml | 95d00f4f02ce16677da7672d4f40478ef13fac11 | 627c100ba464f574c2c71f7584f05f3aabf480e8 | refs/heads/master | 2021-01-01T05:31:32.156917 | 2013-09-13T13:13:29 | 2013-09-13T13:13:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,149 | py | from pprint import pprint
from parseutil import Extractor
LOOKUP = {
'appeal_body': {
'std': './/PROCEDURES_FOR_APPEAL/APPEAL_PROCEDURE_BODY_RESPONSIBLE//',
'util': './/APPEAL_PROCEDURES/RESPONSIBLE_FOR_APPEAL_PROCEDURES//',
'mil': './/PROCEDURES_FOR_APPEAL/APPEAL_PROCEDURE_BODY_RESPONSIBLE//'
},
'authority': {
'std': './/CONTRACTING_AUTHORITY_INFORMATION_CONTRACT_AWARD/NAME_ADDRESSES_CONTACT_CONTRACT_AWARD//',
'util': './/NAME_ADDRESSES_CONTACT_CONTRACT_AWARD_UTILITIES/CA_CE_CONCESSIONAIRE_PROFILE//',
'mil': './/CONTRACTING_AUTHORITY_INFORMATION_CONTRACT_AWARD_DEFENCE//CA_CE_CONCESSIONAIRE_PROFILE//',
},
'award_dest': {
'std': './/AWARD_OF_CONTRACT',
'util': './FD_CONTRACT_AWARD_UTILITIES/AWARD_CONTRACT_CONTRACT_AWARD_UTILITIES',
'mil': './/AWARD_OF_CONTRACT_DEFENCE'
},
'total_value': {
'std': './/TOTAL_FINAL_VALUE/COSTS_RANGE_AND_CURRENCY_WITH_VAT_RATE',
'util': './/OBJECT_CONTRACT_AWARD_UTILITIES/COSTS_RANGE_AND_CURRENCY_WITH_VAT_RATE',
'mil': './/TOTAL_FINAL_VALUE/COSTS_RANGE_AND_CURRENCY_WITH_VAT_RATE'
},
'award_description': {
'std': './/DESCRIPTION_AWARD_NOTICE_INFORMATION',
'util': './/OBJECT_CONTRACT_AWARD_UTILITIES/DESCRIPTION_CONTRACT_AWARD_UTILITIES',
'mil': './/DESCRIPTION_AWARD_NOTICE_INFORMATION_DEFENCE'
},
'short_desc': {
'std': './/DESCRIPTION_AWARD_NOTICE_INFORMATION/SHORT_CONTRACT_DESCRIPTION/P',
'util': './/DESCRIPTION_CONTRACT_AWARD_UTILITIES/SHORT_DESCRIPTION/P',
'mil': './/DESCRIPTION_AWARD_NOTICE_INFORMATION_DEFENCE/SHORT_CONTRACT_DESCRIPTION/P'
},
'reference': {
'std': './/ADMINISTRATIVE_INFORMATION_CONTRACT_AWARD/FILE_REFERENCE_NUMBER/P',
'util': './/ADMINISTRATIVE_INFO_CONTRACT_AWARD_UTILITIES/REFERENCE_NUMBER_ATTRIBUTED/P',
'mil': './/ADMINISTRATIVE_INFORMATION_CONTRACT_AWARD_DEFENCE/FILE_REFERENCE_NUMBER/P'
},
'additional_info': {
'std': './/COMPLEMENTARY_INFORMATION_CONTRACT_AWARD/ADDITIONAL_INFORMATION/P',
'util': './/COMPLEMENTARY_INFORMATION_CONTRACT_AWARD_UTILITIES/ADDITIONAL_INFORMATION/P',
'mil': './/COMPLEMENTARY_INFORMATION_CONTRACT_AWARD/ADDITIONAL_INFORMATION/P'
},
'electronic_auction': {
'std': './/F03_IS_ELECTRONIC_AUCTION_USABLE',
'util': './/F06_IS_ELECTRONIC_AUCTION_USABLE',
'mil': './/F18_IS_ELECTRONIC_AUCTION_USABLE'
},
'activity_type': {
'std': './/TYPE_AND_ACTIVITIES_AND_PURCHASING_ON_BEHALF//TYPE_OF_ACTIVITY',
'util': './/NOPATH',
'mil': './/TYPE_AND_ACTIVITIES_OR_CONTRACTING_ENTITY_AND_PURCHASING_ON_BEHALF//TYPE_OF_ACTIVITY'
},
'activity_type_other': {
'std': './/TYPE_AND_ACTIVITIES_AND_PURCHASING_ON_BEHALF//TYPE_OF_ACTIVITY_OTHER',
'util': './/NOPATH',
'mil': './/TYPE_AND_ACTIVITIES_OR_CONTRACTING_ENTITY_AND_PURCHASING_ON_BEHALF//TYPE_OF_ACTIVITY_OTHER'
},
'authority_type': {
'std': './/TYPE_AND_ACTIVITIES_AND_PURCHASING_ON_BEHALF//TYPE_OF_CONTRACTING_AUTHORITY',
'util': './/NOPATH',
'mil': './/TYPE_AND_ACTIVITIES_OR_CONTRACTING_ENTITY_AND_PURCHASING_ON_BEHALF//TYPE_OF_CONTRACTING_AUTHORITY'
},
'authority_type_other': {
'std': './/TYPE_AND_ACTIVITIES_AND_PURCHASING_ON_BEHALF//TYPE_AND_ACTIVITIES/TYPE_OF_CONTRACTING_AUTHORITY_OTHER',
'util': './/NOPATH',
'mil': './/TYPE_AND_ACTIVITIES_OR_CONTRACTING_ENTITY_AND_PURCHASING_ON_BEHALF/TYPE_AND_ACTIVITIES/TYPE_OF_CONTRACTING_AUTHORITY_OTHER'
},
'operator': {
'std': './ECONOMIC_OPERATOR_NAME_ADDRESS//',
'util': './/',
'mil': './ECONOMIC_OPERATOR_NAME_ADDRESS//'
},
}
def _lookup(s, key):
return LOOKUP[key][s]
def extract_address(ext, prefix, query):
    """Extract an address/contact block from the document.

    Reads the standard TED address children under the XPath prefix *query*
    and returns a dict whose keys are prefixed with ``<prefix>_``. Fields
    absent from the document are omitted. Returns {} when *query* is None
    (i.e. the field does not exist for this form variant).
    """
    if query is None:
        return {}
    data = {
        prefix + '_official_name': ext.text(query+'OFFICIALNAME'),
        prefix + '_address': ext.text(query+'ADDRESS'),
        prefix + '_town': ext.text(query+'TOWN'),
        prefix + '_postal_code': ext.text(query+'POSTAL_CODE'),
        prefix + '_country': ext.attr(query+'COUNTRY', 'VALUE'),
        prefix + '_attention': ext.text(query+'ATTENTION'),
        prefix + '_phone': ext.text(query+'PHONE'),
        # Two spellings occur in the wild; take whichever is present.
        prefix + '_email': ext.text(query+'EMAIL') or ext.text(query+'E_MAIL'),
        prefix + '_fax': ext.text(query+'FAX'),
        prefix + '_url': ext.text(query+'URL_GENERAL') or ext.text(query+'URL'),
        prefix + '_url_buyer': ext.text(query+'URL_BUYER'),
        prefix + '_url_info': ext.text(query+'URL_INFORMATION'),
        prefix + '_url_participate': ext.text(query+'URL_PARTICIPATE')
    }
    # Drop absent fields. Build a new dict instead of deleting keys while
    # iterating data.items() — that mutation raises RuntimeError on Python 3.
    return {k: v for k, v in data.items() if v is not None}
def extract_values(ext, prefix, query):
    """Extract a monetary value block (currency, cost, ranges, VAT info).

    Reads value attributes under the XPath *query* and returns a dict whose
    keys are prefixed with ``<prefix>_``. Fields absent from the document
    are omitted. Returns {} when *query* is None (field not present in this
    form variant).
    """
    if query is None:
        return {}
    data = {
        prefix + '_currency': ext.attr(query, 'CURRENCY'),
        prefix + '_cost': ext.attr(query + '/VALUE_COST', 'FMTVAL'),
        prefix + '_low': ext.attr(query + '//LOW_VALUE', 'FMTVAL'),
        prefix + '_high': ext.attr(query + '//HIGH_VALUE', 'FMTVAL'),
        prefix + '_months': ext.attr(query + '//NUMBER_MONTHS', 'FMTVAL'),
        prefix + '_years': ext.attr(query + '//NUMBER_YEARS', 'FMTVAL'),
        prefix + '_vat_rate': ext.attr(query + '//VAT_PRCT', 'FMTVAL')
    }
    # VAT inclusion is signalled by the presence of a marker element.
    if ext.el.find(query + '/INCLUDING_VAT') is not None:
        data[prefix + '_vat_included'] = True
    if ext.el.find(query + '/EXCLUDING_VAT') is not None:
        data[prefix + '_vat_included'] = False
    # Drop absent fields. Build a new dict instead of deleting keys while
    # iterating data.items() — that mutation raises RuntimeError on Python 3.
    return {k: v for k, v in data.items() if v is not None}
def parse_award(root, lookup):
    """Build a flat dict of fields for one contract-award element.

    *lookup* maps a logical key to the XPath variant for the current form
    type (std/util/mil).
    """
    ext = Extractor(root)
    contract = {}
    contract['contract_number'] = ext.text('./CONTRACT_NUMBER') or ext.text('.//CONTRACT_NO')
    contract['lot_number'] = ext.text('./LOT_NUMBER') or ext.text('.//LOT_NUMBER')
    contract['contract_title'] = (ext.text('./CONTRACT_TITLE/P') or ext.text('./CONTRACT_TITLE')
                                  or ext.text('.//TITLE_CONTRACT') or ext.text('.//TITLE_CONTRACT/P'))
    # The award date appears under two alternative parent elements.
    for part in ('DAY', 'MONTH', 'YEAR'):
        contract['contract_award_' + part.lower()] = (
            ext.text('.//CONTRACT_AWARD_DATE/' + part)
            or ext.text('.//DATE_OF_CONTRACT_AWARD/' + part))
    contract['offers_received_num'] = ext.text('.//OFFERS_RECEIVED_NUMBER')
    contract['offers_received_meaning'] = ext.text('.//OFFERS_RECEIVED_NUMBER_MEANING')
    contract.update(extract_values(ext, 'contract_value', './/COSTS_RANGE_AND_CURRENCY_WITH_VAT_RATE'))
    contract.update(extract_values(ext, 'initial_value', './/INITIAL_ESTIMATED_TOTAL_VALUE_CONTRACT'))
    contract.update(extract_address(ext, 'operator', lookup('operator')))
    return contract
def parse_form(root):
    """Parse a TED contract-award notice into a list of contract dicts.

    Detects the form variant from the root tag (standard, utilities or
    defence), extracts the form-level fields once, then emits one dict per
    AWARD_OF_CONTRACT element with the form-level fields merged in.
    """
    form_type = 'std'
    if 'DEFENCE' in root.tag:
        form_type = 'mil'
    elif 'UTILITIES' in root.tag:
        form_type = 'util'
    lookup = lambda k: _lookup(form_type, k)
    ext = Extractor(root)
    form = {
        'file_reference': ext.text(lookup('reference')),
        'relates_to_eu_project': ext.text('.//RELATES_TO_EU_PROJECT_YES/P'),
        'notice_dispatch_day': ext.text('.//NOTICE_DISPATCH_DATE/DAY'),
        'notice_dispatch_month': ext.text('.//NOTICE_DISPATCH_DATE/MONTH'),
        'notice_dispatch_year': ext.text('.//NOTICE_DISPATCH_DATE/YEAR'),
        'appeal_procedure': ext.text('.//PROCEDURES_FOR_APPEAL//LODGING_OF_APPEALS_PRECISION/P'),
        'location': ext.text(lookup('award_description')+'/LOCATION_NUTS/LOCATION/P') or ext.text(lookup('award_description')+'/LOCATION_NUTS/LOCATION'),
        'location_nuts': ext.attr(lookup('award_description')+'/LOCATION_NUTS/NUTS', 'CODE'),
        'type_contract': ext.attr(lookup('award_description')+'//TYPE_CONTRACT', 'VALUE'),
        'gpa_covered': ext.attr(lookup('award_description')+'/CONTRACT_COVERED_GPA', 'VALUE'),
        'electronic_auction': ext.attr(lookup('electronic_auction'), 'VALUE'),
        'cpv_code': ext.attr(lookup('award_description')+'/CPV/CPV_MAIN/CPV_CODE', 'CODE'),
        'reason_lawful': ext.html('.//REASON_CONTRACT_LAWFUL'),
        #'cpv_additional_code': ext.attr('.//DESCRIPTION_AWARD_NOTICE_INFORMATION/CPV/CPV_ADDITIONAL/CPV_CODE', 'CODE'),
        # NOTE(review): the next two ext.text calls pass a second 'VALUE'
        # argument — that looks like an attribute read; ext.attr may have
        # been intended. Confirm against the Extractor API.
        'authority_type': ext.text(lookup('authority_type'), 'VALUE'),
        'authority_type_other': ext.text(lookup('authority_type_other'), 'VALUE'),
        'activity_type': ext.text(lookup('activity_type')),
        'activity_type_other': ext.text(lookup('activity_type_other')),
        'activity_contractor': ext.attr('.//ACTIVITIES_OF_CONTRACTING_ENTITY/ACTIVITY_OF_CONTRACTING_ENTITY', 'VALUE'),
        'concessionaire_email': ext.text('.//CA_CE_CONCESSIONAIRE_PROFILE/E_MAILS/E_MAIL'),
        'concessionaire_nationalid': ext.text('.//CA_CE_CONCESSIONAIRE_PROFILE/ORGANISATION/NATIONALID'),
        'concessionaire_contact': ext.text('.//CA_CE_CONCESSIONAIRE_PROFILE/CONTACT_POINT'),
        'contract_award_title': ext.text(lookup('award_description')+'/TITLE_CONTRACT/P'),
        'contract_description': ext.html(lookup('short_desc')),
        'additional_information': ext.html(lookup('additional_info')),
        'contract_type_supply': ext.attr('.//TYPE_CONTRACT_LOCATION_W_PUB/TYPE_SUPPLIES_CONTRACT', 'VALUE')
    }
    # Nested address blocks, flattened into the form dict by prefix.
    form.update(extract_address(ext, 'authority', lookup('authority')))
    form.update(extract_address(ext, 'appeal_body', lookup('appeal_body')))
    form.update(extract_address(ext, 'on_behalf', './/TYPE_AND_ACTIVITIES_AND_PURCHASING_ON_BEHALF/PURCHASING_ON_BEHALF//'))
    #form.update(extract_address(ext, 'lodging_info', './/PROCEDURES_FOR_APPEAL/LODGING_INFORMATION_FOR_SERVICE//'))
    # Mark the paths below as deliberately not extracted — presumably so the
    # extractor's audit of unvisited nodes stays clean; confirm Extractor
    # semantics.
    ext.ignore('.//PROCEDURES_FOR_APPEAL/MEDIATION_PROCEDURE_BODY_RESPONSIBLE/*')
    ext.ignore('.//PROCEDURES_FOR_APPEAL/LODGING_INFORMATION_FOR_SERVICE/*')
    ext.ignore('./FD_CONTRACT_AWARD_DEFENCE/COMPLEMENTARY_INFORMATION_CONTRACT_AWARD/PROCEDURES_FOR_APPEAL/LODGING_INFORMATION_FOR_SERVICE/*')
    ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/CONTRACTING_ENTITY_CONTRACT_AWARD_UTILITIES/NAME_ADDRESSES_CONTACT_CONTRACT_AWARD_UTILITIES/INTERNET_ADDRESSES_CONTRACT_AWARD_UTILITIES/URL_GENERAL')
    ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/COMPLEMENTARY_INFORMATION_CONTRACT_AWARD_UTILITIES/APPEAL_PROCEDURES/SERVICE_FROM_INFORMATION/*')
    ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/PROCEDURES_CONTRACT_AWARD_UTILITIES/ADMINISTRATIVE_INFO_CONTRACT_AWARD_UTILITIES/PREVIOUS_PUBLICATION_INFORMATION_NOTICE_F6/*')
    # Make awards criteria their own table.
    ext.ignore('./FD_CONTRACT_AWARD/PROCEDURE_DEFINITION_CONTRACT_AWARD_NOTICE/AWARD_CRITERIA_CONTRACT_AWARD_NOTICE_INFORMATION/AWARD_CRITERIA_DETAIL_F03/*')
    ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/PROCEDURES_CONTRACT_AWARD_UTILITIES/F06_AWARD_CRITERIA_CONTRACT_UTILITIES_INFORMATION/*')
    ext.ignore('./FD_CONTRACT_AWARD_DEFENCE/PROCEDURE_DEFINITION_CONTRACT_AWARD_NOTICE_DEFENCE/AWARD_CRITERIA_CONTRACT_AWARD_NOTICE_INFORMATION_DEFENCE/AWARD_CRITERIA_DETAIL_F18/*')
    # NOTE(review): the next path starts with '.FD_...' (missing '/') and the
    # one two below starts with '.FD_...' too in the original — likely typos
    # that make these ignores no-ops; verify against real documents.
    ext.ignore('.FD_CONTRACT_AWARD_UTILITIES/PROCEDURES_CONTRACT_AWARD_UTILITIES/F06_AWARD_CRITERIA_CONTRACT_UTILITIES_INFORMATION/PRICE_AWARD_CRITERIA/*')
    ext.ignore('./FD_CONTRACT_AWARD_DEFENCE/PROCEDURE_DEFINITION_CONTRACT_AWARD_NOTICE_DEFENCE/ADMINISTRATIVE_INFORMATION_CONTRACT_AWARD_DEFENCE/PREVIOUS_PUBLICATION_INFORMATION_NOTICE_F18/*')
    ext.ignore('./FD_CONTRACT_AWARD/AWARD_OF_CONTRACT/*')
    ext.ignore('./FD_CONTRACT_AWARD_DEFENCE/AWARD_OF_CONTRACT_DEFENCE/*')
    ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/AWARD_CONTRACT_CONTRACT_AWARD_UTILITIES/*')
    ext.ignore('./FD_CONTRACT_AWARD_UTILITIES/OBJECT_CONTRACT_AWARD_UTILITIES/DESCRIPTION_CONTRACT_AWARD_UTILITIES/SHORT_DESCRIPTION/*')
    ext.ignore('./FD_CONTRACT_AWARD/PROCEDURE_DEFINITION_CONTRACT_AWARD_NOTICE/ADMINISTRATIVE_INFORMATION_CONTRACT_AWARD/PREVIOUS_PUBLICATION_INFORMATION_NOTICE_F3/*')
    # Results discarded — presumably just marks these nodes as visited for
    # the extractor's audit; confirm.
    ext.text('.//TYPE_CONTRACT_LOCATION_W_PUB/SERVICE_CATEGORY_PUB')
    ext.text('.//CPV/CPV_ADDITIONAL/CPV_CODE')
    form.update(extract_values(ext, 'total_value', lookup('total_value')))
    #from lxml import etree
    #el = root.find('./FD_CONTRACT_AWARD/OBJECT_CONTRACT_INFORMATION_CONTRACT_AWARD_NOTICE/TOTAL_FINAL_VALUE')
    #if el:
    #    print etree.tostring(el, pretty_print=True)
    #    #pprint(form)
    #ext.audit()
    # One output record per individual award, each carrying the form fields.
    contracts = []
    for award in root.findall(lookup('award_dest')):
        contract = parse_award(award, lookup)
        contract.update(form)
        contracts.append(contract)
        #pprint(contract)
    return contracts
| [
"friedrich@pudo.org"
] | friedrich@pudo.org |
eac0b5d45b5a0ac966f08a4b8b83a3c1d6fdf69c | dea690c107a041ebae0d30ba5ae143c39661303d | /Pong/Bola.py | e2edcfce4df8d6f1caa89a73f45f0b475e4ee7c7 | [] | no_license | esponja92/pyolito | 73b04f27089b16cc50baa260ec52170c55450b65 | 142fd9b3370ca254ff6cb49652543b73303c7a70 | refs/heads/main | 2023-03-13T12:43:05.417946 | 2021-03-28T02:27:30 | 2021-03-28T02:27:30 | 352,139,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py | import pygame
from ObjetoAtivo import ObjetoAtivo
class Bola(ObjetoAtivo):
    """The Pong ball: bounces off paddles and the screen's side walls."""

    def __init__(self, centro_x, centro_y, raio, cor):
        # Fixed sprite size and speed; direction components are -1/0/+1.
        self.largura = 15
        self.altura = 15
        self.velocidade = 5
        self.inicial_x = centro_x
        self.inicial_y = centro_y
        self.cor = cor
        self.raio = raio
        # Starts moving straight down (vertical=1, horizontal=0).
        self.direcao_vertical = 1
        self.direcao_horizontal = 0
        super().__init__(self.inicial_x, self.inicial_y,
                         self.largura, self.altura, self.velocidade, self.cor)

    def desenha(self, janela):
        """Draw the ball as a filled circle on the given surface."""
        pygame.draw.circle(janela, self.cor, (self.x, self.y), self.raio)

    def reinicia(self):
        """Reset position (via the base class) and the initial direction."""
        super().reinicia()
        self.direcao_vertical = 1
        self.direcao_horizontal = 0

    def atualiza_posicao(self, listaObjetosAtivos, janela, keys):
        """Update direction from collisions, then move one step."""
        if(self.colliderect(listaObjetosAtivos['jogador'])):
            # Hit the player's paddle: bounce upward; the horizontal
            # component depends on where the ball struck the paddle
            # (within 7.5 px of the centre -> straight up).
            self.direcao_vertical = -1
            if(abs(self.get_posicao_central_x() - listaObjetosAtivos['jogador'].get_posicao_central_x()) <= 7.5):
                self.direcao_horizontal = 0
            elif(self.get_posicao_central_x() > listaObjetosAtivos['jogador'].get_posicao_central_x()):
                self.direcao_horizontal = 1
            elif(self.get_posicao_central_x() < listaObjetosAtivos['jogador'].get_posicao_central_x()):
                self.direcao_horizontal = -1
        elif(self.colliderect(listaObjetosAtivos['inimigo'])):
            # Hit the opponent's paddle: bounce downward.
            self.direcao_vertical = 1
        # Bounce off the side walls.
        if(self.colidiu_tela_direita(janela)):
            self.direcao_horizontal = -1
        if(self.colidiu_tela_esquerda(janela)):
            self.direcao_horizontal = 1
        # Update the ball's position.
        if(self.direcao_vertical == -1):
            self.move_cima()
        if(self.direcao_vertical == 1):
            self.move_baixo()
        if(self.direcao_horizontal == 1):
            self.move_direita()
        if(self.direcao_horizontal == -1):
            self.move_esquerda()
| [
"hugodantas@tic.ufrj.br"
] | hugodantas@tic.ufrj.br |
903984715773801ca2e11ea4b30d93214d206d25 | 30d9898c1b4a2be116e8c764ae91fb5ff4b216f5 | /project_python基础/4.18.py | 0c2bf7fa8ef697d14e8da6126a06f633fc696337 | [] | no_license | CgangerCG/PythonLearning | 402a267c84d73f903e8290ea3a20208fd9fdf183 | 67616bfd2aa709b6344e6b7d0124f1e8d710246d | refs/heads/master | 2020-03-28T20:16:20.273278 | 2018-09-17T01:56:32 | 2018-09-17T01:56:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | import random
print(random.sample(range(100),10))
print(random.randrange(1,100,2))
print(random.choice("abcdefghij"))
print(random.choice(['apple','pear','peach','orange'])) | [
"34260109+chengang0727@users.noreply.github.com"
] | 34260109+chengang0727@users.noreply.github.com |
1e274d74ad26e7dd53ac6e762c283f06a179f1b5 | f15d80adc1f2b48d571811c0e6ec67ba63dac836 | /camera-test.py | 4fcafb352bb246453ce908ade5b81752aa3fda58 | [] | no_license | rozymahsun/facenetattendance | 51c672e39c53e38a4b3ad7716822b88853c8455e | 4f6dd57a464c6ff0999cd9026a10e231dfedead3 | refs/heads/master | 2020-03-18T23:58:43.294627 | 2018-05-31T06:13:18 | 2018-05-31T06:13:18 | 135,444,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | import numpy as np
import cv2
import datetime
# Open the default camera and request a 320x240 capture size.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

while True:
    # Capture frame-by-frame.
    ret, frame = cap.read()
    if not ret:
        # Camera disconnected or the read failed: `frame` would be None
        # and cvtColor/imshow below would crash on it.
        break
    # Grayscale copy (only used by the commented-out preview below).
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Log a timestamp for every captured frame.
    now = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
    print(now)
    # Display the resulting frame.
    cv2.imshow('Mobile Legend Beng-beng', frame)
    #cv2.imshow('gray', gray)
    # Quit on 'q'.
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

# When everything is done, release the capture.
cap.release()
cv2.destroyAllWindows()
"rozymahsun@gmail.com"
] | rozymahsun@gmail.com |
0170c0c289bd5cba1649c1763329778c4f8675d5 | fc36b33714a04eb3f916fb6d58f1935482ae1dee | /blog/models.py | 5591dbc876494f8f4f8674ec2fd394a46728819f | [] | no_license | williamleeIao/my-first-blog | ec03645fb5d58a08561362b6b8f4239165feee04 | 91e4401cc844301d09106d6985aa9bf76bf60497 | refs/heads/master | 2020-03-14T15:32:32.525071 | 2018-05-01T02:56:19 | 2018-05-01T02:56:19 | 131,669,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class Post(models.Model):
    """A blog post; unpublished until publish() stamps published_date."""

    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    # timezone.now is passed as a callable, so it is evaluated per row.
    created_date = models.DateTimeField(
        default=timezone.now)
    # NULL until publish() is called.
    published_date = models.DateTimeField(
        blank=True, null=True)

    def publish(self):
        """Mark the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        # Display name in the admin and shell.
        return self.title
| [
"wilee_6@hotmail.com"
] | wilee_6@hotmail.com |
60d963412a48fad3faf24a928539525426ead706 | 41d5dbdba29881755a44ee4e4fc15afd1f70e83b | /events/models.py | 4baceaf5ada3724abb836862c652100e5daacb5c | [] | no_license | aniket1004/adminpanel | 9493500856f5175317d74e6c6322085ac9f5101f | fdb5e40c55294762eb2959834c5297d307eb654a | refs/heads/master | 2023-04-23T14:38:18.730038 | 2021-05-03T12:19:08 | 2021-05-03T12:19:08 | 363,909,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | from django.db import models
# Create your models here.
class Event(models.Model):
name = models.CharField(max_length=100,blank=False,)
slug = models.CharField(max_length=100,blank=False,)
date = models.DateField(auto_now=False, auto_now_add=False)
| [
"aniketdhole1004@gmail.com"
] | aniketdhole1004@gmail.com |
8f26c0707c6e96062c78e160ebb53b168b45685b | b18ff1d2a88cdad6d8ca73a8e6c34943f7bee055 | /toolcall/models/__init__.py | b223e50a69b3e81be3d40cd155596ad4cbf3849e | [
"MIT"
] | permissive | thebjorn/toolcall | 9c812d608a67990dfb04b4e8bc1ebfcd4e7440c3 | 2c1597c8224958b4751cfb09f7a1b4439ca6df09 | refs/heads/master | 2021-06-13T21:33:12.495795 | 2019-08-31T10:50:55 | 2019-08-31T10:50:55 | 147,824,176 | 0 | 0 | MIT | 2021-06-10T20:46:47 | 2018-09-07T13:02:31 | HTML | UTF-8 | Python | false | false | 53 | py | # -*- coding: utf-8 -*-
from .tool_models import *
| [
"bp@datakortet.no"
] | bp@datakortet.no |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.