text stringlengths 8 6.05M |
|---|
class Chromatique():
    """Registry of shiny-capable ('chromatique') Pokemon, grouped by generation.

    Each entry is a dict with the French name ('name') and the zero-padded
    national Pokedex number ('numero', kept as a string to preserve padding).
    """

    def __init__(self):
        # Generation 1 (Kanto).
        self.gen1 = [
            {'name': 'Bulbizarre', 'numero': '001'},
            {'name': 'Herbizarre', 'numero': '002'},
            {'name': 'Florizarre', 'numero': '003'},
            {'name': 'Salameche', 'numero': '004'},
            {'name': 'Reptincel', 'numero': '005'},
            {'name': 'Dracaufeu', 'numero': '006'},
            {'name': 'Carapuce', 'numero': '007'},
            {'name': 'Carabaffe', 'numero': '008'},
            {'name': 'Tortank', 'numero': '009'},
            {'name': 'Chenipan', 'numero': '010'},
            {'name': 'Chrysacier', 'numero': '011'},
            {'name': 'Papilusion', 'numero': '012'},
            {'name': 'Roucool', 'numero': '016'},
            {'name': 'Roucoups', 'numero': '017'},
            {'name': 'Roucarnarge', 'numero': '018'},
            {'name': 'Rattata', 'numero': '019'},
            {'name': 'Rattatac', 'numero': '020'},
            {'name': 'Pikachu', 'numero': '025'},
            {'name': 'Raichu', 'numero': '026'},
            {'name': 'Sabelette', 'numero': '027'},
            {'name': 'Sablaireau', 'numero': '028'},
            {'name': 'NidoranF', 'numero': '029'},
            {'name': 'Nidorina', 'numero': '030'},
            {'name': 'Nidoqueen', 'numero': '031'},
            {'name': 'Rondoudou', 'numero': '039'},
            {'name': 'Grodoudou', 'numero': '040'},
            {'name': 'Taupiqueur', 'numero': '050'},
            {'name': 'Triopikeur', 'numero': '051'},
            {'name': 'Psykokwak', 'numero': '054'},
            {'name': 'Akwakwak', 'numero': '055'},
            {'name': 'Ferosinge', 'numero': '056'},
            {'name': 'Colosinge', 'numero': '057'},
            {'name': 'Caninos', 'numero': '058'},
            {'name': 'Arcanin', 'numero': '059'},
            {'name': 'Machoc', 'numero': '066'},
            {'name': 'Machopeur', 'numero': '067'},
            {'name': 'Mackogneur', 'numero': '068'},
            {'name': 'Racaillou', 'numero': '074'},
            {'name': 'Gravalanch', 'numero': '075'},
            {'name': 'Grolem', 'numero': '076'},
            {'name': 'Ponyta', 'numero': '077'},
            {'name': 'Galopa', 'numero': '078'},
            {'name': 'Magneti', 'numero': '081'},
            {'name': 'Magneton', 'numero': '082'},
            {'name': 'Tadmorv', 'numero': '088'},
            {'name': 'Grotadmorv', 'numero': '089'},
            {'name': 'Kokiyas', 'numero': '090'},
            {'name': 'Crustabri', 'numero': '091'},
            {'name': 'Fantominus', 'numero': '092'},
            {'name': 'Spectrum', 'numero': '093'},
            {'name': 'Ectoplasma', 'numero': '094'},
            {'name': 'Soporifik', 'numero': '096'},
            {'name': 'Hypnomade', 'numero': '097'},
            {'name': 'Osselait', 'numero': '104'},
            {'name': 'Ossatueur', 'numero': '105'},
            {'name': 'Insécateur', 'numero': '123'},
            {'name': 'Magmar', 'numero': '126'},
            {'name': 'Scarabrute', 'numero': '127'},
            {'name': 'Magicarpe', 'numero': '129'},
            {'name': 'Léviator', 'numero': '130'},
            {'name': 'Lokhlass', 'numero': '131'},
            {'name': 'Evoli', 'numero': '133'},
            {'name': 'Aquali', 'numero': '134'},
            {'name': 'Voltali', 'numero': '135'},
            {'name': 'Pyroli', 'numero': '136'},
            {'name': 'Amonita', 'numero': '138'},
            {'name': 'Amonistar', 'numero': '139'},
            {'name': 'Kabuto', 'numero': '140'},
            {'name': 'Kabutops', 'numero': '141'},
            {'name': 'Ptéra', 'numero': '142'},
            {'name': 'Artikodin', 'numero': '144'},
            {'name': 'Electhor', 'numero': '145'},
            {'name': 'Sulfura', 'numero': '146'},
            {'name': 'Minidraco', 'numero': '147'},
            {'name': 'Draco', 'numero': '148'},
            {'name': 'Dracolosse', 'numero': '149'}
        ]
        # Generation 2 (Johto).
        self.gen2 = [
            {'name': 'Germignon', 'numero': '152'},
            {'name': 'Macronium', 'numero': '153'},
            {'name': 'Meganium', 'numero': '154'},
            {'name': 'Héricendre', 'numero': '155'},
            {'name': 'Feurisson', 'numero': '156'},
            {'name': 'Typhlosion', 'numero': '157'},
            {'name': 'Kainminus', 'numero': '158'},
            {'name': 'Crocodil', 'numero': '159'},
            {'name': 'Aligatueur', 'numero': '160'},
            {'name': 'Pichu', 'numero': '172'},
            {'name': 'Toudoudou', 'numero': '174'},
            {'name': 'Togepi', 'numero': '175'},
            {'name': 'Togetic', 'numero': '176'},
            {'name': 'Natu', 'numero': '177'},
            {'name': 'Xatu', 'numero': '178'},
            {'name': 'Wattouat', 'numero': '179'},
            {'name': 'Lainergie', 'numero': '180'},
            {'name': 'Pharamp', 'numero': '181'},
            {'name': 'Marill', 'numero': '183'},
            {'name': 'Azumarill', 'numero': '184'},
            {'name': 'Tournegrin', 'numero': '191'},
            {'name': 'Héliatronc', 'numero': '192'},
            {'name': 'Mentali', 'numero': '196'},
            {'name': 'Noctali', 'numero': '197'},
            {'name': 'Cornebre', 'numero': '198'},
            {'name': 'Feuforeve', 'numero': '200'},
            {'name': 'Pomdepik', 'numero': '204'},
            {'name': 'Foretress', 'numero': '205'},
            {'name': 'Snubbull', 'numero': '209'},
            {'name': 'Granbull', 'numero': '210'},
            {'name': 'Cizayox', 'numero': '212'},
            {'name': 'Caratroc', 'numero': '213'},
            {'name': 'Marcacrin', 'numero': '220'},
            {'name': 'Cochignon', 'numero': '221'},
            {'name': 'Cadoizo', 'numero': '225'},
            {'name': 'Malosse', 'numero': '228'},
            {'name': 'Demolosse', 'numero': '229'},
            {'name': 'Magby', 'numero': '240'},
            {'name': 'Embrylex', 'numero': '246'},
            {'name': 'Ymphect', 'numero': '247'},
            {'name': 'Tyranocif', 'numero': '248'},
            {'name': 'Lugia', 'numero': '249'},
            {'name': 'Ho-oh', 'numero': '250'}
        ]
        # Generation 3 (Hoenn).
        self.gen3 = [
            {'name': 'Arcko', 'numero': '252'},
            {'name': 'Massko', 'numero': '253'},
            {'name': 'Jungko', 'numero': '254'},
            {'name': 'Medhyèna', 'numero': '261'},
            {'name': 'Grahyèna', 'numero': '262'},
            {'name': 'Zigzaton', 'numero': '263'},
            {'name': 'Linéon', 'numero': '264'},
            {'name': 'Nénupiot', 'numero': '270'},
            {'name': 'Lombre', 'numero': '271'},
            {'name': 'Ludicolo', 'numero': '272'},
            {'name': 'Nirondelle', 'numero': '276'},
            {'name': 'Hélédelle', 'numero': '277'},
            {'name': 'Goélise', 'numero': '278'},
            {'name': 'Bekipan', 'numero': '279'},
            {'name': 'Makuhita', 'numero': '296'},
            {'name': 'Hariyama', 'numero': '297'},
            {'name': 'Azurill', 'numero': '298'},
            {'name': 'Ténéfix', 'numero': '302'},
            # BUG FIX: Mysdibule was '302' (duplicate of Ténéfix) and the
            # following five entries were each one below their real national
            # Pokedex numbers; corrected to 303-308.
            {'name': 'Mysdibule', 'numero': '303'},
            {'name': 'Galekid', 'numero': '304'},
            {'name': 'Galegon', 'numero': '305'},
            {'name': 'Galeking', 'numero': '306'},
            {'name': 'Méditikka', 'numero': '307'},
            {'name': 'Charmina', 'numero': '308'},
            {'name': 'Posipi', 'numero': '311'},
            {'name': 'Négapi', 'numero': '312'},
            {'name': 'Rosélia', 'numero': '315'},
            {'name': 'Wailmer', 'numero': '320'},
            {'name': 'Wailord', 'numero': '321'},
            {'name': 'Spoink', 'numero': '325'},
            {'name': 'Groret', 'numero': '326'},
            {'name': 'Tylton', 'numero': '333'},
            {'name': 'Altaria', 'numero': '334'},
            {'name': 'Séléroc', 'numero': '337'},
            {'name': 'Solaroc', 'numero': '338'},
            {'name': 'Barpau', 'numero': '349'},
            {'name': 'Milobellus', 'numero': '350'},
            {'name': 'Morphéo', 'numero': '351'},
            {'name': 'Polichombr', 'numero': '353'},
            {'name': 'Branette', 'numero': '354'},
            {'name': 'Skelénox', 'numero': '355'},
            {'name': 'Téraclope', 'numero': '356'},
            {'name': 'Absol', 'numero': '359'},
            {'name': 'Stalgamin', 'numero': '361'},
            {'name': 'Oniglali', 'numero': '362'},
            {'name': 'Obalie', 'numero': '363'},
            {'name': 'Phogleur', 'numero': '364'},
            {'name': 'Kaimorse', 'numero': '365'},
            {'name': 'Coquiperl', 'numero': '366'},
            {'name': 'Serpang', 'numero': '367'},
            {'name': 'Rosabyss', 'numero': '368'},
            {'name': 'Lovdisc', 'numero': '370'},
            {'name': 'Draby', 'numero': '371'},
            {'name': 'Drackhaus', 'numero': '372'},
            {'name': 'Drattak', 'numero': '373'},
            {'name': 'Terhal', 'numero': '374'},
            {'name': 'Métang', 'numero': '375'},
            {'name': 'Métalosse', 'numero': '376'},
            {'name': 'Latias', 'numero': '380'},
            {'name': 'Latios', 'numero': '381'},
            {'name': 'Kyogre', 'numero': '382'},
            {'name': 'Groudon', 'numero': '383'}
        ]
        # Generation 4 (Sinnoh).
        self.gen4 = [
            {'name': 'Lixy', 'numero': '403'},
            {'name': 'Luxio', 'numero': '404'},
            {'name': 'Luxray', 'numero': '405'},
            {'name': 'Rozbouton', 'numero': '406'},
            {'name': 'Roserade', 'numero': '407'},
            {'name': 'Apitrini', 'numero': '415'},
            {'name': 'Apireine', 'numero': '416'},
            {'name': 'Baudrive', 'numero': '425'},
            {'name': 'Grodrive', 'numero': '426'},
            {'name': 'Laporeille', 'numero': '427'},
            {'name': 'Lockpin', 'numero': '428'},
            {'name': 'Magirêve', 'numero': '429'},
            {'name': 'Corboss', 'numero': '430'},
            {'name': 'Magnézone', 'numero': '462'},
            {'name': 'Maganon', 'numero': '467'},
            {'name': 'Phyllali', 'numero': '470'},
            {'name': 'Givrali', 'numero': '471'},
            {'name': 'Mammochon', 'numero': '473'},
            {'name': 'Noctunoir', 'numero': '477'},
            {'name': 'Momartik', 'numero': '478'}
        ]
        # Lookup table used by get_gen()/get_chroma(); the gen1..gen4
        # attributes are kept so existing callers keep working.
        self._generations = {1: self.gen1, 2: self.gen2, 3: self.gen3, 4: self.gen4}

    def get_gen(self, value):
        """Return a comma-separated string of names for generation `value`.

        `value` may be an int or a numeric string (1-4). Returns None for any
        other generation number; raises ValueError for non-numeric input
        (matching the original behavior of int(value)).
        """
        generation = self._generations.get(int(value))
        if generation is None:
            return None
        return ', '.join(entry['name'] for entry in generation)

    def get_chroma(self, value):
        """Return the entry whose name matches `value` (case-insensitive).

        Searches generations 1 through 4 in order; returns None if no entry
        matches.
        """
        target = value.title()
        for generation in self._generations.values():
            for entry in generation:
                if entry['name'].title() == target:
                    return entry
        return None
# -*- coding: UTF-8 -*-
import platform
import argparse
import csv
import time
from enum import Enum
from io import BytesIO
import sys
import os
import logging
import re
from pathlib import Path
import requests
import pdfkit
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import (
NoSuchElementException,
UnexpectedAlertPresentException
)
from selenium.common.exceptions import TimeoutException
from PIL import Image
# Folder containing this script; downloads are resolved relative to it.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# TJMG court portal entry page.
BASE_URL = 'http://www.tjmg.jus.br/portal-tjmg/'
# 2captcha.com endpoints: POST the captcha image, then GET the solution.
URL_POST_CAPTCHA = 'http://2captcha.com/in.php'
URL_GET_CAPTCHA = 'http://2captcha.com/res.php'
# NOTE(review): 2captcha API key committed in source -- should be moved to an
# environment variable or a secrets store.
KEY = 'c3b78102059c7d2009ea1591019068c6'
# Module-level progress counters shared with TjmgAutomation.progress_bar().
COUNT_NUMBERS = 0
CURRENT_NUMBERS = 0
FAILURES = 0
SUCCESS = 0
logging.basicConfig(
    filename='scanning.log',
    filemode='w',
    format='%(name)s - %(levelname)s - %(message)s'
)
# Pick the bundled geckodriver binary matching the current OS.
if platform.system() == "Windows":
    FIREFOX_DRIVER_PATH = os.path.join(BASE_DIR, "firefox", "windows", "geckodriver.exe")
elif platform.system() == "Linux":
    FIREFOX_DRIVER_PATH = os.path.join(BASE_DIR, "firefox", "linux", "geckodriver")
else:
    FIREFOX_DRIVER_PATH = os.path.join(BASE_DIR, "firefox", "mac", "geckodriver")
class TjmgAutomation(object):
    """Selenium-based scraper for first-instance case files on the TJMG portal.

    Searches each process number, solves the portal captcha through the
    2captcha.com service, and downloads (or renders to PDF) the attachments
    whose movement description matches the requested search words.

    NOTE(review): uses the Selenium 3 `find_element(s)_by_*` API, which was
    removed in Selenium 4 -- confirm the pinned selenium version.
    """

    def __init__(self, download_folder, headless=False):
        # Resolve the download folder relative to this script and create it
        # if missing; the Firefox profile below saves files directly into it.
        self.download_folder = os.path.join(BASE_DIR, download_folder)
        if not os.path.exists(self.download_folder):
            os.makedirs(self.download_folder)
        self.headless = headless
        self.driver = self.session()

    def session(self):
        """Create, configure and return a Firefox webdriver session."""
        options = webdriver.FirefoxOptions()
        if self.headless is True:
            options.add_argument('-headless')
        # Profile tuned so downloads go straight to download_folder without
        # any save dialogs, for all the MIME types the portal serves.
        profile = webdriver.FirefoxProfile()
        profile.set_preference("dom.webnotifications.enabled", False)
        profile.set_preference("browser.download.folderList", 2)
        profile.set_preference("browser.download.dir", self.download_folder)
        profile.set_preference("browser.download.manager.alertOnEXEOpen", False)
        profile.set_preference("browser.helperApps.neverAsk.saveToDisk",
                               "application/msword, application/csv, "
                               "application/ris, text/csv, image/png, application/pdf, "
                               "text/html, text/plain, application/zip, application/x-zip, "
                               "application/x-zip-compressed, application/download, application/octet-stream")
        profile.set_preference("browser.download.manager.showWhenStarting", False)
        profile.set_preference("browser.download.manager.focusWhenStarting", False)
        profile.set_preference("browser.helperApps.alwaysAsk.force", False)
        profile.set_preference("browser.download.manager.alertOnEXEOpen", False)
        profile.set_preference("browser.download.manager.closeWhenDone", True)
        profile.set_preference("browser.download.manager.showAlertOnComplete", False)
        profile.set_preference("browser.download.manager.useWindow", False)
        profile.set_preference("services.sync.prefs.sync.browser.download.manager.showWhenStarting", False)
        # Disable the built-in PDF viewer so PDFs download instead of opening.
        profile.set_preference("pdfjs.disabled", True)
        self.driver = webdriver.Firefox(
            service_log_path='/dev/null',
            options=options,
            firefox_profile=profile,
            executable_path=FIREFOX_DRIVER_PATH
        )
        self.driver.set_page_load_timeout(30)
        self.driver.set_window_size(1360, 900)
        return self.driver

    def rename(self, file_name, number, word):
        """Rename a downloaded file to '<number>_<word><original extension>'."""
        ext = Path(file_name).suffix
        os.rename(self.download_folder + '/' + file_name,
                  self.download_folder + '/' + number + '_' + word + ext)

    def search_process(self, number, search_word=None, work_folder=''):
        """Look up case `number` on the portal and download matching attachments.

        For each word in `search_word` (an iterable of movement descriptions),
        downloads the first attachment whose description contains the word and
        renames it to '<number>_<description><ext>'. Pages without a direct
        download are rendered to PDF via generate_pdf().

        Returns True when at least one file was downloaded, None on any
        failure. NOTE(review): passing search_word=None raises TypeError at
        the word loop -- callers always supply a list.
        """
        try:
            self.driver.get(BASE_URL)
        except TimeoutException:
            logging.warning(
                '{} - Timeout in loading website'.format(
                    number
                )
            )
            return None
        try:
            # Fill the first-instance search form with the process number
            # and submit it.
            self.driver.find_elements_by_xpath("//section[@class='tabs-of-process']"
                                               "//input[@id='txtProcesso']")[0].send_keys(number)
            self.driver.find_elements_by_xpath("//section[@class='tabs-of-process']"
                                               "//form[@class='first-instance-form']"
                                               "//button[@type='submit']")[0].click()
            time.sleep(3)
        except NoSuchElementException:
            logging.warning(
                '{} - Webdriver Element not found.'.format(
                    number
                )
            )
            return None
        except TimeoutException:
            logging.warning(
                '{} - Timeout in loading website'.format(
                    number
                )
            )
            return None
        try:
            # Up to three attempts at solving the captcha.
            captcha_pass = False
            for i in range(3):
                if self.resolve_captcha() is True:
                    captcha_pass = True
                    break
        except UnexpectedAlertPresentException:
            logging.warning(
                '{} - Download do arquivo não permitido'.format(
                    number
                )
            )
            return None
        if not captcha_pass:
            logging.warning(
                '{} - Captcha pass failed.'.format(
                    number
                )
            )
            return None
        try:
            # Open the 'Andamentos' (movements) tab once it appears.
            element = WebDriverWait(self.driver, 30).until(EC.presence_of_element_located(
                (By.XPATH, "//*[contains(text(), ' Andamentos')]")))
            element.click()
        except:
            logging.warning(
                '{} - Arquivo não existe.'.format(
                    number
                )
            )
            return None
        all_tr_items = self.driver.find_elements_by_xpath("//table[@class='corpo']/tbody/tr[contains(@class, 'linha')]")
        record_number = len(all_tr_items)
        all_files_downloaded = False
        for word in search_word:
            file_downloaded = False
            for i in range(0, record_number):
                try:
                    # Re-query the rows on every pass: the clicks below
                    # mutate the DOM, so cached elements would go stale.
                    td_elems = self.driver.find_elements_by_xpath("//table[@class='corpo']/tbody/tr[contains(@class, 'linha')]")[i].find_elements_by_xpath("td")
                    item_name = td_elems[1].text.strip()
                except:
                    continue
                if word in item_name:
                    try:
                        if len(td_elems[0].find_elements_by_xpath(".//a")) > 0:
                            download_btn = td_elems[0].find_elements_by_xpath(".//a")[0]
                        else:
                            continue
                        download_btn.click()
                        # The toggle link's href encodes the panel id as
                        # javascript:mostrarOcultarPanel('<id>'); strip the
                        # surrounding quotes with [1:][:-1].
                        doc_id = download_btn.get_attribute("href")
                        doc_id = re.search(r"javascript:mostrarOcultarPanel\((.*)?\)", doc_id).group(1)[1:][:-1]
                        if len(self.driver.find_elements_by_xpath("//table[@id='painelMov" + doc_id + "']//a")) > 0:
                            download_btn = self.driver.find_elements_by_xpath("//table[@id='painelMov" + doc_id + "']//a")[0]
                        else:
                            continue
                        file_name = download_btn.text
                        download_btn.click()
                        time.sleep(3)
                    except:
                        continue
                    my_file = Path(self.download_folder + '/' + file_name)
                    if my_file.is_file():
                        # The click produced a real download: just rename it.
                        try:
                            self.rename(file_name, number, item_name)
                        except:
                            continue
                    else:
                        # No file appeared: navigate to the link and render
                        # the resulting page to PDF instead.
                        try:
                            self.driver.get(download_btn.get_attribute('href'))
                        except:
                            continue
                        webpage = self.driver.page_source
                        try:
                            self.generate_pdf(content=webpage, name_file=file_name, work_folder=work_folder)
                        except Exception as e:
                            continue
                        self.driver.execute_script("window.history.go(-1)")
                        time.sleep(2)
                        try:
                            self.rename(file_name + '.pdf', number, item_name)
                        except:
                            continue
                    file_downloaded = True
                    all_files_downloaded = True
                    break
            if not file_downloaded:
                logging.warning(
                    '{} - {} - Arquivo não existe.'.format(
                        number,
                        word
                    )
                )
        if not all_files_downloaded:
            return None
        return True

    def resolve_captcha(self):
        """Solve the portal captcha through 2captcha.com.

        Returns True when no captcha is present, or when an answer was typed
        into the captcha field; False on any exception.

        NOTE(review): if the 2captcha POST response is not `ok`, the function
        falls through and returns None (falsy) -- callers compare `is True`,
        so this behaves like a failure, but the return type is inconsistent.
        """
        try:
            self.driver.find_element_by_id('captcha_image')
        except NoSuchElementException:
            # No captcha on the page -- nothing to solve.
            return True
        try:
            size_image = 1360, 900
            element = self.driver.find_element_by_id('captcha_image')
            location = element.location
            size = element.size
            # Screenshot the full page, then crop out just the captcha image.
            png = self.driver.get_screenshot_as_png()
            im = Image.open(BytesIO(png))
            im.thumbnail(size_image, Image.ANTIALIAS)
            left = location['x']
            top = location['y']
            right = location['x'] + size['width']
            bottom = location['y'] + size['height']
            im = im.crop((left, top, right, bottom))
            im.save('screenshot.png')
            files = {'file': open('screenshot.png', 'rb')}
            data = {'key': KEY}
            response = requests.post(
                URL_POST_CAPTCHA,
                files=files,
                data=data,
                timeout=15
            )
            if response.ok:
                # Give the service time to solve, then poll once for the
                # answer (responses are pipe-delimited, e.g. 'OK|<text>').
                time.sleep(5)
                id_message = response.text.split('|')[-1]
                resolved_captcha = requests.get(
                    '{}?key={}&action=get&id={}'.format(
                        URL_GET_CAPTCHA,
                        KEY,
                        id_message
                    ),
                    timeout=15
                )
                message = resolved_captcha.text.split('|')[-1]
                self.driver.find_element_by_id('captcha_text').send_keys(message)
                return True
        except:
            return False

    def generate_pdf(self, content, name_file, work_folder):
        """Render an HTML fragment to '<work_folder>/<name_file>.pdf' via pdfkit."""
        html = '''
        <!DOCTYPE HTML>
        <html>
        <head>
        <meta charset="utf-8">
        </head>
        <body>
        {content}
        </body>
        </html>
        '''.format(content=content)
        options = {
            'quiet': ''
        }
        pdfkit.from_string(
            input=html,
            output_path='{}/{}.pdf'.format(work_folder, name_file),
            options=options)

    def csv_parsing(self, csv_file, csv_words, work_folder=''):
        """Run search_process() for every row of `csv_file`.

        The CSV must have a 'Processo Nº' column; `csv_words` is the list of
        movement descriptions to download for each process.
        """
        global COUNT_NUMBERS
        # First pass only counts the rows so progress_bar() can show a total.
        with open(csv_file, newline='') as f1:
            reader1 = csv.DictReader(f1)
            COUNT_NUMBERS = len([_ for _ in reader1])
        with open(csv_file, newline='') as f2:
            reader2 = csv.DictReader(f2)
            for row in reader2:
                # try:
                result = self.search_process(
                    number=row['Processo Nº'],
                    search_word=csv_words,
                    work_folder=work_folder
                )
                self.progress_bar(result)
                # except Exception as e:
                #     pass
                #     logging.warning(
                #         '{} - Download do arquivo não permitido'.format(
                #             row['processo']
                #         )
                #     )

    @staticmethod
    def progress_bar(result):
        """Update the module-level counters and redraw the progress line.

        `result` is the return of search_process(): None counts as a failure,
        anything else as a success.
        """
        global COUNT_NUMBERS
        global CURRENT_NUMBERS
        global FAILURES
        global SUCCESS
        CURRENT_NUMBERS += 1
        if result is None:
            FAILURES += 1
        else:
            SUCCESS += 1
        sys.stdout.write("\r" + str(
            'Progress Downloading Numbers: {}/{} ({} succeeded, {} failed)'.format(
                CURRENT_NUMBERS,
                COUNT_NUMBERS,
                SUCCESS,
                FAILURES
            )
        ))
if __name__ == '__main__':
    # CLI: either a CSV of process numbers (-csv_numbers) or explicit
    # -number values, plus a CSV/text file of search words (-csv_words).
    parser = argparse.ArgumentParser(description='portal tjmg automation.')
    parser.add_argument(
        '-csv_numbers',
        dest='csv_numbers',
        type=str,
        help='Input CSV numbers'
    )
    parser.add_argument(
        '-csv_words',
        dest='csv_words',
        type=str,
        help='Input CSV words'
    )
    parser.add_argument(
        '-download_folder',
        dest='download_folder',
        type=str,
        help='Folder where will be stored pdf files',
        default=''
    )
    parser.add_argument(
        '-number',
        dest='number',
        type=str,
        help='Number of search pdf',
        action='store',
        nargs='*',
    )
    args = parser.parse_args()
    ja = TjmgAutomation(args.download_folder, headless=True)
    search_words = []
    if args.download_folder:
        if not os.path.exists(args.download_folder):
            os.makedirs(args.download_folder)
    if args.csv_words:
        # One search word per line; strip the trailing newlines.
        with open(args.csv_words, newline='') as f:
            search_words = [i.replace('\n', '') for i in f.readlines()]
    if args.csv_numbers and args.csv_words:
        # Batch mode: process every number in the CSV, then exit.
        ja.csv_parsing(
            args.csv_numbers,
            search_words,
            work_folder=args.download_folder
        )
        ja.driver.quit()
        sys.exit()
    elif args.number and args.csv_words:
        # Ad-hoc mode: process the numbers given on the command line.
        COUNT_NUMBERS = len(args.number)
        for num in args.number:
            result = ja.search_process(num, search_words, args.download_folder)
            ja.progress_bar(result)
    if args.download_folder:
        sys.stdout.write(
            "\nFiles stored in folder: {}\n".format(args.download_folder)
        )
    ja.driver.quit()
|
import nox
@nox.session(python=["3.8", "3.7"])
def tests(session):
    """Install the project via poetry and run pytest with coverage."""
    # Point poetry at the nox-managed virtualenv so it installs there.
    poetry_env = {"VIRTUAL_ENV": session.virtualenv.location}
    session.run("poetry", "install", env=poetry_env, external=True)
    session.run("pytest", "--cov", *session.posargs)
|
'''
Eric Nguyen
Github: Nooj45
CS 1656
Professor Alexandros Labrinidis
Assignment 1
'''
import sys
import requests as req
import json
import math
from math import cos, asin, sqrt
def total_bikes():
    """Return the total number of bikes available across all stations.

    Sums 'num_bikes_available' over the module-level stationData list.
    """
    return sum(station['num_bikes_available'] for station in stationData)
def total_docks():
    """Return the total number of open docks across all stations.

    Sums 'num_docks_available' over the module-level stationData list.
    """
    return sum(station['num_docks_available'] for station in stationData)
def percent_avail(stationId):
    """Return the floored percentage of open docks at station `stationId`.

    Computed as docks / (docks + bikes) * 100 from the module-level
    stationData list; returns 0 when the id is not found.
    """
    for station in stationData:
        if station['station_id'] == stationId:
            docks = station['num_docks_available']
            bikes = station['num_bikes_available']
            return math.floor(docks / (docks + bikes) * 100)
    return 0
def closest_stations(lat, lon):
    """Return [id1, id2, id3, name1, name2, name3] of the 3 nearest stations.

    Distances from (lat, lon) are computed with the distance() helper over
    the module-level stationInfo list.
    """
    # Distance from the query point, keyed by station id.
    dist_by_id = {
        info['station_id']: distance(lat, lon, info['lat'], info['lon'])
        for info in stationInfo
    }
    ranked = sorted(dist_by_id.items(), key=lambda item: item[1])
    top_ids = [station_id for station_id, _ in ranked[:3]]
    # BUG FIX: the original rescanned stationInfo inside `while True` and
    # could return an incomplete list (the `return` fired after one pass even
    # if fewer than 3 names had been matched). Map ids to names in one pass.
    names = {info['station_id']: info['name'] for info in stationInfo}
    return top_ids + [names[station_id] for station_id in top_ids]
def closest_bike(lat, lon):
    """Return [station_id, name] of the nearest station with a bike available.

    Uses the module-level stationData (availability) and stationInfo
    (coordinates and names) lists plus the distance() helper.
    """
    # Ids of stations that currently have at least one bike.
    with_bikes = {
        status['station_id']
        for status in stationData
        if status['num_bikes_available'] > 0
    }
    # BUG FIX: the original paired stationData and stationInfo by list
    # position and silently skipped stations when the two lists were not in
    # the same order; match by station_id instead.
    candidates = [info for info in stationInfo if info['station_id'] in with_bikes]
    best = min(candidates,
               key=lambda info: distance(lat, lon, info['lat'], info['lon']))
    return [best['station_id'], best['name']]
def distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in km between two points given in degrees.

    Haversine formula with an Earth diameter of 12742 km.
    """
    deg_to_rad = 0.017453292519943295  # pi / 180
    hav = 0.5 - cos((lat2-lat1)*deg_to_rad)/2 + cos(lat1*deg_to_rad)*cos(lat2*deg_to_rad) * (1-cos((lon2-lon1)*deg_to_rad)) / 2
    return 12742 * asin(sqrt(hav))
def printOutput(command, param, outputValue):
    """Echo a command, its parameters and its output in the expected format.

    closest_stations output goes on its own lines; percent_avail gets a '%'
    suffix; everything else is printed verbatim.
    """
    header = 'Command={0}\nParameters={1}'.format(command, param)
    print(header)
    print('Output=', end='')
    if command == 'closest_stations':
        print('\n' + outputValue)
    elif command == 'percent_avail':
        print('{0}%'.format(outputValue))
    else:
        print(outputValue)
# Build the two GBFS endpoint URLs from the base URL given on the CLI.
baseURL = sys.argv[1]
station_infoURL = baseURL + 'station_information.json'
station_statusURL = baseURL + 'station_status.json'
# Fetch station status (availability) and station information (coordinates).
data_request = req.get(station_statusURL)
data_request2 = req.get(station_infoURL)
# BUG FIX: the original condition was
#   data_request.status_code == 200 & data_request.status_code == 200
# which tested the first response twice with bitwise '&' (and only worked by
# accident through operator precedence); check BOTH responses with 'and'.
if data_request.status_code == 200 and data_request2.status_code == 200:
    jsonData = json.loads(data_request.content)
    jsonData2 = json.loads(data_request2.content)
else:  # if either request failed then exit program
    print("Error with request to API! Exiting...")
    exit()
# stationData for total_bikes, total_docks, & percent_avail
# stationInfo is for closest_stations & closest_bike
stationData = jsonData['data']['stations']
stationInfo = jsonData2['data']['stations']
# checking command line arguments to decide which func. to run
# (argv layout: script.py <base_url> <command> [args...])
if len(sys.argv) == 3: # total_bikes() and total_docks() only have 3 arg. in command line
    if sys.argv[2] == 'total_bikes':
        totalBikes = total_bikes()
        printOutput(sys.argv[2], '', totalBikes)
    elif sys.argv[2] == 'total_docks':
        totalDocks = total_docks()
        printOutput(sys.argv[2], '', totalDocks)
    else:
        print('Invalid command-line arguments for total_bikes and total_docks')
elif len(sys.argv) == 4: # percent_avail() takes 1 command line argument (station_id)
    if sys.argv[2] == 'percent_avail':
        percentage = percent_avail(sys.argv[3])
        printOutput(sys.argv[2], sys.argv[3], percentage)
    else:
        print('Invalid command-line arguments for percent_avail')
elif len(sys.argv) == 5: # closest_stations() and closest_bike() take 2 command line arguments (coordinates)
    if sys.argv[2] == 'closest_stations':
        top3 = closest_stations(float(sys.argv[3]), float(sys.argv[4]))
        # top3 is [id1, id2, id3, name1, name2, name3]; pair each id with its name.
        top3Str = '{0}, {3}\n{1}, {4}\n{2}, {5}'.format(top3[0],top3[1],top3[2],top3[3],top3[4],top3[5])
        combinedParam = sys.argv[3] + ' ' + sys.argv[4]
        printOutput(sys.argv[2], combinedParam, top3Str)
    elif sys.argv[2] == 'closest_bike':
        closestBike = closest_bike(float(sys.argv[3]), float(sys.argv[4]))
        bikeStr = '{0}, {1}'.format(closestBike[0], closestBike[1])
        combinedParam = sys.argv[3] + ' ' + sys.argv[4]
        printOutput(sys.argv[2], combinedParam, bikeStr)
    else:
        print('Invalid command-line arguments for closest_stations and closest_bike')
else:
    print("Invalid number of arguments")
|
# URLconf for the py_django_test project.
# NOTE(review): django.conf.urls.defaults and patterns() only exist on very
# old Django versions (patterns() was removed in 1.10) -- confirm the pinned
# Django release before touching this file.
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
    url(r'^refer/','py_django_test.refer_test.views.refer')
    # Examples:
    # url(r'^$', 'py_django_test.views.home', name='home'),
    # url(r'^py_django_test/', include('py_django_test.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
|
#!/usr/bin/env python3
# vim: set ai et ts=4 sw=4:
# Grouped bar chart of the most common OpenStreetMap point types,
# comparing 2016 values (here derived as 90% of the 2017 data) against 2017
# counts; the result is written to bars.png.
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import csv
# NOTE(review): mdates, dt and csv are unused in this script.
data_names = ['cafe', 'pharmacy', 'fuel', 'bank', 'waste_disposal',
              'atm', 'bench', 'parking', 'restaurant',
              'place_of_worship']
data_values = [9124, 8652, 7592, 7515, 7041, 6487, 6374, 6277,
               5092, 3629]
dpi = 80
fig = plt.figure(dpi = dpi, figsize = (512 / dpi, 384 / dpi) )
mpl.rcParams.update({'font.size': 10})
plt.title('OpenStreetMap Point Types')
ax = plt.axes()
ax.yaxis.grid(True, zorder = 1)
xs = range(len(data_names))
# Two bar series side by side: red '2016' (scaled 0.9x) and blue '2017'.
plt.bar([x + 0.05 for x in xs], [ d * 0.9 for d in data_values],
        width = 0.2, color = 'red', alpha = 0.7, label = '2016',
        zorder = 2)
plt.bar([x + 0.3 for x in xs], data_values,
        width = 0.2, color = 'blue', alpha = 0.7, label = '2017',
        zorder = 2)
plt.xticks(xs, data_names)
# Slant the category labels so they do not overlap.
fig.autofmt_xdate(rotation = 25)
plt.legend(loc='upper right')
fig.savefig('bars.png')
|
import random
import os
import urllib

# NOTE(review): hard-coded to the original author's machine -- should be
# derived from os.getcwd() or taken as an argument.
_REPO_DIR = "/Users/ryancho/Documents/Github/systemsCNN/"

if not os.path.exists('training'):
    os.makedirs('training')
if not os.path.exists('test'):
    os.makedirs('test')


def _fetch_tiles(subfolder, count=10):
    """Download `count` NDVI tiles from NASA GIBS and move them into subfolder."""
    for i in range(count):
        # Fixed tile coordinates for now; the commented-out randint calls
        # show the intended randomized sampling.
        #tiling_resolution = random.randint(0, 11)
        tiling_resolution = 11
        column_num = 320
        row_num = 640
        #column_num = random.randint(200, 400)
        #row_num = random.randint(400, 800)
        url = str("https://gibs.earthdata.nasa.gov/wmts/epsg4326/best/Landsat_WELD_NDVI_Global_Monthly/default/2010-04-01/31.25m/"
                  + str(tiling_resolution) + "/" + str(column_num) + "/" + str(row_num) + ".jpg")
        urllib.urlretrieve(url, str(i) + ".jpg")
        os.rename(_REPO_DIR + str(i) + ".jpg",
                  _REPO_DIR + subfolder + "/" + str(i) + ".jpg")


# training data
_fetch_tiles('training')
# test data
# BUG FIX: the original second loop began with 'or i in range(10):', a
# SyntaxError for 'for i in range(10):'; both loops are now one helper.
_fetch_tiles('test')
|
#!/usr/bin/python
#qplot -x -3d -s 'set xlabel "x";set ylabel "y";set view equal xy' outcmaes_obj.dat w l outcmaes_res.dat ps 3 data/res000{0,1,2,3,5,6}.dat -showerr
import cma
import numpy as np
def fobj1(x, f_none=None):
    """2-D quadratic objective with an infeasible disc around (0.5, -0.5).

    Returns f_none (default None) for points inside the disc of squared
    radius 0.2; otherwise the quadratic centered at (1.2, -2.0).
    """
    assert len(x) == 2
    x0, x1 = x[0], x[1]
    if (x0 - 0.5)**2 + (x1 + 0.5)**2 < 0.2:
        return f_none
    return 3.0*(x0 - 1.2)**2 + 2.0*(x1 + 2.0)**2
def frange(xmin, xmax, num_div):
    """Return num_div+1 evenly spaced floats from xmin to xmax inclusive."""
    divisor = float(num_div)
    return [xmin + (xmax - xmin) * step / divisor for step in range(num_div + 1)]
# Python 2 driver: run CMA-ES on fobj1, dumping each generation's population
# to data/resNNNN.dat and the objective landscape to outcmaes_obj.dat for
# later plotting with qplot (see header comment).
#fobj= cma.fcts.rosen
fobj= fobj1
options = {'CMA_diagonal':1, 'verb_time':0}
options['bounds']= [[-3.0,-3.0],[3.0,3.0]]
options['tolfun']= 1.0e-4 # 1.0e-4
#options['verb_log']= False
#options['scaling_of_variables']= np.array([0.5,1.0])
# Strongly squash the first coordinate's search scale.
options['scaling_of_variables']= np.array([0.00001,1.0])
options['popsize']= 200
#typical_x= [0.0,0.0]
#options['typical_x']= np.array(typical_x)
scale0= 0.5
#parameters0= [0.0,0.0]
#parameters0= [1.19,-1.99]
parameters0= [1.2,0.0]
es= cma.CMAEvolutionStrategy(parameters0, scale0, options)
#solutions= es.ask()
#solutions= [np.array([ 1.29323333]), np.array([ 1.33494294]), np.array([ 1.2478004]), np.array([ 1.34619473])]
#scores= [fobj(x) for x in solutions]
#es.tell(solutions,scores)
print 'es.result():',es.result()
count= 0
while not es.stop():
    solutions, scores = [], []
    # Keep asking for single samples until popsize feasible ones are found
    # (fobj returns None inside the infeasible disc and those are skipped).
    #while len(solutions) < es.popsize+3: #This is OK
    while len(solutions) < es.popsize:
        #curr_fit = None
        #while curr_fit in (None, np.NaN):
        x = es.ask(1)[0]
        #curr_fit = cma.fcts.somenan(x, cma.fcts.elli) # might return np.NaN
        f= fobj(x)
        if f is not None:
            solutions.append(x)
            scores.append(f)
    es.tell(solutions, scores)
    es.disp()
    #print 'es.result():',es.result()
    #print solutions
    #if count%5==0:
    #print '[%i]'%count, ' '.join(map(str,solutions[0]))
    # Dump this generation's population (infeasible points written as -10).
    fp= file('data/res%04i.dat'%(count),'w')
    count+=1
    for x in solutions:
        fp.write('%s %f\n' % (' '.join(map(str,x)),fobj(x,-10)))
    fp.close()
res= es.result()
print 'best solutions = ', res[0]
print 'best solutions fitness = %f' % (res[1])
print res
# Sample the objective on a 101x101 grid for the contour/surface plot.
fp= file('outcmaes_obj.dat','w')
for x1 in frange(-4.0,4.0,100):
    for x2 in frange(-4.0,4.0,100):
        x= np.array([x1,x2])
        fp.write('%s %f\n' % (' '.join(map(str,x)),fobj(x,-10)))
    fp.write('\n')
fp.close()
# Write the best point found.
fp= file('outcmaes_res.dat','w')
#for x in res[0]:
x= res[0]
fp.write('%s %f\n' % (' '.join(map(str,x)),fobj(x,-10)))
fp.close()
cma.plot();
print 'press a key to exit > ',
raw_input()
#cma.savefig('outcmaesgraph')
|
#!/usr/bin/env python
# encoding: utf-8
"""
test_orig_dmarket.py
Created by Jakub Konka on 2011-05-09.
Copyright (c) 2011 University of Strathclyde. All rights reserved.
"""
from __future__ import division
import sys
import os
import numpy as np
import scipy.integrate as integral
import scipy.stats.mstats as mstats
import scikits.statsmodels as sm
import matplotlib.pyplot as plt
from operator import itemgetter
from matplotlib import rc
# Use Helvetica (sans-serif) and LaTeX rendering for all plot text.
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
def simulate(N, pmin, pmax):
    '''This function simulates one stage of the auctioning game among
    the network operators.

    Keyword args:
    N -- number of bidders
    pmin -- minimum price/cost of the service
    pmax -- maximum price/cost of the service

    Returns: intersection (if exists) of the winning bidders for price weight->1
    '''
    # Service agent weights: w_range samples of the price weight in [0, 1].
    w_range = 1000
    w = np.linspace(0, 1, w_range)
    # Calculate prices
    max_n = 100  # maximum number of contracts that can be supported by the bidder
    beta = 1     # parameter characterising price evolution
    r = 75       # number of contract tenders already underway minus the current tender
    costs = [np.random.uniform(pmin, pmax) for i in range(N)]  # costs drawn from P~U(pmin,pmax)
    rep = [np.random.uniform(0, 1) for i in range(N)]  # reputation drawn from X~U(0,1)
    # BUG FIX: the original built `prices` and `bids` with map(); on Python 3
    # map() returns an iterator, so the bids[n][i] indexing below raised
    # TypeError. List comprehensions behave identically on Python 2 and 3.
    prices = [c + (np.exp(((r-1)/(max_n-1))**beta) - 1) / (np.exp(beta) - 1) * (pmax-c)
              for c in costs]  # pricing vector
    # Each bid is an array over the weight grid: w*price + (1-w)*reputation.
    bids = [w*p + (1-w)*x for p, x in zip(prices, rep)]
    # Winner (index of the lowest bid) at each weight sample.
    winners = [min(enumerate([bids[n][i] for n in range(N)]), key=itemgetter(1))[0]
               for i in range(w_range)]
    # Find the intersection (if exists)
    # 1. Winner for w=1
    winner_w_1 = winners[-1]
    # 2. Descend through the winners list, looking for changes in the winners
    count = 0
    for winner in reversed(winners):
        if winner_w_1 != winner: break
        else: count += 1
    # Weight at which the w=1 winner stops winning (0.0 means no change).
    return w[w_range - count]
def run(n, N, pmin, pmax):
    '''Execute n iterations of the digital marketplace auctioning game and
    write a time-series, histogram and empirical-CDF plot to PDF files.

    Keyword args:
    n -- number of iterations
    N -- number of bidders
    pmin -- minimum price/cost of the service
    pmax -- maximum price/cost of the service
    '''
    # Run the simulation n times; a return of 0.0 means "no intersection",
    # in which case we count the failure and re-draw until one exists.
    intersections = []
    no_intersection = 0
    for i in range(n):
        sim = simulate(N, pmin, pmax)
        while(sim == 0.0):
            no_intersection += 1
            sim = simulate(N, pmin, pmax)
        intersections.append(sim)
    # Compute the mean, std and median of the distribution
    avg_intersect = np.mean(intersections)
    std_intersect = np.std(intersections)
    med_intersect = np.median(intersections)
    # Empirical probability that a single draw yields an intersection
    prob_intersect = 1 - no_intersection/(len(intersections)+no_intersection)
    print("Probability of an intersection occurring: {0}".format(prob_intersect))
    # Plot the time series
    plt.figure()
    plt.plot(np.linspace(1, n, n), intersections, 'b*')
    plt.xlabel(r"Iterations, $n$")
    plt.ylabel(r"Intersections, $w$")
    plt.grid()
    plt.savefig("time_series.pdf")
    # Plot the histogram; h is the Freedman-Diaconis bin width,
    # 2*IQR*n^(-1/3), with the IQR estimated via ideal fourths.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    h = 2*(mstats.idealfourths(intersections)[1]-mstats.idealfourths(intersections)[0])*n**(-1/3)
    # NOTE(review): modern matplotlib requires an integer bin count and has
    # replaced `normed=` with `density=` -- confirm the targeted versions.
    ax.hist(intersections, (max(intersections)-min(intersections))/h, normed=1, facecolor='green')
    plt.xlabel(r"Intersections, $w$")
    plt.ylabel(r"Empirical PDF")
    plt.text(0.65, 1.12, \
        "Mean: {0}\nMedian: {1}\nStd: {2}".format(avg_intersect, med_intersect, std_intersect), \
        fontsize=13, \
        horizontalalignment='left', \
        verticalalignment='top', \
        transform=ax.transAxes)
    plt.grid()
    plt.savefig("hist.pdf")
    # Plot the empirical CDF
    plt.figure()
    # NOTE(review): `scikits.statsmodels` is the pre-2012 package name; modern
    # statsmodels exposes ECDF under statsmodels.distributions -- confirm.
    ecdf = sm.tools.ECDF(intersections)
    x_axis = np.linspace(min(intersections), max(intersections))
    plt.step(x_axis, ecdf(x_axis))
    plt.xlabel(r"Intersections, $w$")
    plt.ylabel(r"Empirical CDF")
    plt.grid()
    plt.savefig("ecdf.pdf")
if __name__ == '__main__':
    # Entry point: 10000 iterations, 3 bidders, costs drawn from U(0, 1).
    run(10000, 3, 0, 1)
|
try:
import tkinter as tk
from tkinter import messagebox
except:
import Tkinter as tk
import tkMessageBox as messagebox
import ttk
import time
from scraper.scraper_1 import Drug_Recalls
color = ["#329fea", "#cc93e8"]
def Drug_GUI(frame):
    """Build the "Drug Store" recall panel inside *frame*.

    Scrapes the recall data, then renders a header row followed by one
    label row per recall entry.
    """
    scraper = Drug_Recalls()
    scraper.parse()
    box = tk.LabelFrame(frame, text="Drug Store")
    box.configure(bg="#4f617b", fg="#e26306", font=('courier', 15, 'bold'),
                  relief="sunken", labelanchor="n")
    box.pack()
    # Header row at index 0, data rows from index 1 on.
    label_list(box, "Date", "Brand Name", "Production Description",
               "Reason/Problem", "Company", 0)
    for row, entry in enumerate(scraper.total_data, start=1):
        label_list(box, entry['date'], entry['brand'], entry['description'],
                   entry['problem'], entry['company'], row)
    print('Drug GUI is made!')
def label_list(labelframe, date, brand, desc, problem, company, ind):
    """Add one row of read-only labels (date/brand/description/problem/company)
    to *labelframe*, alternating the background colour by row index *ind*.

    The original repeated the same Label construction five times; the cells
    are now described by a layout table and built in one loop.
    """
    frame = tk.Frame(labelframe)
    frame.configure(background=color[ind % 2])
    frame.pack(padx=5, pady=5, ipadx=5, ipady=5)
    # (grid column, columnspan, width, wraplength, text) -- matches the
    # original per-field layout exactly.
    cells = [
        (0, 1, 10, 70, date),      # Date
        (2, 1, 10, 70, brand),     # Brand
        (4, 10, 50, 350, desc),    # Description
        (15, 7, 40, 280, problem), # Problem
        (23, 2, 10, 70, company),  # Company
    ]
    for column, span, width, wrap, value in cells:
        var = tk.StringVar()
        label = tk.Label(frame, textvariable=var,
                         width=width, wraplength=wrap,
                         anchor="nw", justify="left",
                         fg="black", bg=color[ind % 2])
        label.grid(column=column, row=ind, columnspan=span, sticky="W",
                   padx=3, pady=3, ipadx=3, ipady=3)
        var.set(value)
|
# Generated by Django 2.1.4 on 2019-01-09 18:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the ``specialite`` FK with a ``service`` FK on
    PsychologuesServicesOfferts.

    ``preserve_default=False``: the ``default=1`` only backfills existing
    rows during this migration and is not kept on the field afterwards.
    """

    dependencies = [
        ('services', '0002_auto_20190109_1816'),
        ('psychologues', '0008_psychologuesservicesofferts'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='psychologuesservicesofferts',
            name='specialite',
        ),
        migrations.AddField(
            model_name='psychologuesservicesofferts',
            name='service',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='services.Service'),
            preserve_default=False,
        ),
    ]
|
def text():
    """Return the fixed placeholder string."""
    placeholder = "kokoko"
    return placeholder
'''
Created on Jul 20, 2016
@author: Dayo
'''
from django.test import TestCase
from django.contrib.auth.models import User
from datetime import datetime
from ..tasks import process_onetime_event, process_private_anniversary, process_public_anniversary
from core.models import Event, Contact, MessageTemplate, KITUser, SMTPSetting
class PrivateAnniversaryTests(TestCase):
    """Fixtures and (stub) tests for private-anniversary event processing."""

    @classmethod
    def setUpTestData(cls):
        """Create one admin/user pair, a contact, SMTP settings, a message
        template, and a due private event (``cls.dpe``) for the tests."""
        # create due private event
        user = User.objects.create_user('dayo', 'dayo@windom.biz', 'xHD192zaq')
        parent = User.objects.create_user('mradmin', 'dedayoa@gmail.com', 'password14')
        kituser = KITUser.objects.create(
            user = user,
            # NOTE(review): 'DD-MM-YYYY' is not ISO format; Django DateFields
            # normally expect 'YYYY-MM-DD' -- confirm the field accepts this.
            dob = '20-07-1990',
            timezone = 'Africa/Lagos',
            parent = parent,
            phone_number = '+2348160478727',
            industry = 'OTHER',
            address_1 = '6, Yomi Ogunkoya',
            city_town = 'Ologuneru',
            state = 'OYO',
            is_admin = False,
        )
        contact = Contact.objects.create(
            salutation = 'mr',
            first_name = 'Adedayo',
            last_name = 'Ayeni',
            email = 'dayo@windom.biz',
            phone = '+2348028443225',
            kit_user = kituser
        )
        # NOTE(review): real-looking SMTP credentials are hard-coded in this
        # test fixture -- confirm they are dummies / rotate if not.
        smtp = SMTPSetting.objects.create(
            description = 'Default',
            from_user = 'dedayoa@gmail.com',
            smtp_server = 'rsb18.rhostbh.com',
            smtp_port = 465,
            connection_security = 'SSLTLS',
            smtp_user = 'dayo@windom.biz',
            smtp_password = 'Password2014',
            active = True,
            kit_admin = parent
        )
        message_template = MessageTemplate.objects.create(
            title = 'Test Template Title',
            email_template = 'Email Template',
            sms_template = 'SMS Template',
            sms_sender = 'Test SMS',
            smtp_setting = smtp,
            send_sms = True,
            send_email = True,
        )
        # The "due private event" under test, dated today with a stale last_run.
        cls.dpe = Event.objects.create(
            contact = contact,
            date = datetime.utcnow(),
            message = message_template,
            title = 'My Birthday',
            last_run = '20-07-2015'
        )

    def test_process_private_anniversary(self):
        # TODO: exercise process_private_anniversary against cls.dpe.
        pass
from __future__ import print_function, division
from math import exp
from keras import initializers, regularizers, activations, constraints
from keras.engine import Layer, InputSpec
from keras.models import Model, model_from_json
from keras.layers import Input, Embedding, TimeDistributed, Dense, Dropout, Reshape, Concatenate, LSTM, Conv2D, MaxPooling2D, BatchNormalization
from keras.optimizers import SGD
from keras import backend as K
class Highway(Layer):
    """Densely connected highway network.
    Highway layers are a natural extension of LSTMs to feedforward networks.
    # Arguments
        init: name of initialization function for the weights of the layer
            (see [initializations](../initializations.md)),
            or alternatively, Theano function to use for weights
            initialization. This parameter is only relevant
            if you don't pass a `weights` argument.
        activation: name of activation function to use
            (see [activations](../activations.md)),
            or alternatively, elementwise Theano function.
            If you don't specify anything, no activation is applied
            (ie. "linear" activation: a(x) = x).
        weights: list of Numpy arrays to set as initial weights.
            The list should have 2 elements, of shape `(input_dim, output_dim)`
            and (output_dim,) for weights and biases respectively.
        W_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (eg. L1 or L2 regularization), applied to the main weights matrix.
        b_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the bias.
        activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
            applied to the network output.
        W_constraint: instance of the [constraints](../constraints.md) module
            (eg. maxnorm, nonneg), applied to the main weights matrix.
        b_constraint: instance of the [constraints](../constraints.md) module,
            applied to the bias.
        bias: whether to include a bias
            (i.e. make the layer affine rather than linear).
        input_dim: dimensionality of the input (integer). This argument
            (or alternatively, the keyword argument `input_shape`)
            is required when using this layer as the first layer in a model.
    # Input shape
        2D tensor with shape: `(nb_samples, input_dim)`.
    # Output shape
        2D tensor with shape: `(nb_samples, input_dim)`.
    # References
        - [Highway Networks](http://arxiv.org/abs/1505.00387v2)
    """

    def __init__(self,
                 init='glorot_uniform',
                 activation=None,
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):
        self.init = initializers.get(init)
        self.activation = activations.get(activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        # Raw weight arrays (if given) are applied at the end of build().
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)
        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(Highway, self).__init__(**kwargs)

    def build(self, input_shape):
        input_dim = input_shape[1]
        self.input_spec = InputSpec(dtype=K.floatx(),
                                    shape=(None, input_dim))
        # Transform path weights H(x) = activation(x.W + b)
        self.W = self.add_weight((input_dim, input_dim),
                                 initializer=self.init,
                                 name='W',
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        # Carry-gate weights T(x) = sigmoid(x.W_carry + b_carry)
        self.W_carry = self.add_weight((input_dim, input_dim),
                                       initializer=self.init,
                                       name='W_carry')
        if self.bias:
            self.b = self.add_weight((input_dim,),
                                     initializer='zero',
                                     name='b',
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
            # Carry bias initialised to one so the gate starts mostly open.
            self.b_carry = self.add_weight((input_dim,),
                                           initializer='one',
                                           name='b_carry')
        else:
            # No biases; call() guards on self.bias, so self.b is never read.
            self.b_carry = None
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True

    def call(self, x):
        # Gate: t = sigmoid(x.W_carry (+ b_carry))
        y = K.dot(x, self.W_carry)
        if self.bias:
            y += self.b_carry
        transform_weight = activations.sigmoid(y)
        # Transform: h = activation(x.W (+ b))
        y = K.dot(x, self.W)
        if self.bias:
            y += self.b
        act = self.activation(y)
        # Output: t * h + (1 - t) * x
        act *= transform_weight
        output = act + (1 - transform_weight) * x
        return output

    def get_config(self):
        config = {'init': initializers.serialize(self.init),
                  'activation': activations.serialize(self.activation),
                  'W_regularizer': regularizers.serialize(self.W_regularizer),
                  'b_regularizer': regularizers.serialize(self.b_regularizer),
                  'activity_regularizer': regularizers.serialize(self.activity_regularizer),
                  'W_constraint': constraints.serialize(self.W_constraint),
                  'b_constraint': constraints.serialize(self.b_constraint),
                  'bias': self.bias,
                  'input_dim': self.input_dim}
        base_config = super(Highway, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class sSGD(SGD):
    """SGD variant that multiplies every gradient by a constant ``scale``
    before the usual norm/value clipping."""

    def __init__(self, scale=1., **kwargs):
        super(sSGD, self).__init__(**kwargs)
        self.scale = scale;

    def get_gradients(self, loss, params):
        """Return gradients scaled by ``self.scale``, then clipped."""
        grads = K.gradients(loss, params)
        if self.scale != 1.:
            grads = [g*K.variable(self.scale) for g in grads]
        # Global-norm clipping (same scheme as stock keras optimizers).
        if hasattr(self, 'clipnorm') and self.clipnorm > 0:
            norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
            grads = [K.switch(norm >= self.clipnorm, g * self.clipnorm / norm, g) for g in grads]
        # Element-wise value clipping.
        if hasattr(self, 'clipvalue') and self.clipvalue > 0:
            grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
        return grads
class sModel(Model):
    """Keras Model with a perplexity-driven epoch loop, RNN-state helpers
    and JSON (architecture-only) persistence."""

    def fit_generator(self, generator, steps_per_epoch, epochs, validation_data, validation_steps, opt):
        """Train one epoch at a time; decay the learning rate when validation
        perplexity stops improving and checkpoint weights periodically.

        ``opt`` must carry decay_when, learning_rate_decay, save_every,
        checkpoint_dir and savefile attributes.
        """
        val_losses = []
        lr = K.get_value(self.optimizer.lr)
        for epoch in range(epochs):
            # One epoch per call so we can evaluate/adjust LR in between.
            super(sModel, self).fit_generator(generator, steps_per_epoch, epochs=epoch+1, verbose=1, initial_epoch=epoch)
            # exp(mean cross-entropy) == perplexity
            val_loss = exp(self.evaluate_generator(validation_data, validation_steps))
            val_losses.append(val_loss)
            print('Epoch {}/{}. Validation perplexity: {}'.format(epoch + 1, epochs, val_loss))
            # Decay LR when the improvement falls below opt.decay_when.
            if len(val_losses) > 2 and (val_losses[-2] - val_losses[-1]) < opt.decay_when:
                lr *= opt.learning_rate_decay
                K.set_value(self.optimizer.lr, lr)
            if epoch == epochs-1 or epoch % opt.save_every == 0:
                savefile = '%s/lm_%s_epoch%d_%.2f.h5' % (opt.checkpoint_dir, opt.savefile, epoch + 1, val_loss)
                self.save_weights(savefile)

    @property
    def state_updates_value(self):
        # Snapshot of all stateful-layer states (e.g. stateful LSTMs).
        return [K.get_value(a[0]) for a in self.state_updates]

    def set_states_value(self, states):
        # Restore states captured by state_updates_value.
        return [K.set_value(a[0], state) for a, state in zip(self.state_updates, states)]

    def save(self, name):
        # Persists the architecture only; weights go via save_weights().
        json_string = self.to_json()
        with open(name, 'wt') as f:
            f.write(json_string)
def load_model(name):
    """Rebuild a compiled sModel from the JSON architecture file *name*.

    Counterpart of ``sModel.save``; weights must be loaded separately via
    ``load_weights``.
    """
    with open(name, 'rt') as f:
        json_string = f.read()
    # BUG FIX: the saved architecture contains custom Highway layers, so
    # Highway must be in custom_objects or deserialization raises.
    model = model_from_json(json_string,
                            custom_objects={'sModel': sModel, 'Highway': Highway})
    # BUG FIX: compile() needs an optimizer *instance* (or name string);
    # the bare SGD class was passed before.
    model.compile(loss='sparse_categorical_crossentropy', optimizer=SGD())
    return model
def CNN(seq_length, length, feature_maps, kernels, x):
    """Character-level convolution block.

    For each (feature_map, kernel) pair, convolve the char embeddings with a
    width-`kernel` filter, max-pool over the word length, then concatenate
    all pooled maps and reshape to (seq_length, sum(feature_maps)).
    """
    pooled = []
    for n_filters, width in zip(feature_maps, kernels):
        conv = Conv2D(n_filters, (1, width), activation='tanh',
                      data_format="channels_last")(x)
        # Valid convolution leaves length - width + 1 positions to pool over.
        pool = MaxPooling2D((1, length - width + 1),
                            data_format="channels_last")(conv)
        pooled.append(pool)
    merged = Concatenate()(pooled)
    return Reshape((seq_length, sum(feature_maps)))(merged)
def LSTMCNN(opt):
    """Build the character-aware LSTM language model (Kim et al. style):
    optional word embeddings + char-CNN features -> highway layers ->
    stateful LSTM stack -> per-timestep softmax over the word vocab.

    NOTE(review): if both opt.use_words and opt.use_chars are falsy this
    raises NameError (word_vecs is undefined) -- confirm callers always set
    at least one.
    """
    # opt.seq_length = number of time steps (words) in each batch
    # opt.rnn_size = dimensionality of hidden layers
    # opt.num_layers = number of layers
    # opt.dropout = dropout probability
    # opt.word_vocab_size = num words in the vocab
    # opt.word_vec_size = dimensionality of word embeddings
    # opt.char_vocab_size = num chars in the character vocab
    # opt.char_vec_size = dimensionality of char embeddings
    # opt.feature_maps = table of feature map sizes for each kernel width
    # opt.kernels = table of kernel widths
    # opt.length = max length of a word
    # opt.use_words = 1 if use word embeddings, otherwise not
    # opt.use_chars = 1 if use char embeddings, otherwise not
    # opt.highway_layers = number of highway layers to use, if any
    # opt.batch_size = number of sequences in each batch
    if opt.use_words:
        word = Input(batch_shape=(opt.batch_size, opt.seq_length), name='word')
        word_vecs = Embedding(opt.word_vocab_size, opt.word_vec_size)(word)
    if opt.use_chars:
        chars = Input(batch_shape=(opt.batch_size, opt.seq_length, opt.max_word_l), name='chars')
        chars_embedding = Embedding(opt.char_vocab_size, opt.char_vec_size, name='chars_embedding')(chars)
        cnn = CNN(opt.seq_length, opt.max_word_l, opt.feature_maps, opt.kernels, chars_embedding)
        if opt.use_words:
            # Both sources: concatenate CNN char features with word vectors.
            x = Concatenate()([cnn, word_vecs])
            inputs = [chars, word]
        else:
            x = cnn
            inputs = chars
    else:
        x = word_vecs
        inputs = word
    if opt.batch_norm:
        x = BatchNormalization()(x)
    for l in range(opt.highway_layers):
        # Highway applied independently at every timestep.
        x = TimeDistributed(Highway(activation='relu'))(x)
    for l in range(opt.num_layers):
        # stateful=True: hidden state carries across batches (truncated BPTT).
        x = LSTM(opt.rnn_size, activation='tanh', recurrent_activation='sigmoid', return_sequences=True, stateful=True)(x)
        if opt.dropout > 0:
            x = Dropout(opt.dropout)(x)
    output = TimeDistributed(Dense(opt.word_vocab_size, activation='softmax'))(x)
    model = sModel(inputs=inputs, outputs=output)
    model.summary()
    # Gradients are scaled by seq_length to match the reference implementation.
    optimizer = sSGD(lr=opt.learning_rate, clipnorm=opt.max_grad_norm, scale=float(opt.seq_length))
    model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer)
    return model
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from hyper_parameters import *
from global_parameters import *
class AcNet(object):
    '''
    Class: A3C actor-critic network (global parameter server or local worker).

    NOTE(review): ``tf`` and ``np`` are not imported in this module; they are
    presumably re-exported by the star imports from hyper_parameters /
    global_parameters above -- confirm.
    '''
    def __init__(self, scope, globalAC=None):
        '''
        :param scope: The network it belongs to
        :param globalAC: The global_net name (required for worker networks)
        '''
        self.SESS = get_value('SESS')
        self.OPT_A = get_value('OPT_A')  # actor optimizer
        self.OPT_C = get_value('OPT_C')  # critic optimizer
        if scope == GLOBAL_NET_SCOPE:
            '''Global_net initialization'''
            # The global net only holds parameters; no losses or train ops.
            with tf.variable_scope(scope):
                self.s = tf.placeholder(dtype=tf.float32, shape=[None, N_S], name='S')
                self.a_params, self.c_params = self._build_net(scope)[-2:]  ### the last 2 return of build_net function
        else:
            # Worker net: adds losses, gradients and sync ops against globalAC.
            with tf.variable_scope(scope):
                self.s = tf.placeholder(dtype=tf.float32, shape=[None, N_S], name='S')
                self.a_his = tf.placeholder(dtype=tf.int32, shape=[None, N_A], name='A')
                self.v_target = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='Vtarget')
                self.a_prob, self.v, self.a_params, self.c_params = self._build_net(scope)
                # TD error drives both critic (squared) and actor (advantage).
                td = tf.subtract(self.v_target, self.v, name='TD_Error')
                with tf.name_scope('c_loss'):
                    self.c_loss = tf.reduce_mean(tf.square(td))
                with tf.name_scope('a_loss'):
                    # log pi(a|s); stop_gradient keeps the critic out of the
                    # actor's gradient path.
                    # NOTE(review): tf.log / keep_dims are TF1-era APIs.
                    log_prob = tf.reduce_sum(
                        tf.log(self.a_prob + 1e-5) * tf.one_hot(indices=self.a_his, depth=N_A, dtype=tf.float32),
                        axis=1,
                        keep_dims=True)
                    exp_v = log_prob * tf.stop_gradient(td)
                    # Entropy bonus encourages exploration.
                    entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob + 1e-5),
                                             axis=1,
                                             keep_dims=True)
                    self.exp_v = exp_v + ENTROPY_BETA * entropy
                    self.a_loss = tf.reduce_mean(-self.exp_v)
                with tf.name_scope('local_grad'):
                    self.a_grads = tf.gradients(self.a_loss, self.a_params)
                    self.c_grads = tf.gradients(self.c_loss, self.c_params)
            with tf.name_scope('sync'):
                # pull: copy global params into this worker;
                # push: apply this worker's gradients to the global params.
                with tf.name_scope('pull'):
                    self.pull_a_params = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
                    self.pull_c_params = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
                with tf.name_scope('push'):
                    self.push_a_params = self.OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
                    self.push_c_params = self.OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))

    def _build_net(self, scope):
        '''
        Build the two-hidden-layer actor and critic MLPs.

        :param scope: The network it belongs to
        :return: a_prob (M * N), v, a_params, c_params
        '''
        with tf.variable_scope('actor'):
            layer_a1 = tf.layers.dense(inputs = self.s,
                                       units = UNIT_A,
                                       activation = tf.nn.relu6,
                                       kernel_initializer = W_INIT,
                                       name = 'layer_a1')
            layer_a2 = tf.layers.dense(inputs = layer_a1,
                                       units = UNIT_A,
                                       activation = tf.nn.relu6,
                                       kernel_initializer = W_INIT,
                                       name = 'layer_a2')
            # Sigmoid (not softmax): per-action independent probabilities.
            a_prob = tf.layers.dense(inputs=layer_a2,
                                     units=N_A, ### N_A = M*N
                                     activation=tf.nn.sigmoid,
                                     kernel_initializer=W_INIT,
                                     name = 'a_prob')
        with tf.variable_scope('critic'):
            layer_c1 = tf.layers.dense(inputs = self.s,
                                       units = UNIT_C,
                                       activation = tf.nn.relu6,
                                       kernel_initializer = W_INIT,
                                       name = 'layer_c1')
            layer_c2 = tf.layers.dense(inputs = layer_c1,
                                       units = UNIT_C,
                                       activation = tf.nn.relu6,
                                       kernel_initializer = W_INIT,
                                       name = 'layer_c2')
            # State value: single linear output.
            v = tf.layers.dense(inputs = layer_c2,
                                units = 1,
                                kernel_initializer = W_INIT,
                                name = 'v')
        a_params = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
        c_params = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
        return a_prob, v, a_params, c_params

    def choose_action(self, s):
        '''
        :param s: state (1-D; a batch axis is prepended before the feed)
        :return: action probabilities from the actor head
        '''
        action = self.SESS.run(self.a_prob, feed_dict={self.s: s[np.newaxis, :]})
        return action

    def pull_global(self):
        '''
        Pull operation: Pull the up-to-date parameters to the local_net from the global_net
        '''
        self.SESS.run([self.pull_a_params, self.pull_c_params])

    def update_global(self, feed_dict):
        '''
        Push operation: Push the up-to-date parameters to the global_net from the local_net
        Run by a local_net
        :param feed_dict: feed_dict
        '''
        self.SESS.run([self.push_a_params, self.push_c_params], feed_dict)
|
#default flask app for funzies
from flask import Flask, render_template, request, redirect, url_for, session
import utils.manage, hashlib, os
app = Flask(__name__)
app.secret_key = os.urandom(32)
@app.route('/')
def display_login():
    """Render the login page; dumps request diagnostics to stdout."""
    # FIX: Python-2-only `print x` statements converted to print() calls,
    # which work on both Python 2 and 3.
    # ====== DIAGNOSTIC PRINT STATEMENTS ======
    print('\n\n\n')
    print('=== DIAGNOSTICS === this Flask object')
    print(app)
    print('=== DIAGNOSTICS === request object')
    print(request)
    print('=== DIAGNOSTICS === request.headers')
    print(request.headers)
    # ====== DIAGNOSTIC PRINT STATEMENTS ======
    return render_template('login.html')
@app.route('/auth', methods=['POST'])
def display_auth():
    """Handle the combined register/login form submission.

    NOTE(review): unsalted SHA-1 is a weak password hash; consider
    werkzeug.security.generate_password_hash / check_password_hash.
    """
    # FIX: py2-only print statements converted to print() calls.
    # ====== DIAGNOSTIC PRINT STATEMENTS ======
    print('\n\n\n')
    print('=== DIAGNOSTICS === request object')
    print(request)
    print('=== DIAGNOSTICS === request.form')
    print(request.form)
    print('=== DIAGNOSTICS === request.headers')
    print(request.headers)
    # ====== DIAGNOSTIC PRINT STATEMENTS ======
    hash_object0 = hashlib.sha1()
    # FIX: hashlib.update() requires bytes under Python 3.
    hash_object0.update(request.form['password'].encode('utf-8'))
    hashed_pass = hash_object0.hexdigest()
    if request.form['sub1'] == 'Register':
        if request.form['username'] in utils.manage.get_users():
            return render_template('landing.html',
                                   result='this username is already registered!')
        else:
            utils.manage.add_user(request.form['username'], hashed_pass)
            return render_template('landing.html',
                                   result='you have successfully registered!')
    else:
        if request.form['username'] in utils.manage.get_users():
            if utils.manage.get_users()[request.form['username']] == hashed_pass:
                # BUG FIX: key was misspelled 'usernname', so no later code
                # (e.g. logout) could ever find the logged-in user.
                session['username'] = request.form['username']
                return render_template('landing.html',
                                       result='successfully logged in!')
            return render_template('landing.html',
                                   result='incorrect password!')
        return render_template('landing.html',
                               result='this user is not registered!')
    # Defensive fallback (unreachable with the branches above).
    return render_template('landing.html',
                           result='uh oh u broke my website! pls send help')
@app.route('/register', methods=['POST'])
def display_register():
    """Render the registration page; dumps request diagnostics to stdout."""
    # FIX: py2-only print statements converted to print() calls.
    # ====== DIAGNOSTIC PRINT STATEMENTS ======
    print('\n\n\n')
    print('=== DIAGNOSTICS === request object')
    print(request)
    print('=== DIAGNOSTICS === request.form')
    print(request.form)
    print('=== DIAGNOSTICS === request.headers')
    print(request.headers)
    # ====== DIAGNOSTIC PRINT STATEMENTS ======
    return render_template('register.html')
@app.route('/home')
def display_home():
    """Placeholder home page; dumps request diagnostics to stdout."""
    # FIX: py2-only print statements converted to print() calls.
    # ====== DIAGNOSTIC PRINT STATEMENTS ======
    print('\n\n\n')
    print('=== DIAGNOSTICS === request object')
    print(request)
    print('=== DIAGNOSTICS === request.form')
    print(request.form)
    print('=== DIAGNOSTICS === request.headers')
    print(request.headers)
    # ====== DIAGNOSTIC PRINT STATEMENTS ======
    return 'welcome home!'
@app.route('/logout')
def display_logout():
    """Log the current user out by clearing their session key."""
    # FIX: py2-only print statements converted to print() calls.
    # ====== DIAGNOSTIC PRINT STATEMENTS ======
    print('\n\n\n')
    print('=== DIAGNOSTICS === request object')
    print(request)
    print('=== DIAGNOSTICS === request.form')
    print(request.form)
    print('=== DIAGNOSTICS === request.headers')
    print(request.headers)
    # ====== DIAGNOSTIC PRINT STATEMENTS ======
    # BUG FIX: popped 'user' while login stored the username under a
    # (misspelled) different key -- logout never cleared the session.
    session.pop('username', None)
    return 'Logged out!'
def test():
    """Debug helper: return the registered-users mapping from utils.manage."""
    return utils.manage.get_users()
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive Werkzeug debugger --
    # never leave this on in production.
    app.debug = True
    app.run()
|
import sys
sys.path.append("/Applications/Autodesk/maya2015/Maya.app/Contents/Frameworks/Python.framework/Versions/Current/lib/python2.7/site-packages/numpy-1.9.1-py2.7-macosx-10.6-intel.egg")
import maya.standalone
maya.standalone.initialize("Python")
import maya.cmds as cmds
import numpy as np
import math as ma
import gc
import maya.utils as utils
import threading
import time
# Keep Maya's hardware-render geometry cache effectively disabled so long
# batch renders do not accumulate memory.
cmds.setAttr("hardwareRenderGlobals.graphicsHardwareGeometryCachingData", 0)
cmds.setAttr("hardwareRenderGlobals.maximumGeometryCacheSize", 2)
# Scene to batch-render; the path is relative to the working directory.
MAYA_FILE = "../scenes/Hamed.mb"
cmds.file(MAYA_FILE, force=True, open=True)
def main():
    """Hardware-render frames [start, end) of the loaded scene and convert
    the rendered IFF images to PNG.

    Command line: <start frame> <end frame>
    """
    if len(sys.argv) < 3:
        # BUG FIX: the original checked `< 2` but reads argv[1] AND argv[2],
        # and its usage text described an entirely different interface.
        print("usage: %s <start frame> <end frame>" % sys.argv[0])
        sys.exit(0)
    start = int(sys.argv[1])
    end = int(sys.argv[2])
    for i in range(start, end):
        cmds.currentTime(i)
        start_time = time.time()
        cmds.hwRender(currentFrame=True)
        # NOTE(review): `iff`, `iffsDir` and `pngDir` are not defined in this
        # file -- confirm they are provided elsewhere before running.
        iff.convert(iffsDir, pngDir, only=str(i), offset=16*i)
        print("finished timeframe %d --- %s seconds ---" % (i, time.time() - start_time))
        # Keep Maya's memory bounded during long batch renders.
        cmds.flushUndo()
        cmds.clearCache(all=True)
        cmds.DeleteHistory()
if __name__ == "__main__":
    # Entry point when run under mayapy.
    main()
|
import string
import unicodedata
import os
def clear():
    """Clear the terminal screen.

    BUG FIX: the original ran Windows-only ``cls`` unconditionally, which is
    a no-op error on Linux/macOS; dispatch on the platform instead.
    """
    os.system("cls" if os.name == "nt" else "clear")
def keyOK(n, offset):
    """A (n, offset) key is valid when 0 <= offset <= 2*n - 3."""
    return 0 <= offset <= 2 * n - 3
def computeCoordinates(n, l, offset):
    """Return the l zigzag (rail-fence style) (row, column) coordinates for
    an n-row grid, starting at row ``offset``.

    Offsets past the last row (offset > n-1) are mirrored back into range and
    start in the rising phase, giving the "wave" its phase shift.
    """
    coordinates = []
    row = 0
    isRising = False
    # Mirror out-of-range offsets and begin moving upwards.
    if offset > n-1:
        offset = (n-1)-(offset-(n-1))
        isRising = True
    for column in range(l):
        coordinates.append((row+offset, column))
        # Falling phase: move down until the bottom row, then flip to rising.
        if isRising == False:
            if row+offset < n-1:
                row += 1
            else:
                isRising = True
        # Rising phase: move up until the top row, then flip to falling.
        # Note: deliberately NOT an elif -- a bounce at the bottom row falls
        # through and takes one rising step in the same iteration.
        if isRising == True:
            if row+offset > 0:
                row -= 1
            if row+offset == 0:
                isRising = False
    return coordinates
def init(n, l, coordinates):
    """Create an empty grid: map each zigzag coordinate to None.

    ``n`` and ``l`` are accepted for interface symmetry but unused.
    """
    return {pos: None for pos in coordinates}
def fullDictionaryCipher(text, l, coordinates, dictionary):
    """Write the first *l* characters of *text* into the grid, following the
    zigzag coordinate order (ciphering direction)."""
    for pos, char in zip(coordinates, text[:l]):
        dictionary[pos] = char
def fullDictionaryDecipher(text, n, l, coordinates, dictionary):
    """Fill the grid cells in row-major order with successive characters of
    *text* (deciphering direction); non-grid cells are skipped."""
    chars = iter(text)
    for row in range(n):
        for col in range(l):
            if (row, col) in dictionary:
                dictionary[(row, col)] = next(chars)
def fullDictionary(text, n, l, coordinates, dictionary, cipher):
    """Fill *dictionary* from *text*, in the direction selected by *cipher*."""
    if not cipher:
        fullDictionaryDecipher(text, n, l, coordinates, dictionary)
        return
    fullDictionaryCipher(text, l, coordinates, dictionary)
def dictionaryToStringCipher(n, l, coordinates, dictionary):
    """Read the grid row by row (cipher output order); *coordinates* is
    accepted for interface symmetry but unused."""
    return "".join(dictionary[(row, col)]
                   for row in range(n)
                   for col in range(l)
                   if (row, col) in dictionary)
def dictionaryToStringDecipher(n, l, coordinates, dictionary):
    """Read the grid in zigzag order (plaintext order)."""
    return "".join(dictionary[pos] for pos in coordinates[:l])
def dictionaryToString(n, l, coordinates, dictionary, cipher):
    """Serialize the grid to a string in the direction selected by *cipher*."""
    reader = dictionaryToStringCipher if cipher else dictionaryToStringDecipher
    return reader(n, l, coordinates, dictionary)
def displayDictionary(n, l, coordinates, dictionary):
    """Print the grid row by row; cells outside the zigzag print as spaces."""
    for row in range(n):
        rendered = []
        for col in range(l):
            if (row, col) in dictionary:
                rendered.append(str(dictionary[(row, col)]))
            else:
                rendered.append(" ")
        print("".join(rendered))
def algorithm2(text, n, offset, cipher, display):
    """Run the wave (rail-fence style) cipher/decipher and print the result.

    text    -- already-normalised A-Z message
    n       -- number of rows in the wave
    offset  -- starting row phase (must satisfy keyOK)
    cipher  -- True to cipher, False to decipher
    display -- True to also print the wave grid
    """
    if keyOK(n, offset):
        l = len(text)
        coordinates = computeCoordinates(n, l, offset)
        dictionary = init(n, l, coordinates)
        fullDictionary(text, n, l, coordinates, dictionary, cipher)
        if display:
            print("\n[?] Affichage de la vague\n")
            displayDictionary(n, l, coordinates, dictionary)
        if cipher:
            print("\n[?] Message chiffré\n")
        else:
            print("\n[?] Message déchiffré\n")
        print(dictionaryToString(n, l, coordinates, dictionary, cipher))
    else:
        print("Erreur - 'n' et 'offset' sont incompatibles")
        # Blocks until Enter so the user sees the error before the screen clears.
        input()
def convert_letter(text):
    """Normalise *text* for ciphering: uppercase, strip accents via NFKD
    decomposition, and keep only the letters A-Z."""
    alphabet = string.ascii_uppercase[:26]
    decomposed = unicodedata.normalize('NFKD', text.upper())
    ascii_form = decomposed.encode('ASCII', 'ignore').decode('utf-8')
    return "".join(ch for ch in ascii_form if ch in alphabet)
# ---- Interactive driver for algorithm2 ----------------------------------
# Sentinels: -1 means "not chosen/entered yet" for every menu loop below.
modeChoice = -1
crypChoice = -1
displayChoice = -1
n = -1
offset = -1
# Preset menu: 1 = manual input, 2 = built-in example (question 3.2).
while modeChoice < 1 or modeChoice > 2:
    clear()
    print("*** Algorithme 2 ***\n")
    print("Selectionnez un preset: ")
    print("\t1) Mode saisi manuel")
    print("\t2) Exemple question 3.2")
    modeChoice = int(input("\nTapez votre choix: "))
if modeChoice == 1:
    # Cipher-or-decipher menu.
    while crypChoice < 1 or crypChoice > 2:
        clear()
        print("*** Algorithme 2 ***\n")
        print("- Preset: manuel\n")
        print("Sélectionnez un mode d'excution: ")
        print("\t1) Chiffrement")
        print("\t2) Déchiffrement")
        crypChoice = int(input("\nTapez votre choix: "))
    clear()
    print("*** Algorithme 2 ***\n")
    print("- Preset: manuel")
    if crypChoice == 1:
        print("- Excution: chiffrement\n")
    else:
        print("- Excution: déchiffrement\n")
    # Read and normalise the message (uppercase A-Z only).
    text = input("Tapez un texte: ")
    text = convert_letter(text)
    # Show-the-wave menu.
    while displayChoice < 1 or displayChoice > 2:
        clear()
        print("*** Algorithme 2 ***\n")
        print("- Preset: manuel")
        if crypChoice == 1:
            print("- Excution: chiffrement\n")
        else:
            print("- Excution: déchiffrement\n")
        print("Sélectionnez un mode d'affichage: ")
        print("\t1) Afficher")
        print("\t2) Masquer")
        displayChoice = int(input("\nTapez votre choix: "))
    # Keep asking until (n, offset) form a valid key.
    while not keyOK(n, offset):
        clear()
        print("*** Algorithme 2 ***\n")
        print("- Preset: manuel")
        if crypChoice == 1:
            print("- Excution: chiffrement")
        else:
            print("- Excution: déchiffrement")
        if displayChoice == 1:
            print("- Display: affiché\n")
        else:
            print("- Display: masqué\n")
        n = int(input("n: "))
        offset = int(input("offset: "))
    algorithm2(text, n, offset, crypChoice == 1, displayChoice == 1)
if modeChoice == 2:
    # Fixed example: decipher the course's question 3.2 text with n=7, offset=8.
    text = "HANHARYMTPTLAYNCIPSITTITNOWRIOEFHOAEALOWIDIIGTNOSATTNSDOATNSSOEGSHLEFTTAMTODAGGITHSGTIDYTGEETSSSTETMOILJINNWGSNIEEISNAISTKNUELIYSYENNAUAAEILGYLTMUGNMUOASOGRNBTENMGNSWFIRBAJIJMEIGHIOTR"
    print("\n[?] Texte à déchiffrer :\n")
    print(text)
    algorithm2(text, 7, 8, False, True)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import requests
class AvaClient(object):
    """Thin wrapper around ``requests`` that prefixes every call with a base
    URL and injects an Authorization header.

    The per-verb header merging was previously copy-pasted seven times; it is
    factored into ``_merge_headers``. BUG FIX: ``post`` sent requests to the
    bare base URL, silently dropping ``uri`` -- inconsistent with every other
    verb.
    """

    def __init__(self, token, url):
        self._token = token
        self._url = url
        self._headers = {
            'Authorization': self._token,
        }

    def _merge_headers(self, headers):
        # Caller-supplied headers plus the auth header (auth wins on conflict;
        # like the original, a caller-passed dict is updated in place).
        if headers is None:
            headers = dict()
        headers.update(self._headers)
        return headers

    def get(self, uri, params=None, headers=None):
        return requests.get(self._url + uri, params=params,
                            headers=self._merge_headers(headers))

    def post(self, uri, params=None, headers=None):
        # BUG FIX: was requests.post(self._url, ...) without the uri suffix.
        return requests.post(self._url + uri, params=params,
                             headers=self._merge_headers(headers))

    def put(self, uri, params=None, headers=None):
        return requests.put(self._url + uri, params=params,
                            headers=self._merge_headers(headers))

    def delete(self, uri, params=None, headers=None):
        return requests.delete(self._url + uri, params=params,
                               headers=self._merge_headers(headers))

    def head(self, uri, params=None, headers=None):
        return requests.head(self._url + uri, params=params,
                             headers=self._merge_headers(headers))

    def patch(self, uri, params=None, headers=None):
        return requests.patch(self._url + uri, params=params,
                              headers=self._merge_headers(headers))

    def options(self, uri, params=None, headers=None):
        return requests.options(self._url + uri, params=params,
                                headers=self._merge_headers(headers))
|
import numpy as np
import cv2
# Load the trained cascade; NOTE(review): if 'cascade.xml' is missing this
# yields an empty classifier (no error) and detectMultiScale finds nothing.
face_cascade = cv2.CascadeClassifier('cascade.xml')
img = cv2.imread('Training/show_img/1.png')
# Detection runs on a contrast-equalized grayscale copy.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
faces = face_cascade.detectMultiScale(gray,
    scaleFactor = 1.1,
    minNeighbors = 5,
    minSize = (24, 24))
# Draw a blue box around each detection; the roi_* slices are views into the
# images, kept for debugging/inspection.
for (x,y,w,h) in faces:
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
    roi_gray = gray[y:y+h, x:x+w]
    roi_color = img[y:y+h, x:x+w]
cv2.imwrite("img_show_1.png", img)
|
# Copyright 2019 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from asreview.utils import _unsafe_dict_update
class BaseTrainData(ABC):
    """Abstract base class for balance strategies."""

    def __init__(self, balance_kwargs):
        # Start from the strategy's defaults, then overlay caller overrides.
        defaults = self.default_kwargs()
        self.balance_kwargs = _unsafe_dict_update(defaults, balance_kwargs)

    def func_kwargs_descr(self):
        """Return the balancing function, its arguments and its description."""
        cls = self.__class__
        return (cls.function(), self.balance_kwargs, cls.description())

    def default_kwargs(self):
        """Default keyword arguments; subclasses may override."""
        return {}

    def hyperopt_space(self):
        """Hyper-parameter search space; subclasses may override."""
        return {}

    @staticmethod
    @abstractmethod
    def function():
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def description():
        raise NotImplementedError
|
import re
class Session:
    """A conference session with speaker, schedule and an attendee list."""

    # Class-wide counter used to hand out sequential session ids.
    __sessid = -1

    def __init__(self, name, speaker, description, start_time, end_time, date, capacity):
        self._sessid = self._generate_id()
        self._name = name
        self._speaker = speaker
        self._description = description
        self._start_time = start_time
        self._end_time = end_time
        self._date = self._format(date)
        self._capacity = capacity
        self._attendees = []

    @staticmethod
    def _format(date):
        # Normalise "YYYY-MM-DD" into "YYYY/MM/DD".
        return date.replace('-', '/')

    def get_id(self):
        """Return this session's id as a string."""
        return str(self._sessid)

    @staticmethod
    def _generate_id():
        # Bump and return the shared counter; first id handed out is 0.
        Session.__sessid += 1
        return Session.__sessid

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def speaker(self):
        return self._speaker

    @speaker.setter
    def speaker(self, speaker):
        self._speaker = speaker

    @property
    def description(self):
        return self._description

    @description.setter
    def description(self, description):
        self._description = description

    @property
    def start_time(self):
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        self._start_time = start_time

    @property
    def date(self):
        return self._date

    @date.setter
    def date(self, date):
        self._date = date

    @property
    def end_time(self):
        return self._end_time

    @end_time.setter
    def end_time(self, end_time):
        self._end_time = end_time

    @property
    def capacity(self):
        return self._capacity

    @capacity.setter
    def capacity(self, capacity):
        self._capacity = capacity

    @property
    def attendees(self):
        return self._attendees

    def spots_left(self):
        """Number of free seats remaining."""
        return int(self._capacity) - len(self._attendees)

    def add_attendees(self, user_name):
        """Register *user_name* for this session."""
        self._attendees.append(user_name)

    def remove_attendees(self, user_name):
        """Remove the first occurrence of *user_name*, if present."""
        if user_name in self._attendees:
            self._attendees.remove(user_name)
|
# Written by Adam Turnbull
# BUG FIX: this import originally read "from psychopy import visual from
# psychopy import visual" — two statements fused on one line (SyntaxError).
from psychopy import visual
from psychopy import gui, data, core, event
import csv
import time
from time import localtime, strftime, gmtime
from datetime import datetime
import os.path
import pyglet

# Kill switch for Psychopy3
event.globalKeys.clear()  # clear global keys
esc_key = 'escape'  # create global key for escape


# define function to quit programme
def quit():
    """Close the window and terminate PsychoPy (bound to the escape key)."""
    print('User exited')
    win.close()
    core.quit()


# call globalKeys so that whenever user presses escape, quit function called
event.globalKeys.add(key=esc_key, func=quit)
# user should set cwd to the experiment directory
# (raw strings so backslashes in Windows paths cannot act as escapes)
os.chdir(r'R:\Task_SGT\Adam\psychopyScript')
# user should set directory for output files to be stored
save_path = r'R:\Task_SGT\Adam\psychopyScript\data'
# user can update instructions here if required.
instructions = """You will be presented with several video clips, some of which are just audio.
\nAt the end of each task block, you will be asked to rate several statements about the ongoing thoughts you experienced during that block.
\nTo rate these statements, hold '1' to move the marker left along the slider and hold '2' to move the marker right along the slider. When you are happy with your selection, please press ‘4’ to move on to the next statement.
\nPress '1' to begin the experiment."""
# user can update start screen text here if required.
start_screen = "The experiment is about to start. Press 5 to continue."
# dictionary backing the dialogue box below.
inputbox = {'expdate': datetime.now().strftime('%Y%m%d_%H%M'), 'part_number': '', 'videoCondition': ['Films']}
# create dialogue box: user enters participant number + video condition
# (i.e. the header of the column of video lists in the film csv file).
dlg = gui.DlgFromDict(inputbox, title='Input participation info',
                      fixed='expdate',
                      order=['expdate', 'part_number', 'videoCondition'])
# if the user doesn't press ok, the programme will quit and inform user.
if not dlg.OK:
    print("User exited")
    core.quit()
def _present_questions(questions, ratingScale, QuestionText, Scale_low, Scale_high):
    """Present every question in *questions* with the rating scale and record
    each response in the global ``thisRunDict`` under the question's Label.

    Factored out because the fixed questions and the randomised questionnaire
    were presented by two byte-identical inline loops.
    """
    for question in questions:
        ratingScale.noResponse = True
        # keyboard handling: poll raw key state via pyglet
        key = pyglet.window.key
        keyState = key.KeyStateHandler()
        win.winHandle.activate()  # to resolve mouse click issue
        win.winHandle.push_handlers(keyState)
        pos = ratingScale.markerStart
        inc = 0.1
        # while there is no response from user, present probe and scale
        while ratingScale.noResponse:
            # '1'/'2' move the marker left/right, clamped to [1, 10]
            if keyState[key._1] is True:
                pos -= inc
            elif keyState[key._2] is True:
                pos += inc
            if pos > 10:
                pos = 10
            elif pos < 1:
                pos = 1
            ratingScale.setMarkerPos(pos)
            # set text of probe and responses
            QuestionText.setText(question['Questions'])
            Scale_low.setText(question['Scale_low'])
            Scale_high.setText(question['Scale_high'])
            # draw text stimuli and rating scale
            QuestionText.draw()
            ratingScale.draw()
            Scale_low.draw()
            Scale_high.draw()
            # store response using getRating function
            responded = ratingScale.getRating()
            win.flip()
        # reset marker to middle of scale for the next probe
        ratingScale.setMarkerPos((0.5))
        # store probe label and response in thisRunDict
        thisRunDict[str(question['Label'])] = str(responded)


def thought_probes(video_name, participant_number, last=0):
    """Present thought probes, store responses (in the global ``thisRunDict``)
    and show a break screen between videos — or the end screen when
    ``last`` is non-zero."""
    # task/arousal/uncertainty questions, presented in csv order
    fixedQuestions = data.TrialHandler(nReps=1, method='sequential', trialList=data.importConditions('questions/fixedQuestions.csv'), name='fixedQuestions')
    # thought probes, presented in random order
    Questionnaire = data.TrialHandler(nReps=1, method='random', trialList=data.importConditions('questions/questions.csv'), name='Questionnaire')
    # rating scale driven by keys '1'/'2', accepted with '4'
    ratingScale = visual.RatingScale(win, low=0, high=10, markerStart=5.0,
                                     precision=10, tickMarks=[1, 10],
                                     leftKeys='1', rightKeys='2', acceptKeys='4', scale=None, labels=None, acceptPreText='Press key')
    # text stimulus for thought probe presentation
    QuestionText = visual.TextStim(win, color=[-1, -1, -1], alignHoriz='center', alignVert='top', pos=(0.0, 0.3))
    # text stimuli for low and high scale anchors
    Scale_low = visual.TextStim(win, pos=(-0.5, -0.5), color='black')
    Scale_high = visual.TextStim(win, pos=(0.6, -0.5), color='black')
    # global so the main loop can add timings and write the row out
    global thisRunDict
    thisRunDict = {'Participant_number': str(participant_number), 'videoName': video_name}
    # Both question sets are presented identically (was duplicated inline).
    _present_questions(fixedQuestions, ratingScale, QuestionText, Scale_low, Scale_high)
    _present_questions(Questionnaire, ratingScale, QuestionText, Scale_low, Scale_high)
    # break / end screen
    stim = visual.TextStim(win, "", color=[-1, -1, -1], wrapWidth=1300, units="pix", height=40)
    if last == 0:
        stim.setText("""You are welcome to take a break if you need to.
\nIf you are feeling too distressed to continue with the task, please let the experimenter know.
\nIf you are happy to continue, press '1' when you are ready.""")
        stim.draw()
        win.flip()
        # Wait for user to press '1' to continue
        key = event.waitKeys(keyList=(['1']), timeStamped=True)
    else:
        # present end screen at the end of task
        stim.setText("""You have reached the end of the experiment.
\nPlease let the experimenter know you have finished.
\nThank you for your participation.""")
        stim.draw()
        win.flip()
        # wait for user to press '1' to leave the end screen
        key = event.waitKeys(keyList=(['1']), timeStamped=True)
# Pull the values entered in the dialogue box.
part_number = inputbox['part_number']
videoCondition = inputbox['videoCondition']
expdate = inputbox['expdate']
# Output file name: <participant>_<date>_<condition>.csv inside save_path.
filename = '{}_{}_{}.csv'.format(inputbox['part_number'], inputbox['expdate'], inputbox['videoCondition'])
completeName = os.path.join(save_path, filename)
outputfile = open(completeName, "w", newline='')
# Column headers of the per-trial response rows.
fieldnames = ['Participant_number', 'videoName', 'Video_startTime', 'Video_endTime', 'Questionnaire_startTime', 'Questionnaire_endTime',
              'TrialDuration', 'Focus', 'Future', 'Past', 'Self', 'Other', 'Emotion', 'Modality', 'Detailed', 'Deliberate', 'Problem', 'Diversity', 'Intrusive', 'Source', 'Arousal', 'Tense', 'Uncertainty']
writer = csv.DictWriter(outputfile, fieldnames)
writer.writeheader()
# Sequentially present the films listed in this participant's stimlist csv.
filmDict = data.TrialHandler(nReps=1, method='sequential', trialList=data.importConditions('conditions\\stimlist_%s.csv' % part_number), name='filmList')
# White window used for all stimuli throughout the task.
win = visual.Window(size=[1024, 768], color=[1, 1, 1, ], monitor="testMonitor", fullscr=True, allowGUI=False)
stim = visual.TextStim(win, "", color=[-1, -1, -1], wrapWidth=1300, units="pix", height=40)
# Instruction screen; wait for '1'.
stim.setText(instructions)
stim.draw()
win.flip()
key = event.waitKeys(keyList=(['1']), timeStamped=True)
# Start screen; wait for '5'.
# NOTE(review): presumably '5' is a scanner trigger pulse — confirm.
stim.setText(start_screen)
stim.draw()
win.flip()
key = event.waitKeys(keyList=(['5']), timeStamped=True)
# Clock started right before the experiment begins; all stored times use it.
tasktime = core.Clock()
tasktime.reset()
for film in filmDict:
    start = time.time()  # wall-clock start, used for trial duration
    videoStart = tasktime.getTime()
    # present film using MovieStim3 until it finishes
    mov = visual.MovieStim3(win, 'stimuli\\' + film[videoCondition], size=(1920, 1080), flipVert=False, flipHoriz=False, loop=False)
    while mov.status != visual.FINISHED:
        mov.draw()
        win.flip()
    videoEnd = tasktime.getTime()
    # Decide between break screen and end screen.
    nextTrial = filmDict.getFutureTrial(n=1)  # fixes error for end screen.
    # NOTE(review): the end screen (last=1) is reached only when a future
    # trial exists but its video cell is None (e.g. a trailing blank csv
    # row); a truly final trial (nextTrial is None) shows the break screen.
    # This preserves the original behaviour — confirm it is intended.
    if nextTrial is None or nextTrial[videoCondition] != None:
        thought_probes(film[videoCondition], part_number)
    else:
        thought_probes(film[videoCondition], part_number, 1)
    qEnd = tasktime.getTime()
    end = time.time()
    trial_duration = (end - start)
    # add timings to global thisRunDict to write to outputfile below
    thisRunDict['Video_startTime'] = str(videoStart)
    thisRunDict['Video_endTime'] = str(videoEnd)
    thisRunDict['Questionnaire_startTime'] = str(videoEnd)
    thisRunDict['Questionnaire_endTime'] = str(qEnd)
    thisRunDict['TrialDuration'] = str(trial_duration)
    writer.writerows([thisRunDict])
    outputfile.flush()  # flush per trial so a crash loses at most one row
# BUG FIX: the output file handle was never closed.
outputfile.close()
import sys
import ProcessCMDArgs
import DocumentProcessor
def logicOfProgram(listOfCMDArgs):
    """Parse command-line args, build a DocumentProcessor, run it over the
    named file's contents and print the result."""

    def build(dict_dpb_parsed_args):
        # Construct the processor; attach search words only when provided.
        dpb = dict_dpb_parsed_args.get('dpb')
        dp = DocumentProcessor.DocumentProcessor(dpb)
        if 'words_to_search_for' in dict_dpb_parsed_args:
            dp.setWordsToSearchFor(dict_dpb_parsed_args.get('words_to_search_for'))
        return dp

    def get_file_content(file_name):
        # BUG FIX: use a context manager so the handle is closed even if
        # read() raises (the original closed it manually).
        with open(file_name, "r") as file_obj:
            return file_obj.read()

    def print_result(result):
        # A plain string is printed as-is; a dict is printed key by key.
        if isinstance(result, str):
            print(result)
        else:
            for key, value in result.items():
                print(key, value)

    dict_dpb_parsed_args = ProcessCMDArgs.parseCMDArgs(listOfCMDArgs)
    dp = build(dict_dpb_parsed_args)
    file_content = get_file_content(dict_dpb_parsed_args['file_name'])
    print_result(dp.process(file_content))


logicOfProgram(sys.argv)
|
import os
import datetime
import calendar
import logging
from typing import Dict
from django.conf import settings
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
from packages.tasks import celery_generate_summary
from packages.utils import start_month_year
import pytz
from bm.users.utils import (
to_datetime_object,
to_datetime_format,
get_cache,
set_cache,
days_to_secs,
)
from bm.users.dot_dict import DotDict
logger = logging.getLogger(__name__)
@api_view(["get"])
def get_currency(request):
"""
get the currency symbole and representation from json
"""
import json
file_location = os.path.join(settings.STATIC_ROOT, "js", "")
file_location += settings.BM_CURRENCY_DETAIL_JSON_FILE
with open(file_location) as file:
return Response(json.loads(file.read()), status=status.HTTP_200_OK)
def print_summary_config() -> Dict:
    """Build the date ranges (current / 2 / 3 / 6 months) used by the
    summary endpoints; each value has a 'start' and 'end' date string."""
    now = datetime.datetime.now()
    start_date = to_datetime_format(now, settings.BM_STANDARD_START_MONTH_FORMAT)
    # calendar.monthrange -> (weekday of first day, number of days in month)
    days_in_month = calendar.monthrange(now.year, now.month)[1]
    # NOTE(review): month/day are not zero-padded here (e.g. "2021-3-31");
    # downstream parsing appears to accept that — confirm before changing.
    last_date = "%d-%d-%d" % (now.year, now.month, days_in_month)
    # REMEMBER: if a dict key grows past 20 characters, manually adjust the
    # offset limit in the url.
    return {
        "current_month": {"start": start_date, "end": last_date},
        "two_months": {"start": start_month_year(2, "before"), "end": last_date},
        "three_months": {"start": start_month_year(3, "before"), "end": last_date},
        "six_months": {"start": start_month_year(6, "before"), "end": last_date},
    }
@api_view(["get"])
def print_summary_key(request):
"""Return the dict key in rest.
:param request: [request object from django]
:type request: [request]
:returns: [key value from the .. function:: print_summary_config()]
:rtype: {[dict]}
"""
cache_name = "print_summary_key"
temp = get_cache(cache_name)
if not temp:
set_timeout = (24 - datetime.datetime.now().hour) * 3600
temp = print_summary_config().keys()
try:
# Converting the dict_key object into
# list() object to accept as caches
temp = list(temp)
set_cache(cache_name, temp, set_timeout)
except TypeError as type_error:
logger.error("Unable to set cache as {}".format(type_error))
logger.debug("Unable to set cache as {}".format(type_error))
return Response({"detail": temp}, status=status.HTTP_200_OK)
@api_view(["post"])
def print_summary(request, key_value: str):
"""This function will get the param
and verfies with the .. function:: print_summary_config()
:param request: [request object from djagno ]
:type request: [request]
:param range_value: [key from .. function:: print_summary_config()]
:type range_value: [str]
:returns: [Simple message to the user]
:rtype: {[Response]}
"""
cache_name = "print_summary_timeout_"
cache_name = "%s%d" % (cache_name, request.user.id)
msg = None
# CHECK: get the cache means that the
# mail has been send.
if get_cache(cache_name):
msg = "your request has been processed."
msg += " Please check your mail for attachment."
else:
temp_request = DotDict({"user_id": request.user.id})
temp = print_summary_config()
output = celery_generate_summary.delay(request, temp[key_value], cache_name)
output.get()
msg = "summary will be mailed soon as possible"
return Response({"detail": msg}, status=status.HTTP_200_OK)
@api_view(["get"])
def get_timezone_list(request):
cache_name = "time_zone_list"
cache_content = get_cache(cache_name)
if cache_content:
return Response({"detail": cache_content})
content = pytz.common_timezones
content = list(content)
set_cache(cache_name, content, days_to_secs(4))
return Response({"detail": pytz.common_timezones})
|
from torch.utils.data import Dataset
import os
import numpy as np
import torch
from PIL import Image
class Data(Dataset):
    """Dataset yielding every image file found in a directory as a tensor."""

    def __init__(self, path):
        # Directory containing the images; each entry becomes one sample.
        self.path = path
        self.dataset = os.listdir(self.path)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        # Full path of the index-th image file.
        file_name = self.dataset[index]
        img_path = os.path.join(self.path, file_name)
        # Scale pixel values to [-0.5, 0.5] and convert to a tensor.
        img_data = torch.Tensor(np.array(Image.open(img_path)) / 255. - 0.5)
        # Reorder H,W,C -> C,H,W as torch models expect.
        return img_data.permute(2, 0, 1)
# if __name__ == '__main__':
# path = r'E:\AI\GAN\faces'
# data = Data(path)
# print(data[1]) |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .models import Score
from .serializers import ScoreSerializer
class ScoreView(APIView):
    """Create Score records from posted game results."""

    def post(self, req):
        """Validate and persist a score: 201 on success, 400 otherwise."""
        payload = {
            'user': req.data.get('userId'),
            'win': req.data.get('win'),
            'time': req.data.get('time'),
        }
        serializer = ScoreSerializer(data=payload)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
|
# Demo: print the same ten lines twice — first via a for loop over range(),
# then via the equivalent manual while-loop counter.
for i in range(10):
    print("i is now {}".format(i))
print("*" * 20)
i = 0
while i < 10:
    print("i is now {}".format(i))
    i += 1
# -*- coding: utf-8 -*-
from datetime import datetime
from irc3.compat import Queue
from irc3.compat import QueueFull
import irc3
__doc__ = '''
==============================================
:mod:`irc3.plugins.ctcp` CTCP replies
==============================================
..
>>> from irc3.testing import IrcBot
>>> from irc3.testing import ini2config
Usage::
>>> config = ini2config("""
... [bot]
... includes =
... irc3.plugins.ctcp
... [ctcp]
... foo = bar
... """)
>>> bot = IrcBot(**config)
Try to send a ``CTCP FOO``::
>>> bot.test(':gawel!user@host PRIVMSG irc3 :\x01FOO\x01', show=False)
>>> # remove escape char for testing..
>>> print(bot.sent[0].replace('\x01', '01'))
NOTICE gawel :01FOO bar01
'''
@irc3.plugin
class CTCP:
    """ctcp replies"""

    def __init__(self, bot):
        # Bounded queue: more than ctcp_max_replies pending requests inside
        # the one-second drain window counts as a flood.
        maxsize = int(bot.config.get('ctcp_max_replies', 3))
        self.queue = Queue(loop=bot.loop, maxsize=maxsize)
        # call_later handle that drains the queue; None while no drain is
        # scheduled.
        self.handle = None
        self.event = irc3.event(irc3.rfc.CTCP, self.on_ctcp)
        bot.attach_events(self.event)
        self.bot = bot

    def clear_queue(self):
        """Drain the reply queue and allow a new drain to be scheduled."""
        self.bot.log.info('CTCP clear queue')
        while not self.queue.empty():
            self.queue.get_nowait()
        self.handle = None

    def handle_flood(self):
        """Stop listening for CTCP events for 30 seconds after an overflow."""
        self.bot.log.warn('CTCP Flood detected. '
                          'Ignoring requests for 30s')
        # ignore events for 30s
        self.bot.detach_events(self.event)
        self.bot.loop.call_later(30, self.bot.attach_events, self.event)

    def on_ctcp(self, mask=None, target=None, ctcp=None, **kw):
        """Reply to a configured CTCP request, rate-limited via the queue."""
        # Only the first word of the CTCP payload selects the reply.
        lctcp = ctcp.split(' ')[0].lower()
        if lctcp in self.bot.config.ctcp:
            try:
                self.queue.put_nowait((mask.nick, lctcp))
            except QueueFull:
                self.handle_flood()
            else:
                # Schedule a single queue drain one second from now.
                if self.handle is None:
                    self.handle = self.bot.loop.call_later(1, self.clear_queue)
                # Reply template may interpolate the current time and any
                # bot config value.
                data = self.bot.config.ctcp[lctcp].format(now=datetime.now(),
                                                          **self.bot.config)
                self.bot.ctcp_reply(mask.nick, '%s %s' % (lctcp.upper(), data))
|
#!/usr/bin/python
from termcolor import colored
import sys
import signal
import requests
import json
from flask import jsonify
def signal_handler(sig, frame):
    """SIGINT handler: print a goodbye message and exit with status 0."""
    print()
    print(colored("Pleeaasee don't leave me", 'cyan', attrs=['bold']))
    raise SystemExit(0)
signal.signal(signal.SIGINT, signal_handler)
flag = 0
# The REST API port must be supplied on the command line.
if len(sys.argv) == 1:
    print("Usage is python3 client.py", colored("PORT", 'grey', attrs=['bold']))
    sys.exit(0)
port = sys.argv[1]
print(" ")
print("Welcome to the noobcash client")
base_url = "http://127.0.0.1:" + port + "/"
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
# Main REPL: read an action from the user and dispatch to the REST API.
while 1:
    print(" ")
    if flag:
        flag = 0
        print("Invalid action")
        action = input()
    else:
        print('Select an action')
        action = input()
    if action == 'help':
        print(" ")
        print("Supported actions are")
        print("")
        # t <recipient_address> <amount>
        print(colored('t <recipient_address> <amount>', 'grey', attrs=['bold']), " creates a new transaction ")
        print("")
        # show balance
        print(colored('show balance', 'grey', attrs=['bold']), " returns your account balance")
        print("")
        # view
        print(colored('view transactions', 'grey', attrs=['bold']), " displays the transactions contained in the last validated block")
        print("")
        # help
        print(colored('help', 'grey', attrs=['bold']), " displays supported actions")
        print("")
        # fifth action is exit
        print(colored('exit', 'grey', attrs=['bold']), " exits the system")
        print("")
    elif not action:
        flag = 1
        continue
    elif action[0] == 't':
        print(" ")
        inputs = action.split()
        # BUG FIX: a malformed command such as "t" or "t addr" used to crash
        # with IndexError; treat it as an invalid action instead.
        if len(inputs) < 3:
            flag = 1
            continue
        url = base_url + "create_transaction"
        recepient_address = inputs[1]
        amount = inputs[2]
        # NOTE: 'recepient_address' (sic) is the key the server expects.
        payload = json.dumps({'recepient_address': recepient_address, 'amount': amount})
        response = requests.post(url, data=payload, headers=headers)
        # print(response.json())
    elif action == 'show balance':
        print(" ")
        url = base_url + "show_balance"
        response = requests.get(url)
        print(response.json())
    elif action == 'view transactions':
        print(" ")
        url = base_url + "view_last_transactions"
        response = requests.get(url)
        print(response.json())
    elif action in ('exit', 'Exit', 'exit()', 'EXIT()', 'EXIT'):
        print(" ")
        print(colored("Pleeaasee don't leave me", 'cyan', attrs=['bold']))
        sys.exit(0)
    else:
        flag = 1
import pytest
from autumn.settings import Models
from autumn.core.project.project import _PROJECTS, get_project
COVID_PROJECTS = list(_PROJECTS[Models.COVID_19].keys())
@pytest.mark.benchmark
@pytest.mark.github_only
@pytest.mark.parametrize("project_name", COVID_PROJECTS)
def test_benchmark_covid_models(project_name, benchmark):
    """
    Performance benchmark: check how long our models take to run.
    See: https://pytest-benchmark.readthedocs.io/en/stable/
    Run these with pytest -vv -m benchmark --benchmark-json benchmark.json
    """
    # `benchmark` is the pytest-benchmark fixture; it repeatedly times the
    # _run_model call for this project.
    project = get_project(Models.COVID_19, project_name)
    benchmark(_run_model, project=project)
def _run_model(project):
    # Benchmark target: run the project's baseline model once with its
    # baseline parameter set.
    project.run_baseline_model(project.param_set.baseline)
|
# Generated by Django 3.1.3 on 2020-11-29 17:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: re-point Portfolio.img uploads to
    'portfolio_app/images'."""

    dependencies = [
        ('portfolio_app', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='portfolio',
            name='img',
            field=models.ImageField(upload_to='portfolio_app/images'),
        ),
    ]
|
import re

# Advent-of-Code style "security by obscurity" rooms: for every line that
# passes its checksum, shift-decrypt the name and print it.
# PORTED TO PYTHON 3: the original used dict.iteritems(), a tuple-unpacking
# lambda and the print statement — all removed in Python 3.
sectors = open('input').read()
total = 0  # NOTE(review): unused here; presumably a part-1 leftover.
for sector in sectors.split("\n"):
    if not sector:
        continue
    letters = dict()
    sector_id = re.search(r"\d+", sector).group(0)
    checksum = re.search(r"\[(.*?)\]", sector).group(1)
    full_code = re.match(r"[a-z-]+", sector).group(0)
    codes = full_code.replace("-", "")
    # Count letter frequencies in the encrypted name (dashes excluded).
    for code in codes:
        letters[code] = letters.get(code, 0) + 1
    # Letters sorted by descending count, ties broken alphabetically.
    letters = [k for k, v in sorted(letters.items(),
                                    key=lambda kv: (-kv[1], kv[0]))]
    wrong = False
    for i in range(len(checksum)):
        if checksum[i] != letters[i]:
            wrong = True
            break
    if not wrong:
        # Caesar-shift each letter by the sector id; dashes become spaces.
        name = ""
        for ch in full_code:
            if ch == "-":
                name += " "
                continue
            name += chr((ord(ch) - 97 + int(sector_id)) % 26 + 97)
        print("Name: " + name + " [" + str(sector_id) + "]")
|
import math
import types
"""Module containing functions and classes that are shared between different tests"""
def assertFloatWithinPercentage(testCase, expect, actual, percenttolerance = 0.5, places = 5):
    """Assert that *actual* lies within *percenttolerance* percent of
    *expect*.

    When expect == 0.0 a percentage is meaningless, so fall back to
    testCase.assertAlmostEqual with the given number of *places*.
    """
    if expect == 0.0:
        testCase.assertAlmostEqual(expect, actual, places=places)
        return
    percentDifference = abs((actual - expect) / float(expect)) * 100.0
    testCase.assertTrue(
        percentDifference <= percenttolerance,
        "Actual %f != Expect %f within tolerance of %f%% (difference = %f%%)"
        % (actual, expect, percenttolerance, percentDifference),
    )
def checkVector(tc, expect, actual, msg=None, tolerance = 0.0025):
    """Assert that two 3-vectors are within *tolerance* of each other
    (Euclidean distance).
    @param tc unittest.TestCase object
    @param expect Expected vector
    @param actual Actual vector
    @param msg Test fail message
    @param tolerance Acceptable distance between expected and actual vectors"""
    tc.assertEqual(len(expect), len(actual), msg=msg)
    diff = (expect[0] - actual[0], expect[1] - actual[1], expect[2] - actual[2])
    dist = math.sqrt(diff[0] ** 2 + diff[1] ** 2 + diff[2] ** 2)
    if msg is None:
        msg = "%s != %s" % (expect, actual)
    # BUG FIX: the original asserted `dist <= dist`, which is always true,
    # so the tolerance was never actually enforced.
    tc.assertTrue(dist <= tolerance, msg=msg)
def _compareCollection(path, testCase, expect, actual, places, percenttolerance):
expectType = type(expect)
if expectType == list or expectType == tuple:
#Compare lists
try:
testCase.assertEqual(len(expect), len(actual))
except AssertionError as e:
raise AssertionError("%s at '%s'" % (str(e), path))
for i,(e,a) in enumerate(zip(expect, actual)):
_compareCollection(path+'[%d]'% i, testCase, e,a, places, percenttolerance)
elif expectType == dict:
#Compare dictionaries
ekeys = list(expect.keys())
akeys = list(actual.keys())
ekeys.sort()
akeys.sort()
testCase.assertEqual(ekeys, akeys)
for k,v in expect.items():
_compareCollection(path+'[%s]'% (k,), testCase, v, actual[k], places, percenttolerance)
elif expectType == float:
#Compare float type in a fuzzy manner
try:
if math.isnan(expect):
testCase.assertTrue(math.isnan(actual))
elif percenttolerance != None:
assertFloatWithinPercentage(testCase, expect, actual, percenttolerance = percenttolerance, places = places)
else:
testCase.assertAlmostEqual(expect, actual, places = places)
except AssertionError as e:
raise AssertionError("%s at '%s'" % (str(e), path))
else:
#Compare anything else
try:
testCase.assertEqual(expect,actual)
except AssertionError as e:
raise AssertionError("%s at '%s'" % (str(e), path))
def compareCollection(testCase, expect, actual, places = 5, percenttolerance = None):
    """Check that two (possibly nested) collections are the same; see
    _compareCollection for the comparison rules."""
    _compareCollection("collection", testCase, expect, actual, places, percenttolerance)
|
/home/ajitkumar/anaconda3/lib/python3.7/enum.py |
# vim:fileencoding=utf-8:noet
# WARNING: using unicode_literals causes errors in argparse
from __future__ import (division, absolute_import, print_function)
import argparse
import sys
from itertools import chain
from powerline.lib.overrides import parsedotval, parse_override_var
from powerline.lib.dict import mergeargs
from powerline.lib.encoding import get_preferred_arguments_encoding
from powerline.lib.unicode import u, unicode
from powerline.bindings.wm import wm_threads
if sys.version_info < (3,):
    # Python 2: command-line arguments arrive as byte strings; decode them
    # with the preferred encoding, replacing undecodable bytes.
    encoding = get_preferred_arguments_encoding()
    def arg_to_unicode(s):
        return unicode(s, encoding, 'replace') if not isinstance(s, unicode) else s  # NOQA
else:
    # Python 3: argv entries are already str, so this is the identity.
    def arg_to_unicode(s):
        return s
def finish_args(parser, environ, args, is_daemon=False):
    '''Do some final transformations

    Transforms ``*_override`` arguments into dictionaries, adding overrides from
    environment variables. Transforms ``renderer_arg`` argument into dictionary
    as well, but only if it is true.

    :param dict environ:
        Environment from which additional overrides should be taken from.
    :param args:
        Arguments object returned by
        :py:meth:`argparse.ArgumentParser.parse_args`. Will be modified
        in-place.

    :return: Object received as second (``args``) argument.
    '''
    # Environment overrides are listed first so explicitly passed CLI
    # overrides win when mergeargs merges them.
    args.config_override = mergeargs(chain(
        parse_override_var(environ.get('POWERLINE_CONFIG_OVERRIDES', '')),
        (parsedotval(v) for v in args.config_override or ()),
    ))
    args.theme_override = mergeargs(chain(
        parse_override_var(environ.get('POWERLINE_THEME_OVERRIDES', '')),
        (parsedotval(v) for v in args.theme_override or ()),
    ))
    if args.renderer_arg:
        args.renderer_arg = mergeargs((parsedotval(v) for v in args.renderer_arg), remove=True)
        if 'pane_id' in args.renderer_arg:
            # pane ids look like ' %12' — strip and convert to int when
            # possible (NOTE(review): presumably tmux pane ids — confirm).
            if isinstance(args.renderer_arg['pane_id'], (bytes, unicode)):
                try:
                    args.renderer_arg['pane_id'] = int(args.renderer_arg['pane_id'].lstrip(' %'))
                except ValueError:
                    pass
            if 'client_id' not in args.renderer_arg:
                args.renderer_arg['client_id'] = args.renderer_arg['pane_id']
    # Environment-supplied config paths come before CLI-supplied ones.
    args.config_path = (
        [path for path in environ.get('POWERLINE_CONFIG_PATHS', '').split(':') if path]
        + (args.config_path or [])
    )
    if args.ext[0].startswith('wm.'):
        # Window-manager extensions only run inside the daemon and must name
        # a known wm thread.
        if not is_daemon:
            parser.error('WM bindings must be used with daemon only')
        elif args.ext[0][3:] not in wm_threads:
            parser.error('WM binding not found')
    elif not args.side:
        # Non-wm extensions require the positional `side` argument.
        parser.error('expected one argument')
    return args
def int_or_sig(s):
    """Parse an exit status: strings starting with 'sig' are kept as
    (unicode) signal names, anything else must parse as an integer."""
    if s.startswith('sig'):
        return u(s)
    return int(s)
def get_argparser(ArgumentParser=argparse.ArgumentParser):
    '''Build the argument parser shared by powerline command-line clients.

    :param ArgumentParser:
        Parser class to instantiate; injectable for testing.

    :return: Configured parser instance; run the result of
        :py:meth:`argparse.ArgumentParser.parse_args` through
        :py:func:`finish_args` before use.
    '''
    parser = ArgumentParser(description='Powerline prompt and statusline script.')
    # Positional arguments: extension (required) and side (optional for wm.*).
    parser.add_argument(
        'ext', nargs=1,
        help='Extension: application for which powerline command is launched '
        '(usually `shell\' or `tmux\'). Also supports `wm.\' extensions: '
        + ', '.join(('`wm.' + key + '\'' for key in wm_threads.keys())) + '.'
    )
    parser.add_argument(
        'side', nargs='?', choices=('left', 'right', 'above', 'aboveleft'),
        help='Side: `left\' and `right\' represent left and right side '
        'respectively, `above\' emits lines that are supposed to be printed '
        'just above the prompt and `aboveleft\' is like concatenating '
        '`above\' with `left\' with the exception that only one Python '
        'instance is used in this case. May be omitted for `wm.*\' extensions.'
    )
    parser.add_argument(
        '-r', '--renderer-module', metavar='MODULE', type=str,
        help='Renderer module. Usually something like `.bash\' or `.zsh\' '
        '(with leading dot) which is `powerline.renderers.{ext}{MODULE}\', '
        'may also be full module name (must contain at least one dot or '
        'end with a dot in case it is top-level module) or '
        '`powerline.renderers\' submodule (in case there are no dots).'
    )
    parser.add_argument(
        '-w', '--width', type=int,
        help='Maximum prompt with. Triggers truncation of some segments.'
    )
    parser.add_argument(
        '--last-exit-code', metavar='INT', type=int_or_sig,
        help='Last exit code.'
    )
    parser.add_argument(
        '--last-pipe-status', metavar='LIST', default='',
        type=lambda s: [int_or_sig(status) for status in s.split()],
        help='Like above, but is supposed to contain space-separated array '
        'of statuses, representing exit statuses of commands in one pipe.'
    )
    parser.add_argument(
        '--jobnum', metavar='INT', type=int,
        help='Number of jobs.'
    )
    # Override options accumulate (action='append') and are later merged
    # into dictionaries by finish_args().
    parser.add_argument(
        '-c', '--config-override', metavar='KEY.KEY=VALUE', type=arg_to_unicode,
        action='append',
        help='Configuration overrides for `config.json\'. Is translated to a '
        'dictionary and merged with the dictionary obtained from actual '
        'JSON configuration: KEY.KEY=VALUE is translated to '
        '`{"KEY": {"KEY": VALUE}}\' and then merged recursively. '
        'VALUE may be any JSON value, values that are not '
        '`null\', `true\', `false\', start with digit, `{\', `[\' '
        'are treated like strings. If VALUE is omitted '
        'then corresponding key is removed.'
    )
    parser.add_argument(
        '-t', '--theme-override', metavar='THEME.KEY.KEY=VALUE', type=arg_to_unicode,
        action='append',
        help='Like above, but theme-specific. THEME should point to '
        'an existing and used theme to have any effect, but it is fine '
        'to use any theme here.'
    )
    parser.add_argument(
        '-R', '--renderer-arg',
        metavar='KEY=VAL', type=arg_to_unicode, action='append',
        help='Like above, but provides argument for renderer. Is supposed '
        'to be used only by shell bindings to provide various data like '
        'last-exit-code or last-pipe-status (they are not using '
        '`--renderer-arg\' for historical reasons: `--renderer-arg\' '
        'was added later).'
    )
    parser.add_argument(
        '-p', '--config-path', action='append', metavar='PATH',
        help='Path to configuration directory. If it is present then '
        'configuration files will only be sought in the provided path. '
        'May be provided multiple times to search in a list of directories.'
    )
    parser.add_argument(
        '--socket', metavar='ADDRESS', type=str,
        help='Socket address to use in daemon clients. Is always UNIX domain '
        'socket on linux and file socket on Mac OS X. Not used here, '
        'present only for compatibility with other powerline clients. '
        'This argument must always be the first one and be in a form '
        '`--socket ADDRESS\': no `=\' or short form allowed '
        '(in other powerline clients, not here).'
    )
    return parser
def write_output(args, powerline, segment_info, write):
    """Render the statusline(s) described by ``args`` and emit them via ``write``.

    Merges renderer arguments into ``segment_info``, writes any "above"
    lines first (newline-terminated, empty lines skipped), then renders
    the remaining prompt side, if any.
    """
    if args.renderer_arg:
        segment_info.update(args.renderer_arg)
    mode = segment_info.get('mode', None)
    if args.side.startswith('above'):
        above_lines = powerline.render_above_lines(
            width=args.width,
            segment_info=segment_info,
            mode=mode,
        )
        for rendered_line in above_lines:
            if rendered_line:
                write(rendered_line + '\n')
        # 'aboveleft' degrades to 'left'; plain 'above' leaves nothing more to do.
        args.side = args.side[len('above'):]
    if args.side:
        write(powerline.render(
            width=args.width,
            side=args.side,
            segment_info=segment_info,
            mode=mode,
        ))
|
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
import astropy.coordinates as coords
from astropy.time import Time
from astropy.timeseries import TimeSeries
from pygdsm import GSMObserver2016, GlobalSkyModel2016
from datetime import datetime
from healpy import ang2vec, ang2pix, get_nside
import lofarantpos.db
from observation import Observation
def sb_to_f(sbs, obs_mode):
    """
    Convert LOFAR subband number(s) to frequency in MHz.

    ``obs_mode`` selects both the Nyquist zone (3 -> 1, 5 -> 2, 7 -> 3)
    and the sampling clock (200 MHz for modes 3/5/7, 160 MHz for 4/6).
    Returns an astropy Quantity in MHz.
    """
    nyquist_zone = {3: 1, 5: 2, 7: 3}[obs_mode]
    clock_mhz = {3: 200, 4: 160, 5: 200, 6: 160, 7: 200}[obs_mode]  # MHz
    frequency = (nyquist_zone - 1. + sbs / 512.) * (clock_mhz / 2.)
    return frequency * u.MHz
def astropytime_to_datetime(t):
    """Convert an astropy ``Time`` in ISOT format to ``datetime.datetime``.

    :param t: object whose ``.value`` is an ISOT string such as
        ``'2020-12-15T20:04:00.000'`` (fractional seconds optional).
    :return: equivalent ``datetime.datetime``.

    Fixes two defects of the previous string-chopping implementation:
    it raised IndexError when the seconds carried no fractional part
    (e.g. ``'2020-12-15T20:04:00'``), and it mis-scaled the fraction
    (``'.004'`` became 4 microseconds instead of 4000). strptime's
    ``%f`` directive scales fractional seconds correctly.
    """
    value = t.value
    try:
        return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
    except ValueError:
        # No fractional-seconds component in the string.
        return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')
def kondratiev_temp(freqs):
    """
    LBA antenna temperature model used by Kondratiev: a sixth-order
    polynomial in frequency (MHz) from Wijnholds+2011.

    :param freqs: astropy Quantity (or any object exposing ``.value``
        as frequencies in MHz).
    :return: antenna temperature(s) evaluated at ``freqs.value``.
    """
    coefficients = [
        6.2699888333e-05, -0.019932340239, 2.60625093843,
        -179.560314268, 6890.14953844, -140196.209123, 1189842.07708,
    ]
    return np.polyval(coefficients, freqs.value)
def get_min_d(positions):
    """
    Nearest-neighbour distance for every point.

    :param positions: (N, 3) array of cartesian positions.
    :return: list of N distances, one per point, to its closest
        other point.
    """
    nearest = []
    for point in positions:
        delta = point - positions
        dist = np.sqrt(delta[:, 0] ** 2 + delta[:, 1] ** 2 + delta[:, 2] ** 2)
        # Index 0 of the sorted distances is the point itself (d == 0),
        # so the nearest neighbour sits at index 1.
        nearest.append(np.sort(dist)[1])
    return nearest
def get_lofar_aeff_max(freqs, n_elem, station_str='IE613LBA'):
    """
    Calculate the max Aeff (effective area, m^2) of a LOFAR station.

    Per element the effective area is the smaller of lambda^2/3 and
    pi*d^2/4, where d is the distance to the element's nearest neighbour
    (sparse vs dense array regimes); the station value is n_elem times
    the mean over elements.

    :param freqs: frequencies as an astropy Quantity in MHz.
    :param n_elem: number of antenna elements in the station.
    :param station_str: station/antenna-set name understood by lofarantpos.
    :return: numpy array of effective areas, one per frequency.
    """
    c = 300.0 #speed of light when converting from MHz
    l = c / freqs #wavelength in metres
    db = lofarantpos.db.LofarAntennaDatabase()
    positions = db.antenna_pqr(station_str)
    ds = get_min_d(positions)
    print(ds)  # NOTE(review): leftover debug output
    aeff = []
    for j in l.value:
        aef = []
        for s in ds:
            #aef.append((j**2.)/3.)
            aef.append(np.minimum((j**2.)/3., np.pi*(s**2.)/4.))
        aeff.append(n_elem*np.mean(aef))
    return np.array(aeff)
def galactic_noise(freqs, gal_coords):
    """
    Get the background sky noise from PyGDSM from D. Price on github
    (https://github.com/telegraphic/pygdsm).

    :param freqs: astropy Quantity of frequencies in MHz.
    :param gal_coords: Galactic coordinate object with ``.l``/``.b``.
    :return: sky temperature at the target pixel, one value per frequency.
    """
    gsm = GlobalSkyModel2016(freq_unit='MHz', data_unit='TCMB')
    #freqs = np.linspace(15,30,79)
    map_cube = gsm.generate(freqs.value)
    vec = ang2vec(np.radians(gal_coords.l.value), np.radians(gal_coords.b.value))  # NOTE(review): unused
    # NOTE(review): healpy's ang2pix expects colatitude theta in [0, pi];
    # passing radians(l) as theta and radians(b) as phi looks suspect --
    # verify against healpy's lonlat=True convention.
    ipix = ang2pix(nside=get_nside(map_cube[0]), theta=np.radians(gal_coords.l.value), phi=np.radians(gal_coords.b.value))
    T_sky = map_cube[:, ipix]
    return T_sky
def sensitivity(freqs, gal_coords, n_elem=96):
    """
    Radiometer-equation sensitivity per time/frequency bin.
    From kondratiev+2016.

    :param freqs: channel centre frequencies (astropy Quantity, MHz);
        assumed evenly spaced -- channel width is taken from the first pair.
    :param gal_coords: Galactic coordinates of the target (sets T_sky).
    :param n_elem: number of antenna elements in the station.
    :return: sensitivity array, one value per frequency (cgs units;
        multiply by 1e23 for Jy, as done by the caller).
    """
    kb = 1.38e-16 #in cgs (so output will be erg/m^2)
    beta = 1 #we are using 16 bit observations so digitisation correction is not needed
    n_pol = 2
    #T_obs = 81.92e-6 #observation resolution
    T_obs = 1e-3 #resample data to 1 ms..
    #T_obs = 1 #1s resolution
    bw = (freqs[1].value-freqs[0].value)*1e6 #bandwidth
    print(bw)  # NOTE(review): debug prints left in throughout this function
    T_sky = galactic_noise(freqs, gal_coords)
    T_ant = kondratiev_temp(freqs)
    print(T_sky, T_ant)
    T_sys = T_ant + T_sky
    Aeff = get_lofar_aeff_max(freqs, n_elem)
    Aeff = Aeff*1e4 #convert to cm^2 from m^2
    print(Aeff)
    gain = Aeff / (2.*kb)
    return beta*T_sys / (gain*np.sqrt(n_elem*(n_elem-1)*n_pol*T_obs*bw))
"""
gsm = GlobalSkyModel2016(freq_unit='MHz', data_unit='TCMB')
#freqs = np.linspace(15,30,79)
map_cube = gsm.generate(freqs.value)
vec = ang2vec(np.radians(gal_coords.l.value), np.radians(gal_coords.b.value))
ipix = ang2pix(nside=get_nside(map_cube[0]), theta=np.radians(gal_coords.l.value), phi=np.radians(gal_coords.b.value))
print(map_cube.shape)
plt.loglog(map_cube[:,ipix]) # Random pixel
#plt.loglog(map_cube[:,ipix]) # Another random pixel
plt.xlabel("Frequency [MHz]")
plt.ylabel("Temperature [K]")
plt.savefig('uranus_skymodel/mollweide_multi_freq.png')
plt.close()
model_freq = 20 #MHz
ov = GSMObserver2016()
ov.lat, ov.long, ov.elev = ilofar.lat.value, ilofar.lon.value, ilofar.height.value
#ov.date = astropytime_to_datetime(t)
#ov.generate(model_freq)
samples = 20
tdelta = duration/samples
tsl = TimeSeries(time_start=start, time_delta=tdelta, n_samples=samples)
"""
# Dead debugging branch (deliberately disabled): would render an
# orthographic sky view for each sampled time. NOTE(review): it references
# ``tsl``, ``ov`` and ``model_freq``, which are only defined inside the
# triple-quoted string above, so enabling it as-is would raise NameError.
if False:
    for i,j in enumerate(tsl):
        print(j[0])
        ov.date = astropytime_to_datetime(j[0])
        print(ov.date)
        ov.generate(model_freq)
        ov.view(logged=True)
        #plt.show()
        plt.savefig('uranus_skymodel/ortho_{}'.format(i))
        plt.close()
if __name__=="__main__":
    # Subbands 76-197 in observing mode 3 (see sb_to_f for the mapping).
    #sbs = np.arange(76,198)[:78]
    sbs = np.arange(76,198)
    freqs = sb_to_f(sbs, 3)
    start = '2020-12-15T20:04:00' #isot format
    duration = 176*60*u.second # seconds
    n_elem = 96
    myobs = Observation('uranus', start, 'isot', duration.value, freqs)
    t = Time('2020-12-15T20:04:00', format='isot')
    c = coords.get_body('uranus', t)
    # Station location (lat/lon in degrees, as strings accepted by astropy).
    ilofar = coords.EarthLocation(lat='53.095', lon='-7.9218', height=100*u.m)
    aa = coords.AltAz(location=ilofar, obstime=t)
    altaz_coords = c.transform_to(aa) #at ilofar
    gal_coords = c.transform_to(coords.Galactic())
    print(myobs)
    print('AltAz Coordinates\n'+'alt: {}\taz: {}'.format(altaz_coords.alt, altaz_coords.az))
    print('Galactic Coordinates\n'+'l: {}\tb: {}\n'.format(gal_coords.l, gal_coords.b))
    s = sensitivity(freqs, gal_coords, n_elem=n_elem)
    print(s)
jy = 1e23*s |
# Read whitespace-separated scores, then report the list plus its
# maximum and minimum.
student_scores = input("Input a list of student scores with 'space' between each score: \n").split()
student_scores = [int(score) for score in student_scores]
# Seeding with 0 and 1000 mirrors the original accumulator start values,
# so behaviour is unchanged even for out-of-range inputs.
max_value = max([0] + student_scores)
min_value = min([1000] + student_scores)
print(student_scores)
print(f"Maximum student score is: {max_value}")
print(f"Minimum student score is: {min_value}")
# Advent of Code 2020, day 7 (part 1): count how many bag colours can
# eventually (transitively) contain at least one "shiny gold" bag.
# T holds the worked example from the puzzle statement for local testing.
T = [
    "light red bags contain 1 bright white bag, 2 muted yellow bags.\n",
    "dark orange bags contain 3 bright white bags, 4 muted yellow bags.\n",
    "bright white bags contain 1 shiny gold bag.\n",
    "muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.\n",
    "shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.\n",
    "dark olive bags contain 3 faded blue bags, 4 dotted black bags.\n",
    "vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.\n",
    "faded blue bags contain no other bags.\n",
    "dotted black bags contain no other bags.\n",
]
# Normalise each rule to a flat token list:
# [outer_adj, outer_colour, count, adj, colour, count, adj, colour, ...]
L = [
    line.strip()
    .replace(" no other bags", "")
    .replace(" bags", "")
    .replace(" bag", "")
    .replace(",", "")
    .replace(".", "")
    .replace(" contain", "")
    .split(" ")
    for line in open("input_07.txt").readlines()
    # for line in T
]
# print(L)
# S is a reverse containment index: inner bag -> {outer bag: count}.
S = dict()
for line in L:
    B = line[0] + " " + line[1]
    if len(line) > 2:
        # Contents come in (count, adjective, colour) triples.
        for i in range(len(line[2:]) // 3):
            bag = line[2 + i * 3 + 1] + " " + line[2 + i * 3 + 2]
            num = int(line[2 + i * 3])
            try:
                S[bag][B] = num
            except KeyError:
                S[bag] = {B: num}
# Breadth-first expansion outwards from "shiny gold": each pass collects
# every bag that can directly hold one already known to reach shiny gold.
Bags = set()
target = {"shiny gold"}
done = False
while not done:
    NewBags = set()
    for bag in target:
        try:
            NewBags = set.union(set(S[bag].keys()), NewBags)
        except KeyError:
            pass  # bag is never contained in anything
    if not NewBags:
        done = True
    else:
        target = NewBags
        Bags = set.union(Bags, NewBags)
print(len(Bags)) |
"""
Invokes flask_djangofy-admin when the flask-djangofy module is run as a script.
Example: python -m flask_djangofy check
"""
from flask_djangofy.core import management
if __name__ == "__main__":
management.execute_from_command_line() |
from rest_framework import viewsets
from share.models.jobs import IngestJob
from api.base.views import ShareViewSet
from api.pagination import CursorPagination
from api.ingestjobs.serializers import IngestJobSerializer
class IngestJobViewSet(ShareViewSet, viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing IngestJob records.

    Results are cursor-paginated and ordered by descending id
    (newest jobs first).
    """
    ordering = ('-id', )
    serializer_class = IngestJobSerializer
    pagination_class = CursorPagination

    def get_queryset(self):
        # No filtering: every ingest job is visible through this endpoint.
        return IngestJob.objects.all()
|
# import blender gamengine modules
from bge import logic
from bge import events
from bge import render
from . import bgui
from .settings import *
from .helpers import *
import time
import pdb
import colorsys
import random
class Timeline():
    """Timeline GUI controller for the slide-based animation editor.

    Builds and maintains the bottom-of-screen timeline: play/pause and
    loop buttons, a progress bar with per-slide tick marks, a draggable
    time "train", and one widget per slide (with editable interval
    connectors) inside a horizontal scroller. The widgets always mirror
    the model held in ``logic.mvb``; :py:meth:`viewUpdate` re-synchronises
    view and model after any change.
    """

    def __init__(self, timelineWidget, scrollerWidget, sys):
        '''sets-up timeline GUI'''
        self.root = timelineWidget # alias to main timeline widget
        self.scroller = scrollerWidget # alias to scroller widget
        self.system = sys # alias to gui system
        self.slides = [] # keeps track of slides
        self.connectors = [] # keeps track of connectors
        self.ticks = [] # keeps track of ticks
        self.initUI() # init common timeline widgets
        self.clickedXY = None # mouse position at click time (drag detection)
        self.movingSlide = None # slide widget currently being dragged

    def initUI(self):
        """Create the static timeline widgets (panel, progress bar,
        buttons, train track and labels)."""
        # draw decorator to hold play/pause button, progress bar, playback toggle, and total time of animation
        self.timeLinePanel = bgui.Frame(self.root, 'timeLinePanel', size=[0, 20], sub_theme='op', options=bgui.BGUI_TOP|bgui.BGUI_FILLX)
        # draw progress bar UI
        self.timeLineProgress = bgui.ProgressBar(self.root, 'timeLineProgress', size = [0,16], offset=[90,-2,210,0], options=bgui.BGUI_NO_FOCUS|bgui.BGUI_TOP|bgui.BGUI_LEFT|bgui.BGUI_FILLX)
        # draw Play/Pause button
        self.timeLinePlayBtn = bgui.ImageButton(self.root,'timeLinePlayBtn',size=[64,64],offset=[0,23],sub_theme='timelineProxy',options=bgui.BGUI_TOP|bgui.BGUI_LEFT)
        self.timeLinePlayBtn.on_left_release = self.playToggle
        #draw playback loop toggle button
        self.timeLineLoopBtn = bgui.ImageButton(self.root,'timeLineLoopBtn',size=[40,16],offset=[-120,-4],sub_theme='timelineLoop',options=bgui.BGUI_TOP|bgui.BGUI_RIGHT)
        self.timeLineLoopBtn.on_left_release = self.loopToggle
        self.timeLineLoopBtn.tooltip = 'Toggle looping of animation'
        #display total time of animation
        self.timeLabel = bgui.Label(self.root, 'timeLabel', text='', sub_theme='whiteLabelBold', offset=[-70,-6], options=bgui.BGUI_TOP|bgui.BGUI_RIGHT)
        self.timeLabel.tooltip = 'Animation duration'
        # draw frame widget containter that will hold train ##
        self.timeLineTrainTrack = bgui.Frame(self.system, 'timeLineTrainTrack', size=[0, 20], sub_theme="invisible", options=bgui.BGUI_NO_FOCUS|bgui.BGUI_BOTTOM|bgui.BGUI_LEFT|bgui.BGUI_FILLX)
        self.timeLineTrainTrack.extraPadding = [50,50,25,25]
        # draw Image widget for train which will display time format: MM:SS.S
        self.timeLineTrain = bgui.ImageButton(self.timeLineTrainTrack, 'timeLineTrain',sub_theme="timelineTrain", size=[64, 32],options=bgui.BGUI_CACHE)
        self.timeLineTrain.on_left_click = self.trainMove
        self.timeLineTrain.on_left_release = self.trainMoved
        # compute trainTrack widget's offset relative to half the size of train at beginning and end of track
        self.timeLineTrain.position = [0,-5]
        trainCenter = int(self.timeLineTrain.size[0]/2)
        self.timeLineTrainTrack.offset = [90-trainCenter,130,210+trainCenter,0]
        # draw label widget that displays the current time of the animation in the train widget
        self.timeLineTrainLabel = bgui.Label(self.timeLineTrain, 'timeLineTrainLabel', text='', sub_theme='blackLabel', offset=[-12,-10], options=bgui.BGUI_TOP|bgui.BGUI_RIGHT|bgui.BGUI_NO_FOCUS)

    def playToggle(self, widget=None):
        """invoked by the play button on timeline, starts from or pauses animation at active slide"""
        logic.mvb.playing = not logic.mvb.playing

    def loopToggle(self, widget=None):
        """invoked by the playback loop button on timeline, by default playback loop mode is enabled."""
        logic.mvb.looping = not logic.mvb.looping

    def trainMove(self, widget):
        """registers the mouse handler"""
        # Toggle: a second click while dragging unregisters the handler.
        if self.trainMoving in logic.registeredFunctions:
            logic.registeredFunctions.remove(self.trainMoving)
        else:
            logic.registeredFunctions.append(self.trainMoving)

    def trainMoving(self):
        """ handles the draggable timline label widget"""
        if logic.gui.mouse_state == bgui.BGUI_MOUSE_LEFT_RELEASE:
            # Button released: finish the scrub (falsy return deregisters).
            self.trainMoved(None)
        else:
            # Map the mouse x position on the track to an animation time.
            ratio = (logic.gui.mousePos[0] - self.timeLineTrainTrack.position[0]) / (self.timeLineTrainTrack.size[0])
            logic.mvb.time = ratio * logic.mvb.getTotalTime()
            logic.mvb._scrubbing = True
            logic.mouseLock = 3
            # Truthy return keeps this handler registered while dragging.
            return True

    def trainMoved(self, widget):
        """delete the mouse handler"""
        try:
            logic.mvb._scrubbing = False
            logic.mvb.snap()
            logic.registeredFunctions.remove(self.trainMoving)
            logic.mouseLock = 0
        except:
            pass  # handler may already be deregistered

    # ----------------- slide related functions -----------------
    def slideMove(self, widget=None):
        """Begin dragging a slide widget (registers the move handler)."""
        self.movingSlide = widget
        logic.registeredFunctions.append(self.slideMoving)

    def slideMoving(self):
        """Per-frame drag handler: follows the mouse and drops the slide
        onto a connector to reorder it in the model."""
        if logic.gui.mouse_state == bgui.BGUI_MOUSE_LEFT_RELEASE:
            # end drag and drop
            self.slideMoved()
            return False
        else:
            # set slide visual position
            pos = logic.gui.mousePos[:]
            pos[0] -= 50
            pos[1] -= 25
            pos[1] = self.movingSlide.position[1]
            self.movingSlide.position = pos
            # test for connectorWidget
            padding = 50
            for connector in self.connectors:
                # if mouse if over activation area (connector)
                if (connector.gl_position[0][0] <= logic.gui.mousePos[0] <= connector.gl_position[1][0]) and \
                   (connector.gl_position[0][1]-padding <= logic.gui.mousePos[1] <= connector.gl_position[2][1]+padding):
                    # get target slide form connector widget name
                    name = connector.name[:-9]
                    try:
                        slideWidget = self.scroller.children[name]
                    except:
                        logic.logger.new("Error: Slide not found")
                        self.slideMoved()
                        return False
                    a, indexA = self.getMVBSlide(self.movingSlide) # the slide being dragged
                    b, indexB = self.getMVBSlide(slideWidget) # the slide being displaced
                    if a and b:
                        if a != b:
                            logic.mvb.moveSlide(indexA, indexB)
                        self.slideMoved()
                        return False
                    else:
                        logic.logger.new('Error: Slide not found')
                        self.slideMoved()
                        return False
            return True

    def slideMoved(self, widget=None):
        """End of drag: settle all slides to their supposed position."""
        self.viewUpdate()

    def slideAdd(self, widget=None, silent=False):
        ''' create the slide in the model and becomes active slide'''
        if not silent:
            logic.undo.append('Slide Copied', props='TIMELINE')
        if widget:
            # use widget as parent slide
            slide, index = self.getMVBSlide(widget.parent)
            index += 1
        else:
            # use active slide
            index = logic.mvb.activeSlide + 1
        newSlideIndex = logic.mvb.addSlide(index, silent)
        if not silent:
            self.viewUpdate()
            logic.options.updateView()

    def slideDelete(self, widget=None):
        ''' delete a slide'''
        logic.undo.append('Slide Deleted', props='TIMELINE')
        selectedSlide = None
        if widget:
            # widget is the close button; its parent is the slide widget
            slide, i = self.getMVBSlide(widget.parent)
        else:
            i = logic.mvb.activeSlide
        logic.mvb.deleteSlide(i)
        self.viewUpdate()
        logic.options.updateView()

    def viewUpdate(self, animate = True):
        '''Update the interface'''
        # check for newly added slide
        for slide in logic.mvb.slides:
            if not slide.ui:
                self.widgetAdd(slide)
        # check for newly deleted slide
        for slideWidget in self.slides:
            if slideWidget not in [slide.ui for slide in logic.mvb.slides]:
                self.widgetDelete(slideWidget)
        # at this point, we have the proper number of slides that match the model
        # placement
        # fit scroller
        self.scroller.fitX(len(logic.mvb.slides)*200)
        # animate slides
        xCoord = 20 # initial padding
        for i, slide in enumerate(logic.mvb.slides):
            w = slide.ui
            if animate:
                w.move([xCoord, 15], 200)
            else:
                w.move([xCoord, 15], 0)
            w.text = str(i+1)
            xCoord += 200
        # interval connector
        # kill existing
        for c in self.connectors:
            c.kill()
        self.connectors = []
        # regenerate all connectors
        xCoord = 20 # initial padding
        for i, slide in enumerate(logic.mvb.slides):
            try:
                interval = logic.mvb.slides[i+1].time - slide.time
            except:
                interval = None # last slide: no connector after it
            else:
                name = str(slide.id)
                intervalText = str(round(interval,1))
                connectorWidget = bgui.ImageButton(self.scroller, name+"connector", size=[40,25], pos=[xCoord+160, 46], sub_theme='timelineTrain', options=bgui.BGUI_CACHE)
                connectorWidget.image.color = [1,1,1,0.7]
                interval = bgui.TextInput(connectorWidget, "interval", text=intervalText, size=[0,0], pos=[0,2], centerText=True, sub_theme='slideConnectorNum', options=bgui.BGUI_FILLED)
                interval.tooltip = "Duration in seconds between slides"
                interval.on_mouse_wheel_up = self.onMouseWheelUp
                interval.on_mouse_wheel_down = self.onMouseWheelDown
                interval.on_edit = self.setValue
                interval.on_enter_key = self.setValueFinal
                xCoord += 200
                self.connectors.append(connectorWidget)
                slide.connectorUI = interval
        self.tickMarkUpdate()
        # highlight
        self.slideHighlight(logic.mvb.activeSlide)

    def widgetAdd(self, slide):
        """Create the GUI widget for a newly added model slide."""
        name = str(slide.id) # name of main slide widget is the slide id
        # get prev slide
        prevPosX = 0
        for i, s in enumerate(logic.mvb.slides):
            if slide == s:
                prevSlide = logic.mvb.slides[i-1]
                try:
                    prevPosX = prevSlide.ui.position[0]
                except:
                    pass  # previous slide may have no widget yet
        slideWidget = bgui.FrameButton(self.scroller, name, text='', size=[160, 90], pos=[prevPosX+100,100], sub_theme='slide', radius=6)
        slideWidget.lighter = True
        slideWidget.on_left_click = self.slideLeftClick
        slideWidget.on_left_active = self.slideLeftClickActive
        slideWidget.on_left_release = self.slideLeftRelease
        slideWidget.on_mouse_exit = self.slideLeftRelease
        # progress
        bgui.Frame(slideWidget, 'progress', sub_theme='slideProgress', size=[1,0], pos=[0,0], options=bgui.BGUI_NO_FOCUS|bgui.BGUI_NORMALIZED)
        # capture bg image
        ...
        # bg image
        useImage = False
        if useImage:
            slideWidgetBG = bgui.Image(slideWidget, "bg", img = '', offset=[5,5,5,5], options=bgui.BGUI_NO_FOCUS|bgui.BGUI_LEFT|bgui.BGUI_BOTTOM|bgui.BGUI_FILLED)
            col = list(colorsys.hsv_to_rgb(random.random(), 1.0, 1.0)) # randomize color
            col.append(0.1)
            #col = [1,1,1,0.5]
            slideWidgetBG.color = col
        # close button
        closeWidget = bgui.ImageButton(slideWidget, 'close', size=[32,32], offset=[1,1], sub_theme='slideClose', options=bgui.BGUI_CACHE|bgui.BGUI_TOP|bgui.BGUI_RIGHT)
        closeWidget.on_left_click = self.slideDelete
        closeWidget.tooltip = "Delete this slide"
        # dup button
        dupWidget = bgui.ImageButton(slideWidget, 'dup', size=[32,32], offset=[1,1], sub_theme='slideDup', options=bgui.BGUI_CACHE|bgui.BGUI_BOTTOM|bgui.BGUI_RIGHT)
        dupWidget.on_left_click = self.slideAdd
        dupWidget.tooltip = "Duplicate this slide"
        # capture button
        # captureWidget = bgui.ImageButton(slideWidget, 'capture', size=[16,16], offset=[8,8], sub_theme='slideCapture', options=bgui.BGUI_CACHE|bgui.BGUI_BOTTOM|bgui.BGUI_LEFT)
        # captureWidget.on_left_click = self.slideCapture
        # register widget
        slide.ui = slideWidget
        self.slides.append(slideWidget)

    def widgetDelete(self, slideWidget):
        """Destroy a slide widget and drop it from the tracking list."""
        slideWidget.kill()
        self.slides.remove(slideWidget)

    def slideLeftClick(self, widget=None):
        """Invoked when the slide is left-clicked"""
        slide, index = self.getMVBSlide(widget)
        if slide:
            # activate slide
            logic.mvb.activeSlide = index
            self.slideHighlight(index)
            # move slide
            self.clickedXY = logic.mouse.position

    def slideLeftClickActive(self, widget=None):
        ''' When the slide is being clicked '''
        x, y = logic.mouse.position
        threshold = 0.001
        # Any movement beyond the threshold promotes the click to a drag.
        if self.clickedXY and ((abs(x-self.clickedXY[0]) > threshold) or (abs(y-self.clickedXY[1]) > threshold)):
            self.slideMove(widget)
            self.clickedXY = None

    def slideLeftRelease(self, widget=None):
        """Reset the click-tracking state (also used on mouse exit)."""
        self.clickedXY = None

    def slideRightClick(self, widget=None):
        """invoked with right click on a highlighted slide for choosing option for pop-up menu widget"""
        #create the Menu UI w/its lower left corner at the mouse cursor position
        # NOTE(review): ``selectMenuOption`` is not defined in this module --
        # confirm it is provided elsewhere before wiring this handler up.
        menuItems = ["Duplicate Slide", "Delete Slide", "Capture"]
        self.popUpMenu = logic.gui.showMenu(name="popUpMenu", pos=logic.mouse.position, caller=widget, callback=selectMenuOption, items=menuItems)
        #logic.mvb.deleteSlide(i) # delete from model
        #self.slideAdd()

    # ------------utility functions ---------
    def tickMarkUpdate(self):
        """invoked when user modifies interval value, adds or deletes slide; used for positioning tickmark at a given slide's timestamp"""
        for widget in self.ticks:
            widget.kill()
        self.ticks = []
        for slide in logic.mvb.slides:
            try:
                tickmarkPos = [(self.timeLineProgress.size[0] * (slide.time / logic.mvb.getTotalTime())),0]
            except:
                tickmarkPos = [0,0]  # e.g. zero total time
            tick = bgui.Frame(self.timeLineProgress, "timeLineTickmark"+str(slide.id), size=[2, 16],pos=tickmarkPos)
            self.ticks.append(tick)
        # Re-assigning triggers the model's activeSlide setter side effects.
        logic.mvb.activeSlide = logic.mvb.activeSlide

    def onMouseWheelUp(self, widget):
        """Wheel-up over a connector: bump the interval by +0.1 s."""
        self.increamentValue(widget, 0.1)
        self.scroller.locked = True

    def onMouseWheelDown(self, widget):
        """Wheel-down over a connector: bump the interval by -0.1 s."""
        self.increamentValue(widget, -0.1)
        self.scroller.locked = True

    def setValueFinal(self, widget):
        """Enter pressed in a connector field: commit, resetting bad input."""
        self.setValue(widget, final=True)

    def setValue(self, widget, final=False):
        """Parse a connector text field and store the interval (clamped to
        [0.1, 100]) on the corresponding model slide."""
        try:
            number = float(widget.text)
        except:
            if final:
                print("Cannot parse value, resetting to default")
                widget.text = str(defaultAnimationInterval)
        else:
            if number > 100:
                number = 100
                widget.text = str(round(number,1))
            elif number < 0.1:
                number = 0.1
                widget.text = str(round(number,1))
            # connector widgets are named '<slide id>connector'
            name = widget.parent.name[:-9]
            try:
                slideWidget = self.scroller.children[name]
            except:
                print("slide not found")
            slide, index = self.getMVBSlide(slideWidget)
            if slide:
                slide.interval = round(number, 1)

    def increamentValue(self, widget, value):
        """Adjust a connector's interval by ``value`` seconds, clamped to
        [0.1, 100], and push the result into the model slide."""
        try:
            number = float(widget.text)
        except:
            print("Cannot parse value, resetting to default")
            widget.text = str(defaultAnimationInterval)
        else:
            number += value
            if number > 100:
                number = 100
            elif number < 0.1:
                number = 0.1
            widget.text = str(round(number,1))
            # connector widgets are named '<slide id>connector'
            name = widget.parent.name[:-9]
            try:
                slideWidget = self.scroller.children[name]
            except:
                print("slide not found")
            slide, index = self.getMVBSlide(slideWidget)
            if slide:
                slide.interval = round(number, 1)

    def getMVBSlide(self, widget):
        """Map a slide widget back to its (model slide, index) pair;
        returns (None, None) when the widget is not in the model."""
        for i, slide in enumerate(logic.mvb.slides):
            if widget == slide.ui:
                return slide, i
        print("Error: Slide not found in model")
        return None, None

    def slideHighlight(self, index, fill = None):
        """Give the slide at ``index`` the active border; ``fill`` (0..1)
        optionally sets its progress overlay width."""
        for w in self.slides:
            w.border = 0
            w.children['progress'].size = [1,0]
        slideWidget = logic.mvb.slides[index].ui
        slideWidget.border = 3
        if fill:
            slideWidget.children['progress'].size = [fill, 1]

    def slideCapture(self, widget):
        # Placeholder: capture of a slide thumbnail is not implemented.
        pass
# instantiates the timeline singleton
def init(timelineWidget, scrollerWidget, sys):
    """Module-level factory: build and return the Timeline singleton."""
    return Timeline(timelineWidget, scrollerWidget, sys)
|
# Read two grades, average them, and report the student's status.
nt1 = float(input('Digite a primeira nota: '))
nt2 = float(input('Digite a segunda nota: '))
media = (nt1 + nt2) / 2
# Bug fix: the original branches (media <= 6.9 / media > 7) left every
# average in the interval (6.9, 7.0] -- including exactly 7.0 -- with no
# output at all. Complementary ranges now cover every value. Message
# typos ('VocÊ', 'sue') fixed as well.
if media < 5:
    print('Você foi reprovado, sua média é {:.1f}.'.format(media))
elif media < 7:
    print('Você está em recuperação, sua média é {:.1f}.'.format(media))
else:
    print('Você foi aprovado, sua média é {:.1f}.'.format(media))
from hexadecimal import Hexadecimal
def main():
    """Run every grading section in order and print the total (max 100)."""
    section_tests = (
        test_constructor,         # constructor: worth 25 points
        test_str,                 # overloaded str(): worth 10 points
        test_math_ops,            # +, -, *, /, **, %: worth 20 points
        test_relational_ops,      # >, <, <=, >=, ==, !=: worth 20 points
        test_combo_ops,           # +=, -=, *=, /=, **=, %=: worth 20 points
        test_overloaded_casting,  # int(), float(): worth 5 points
    )
    grade = sum(section() for section in section_tests)
    print(grade)
    return
#If there are no problems with the constructor, this function should return 25
#(the raw subtotal is 12.5, doubled below).
def test_constructor():
    """Grade the Hexadecimal constructor: accepted inputs earn 2-2.5
    points each, correctly rejected inputs earn 1 each; the subtotal is
    doubled to a 25-point scale."""
    grade = 0
    #These should work
    try:
        a = Hexadecimal()
    except Exception as ex:
        print("Constructor doesn't have default arguments.")
    else:
        grade += 2
    try:
        b = Hexadecimal(10)
    except Exception as ex:
        print("Constructor doesn't accept positive ints.")
    else:
        grade += 2
    try:
        c = Hexadecimal(10.1)
    except Exception as ex:
        print("Constructor doesn't accept positive floats.")
    else:
        grade += 2
    try:
        d = Hexadecimal('10')
    except Exception as ex:
        print("Constructor doesn't accept string version of ints")
    else:
        grade += 2.5
    #These shouldn't work
    try:
        e = Hexadecimal([1, 2, 3])
    except Exception as ex:
        grade += 1
    else:
        print("Constructor shouldn't accept lists")
    try:
        e = Hexadecimal('Q')
    except Exception as ex:
        grade += 1
    else:
        print("Constructor shouldn't accept a 'Q'")
    try:
        f = Hexadecimal(-1)
    except Exception as ex:
        grade += 1
    else:
        print("Constructor shouldn't accept negative integers.")
    try:
        g = Hexadecimal(-20.1)
    except Exception as ex:
        grade += 1
    else:
        print("Constructor shouldn't accept negative floats.")
    #Decided that constructor should be worth 20 points, so multiplied by 2
    grade *= 2
    return grade
#If there are no problems with __str__, this function should return 10
def test_str():
    """Grade the overloaded str(): 5 points each for 'AF' and '0'."""
    #As long as there is a constructor and an overloaded str() function,
    #the try block shouldn't be necessary, but I included it, just in case.
    grade = 0
    try:
        x = Hexadecimal('AF')
        if str(x) == "AF":
            grade += 5
        else:
            print("Overloaded str:", str(x), 'should be AF.')
            print("Check the overloaded str(), but also could be in constructor.")
        x = Hexadecimal('0')
        if str(x) == "0":
            grade += 5
        else:
            print("Overloaded str:", str(x), 'should be 0.')
    except Exception as ex:
        print("Crashed when testing str().")
    return grade
#If there are no problems, should return 20
def test_math_ops():
    """Grade the arithmetic operators (+, -, *, /, **, %), including the
    expected exceptions for negative results and division/mod by zero."""
    grade = 0
    try:
        two = Hexadecimal(2)
        ten = Hexadecimal(10)
        zero = Hexadecimal(0)
        five = Hexadecimal(5)
        fifteen = Hexadecimal(15)
        fifty = Hexadecimal(50)
        #Add
        try:
            total = ten + zero
            if str(total) == str(ten):
                grade += 3
            else:
                print("Overloaded + not working correctly.")
            total = ten + five
            if str(total) == str(fifteen):
                grade += 2
            else:
                print("Overloaded + not working correctly.")
        except Exception as ex:
            print("Crashed when trying to add Hexadecimal objects.")
        #Subtract
        try:
            difference = fifteen - five
            if str(difference) == str(ten):
                grade += 3
            else:
                print("Overloaded - not working correctly.")
        except Exception as ex:
            print("Crashed when trying to subtract Hexadecimal objects.")
        # Subtracting past zero must raise (unsigned representation).
        try:
            difference = ten - fifteen
        except Exception as ex:
            grade += 2
        else:
            print("Your class stored a negative in an object as the result of subtracting.")
            print("It should have raised an exception and didn't.")
        #Multiply
        try:
            product = five * ten
            if str(product) == str(fifty):
                grade += 2
            else:
                print("Overloaded * not working correctly.")
            product = five * zero
            if str(product) == str(zero):
                grade += 1
            else:
                print("Overloaded * not working correctly.")
        except Exception as ex:
            print("Crashed when trying to multiply Hexadecimal objects.")
        #Divide
        try:
            quotient = fifty / five
            if str(quotient) == str(ten):
                grade += 2
            else:
                print("Overloaded / not working correctly.")
        except Exception as ex:
            print("Crashed when trying to divide Hexadecimal objects.")
        # Division by zero must raise.
        try:
            quotient = fifty / zero
        except Exception as ex:
            grade += 1
        else:
            print("Should have raised an exception when trying to divide by zero.")
        #Power
        try:
            answer = five ** two
            if str(answer) == str(Hexadecimal(25)):
                grade += 2
            else:
                print("Overloaded ** not working correctly.")
        except Exception as ex:
            print("Crashed when trying to raise a hexadecimal to a power")
        #Mod
        try:
            answer = ten % five
            if str(answer) == str(zero):
                grade += 1
            else:
                print("Overloaded % not working correctly.")
        except Exception as ex:
            print("Crashed when trying to calculate remainder.")
        # Mod by zero must raise.
        try:
            answer = ten % zero
        except Exception as ex:
            grade += 1
        else:
            print("Should have raised an exception when trying to divide by zero -- which is used with % operations.")
    except Exception as ex:
        print("Crashed when testing math operations.")
    return grade
#If all relational operators work correctly, returns 20
def test_relational_ops():
    """Grade the comparison operators (<, <=, >, >=, ==, !=), checking
    both the true and false direction of each."""
    grade = 0
    try:
        one = Hexadecimal(1)
        two = Hexadecimal(2)
        #Testing <
        try:
            if one < two:
                grade += 2
            else:
                print("Trouble with <")
            if two < one:
                print("Trouble with <")
            else:
                grade += 1
        except Exception as ex:
            print("Crashed when testing <")
        #Testing <=
        try:
            if one <= two:
                grade += 2
            else:
                print("Trouble with <=")
            if one <= one:
                grade += 1
            else:
                print("Trouble with <=")
            if two <= one:
                print("Trouble with <=")
            else:
                grade += 1
        except Exception as ex:
            print("Crashed when testing <=")
        #Testing >
        try:
            if one > two:
                print("Trouble with >")
            else:
                grade += 2
            if two > one:
                grade += 1
            else:
                print("Trouble with >")
        except Exception as ex:
            print("Crashed when testing >")
        #Testing >=
        try:
            if two >= one:
                grade += 2
            else:
                print("Trouble with >=")
            if two >= two:
                grade += 1
            else:
                print("Trouble with >=")
            if one >= two:
                print("Trouble with >=")
            else:
                grade += 1
        except Exception as ex:
            print("Crashed when testing >=")
        #Testing ==
        try:
            if one == one:
                grade += 2
            else:
                print("Trouble with ==")
            if one == two:
                print("Trouble with ==")
            else:
                grade += 1
        except Exception as ex:
            print("Crashed when testing ==")
        #Testing !=
        try:
            if one != two:
                grade += 2
            else:
                print("Trouble with !=")
            if one != one:
                print("Trouble with !=")
            else:
                grade += 1
        except Exception as ex:
            print("Crashed when testing !=")
    except Exception as ex:
        print("Crashed when testing Relational operators")
    return grade
#If there are no problems, function returns 20
def test_combo_ops():
    """Grade the augmented-assignment operators (+=, -=, *=, /=, **=, %=)
    on Hexadecimal objects.  A fully working class earns 20 points.

    The class under test is expected to raise on negative results and on
    division/modulo by zero, so several sub-tests award points inside the
    except branch and complain in the else branch.
    """
    grade = 0
    try:
        one = Hexadecimal(1)
        two = Hexadecimal(2)
        three = Hexadecimal(3)
        four = Hexadecimal(4)
        # Testing +=
        try:
            one += two
            if str(one) == str(three):
                grade += 3
            else:
                print("Trouble with +=")
            # resetting one for the next sub-test
            one = Hexadecimal(1)
        except Exception as ex:
            print("Crashed when testing +=")
        # Testing -=  (1 - 2 would be negative: the class must raise)
        try:
            one -= two
        except Exception as ex:
            grade += 1
        else:
            print("Your class stored a negative in an object as the result of -=.")
            print("It should have raised an exception and didn't.")
        one = Hexadecimal(1)
        try:
            two -= one
            if str(two) == str(one):
                grade += 3
            else:
                print("Trouble when testing -=")
                print(two, one)
            two = Hexadecimal(2)
        except Exception as ex:
            print("Crashed when testing -=")
        # Testing *=
        try:
            two *= two
            if str(two) == str(four):
                grade += 3
            else:
                print("Trouble when testing *=")
            two = Hexadecimal(2)
        except Exception as ex:
            print("Crashed when testing *=")
        # Testing /=
        try:
            four /= two
            if str(four) == str(two):
                grade += 3
            else:
                print("Trouble when testing /=")
            four = Hexadecimal(4)
        except Exception as ex:
            print("Crashed when testing /=")
        # Division by zero must raise ZeroDivisionError specifically.
        try:
            zero = Hexadecimal(0)
            four /= zero
        except ZeroDivisionError as ex:
            grade += 1
        else:
            print("Division by 0 is undefined, you should have raised an exception.")
        # Testing **=  (2 ** 2 should equal the value held in four)
        try:
            two **= two
            if str(four) == str(two):
                grade += 3
            else:
                print("Trouble when testing **=")
            two = Hexadecimal(2)
        except Exception as ex:
            print("Crashed when testing **=")
        # Testing %=
        try:
            four %= three
            if str(four) == str(one):
                grade += 2
            else:
                print("Trouble when testing %=")
            four = Hexadecimal(4)
        except Exception as ex:
            print("Crashed when testing %=")
        # Modulo by zero must raise as well.
        try:
            four %= zero
        except Exception as ex:
            grade += 1
        else:
            print("Your class should have caused a crash when trying to % by 0, but didn't.")
            four = Hexadecimal(4)
    except Exception as ex:
        print("Crashed when testing combination operators")
    return grade
#When there are no problems, the function returns 5
def test_overloaded_casting():
    """Grade int() and float() conversions of Hexadecimal; full credit is 5."""
    grade = 0
    try:
        ten = Hexadecimal(10)
        as_int = int(ten)
        # Require a genuine int, not merely something equal to 10.
        if type(as_int) is int and as_int == 10:
            grade += 3
        else:
            print("Can't cast Hexadecimal objects as ints correctly.")
    except Exception as ex:
        print("Crashed when casting Hexadecimal objects as ints.")
    try:
        ten = Hexadecimal(10)
        as_float = float(ten)
        if type(as_float) is float and as_float == 10.0:
            grade += 2
        else:
            print("Can't cast Hexadecimal objects as floats correctly.")
    except Exception as ex:
        print("Crashed when casting Hexadecimal object as ints.")
    return grade
main()
|
import re,string,random
# from django.apps import apps
# from django.db.models import Max
from cl_table.models import Fmspw, Securitylevellist
def code_generator(size=4, chars=string.ascii_letters + string.digits):
    """Return a random code of *size* characters drawn (with repetition)
    from *chars*; defaults to 4 alphanumeric characters."""
    return ''.join(random.choice(chars) for _ in range(size))
# def create_temp_diagnosis_code():
# code = code_generator()
# Diagnosis = apps.get_model(app_label='cl_table', model_name='Diagnosis')
# qs = Diagnosis.objects.filter(diagnosis_code=code).exists()
# if qs:
# return create_temp_diagnosis_code()
# return code
#
# def get_next_diagnosis_code():
# Diagnosis = apps.get_model(app_label='cl_table', model_name='Diagnosis')
# curr_pk = Diagnosis.objects.all().aggregate(Max('sys_code'))['sys_code__max']
# return "%6d" % curr_pk + 1
from cl_table.models import Fmspw, Securitylevellist
class PermissionValidator:
    """Checks whether an authenticated user holds a set of named control
    permissions (rows of Securitylevellist matched by controlname)."""

    def __init__(self, auth_user, permissions: list, nested=False):
        """
        :param auth_user: request.user
        :param permissions: permission names, e.g. ['mnuEmpDtl', 'mnuCustomer',
            'mnuDiagnosis'] (from Securitycontrollist.controlname)
        :param nested: TODO if nested is true control parent permissions considered.
        """
        self.auth_user = auth_user
        self.permissions = permissions
        self.nested = nested

    def is_allow(self):
        """
        :return: True if the user has at least one of the requested
            permissions, else False.

        Side effects: sets self.no_permission (names requested but not
        granted) and, on failure only, self.error.
        """
        self.no_permission = None
        # The user's active password/security record; without it we cannot
        # resolve a security level.
        fmspw = Fmspw.objects.filter(user=self.auth_user, pw_isactive=True).first()
        if not fmspw:
            self.error = "fmspw object doesn't exists"
            return False
        user_level = fmspw.LEVEL_ItmIDid
        self.sec_level_qs = Securitylevellist.objects.filter(level_itemid=user_level.level_code,
                                                             controlstatus=True,
                                                             controlname__in=self.permissions)
        # Informational: requested names with no matching active row.
        self.no_permission = set(self.permissions) - set(self.sec_level_qs.values_list('controlname', flat=True))
        # self.no_permission_qs = self.sec_level_qs.exclude(controlname__in=self.permissions)
        # if self.sec_level_qs.count() > self.no_permission_qs.count():
        #     return True
        # NOTE(review): this passes when ANY requested permission matches,
        # not when ALL do -- the commented code above suggests a stricter
        # rule may have been intended; confirm.  Also note self.error is
        # only assigned on failure paths.
        if self.sec_level_qs.exists():
            return True
        self.error = "user hasn't any permissions"
        return False
|
from django.db import models
from patients.models import Patient
from django.contrib.auth.models import User
class Lab(models.Model):
    """A laboratory; referenced by LabTest.lab."""
    name = models.CharField(max_length=30)
    category = models.CharField(max_length=30)
class Diagnosis(models.Model):
    """A doctor's diagnosis entry for a patient visit."""
    # Stored value and display label are identical for both choices.
    VISIT_TYPES = (
        ('Inpatient', 'Inpatient'),
        ('Outpatient', 'Outpatient')
    )
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    timestamp = models.DateTimeField(auto_now_add=True)  # set once, at creation
    doctor = models.ForeignKey(User, on_delete=models.CASCADE)
    possible_diagnosis = models.TextField()
    visit_type = models.CharField(max_length=30, choices=VISIT_TYPES)

    def __str__(self):
        return('Diagnosis for {}'.format(self.patient))
class LabTest(models.Model):
    """A test ordered for a patient, optionally assigned to a Lab."""
    patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
    specimen = models.CharField(max_length=30)
    test = models.CharField(max_length=60)
    # NOTE(review): unit of 'period' (hours? days?) is not evident here.
    period = models.PositiveIntegerField()
    # BUG FIX: on_delete is a required ForeignKey argument since Django 2.0.
    # SET_NULL is safe because the field already allows null.
    lab = models.ForeignKey(Lab, blank=True, null=True, on_delete=models.SET_NULL)

    def __str__(self):
        return('Test {}'.format(self.test))
class LabResult(models.Model):
    """Free-text result recorded against a LabTest."""
    # BUG FIX: on_delete is a required ForeignKey argument since Django 2.0;
    # CASCADE mirrors the other relations in this module.
    lab_test = models.ForeignKey(LabTest, on_delete=models.CASCADE)
    result = models.TextField()

    def __str__(self):
        return(self.result)
|
#!/usr/bin/python
## Author: Omid Shams Solari
## E-mail: omid@genapsys.com
## Date: 03/16/2014
import numpy as np
from Bio import Seq, SeqIO
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
def createRecord(seq, parentRec):
    """ inputs: seq <Seq object>
                parentRec <record object>
        returns: a new SeqRecord holding *seq* whose id (stringified),
                 name, description and dbxrefs are copied from parentRec. """
    record = SeqRecord(seq)
    record.id = str(parentRec.id)
    # Copy the remaining annotation fields verbatim.
    for attr in ("name", "description", "dbxrefs"):
        setattr(record, attr, getattr(parentRec, attr))
    return record
def findORF(proteinRec, table = 11, secondLongestRatio = .5, minLen = 10):
    """ inputs: proteinRec <SeqRecord object> nucleotide record to scan
                table <int> translation table number, default value = 11
                secondLongestRatio <float> ratio of the second longest ORF, default value = .5
                minLen <int> minimum peptide length (exclusive) to keep a candidate
        returns: [record, 1] for the longest ORF; if the second longest is
                 more than secondLongestRatio of the longest, returns
                 [[longest_rec, second_rec], count]; with no candidates,
                 returns [[], 0]. """
    L = []   # candidate peptides, each running from an "M" to the next stop
    c = 0    # number of candidates collected
    # Scan all six reading frames: three forward, three on the reverse
    # complement.
    for SEQ in [proteinRec.seq, proteinRec.seq.reverse_complement()]:
        for frame in range(3):
            # Trim to a whole number of codons so translate() sees
            # complete triplets.
            length = 3 * ((len(proteinRec)-frame) // 3)
            for pro in SEQ[frame:frame+length].translate(table).split("*"):
                if "M" in pro:
                    # Candidate starts at the first methionine.
                    tmp = pro[pro.find("M"):]
                    if len(tmp) > minLen:
                        L.append(tmp)
                        c = c + 1
    L.sort(key = len)   # ascending: the longest candidates end up last
    #print len(L)
    #print c
    if c > 1:
        if len(L[-2])>secondLongestRatio*len(L[-1]):
            return [[createRecord(L[-1],proteinRec),createRecord(L[-2],proteinRec)],c]
        else:
            return [createRecord(L[-1],proteinRec),1]
    elif c == 1:
        return [createRecord(L[-1],proteinRec),1]
    else:
        return [L,c]
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
    name='vcs_learning',
    version='0.1',
    description="Learning to develop a Version Control System",
    long_description_markdown_filename='README.md',
    url='https://github.com/ShubhankarKG/VCS_learnings.git',
    license='AGPL-3.0-or-later',
    keywords='git version-control',
    # BUG FIX: include= must be an iterable of patterns.  A bare string is
    # iterated character by character ('v', 'c', 's'), so no package ever
    # matched.  'vcs.*' also picks up subpackages.
    packages=find_packages(include=['vcs', 'vcs.*']),
    classifiers=[
        # Valid trove classifiers (the previous free-form strings are not
        # recognized by PyPI).
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.7',
    ],
    include_package_data=True,
    py_modules=['vcs'],
    # install_requires=install_requires,
    # extras_require=deps,
    # setup_requires=['setuptools-markdown'],
    # zip_safe=False,
)
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution1(object):
    def getIntersectionNode(self, headA, headB):
        """
        :type head1, head1: ListNode
        :rtype: ListNode
        """
        # Strategy 1: walk both lists in lockstep, stamping each node with
        # a 'visited' attribute.  The first node reached that already
        # carries the stamp is shared by both lists.
        # NOTE: mutates the input nodes; returns None for disjoint lists.
        cursor_a, cursor_b = headA, headB
        while cursor_a is not None or cursor_b is not None:
            if cursor_a is not None:
                if hasattr(cursor_a, 'visited'):
                    return cursor_a
                cursor_a.visited = True
                cursor_a = cursor_a.next
            if cursor_b is not None:
                if hasattr(cursor_b, 'visited'):
                    return cursor_b
                cursor_b.visited = True
                cursor_b = cursor_b.next
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """
        :type head1, head1: ListNode
        :rtype: ListNode

        Two-pointer technique: advance both pointers one step at a time;
        when a pointer falls off the end of its list, restart it at the
        head of the OTHER list.  After at most len(A) + len(B) steps both
        pointers have traveled the same distance, so they meet at the
        first shared node, or at None when the lists are disjoint.

        BUG FIX: the previous version stopped after a single pass and
        returned whatever node_a happened to be, which was wrong whenever
        the two lists had different lengths.
        """
        if not headA or not headB:
            return None
        node_a, node_b = headA, headB
        while node_a is not node_b:
            node_a = node_a.next if node_a else headB
            node_b = node_b.next if node_b else headA
        return node_a
|
from solvent import config
from solvent import label
from solvent import run
from upseto import gitwrapper
class ThisProjectLabel:
    """Resolves the osmosis label for the current checkout and verifies it
    exists in at least one configured object store."""

    def __init__(self, product):
        wrapper = gitwrapper.GitWrapper('.')
        commit_hash = wrapper.hash()
        repo_basename = wrapper.originURLBasename()
        # Build state reflects the configured build mode.
        if config.OFFICIAL_BUILD:
            build_state = 'officialcandidate'
        elif config.CLEAN:
            build_state = 'cleancandidate'
        else:
            build_state = 'dirty'
        self._label = label.label(repo_basename, product, commit_hash, build_state)
        self._makeSureExists()

    def _makeSureExists(self):
        # Prefer the local store; fall back to the official one when enabled.
        if self._exists(config.LOCAL_OSMOSIS):
            return
        if config.WITH_OFFICIAL_OBJECT_STORE:
            if self._exists(config.OFFICIAL_OSMOSIS):
                return
        raise Exception("Label '%s' does not exists in any of the object stores" % self._label)

    def _exists(self, objectStore):
        # Exact-match query: anchor the label with ^...$.
        listing = run.run(["osmosis", "listlabels", "^%s$" % self._label,
                           "--objectStores=" + objectStore])
        return self._label in listing

    def label(self):
        return self._label
|
from kaffe.tensorflow import Network
class MobileNetYOLO(Network):
def setup(self):
(self.feed('data')
.conv(3, 3, 32, 2, 2, biased=False, relu=False, name='conv0')
.batch_normalization(relu=True, name='conv0_bn')
.conv(3, 3, 32, 1, 1, biased=False, group=32, relu=False, name='conv1_dw')
.batch_normalization(relu=True, name='conv1_dw_bn')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='conv1')
.batch_normalization(relu=True, name='conv1_bn')
.conv(3, 3, 64, 2, 2, biased=False, group=64, relu=False, name='conv2_dw')
.batch_normalization(relu=True, name='conv2_dw_bn')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='conv2')
.batch_normalization(relu=True, name='conv2_bn')
.conv(3, 3, 128, 1, 1, biased=False, group=128, relu=False, name='conv3_dw')
.batch_normalization(relu=True, name='conv3_dw_bn')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='conv3')
.batch_normalization(relu=True, name='conv3_bn')
.conv(3, 3, 128, 2, 2, biased=False, group=128, relu=False, name='conv4_dw')
.batch_normalization(relu=True, name='conv4_dw_bn')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='conv4')
.batch_normalization(relu=True, name='conv4_bn')
.conv(3, 3, 256, 1, 1, biased=False, group=256, relu=False, name='conv5_dw')
.batch_normalization(relu=True, name='conv5_dw_bn')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='conv5')
.batch_normalization(relu=True, name='conv5_bn')
.conv(3, 3, 256, 2, 2, biased=False, group=256, relu=False, name='conv6_dw')
.batch_normalization(relu=True, name='conv6_dw_bn')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='conv6')
.batch_normalization(relu=True, name='conv6_bn')
.conv(3, 3, 512, 1, 1, biased=False, group=512, relu=False, name='conv7_dw')
.batch_normalization(relu=True, name='conv7_dw_bn')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='conv7')
.batch_normalization(relu=True, name='conv7_bn')
.conv(3, 3, 512, 1, 1, biased=False, group=512, relu=False, name='conv8_dw')
.batch_normalization(relu=True, name='conv8_dw_bn')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='conv8')
.batch_normalization(relu=True, name='conv8_bn')
.conv(3, 3, 512, 1, 1, biased=False, group=512, relu=False, name='conv9_dw')
.batch_normalization(relu=True, name='conv9_dw_bn')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='conv9')
.batch_normalization(relu=True, name='conv9_bn')
.conv(3, 3, 512, 1, 1, biased=False, group=512, relu=False, name='conv10_dw')
.batch_normalization(relu=True, name='conv10_dw_bn')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='conv10')
.batch_normalization(relu=True, name='conv10_bn')
.conv(3, 3, 512, 1, 1, biased=False, group=512, relu=False, name='conv11_dw')
.batch_normalization(relu=True, name='conv11_dw_bn')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='conv11')
.batch_normalization(relu=True, name='conv11_bn')
.conv(3, 3, 512, 2, 2, biased=False, group=512, relu=False, name='conv12_dw')
.batch_normalization(relu=True, name='conv12_dw_bn')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='conv12')
.batch_normalization(relu=True, name='conv12_bn')
.conv(3, 3, 1024, 1, 1, biased=False, group=1024, relu=False, name='conv13_dw')
.batch_normalization(relu=True, name='conv13_dw_bn')
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='conv13')
.batch_normalization(relu=True, name='conv13_bn')
.conv(3, 3, 1024, 1, 1, biased=False, group=1024, relu=False, name='conv15_dw')
.batch_normalization(relu=True, name='conv15_dw_bn')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='conv15')
.batch_normalization(relu=True, name='conv15_bn')
.upsample(name='upsample'))
(self.feed('conv11_bn')
.conv(3, 3, 512, 1, 1, biased=False, group=512, relu=False, name='conv17_dw')
.batch_normalization(relu=True, name='conv17_dw_bn')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='conv17')
.batch_normalization(relu=True, name='conv17_bn'))
(self.feed('upsample',
'conv17_bn')
.add(name='conv17_sum')
.conv(3, 3, 512, 1, 1, biased=False, group=512, relu=False, name='conv18_dw')
.batch_normalization(relu=True, name='conv18_dw_bn')
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='conv18')
.batch_normalization(relu=True, name='conv18_bn')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='conv19')
.batch_normalization(relu=True, name='conv19_bn')
.upsample(name='upsample3'))
(self.feed('conv15_bn')
.conv(1, 1, 45, 1, 1, relu=False, name='conv22'))
(self.feed('conv18_bn')
.conv(1, 1, 45, 1, 1, relu=False, name='conv23'))
(self.feed('upsample3',
'conv5_bn')
.add(name='upsample3_sum')
.conv(3, 3, 256, 1, 1, biased=False, group=256, relu=False, name='conv25_dw')
.batch_normalization(relu=True, name='conv25_dw_bn')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='conv25')
.batch_normalization(relu=True, name='conv25_bn')
.conv(1, 1, 1, 1, 1, relu=False, name='conv26')) |
## @package test_tiles
## Unit tests for tiles (under constrcution).
from unittest import TestCase
from unittest import TestSuite
from enhanced_grid import Grid2D
from tiles import energy2
from tiles import energy3
from tiles import calc_acc_energy
from tiles import find_min_seam
from tiles import quilt
from tiles import ENERGY_MAX
from image import greyscale_grid_to_rgb_grid
from image import grid_to_rgb_image
def add_colors(obj):
    """Attach a fixed palette of RGBA tuples to *obj* as attributes."""
    palette = {
        'white': (1, 1, 1, 1),
        'black': (0, 0, 0, 1),
        'grey': (0.5, 0.5, 0.5, 1),
        'brown': (0.7, 0.3, 0.1, 1),
        'washblue': (0.1, 0.3, 0.7, 1),
        'red': (1, 0, 0, 1),
        'green': (0, 1, 0, 1),
        'blue': (0, 0, 1, 1),
        'darkred': (0.5, 0, 0, 1),
        'darkgreen': (0, 0.5, 0, 1),
        'darkblue': (0, 0, 0.5, 1),
    }
    for color_name, rgba in palette.items():
        setattr(obj, color_name, rgba)
def make_checker_grid(col_obj):
    """Return a 50x30 Grid2D checkerboard: black where x+y is even,
    white elsewhere."""
    board = Grid2D((50, 30), col_obj.white)
    for position in board.index_iter():
        col, row = position
        if (col + row) % 2 == 0:
            board[position] = col_obj.black
    return board
def make_half_checker_grids(col_obj):
    """Build two 50x30 grids that are each checkered (black/grey) on one
    half only: the first for x >= 25, the second for x < 25.  Column
    x == 25 stays white in both, forming a zero-energy seam."""
    grid_right = Grid2D((50, 30), col_obj.white)  # painted where x >= 25
    grid_left = Grid2D((50, 30), col_obj.white)   # painted where x < 25
    for position in grid_right.index_iter():
        x, y = position
        if x == 25:
            continue  # keep the seam column white in both grids
        target = grid_left if x < 25 else grid_right
        target[position] = col_obj.black if (x + y) % 2 == 0 else col_obj.grey
    return grid_right, grid_left
class TestEnergy2(TestCase):
    """Pairwise color-energy function: bounds, symmetry, zero, and
    per-channel equivalence."""

    def setUp(self):
        add_colors(self)

    def testMax(self):
        # Black vs white differ by 1 in each of R, G, B: energy 3.
        self.assertAlmostEqual(energy2(self.black, self.white), 3)
        self.assertAlmostEqual(energy2(self.white, self.black), 3)

    def testSymmetry(self):
        self.assertAlmostEqual(energy2(self.washblue, self.brown), energy2(self.brown, self.washblue))

    def testZeroEnergy(self):
        # Identical colors must have zero energy.
        self.assertAlmostEqual(energy2(self.brown, self.brown), 0)

    def testChannelEquivalence(self):
        # The same per-channel delta should cost the same regardless of
        # which channel carries it.
        self.assertAlmostEqual(energy2(self.red, self.darkred),
                               energy2(self.blue, self.darkblue))
        self.assertAlmostEqual(energy2(self.red, self.darkred),
                               energy2(self.green, self.darkgreen))
class TestEnergy3(TestCase):
    """Three-color energy function: bounds, symmetry in the last two
    arguments, zero, channel equivalence, and linear scaling."""

    def setUp(self):
        add_colors(self)

    def testMax(self):
        # Maximal disagreement across both comparisons: 2 * 3 = 6.
        self.assertAlmostEqual(energy3(self.white, self.black, self.black), 6)
        self.assertAlmostEqual(energy3(self.black, self.white, self.white), 6)

    def testSymmetry(self):
        # Swapping the two comparison colors must not change the energy.
        self.assertAlmostEqual(energy3(self.black, self.washblue, self.brown),
                               energy3(self.black, self.brown, self.washblue))
        self.assertAlmostEqual(energy3(self.black, self.brown, self.washblue),
                               energy3(self.black, self.washblue, self.brown))
        self.assertAlmostEqual(energy3(self.brown, self.washblue, self.black),
                               energy3(self.washblue, self.brown, self.black))
        self.assertAlmostEqual(energy3(self.washblue, self.brown, self.black),
                               energy3(self.brown, self.washblue, self.black))

    def testZero(self):
        self.assertAlmostEqual(energy3(self.brown, self.brown, self.brown), 0)

    def testChannelEquivalence(self):
        self.assertAlmostEqual(energy3(self.red, self.darkred, self.black),
                               energy3(self.blue, self.darkblue, self.black))
        self.assertAlmostEqual(energy3(self.red, self.darkred, self.black),
                               energy3(self.green, self.darkgreen, self.black))

    def testDouble(self):
        # One mismatching comparison costs exactly half of two.
        self.assertAlmostEqual(2*energy3(self.brown, self.washblue, self.brown),
                               1*energy3(self.brown, self.washblue, self.washblue))
        self.assertAlmostEqual(2*energy3(self.washblue, self.washblue, self.brown),
                               1*energy3(self.washblue, self.brown, self.brown))
class TestCalcAccEnergy(TestCase):
    """Accumulated-energy grid used by the seam finder."""

    def setUp(self):
        add_colors(self)
        self.checker_grid = make_checker_grid(self)
        self.checker_grid1, self.checker_grid2 = make_half_checker_grids(self)

    def testZero(self):
        # Identical uniform grids: zero energy everywhere except the
        # first column, which is pinned to ENERGY_MAX.
        washblue_grid = Grid2D((50, 30), self.washblue)
        acc_energy = calc_acc_energy(washblue_grid, washblue_grid)
        for index in acc_energy.index_iter():
            x, y = index
            if (x > 0):
                self.assertAlmostEqual(acc_energy[index], 0)
            else:
                self.assertAlmostEqual(acc_energy[index], ENERGY_MAX)

    def testGrowing(self):
        # Accumulated energy is never below the cheapest of the three
        # predecessor cells in the previous row.
        acc_energy = calc_acc_energy(self.checker_grid, self.checker_grid)
        w, h = self.checker_grid.dims
        for i in range(1, w-1):
            for j in range(1, h):
                self.assertTrue(acc_energy[i, j] >= min(acc_energy[i, j-1], acc_energy[i-1, j-1], acc_energy[i+1, j-1]))

    def testAccEnergy(self):
        acc_energy = calc_acc_energy(self.checker_grid1, self.checker_grid2)
        # BUG FIX: was a Python 2 print statement ("print acc_energy[:,0]"),
        # a syntax error under Python 3; the call form works on both.
        print(acc_energy[:, 0])
class TestFindMinSeam(TestCase):
    """The cheapest seam between the two half-checker grids must run
    straight down the white column at x == 25."""

    def setUp(self):
        add_colors(self)
        self.checker_grid = make_checker_grid(self)
        self.checker_grid1, self.checker_grid2 = make_half_checker_grids(self)

    def testSeam(self):
        seam = find_min_seam(self.checker_grid1, self.checker_grid2)
        for item in seam:
            self.assertEqual(item, 25)
|
def read_file(file_name):
    """Read alternating count/title line pairs from *file_name*.

    Returns a dict mapping int count -> sorted list of titles, or None
    when the file cannot be opened (the caller in main() checks for
    None, which the previous version could never return).
    Blank count lines and their pair are skipped.  Echoes each pair.
    """
    file_dic = {}
    try:
        with open(file_name, 'r') as input_file:
            lines = input_file.readlines()
    except OSError:
        return None
    for index in range(0, len(lines) - 1, 2):
        if lines[index].strip() == '':
            continue
        cnt = int(lines[index].strip())
        name = lines[index + 1].strip()
        # setdefault replaces the explicit membership test / branch.
        file_dic.setdefault(cnt, []).append(name)
        print(cnt, name)
    # Sort once at the end instead of after every insertion.
    for name_list in file_dic.values():
        name_list.sort()
    return file_dic
def output_keys(file_dic, file_name):
    """Write one 'count:title; title' line per key (keys ascending) to
    *file_name*, echoing each key's titles to stdout as it goes."""
    ordered_keys = sorted(file_dic.keys())
    with open(file_name, 'w') as outfile:
        for key in ordered_keys:
            titles = file_dic.get(key)
            outfile.write('{}:{}\n'.format(key, '; '.join(titles)))
            print('{}: {}\n'.format(key, ';'.join(titles)))
def output_titles(file_dic, file_name):
    """Write every title from *file_dic* to *file_name*, one per line in
    alphabetical order, echoing each to stdout."""
    all_titles = [t for group in file_dic.values() for t in group]
    with open(file_name, 'w') as out_file:
        for title in sorted(all_titles):
            out_file.write('{}\n'.format(title))
            print(title)
def main():
    """Driver: load the count/title file and emit both report files."""
    source_path = '/Users/wayne/PycharmProjects/pythonProject1/venv/file1'
    dic = read_file(source_path)
    if dic is None:
        print('Error: Invalid file name provided.')
        return
    print(dic)
    output_keys(dic, 'output_key.txt')
    output_titles(dic, 'output_titles.txt')
main()
|
# Practice: complete all six files -- 19_1 through 19_5 plus the
# EarlyStopping variant.
import numpy as np
from sklearn.datasets import load_diabetes

# Load the diabetes regression dataset and peek at it.
dataset = load_diabetes()
x = dataset.data
y = dataset.target
print(x[:5])
print(y[:10])
print(x.shape, y.shape)
print(np.max(x), np.min(x))
# print(dataset.feature_names)
# print(dataset.DESCR)

# 80/20 train/test split with a fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.2, random_state=45)

# Small fully-connected regression network: 10 inputs -> 1 output.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(10,)))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1))

model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.fit(x_train, y_train, epochs=100, batch_size=1, validation_split=0.2, verbose=2)

# NOTE(review): metrics=['mae'], so the second value returned here is MAE
# even though the variable and the label below say MSE -- confirm intent.
loss, mse = model.evaluate(x_test, y_test)
print("loss :", loss)
print('MSE :', mse)

y_predict = model.predict(x_test)

from sklearn.metrics import mean_squared_error, r2_score
def rmse(y_test, y_predict):
    # Root mean squared error, in the target's original units.
    return np.sqrt(mean_squared_error(y_test, y_predict))
print('RMSE :', rmse(y_test, y_predict))
print('MSE :', mean_squared_error(y_test, y_predict))
print('R2 :', r2_score(y_test, y_predict))

# Result
# loss : 2566.899658203125
# MSE : 40.038841247558594
# RMSE : 50.66457864761847
# MSE : 2566.8995295407176
# R2 : 0.47978546948585854
#!/usr/bin/python
from plumbum import cli
import socket
import sys
import time
import struct
kMagicNumber = 0x53545259
kPing = 1
kGetStats = 2
kResetStats = 3
kCompress = 4
kHdrSize = 8
kStatSize = 9
class Application(cli.Application):
    """Command-line client for a small TCP compression server.

    Protocol: every message starts with an 8-byte big-endian header
    (magic:int32, payload_length:int16, packet_type:int16), optionally
    followed by a payload.
    """

    port = cli.SwitchAttr(["p"], int, default=4000)         # -p server port
    host = cli.SwitchAttr(["h"], str, default="127.0.0.1")  # -h server address

    def sendHdr(self, packetType, payload = 0):
        # "!ihh" = big-endian int32 + 2 x int16 = 8 bytes = kHdrSize.
        data = struct.pack("!ihh", kMagicNumber, payload, packetType)
        self.s.send(data)

    def receiveResp(self):
        # Returns the unpacked (magic, payload_length, packet_type) tuple.
        data = self.s.recv(kHdrSize)
        resp = struct.unpack("!ihh", data)
        return resp

    def receiveStats(self):
        # "!iib" = 2 x int32 + 1 byte = 9 bytes = kStatSize.
        data = self.s.recv(kStatSize)
        return struct.unpack("!iib", data)

    def main(self, action = None):
        # Dispatch on the positional action; with no (or unknown) action,
        # run everything except reset.
        if action == "stats":
            self.getStats()
        elif action == "ping":
            self.ping()
        elif action == "compress":
            self.compress()
        elif action == "reset":
            self.reset()
        else:
            self.getStats()
            self.ping()
            self.compress()

    def connect(f):
        # Decorator (applied below at class-definition time, hence no
        # 'self' parameter): opens a TCP connection, stores it on self.s,
        # then runs the wrapped method; exits the process when the
        # connection cannot be established.
        def wrap(self):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                print("Connecting to {0}".format(self.port))
                s.connect((self.host, self.port))
                print("connected")
                self.s = s
                f(self)
            except socket.error as err:
                print("Error connecting to remote host: {0}".format(err))
                sys.exit(1)
        return wrap

    @connect
    def getStats(self):
        self.sendHdr(kGetStats)
        print (self.receiveStats())

    @connect
    def ping(self):
        self.sendHdr(kPing)
        print (self.receiveResp())

    @connect
    def reset(self):
        self.sendHdr(kResetStats)
        print (self.receiveResp())

    @connect
    def compress(self):
        # NOTE(review): pl is a str; under Python 3, socket.send requires
        # bytes, so this path assumes Python 2 -- confirm target interpreter.
        pl = 1003 * "a" + 40 * "b" + 20002 * "f" + "cc"
        self.sendHdr(kCompress, len(pl))
        self.s.send(pl)
        resp = self.receiveResp()
        print(resp)
        # resp[1] is the server-reported payload length.
        data = self.s.recv(resp[1])
        print(data)
if __name__ == "__main__":
Application.run()
|
#coding:utf-8
import os
print(os.path.exists('AutoEncoder.py'))  # True if 'AutoEncoder.py' exists in the current working directory
#/bin/env python3
# ==============================================================================
# Copyright (c) Moises Martinez by Fictizia. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import imageio
from imgaug import augmenters as iaa
import numpy
import os
numpy.random.bit_generator = numpy.random._bit_generator
class Augmentations:
    """Registry of imgaug filters (rotations, scalings, greyscale, flips)
    keyed by a descriptive name."""

    def __init__(self, rotations=(15,), scales=(25,)):
        """
        :param rotations: iterable of rotation angles (degrees); one
            Affine filter is registered per angle.
        :param scales: iterable of scale factors; one Affine filter is
            registered per factor.

        Defaults are tuples rather than the previous mutable-list
        defaults (shared across calls); callers may still pass lists.
        """
        self.__output = '../data_augmented/'
        self.__filters = {}
        for angle in rotations:
            self.__filters['rotation_' + str(angle)] = iaa.Affine(rotate=angle)
        for factor in scales:
            self.__filters['scale_' + str(factor)] = iaa.Affine(scale=factor)
        self.__filters['grey'] = iaa.Grayscale(alpha=1.0)
        self.__filters['half_grey'] = iaa.Grayscale(alpha=0.5)
        self.__filters['flip_h'] = iaa.Fliplr(1.0)
        self.__filters['flip_v'] = iaa.Flipud(1.0)

    @property
    def output_folder(self):
        """Directory where augmented images are written."""
        return self.__output

    def get_filters(self):
        """Return (name, filter) pairs for all registered augmentations."""
        return self.__filters.items()
folder = '../data/'
# Rotation angles (degrees) and scale factors for the augmentation set.
rots = Augmentations([15, 30, 45, 60, 75, 90], [0.25, 0.50, 0.75])
for base, dirs, files in os.walk(folder):
    for file in files:
        # BUG FIX: use os.path.join -- 'base + file' drops the path
        # separator for subdirectories yielded by os.walk.
        image = imageio.imread(os.path.join(base, file))
        # splitext handles filenames containing extra dots; ext keeps its dot.
        name, ext = os.path.splitext(file)
        for tag, aug in rots.get_filters():   # renamed: 'id'/'filter' shadowed builtins
            image_augmented = aug.augment_images([image])[0]
            imageio.imwrite(rots.output_folder + name + '_' + tag + ext, image_augmented)
# Removed the trailing exit(1): it made a successful run report failure.
|
import tflearn
# BUG FIX: the tflearn API is pad_sequences (plural); pad_sequence does
# not exist and made this script fail at import.
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb

# IMDB dataset loading: keep the 10,000 most frequent words; hold out 10%
# of the training data for validation.
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000, valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing.
# Zero-pad every review to exactly 100 tokens so batches are rectangular.
# BUG FIX: 'textX' was a typo that left the real test set unpadded, and
# the pad value was inconsistently 0.1 instead of 0.
trainX = pad_sequences(trainX, maxlen=100, value=0.)
testX = pad_sequences(testX, maxlen=100, value=0.)
# Convert labels to two-class one-hot vectors (0 = positive, 1 = negative).
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Build network.
# Input layer: batch dimension left open, sequence length fixed at 100.
net = tflearn.input_data([None, 100])
# Embedding: 10,000-word vocabulary -> 128-dimensional vectors.
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
# LSTM with dropout (keep probability 0.8) to reduce overfitting.
net = tflearn.lstm(net, 128, dropout=0.8)
# Fully connected softmax layer over the two sentiment classes.
net = tflearn.fully_connected(net, 2, activation='softmax')
# Regression layer: Adam optimizer minimizing categorical cross-entropy.
# BUG FIX: the loss name was misspelled 'categorical_crosstropy'.
net = tflearn.regression(net, optimizer='adam', learning_rate=0.0001,
                         loss='categorical_crossentropy')

# Training; show_metric logs accuracy during fitting.
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True, batch_size=32)
import fasttext
from src.binaryfuncs import *
from src.multifuncs import *
def main():
    """Run the binary-classification experiments, then the multi-label
    ones, both over the EXIST2021 training file."""
    training_file = "training/EXIST2021_training.tsv"

    binary_data = import_text(training_file)
    format_text_bin(binary_data)
    crossValBinEN(5)
    crossValBinES(5)
    testBin()

    multi_data = import_text(training_file)
    format_text_multi(multi_data)
    crossValMultiEN(5)
    crossValMultiES(5)
    testMulti()

if __name__ == '__main__':
    main()
import requests
from pydantic import validate_arguments, ValidationError
@validate_arguments
def GetBooks(q: str) -> object:
    """Query the Google Books volumes API for *q* and return the parsed
    JSON response.

    :param q: user-supplied search query.
    :raises requests.HTTPError: on a non-2xx response.
    :raises requests.Timeout: when the API does not answer within 10s.
    """
    r = requests.get(
        "https://www.googleapis.com/books/v1/volumes",
        # Let requests URL-encode the untrusted query instead of
        # concatenating it into the URL.
        params={"q": q},
        timeout=10,
    )
    # Surface HTTP errors instead of silently returning an error payload.
    r.raise_for_status()
    return r.json()
|
from __future__ import division
from __future__ import print_function
from pathlib import Path
import gzip
import numpy as np
if __name__ == '__main__':
import data_dirs
else:
from tools import data_dirs
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
#from skimage import data, color
from skimage.transform import rescale, resize, downscale_local_mean
DATADIR = data_dirs.material
NUM_LABELS = 5
IMAGE_SHAPE = [227, 227, 3]
base_path = Path(__file__).parent
def get_data(one_hot=True, test_size=0.2, random_state=10, image_shape=None):
    """Utility for convenient data loading.

    Loads the image and label arrays from the npy files and returns the
    train_test_split tuple (X_train, X_test, Y_train, Y_test).
    """
    features = load_image(image_shape)
    targets = load_label(one_hot)
    return train_test_split(features, targets, test_size=test_size,
                            random_state=random_state)
def load_label(one_hot=True):
    """Load the label matrix Y.npy.

    Returns the one-hot rows as stored, or collapses them to integer
    class ids (0-4) when one_hot is anything other than True.
    """
    labels = np.load((base_path / '../data/npy/Y.npy').resolve())
    if one_hot != True:  # deliberately matches only the exact value True
        class_ids = np.arange(0, 5)
        return labels.dot(class_ids).astype('uint8')
    return labels
def main():
    """Smoke test: print the shape of the loaded label matrix."""
    labels = load_label()
    print(labels.shape)
def load_image(image_shape=None):
    """Load X.npy and resize every image to *image_shape* (defaults to
    IMAGE_SHAPE); returns a uint8 array."""
    target_shape = image_shape if image_shape is not None else IMAGE_SHAPE

    def _resized(img):
        # skimage's resize returns floats in [0, 1]; scale back to 0-255.
        return resize(img, (target_shape[0], target_shape[1]), anti_aliasing=True) * 255

    raw = np.load((base_path / '../data/npy/X.npy').resolve()).astype('uint8')
    return np.array([_resized(img) for img in raw]).astype('uint8')
if __name__ == '__main__':
main()
# Data-augmentation configuration.  (Color-jitter options --
# brightness/saturation/contrast/hue -- were present but are disabled.)
augmentation_params = {
    'max_crop_percentage': 0.25,
    'noise_std': 0.05,
    'flip': True,
    'max_rotate_angle': 90,
}
# Concatenated rasmodel/pysb experiment scripts (see the # SOURCE markers).
# Each section reuses the same imported `model` object and mutates its
# parameters in place, so the sections are order-dependent.
# SOURCE: experiments/hras_nucleotide_equilibration.rst:16
from rasmodel.scenarios.default import model
# SOURCE: experiments/hras_nucleotide_equilibration.rst:20
import numpy as np
from matplotlib import pyplot as plt
from pysb.integrate import Solver
from pysb import *
# SOURCE: experiments/hras_nucleotide_equilibration.rst:28
# Keep only free HRAS, GTP and GDP; zero out every other initial condition.
ics_to_keep = ['GTP_0', 'GDP_0', 'HRAS_0']
for ic in model.initial_conditions:
    ic_param = ic[1]
    if ic_param.name not in ics_to_keep:
        ic_param.value = 0
# SOURCE: experiments/hras_nucleotide_equilibration.rst:36
t = np.logspace(-3, 3, 1000)
# SOURCE: experiments/hras_nucleotide_equilibration.rst:40
sol = Solver(model, t)
sol.run()
# SOURCE: experiments/hras_nucleotide_equilibration.rst:45
# Plot each HRAS species as a fraction of total HRAS on a log time axis.
plt.figure()
HRAS_0 = model.parameters['HRAS_0'].value
for obs_name in ['HRAS_GTP_closed_', 'HRAS_GTP_open_',
                 'HRAS_GDP_closed_', 'HRAS_GDP_open_', 'HRAS_nf_']:
    plt.plot(t, sol.yobs[obs_name] / HRAS_0, label=obs_name)
ax = plt.gca()
ax.set_xscale('log')
plt.legend(loc='right')
plt.xlabel('Time (sec)')
plt.ylabel('Fraction of HRAS')
plt.savefig('simulation_1.png')
# SOURCE: experiments/hras_nucleotide_equilibration.rst:68
for ic in model.initial_conditions:
    ic_param = ic[1]
    ic_param.value = 0
# SOURCE: experiments/hras_nucleotide_equilibration.rst:74
# Start with HRAS pre-loaded with labeled GTP ('y') and unlabeled
# competitor GTP in solution.
HRAS = model.monomers['HRAS']
GTP = model.monomers['GTP']
HRAS_mGTP_0 = Parameter('HRAS_mGTP_0', 4e-6)
model.parameters['GTP_0'].value = 10e-6 # Unlabeled competitor
model.initial(HRAS(gtp=1, s1s2='open', gap=None, gef=None, p_loop=None,
                   CAAX=None, mutant='WT') % GTP(p=1, label='y'),
              HRAS_mGTP_0)
# SOURCE: experiments/hras_nucleotide_equilibration.rst:84
t = np.logspace(1, 6, 1000)
sol = Solver(model, t)
sol.run()
plt.figure()
plt.plot(t, sol.yobs['HRAS_mGTP_'] / HRAS_mGTP_0.value)
plt.ylim(0, 1.05)
ax = plt.gca()
ax.set_xscale('log')
# SOURCE: experiments/hras_nucleotide_equilibration.rst:96
# Repeat the competition experiment with labeled GDP instead of GTP.
for ic in model.initial_conditions:
    ic_param = ic[1]
    ic_param.value = 0
GDP = model.monomers['GDP']
HRAS_mGDP_0 = Parameter('HRAS_mGDP_0', 4e-6)
model.parameters['GDP_0'].value = 10e-6 # Unlabeled competitor
model.initial(HRAS(gtp=1, s1s2='open', gap=None, gef=None, p_loop=None,
                   CAAX=None, mutant='WT') % GDP(p=1, label='y'),
              HRAS_mGDP_0)
sol = Solver(model, t)
sol.run()
# SOURCE: experiments/hras_nucleotide_equilibration.rst:112
plt.plot(t, sol.yobs['HRAS_mGDP_'] / HRAS_mGDP_0.value)
plt.ylim(0, 1.05)
ax = plt.gca()
ax.set_xscale('log')
plt.savefig('simulation_2.png')
# SOURCE: experiments/kras_mapk.rst:12
# NOTE(review): this section assumes RAS/RASA1/RAF/MAP2K1 monomers are in
# scope from the star import above — confirm against the model definition.
Observable('RAS_GTP', RAS(gtp=1) % GTP(p=1))
Observable('RAS_RASGAP', RAS(gap=1) % RASA1(rasgap=1))
Observable('RAS_RAF', RAS(s1s2=1) % RAF(ras=1))
Observable('RAFd', RAF(raf=1) % RAF(raf=1))
Observable('MEKpp', MAP2K1(S218='p', S222='p'))
# SOURCE: experiments/kras_mapk.rst:23
from pysb.integrate import Solver
import numpy
ts = numpy.linspace(0, 1000, 100)
solver = Solver(model, ts)
solver.run()
# SOURCE: experiments/kras_mapk.rst:35
import matplotlib.pyplot as plt
for obs in model.observables:
    plt.plot(ts, solver.yobs[obs.name], label=obs.name)
plt.xlabel('Time (s)')
plt.ylabel('Concentration (nM)')
plt.legend()
plt.show()
# SOURCE: experiments/ras_gdp_binding.rst:16
from rasmodel.scenarios.default import model
# SOURCE: experiments/ras_gdp_binding.rst:20
import numpy as np
from matplotlib import pyplot as plt
from pysb.integrate import Solver
from pysb import *
from rasmodel import fitting
# Zero out all initial conditions
for ic in model.initial_conditions:
    ic[1].value = 0
# SOURCE: experiments/ras_gdp_binding.rst:37
# In this first experiment, 0.5 uM (500 nM) of HRAS is used:
model.parameters['HRAS_0'].value = 500.
# We create an Expression including both HRAS-mGTP and HRAS-mGDP because
# we will get a fluorescence signal even if GTP is hydrolyzed to GDP:
Expression('HRAS_mGXP_', model.observables['HRAS_mGTP_closed_'] +
                         model.observables['HRAS_mGDP_closed_'])
# We use the parameters calculated for experiments with mGTP at 5C
model.parameters['bind_HRASopen_GTP_kf'].value = 1e-2 # nM^-1 s^-1
model.parameters['bind_HRASopen_GTP_kr'].value = \
    1e-2 / (6.1e4 * 1e-9) # s^-1
model.parameters['equilibrate_HRASopenGTP_to_HRASclosedGTP_kf'].value = \
    4.5 #s^-1
# Assume that the isomerization reaction is irreversible, as appears
# to be their assumption for this experiment:
model.parameters['equilibrate_HRASopenGTP_to_HRASclosedGTP_kr'].value = \
    0 #s^-1
# SOURCE: experiments/ras_gdp_binding.rst:60
t = np.linspace(0, 10, 1000)
sol = Solver(model, t)
# SOURCE: experiments/ras_gdp_binding.rst:67
plt.figure()
k_list = []
# Perform titration
mgtp_concs = np.arange(1, 15) * 1000 # nM (1 - 15 uM)
for mgtp_conc in mgtp_concs:
    # Titration of labeled GTP:
    model.parameters['mGTP_0'].value = mgtp_conc
    sol.run()
    # Fit to an exponential function to extract the pseudo-first-order rates
    k = fitting.Parameter(1.)
    def expfunc(t):
        # The maximum of the signal will be with all HRAS bound to GTP/GDP
        max_mGXP = model.parameters['HRAS_0'].value
        return max_mGXP * (1 - np.exp(-k()*t))
    res = fitting.fit(expfunc, [k], sol.yexpr['HRAS_mGXP_'], t)
    # Plot data and fits
    plt.plot(t, sol.yexpr['HRAS_mGXP_'], color='b')
    plt.plot(t, expfunc(t), color='r')
    # Keep the fitted rate
    k_list.append(k())
# SOURCE: experiments/ras_gdp_binding.rst:95
# Pseudo-first-order rate as a function of mGTP concentration.
plt.figure()
plt.plot(mgtp_concs, k_list, marker='o')
plt.ylim(bottom=0)
# SOURCE: experiments/ras_gdp_binding.rst:106
# A constant amount of labeled GDP
model.parameters['mGDP_0'].value = 2.5 * 1000 # nM
model.parameters['mGTP_0'].value = 0
# SOURCE: experiments/ras_gdp_binding.rst:112
model.parameters['bind_HRASopen_GDP_kf'].value = \
    1e-2 # nM^-1 s^-1
model.parameters['bind_HRASopen_GDP_kr'].value = \
    1e-2 / (5.7e4 * 1e-9) # s^-1
model.parameters['equilibrate_HRASopenGDP_to_HRASclosedGDP_kf'].value = \
    3.2 #s^-1
model.parameters['equilibrate_HRASopenGDP_to_HRASclosedGDP_kr'].value = \
    5e-7 #s^-1
# SOURCE: experiments/ras_gdp_binding.rst:124
k_list = []
plt.figure()
gdp_concs = np.arange(0, 22) * 1000 # nM
for gdp_conc in gdp_concs:
    # Titration of unlabeled GDP
    model.parameters['GDP_0'].value = gdp_conc
    sol.run()
    k = fitting.Parameter(1.)
    A = fitting.Parameter(100.)
    def expfunc(t):
        return A() * (1 - np.exp(-k()*t))
    res = fitting.fit(expfunc, [A, k], sol.yexpr['HRAS_mGXP_'], t)
    plt.plot(t, sol.yexpr['HRAS_mGXP_'], color='b')
    plt.plot(t, expfunc(t), color='r')
    k_list.append(k())
# SOURCE: experiments/ras_gdp_binding.rst:145
plt.figure()
plt.plot(gdp_concs, k_list, marker='o')
plt.ylim(bottom=0)
# SOURCE: experiments/gxp_exchange.rst:9
from rasmodel.scenarios.default import model
import numpy as np
from matplotlib import pyplot as plt
from pysb.integrate import Solver
from pysb import *
from tbidbaxlipo.util import fitting
# Zero out all initial conditions
for ic in model.initial_conditions:
    ic[1].value = 0
KRAS = model.monomers['KRAS']
GDP = model.monomers['GDP']
GTP = model.monomers['GTP']
Expression('KRAS_mGXP_', model.observables['KRAS_mGTP_closed_'] +
                         model.observables['KRAS_mGDP_closed_'])
# Add an initial condition for HRAS with GDP or GTP pre-bound
# (Concentration units in nM)
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
             mutant='WT') % GDP(p=1, label='n'),
        Parameter('KRAS_WT_GDP_0', 0.))
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
             mutant='G13D') % GDP(p=1, label='n'),
        Parameter('KRAS_G13D_GDP_0', 0.))
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
             mutant='WT') % GTP(p=1, label='n'),
        Parameter('KRAS_WT_GTP_0', 0.))
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
             mutant='G13D') % GTP(p=1, label='n'),
        Parameter('KRAS_G13D_GTP_0', 0.))
plt.ion()
# SOURCE: experiments/gxp_exchange.rst:48
# WT, GDP:
model.parameters['mGDP_0'].value = 1500.
model.parameters['KRAS_WT_GDP_0'].value = 750.
t = np.linspace(0, 1000, 1000) # 1000 seconds
sol = Solver(model, t)
sol.run()
plt.figure()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='WT')
# G13D, GDP:
model.parameters['KRAS_WT_GDP_0'].value = 0
model.parameters['KRAS_G13D_GDP_0'].value = 750.
sol.run()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='G13D')
plt.legend(loc='lower right')
plt.title('GDP exchange')
plt.xlabel('Time (s)')
plt.ylabel('[Bound mGDP] (nM)')
plt.show()
plt.savefig('doc/_static/generated/gxp_exchange_1.png', dpi=150)
# SOURCE: experiments/gxp_exchange.rst:74
# WT, GTP
model.parameters['mGDP_0'].value = 0.
model.parameters['mGTP_0'].value = 1500.
model.parameters['KRAS_WT_GDP_0'].value = 0.
model.parameters['KRAS_G13D_GDP_0'].value = 0.
model.parameters['KRAS_WT_GTP_0'].value = 750.
model.parameters['KRAS_G13D_GTP_0'].value = 0.
sol.run()
plt.figure()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='WT')
# G13D, GTP
model.parameters['KRAS_WT_GTP_0'].value = 0.
model.parameters['KRAS_G13D_GTP_0'].value = 750.
sol.run()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='G13D')
plt.legend(loc='lower right')
plt.title('GTP exchange')
plt.xlabel('Time (s)')
plt.ylabel('[Bound mGTP] (nM)')
plt.show()
plt.savefig('doc/_static/generated/gxp_exchange_2.png', dpi=150)
# SOURCE: experiments/kras_gtp_hydrolysis.rst:9
from rasmodel.scenarios.default import model
import numpy as np
from matplotlib import pyplot as plt
from pysb.integrate import Solver
from pysb import *
from tbidbaxlipo.util import fitting
KRAS = model.monomers['KRAS']
GTP = model.monomers['GTP']
total_pi = 50000
# One GTP-loaded initial condition per KRAS mutant state.
for mutant in KRAS.site_states['mutant']:
    Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='open',
                 CAAX=None, mutant=mutant) % GTP(p=1, label='n'),
            Parameter('KRAS_%s_GTP_0' % mutant, 0))
# SOURCE: experiments/kras_gtp_hydrolysis.rst:31
# Intrinsic hydrolysis: simulate each mutant alone and plot Pi release.
plt.figure()
t = np.linspace(0, 1000, 1000) # 1000 seconds
for mutant in KRAS.site_states['mutant']:
    # Zero out all initial conditions
    for ic in model.initial_conditions:
        ic[1].value = 0
    model.parameters['KRAS_%s_GTP_0' % mutant].value = total_pi
    sol = Solver(model, t)
    sol.run()
    plt.plot(t, sol.yobs['Pi_'] / total_pi, label=mutant)
plt.ylabel('GTP hydrolyzed (%)')
plt.ylim(top=1)
plt.xlabel('Time (s)')
plt.title('Intrinsic hydrolysis')
plt.legend(loc='upper left', fontsize=11, frameon=False)
plt.savefig('doc/_static/generated/kras_gtp_hydrolysis_1.png')
# SOURCE: experiments/kras_gtp_hydrolysis.rst:57
# GAP-mediated hydrolysis: same sweep with RASA1 present.
plt.figure()
for mutant in KRAS.site_states['mutant']:
    # Zero out all initial conditions
    for ic in model.initial_conditions:
        ic[1].value = 0
    model.parameters['RASA1_0'].value = 50000
    model.parameters['KRAS_%s_GTP_0' % mutant].value = total_pi
    sol = Solver(model, t)
    sol.run()
    plt.plot(t, sol.yobs['Pi_'] / total_pi, label=mutant)
plt.ylabel('GTP hydrolyzed (%)')
plt.ylim(top=1)
plt.xlabel('Time (s)')
plt.title('GAP-mediated hydrolysis')
plt.legend(loc='upper right', fontsize=11, frameon=False)
plt.savefig('doc/_static/generated/kras_gtp_hydrolysis_2.png')
|
# Generated by Django 2.0.1 on 2018-02-23 12:15
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: reconciles the diverged 0003 and 0004 ClubBoard
    migration branches. No schema operations are needed; it exists only
    to join the two histories.
    """

    dependencies = [
        ('ClubBoard', '0004_merge_20180221_1656'),
        ('ClubBoard', '0003_auto_20180219_1903'),
    ]
    operations = [
    ]
|
from World import *
class AmoebaWorld(World):
    """AmoebaWorld is a microscope slide, with hash marks, where
    Amoebas trace parametric equations.
    """
    def __init__(self):
        World.__init__(self)
        self.title('AmoebaWorld')
        self.ca_width = 400 # canvas width and height
        self.ca_height = 400
        self.animals = [] # a list of Amoebas
        self.thread = None # worker thread started by run_thread
        # create the canvas
        self.row()
        self.canvas = self.ca(width=self.ca_width, height=self.ca_height,
                              bg='white', scale=[20,20])
        # draw the grid: solid lines on the axes, dotted elsewhere
        dash = {True:'', False:'.'}
        (xmin, xmax) = (-10, 10)
        (ymin, ymax) = (-10, 10)
        for x in range(xmin, xmax+1, 1):
            self.canvas.line([[x, ymin], [x, ymax]], dash=dash[x==0])
        for y in range(ymin, ymax+1, 1):
            self.canvas.line([[xmin, y], [xmax, y]], dash=dash[y==0])

    def control_panel(self):
        """Build the GUI controls: buttons, end-time entry and the
        x(t)/y(t) equation entries."""
        self.col([0,0,0,1])
        # run, stop, quit buttons
        self.gr(2, [1,1], [1,1])
        self.bu(text='Clear', command=self.clear)
        self.bu(text='Quit', command=self.quit)
        self.bu(text='Run', command=self.run_thread)
        # NOTE(review): self.stop is not defined in this class —
        # presumably inherited from World; confirm.
        self.bu(text='Stop', command=self.stop)
        self.endgr()
        # end time entry
        self.row([0,1,0], pady=30)
        self.la(text='end time')
        self.en_end = self.en(width=5, text='10')
        self.la(text='seconds')
        self.endfr()
        # entries for x(t) and y(t)
        self.gr(2, [0,1])
        self.en_xoft = self.make_entry('x(t) = ')
        self.la()
        self.la()
        self.en_yoft = self.make_entry('y(t) = ')
        self.endgr()
        self.la()
        self.endcol()

    def make_entry(self, label):
        """make the entries for the equations x(t) and y(t)
        """
        self.la(text=label)
        entry = self.en(width=5, text=' t')
        return entry

    def clear(self):
        """undraw and remove all the animals, and anything else
        on the canvas
        """
        for animal in self.animals:
            animal.undraw()
        self.canvas.delete('slime')

    def run_thread(self):
        """execute AmoebaWorld.run in a new thread"""
        # if there is already a thread, kill it and wait for it to die
        if self.thread:
            self.running = 0
            self.thread.join()
        # find out how long to run
        end = self.en_end.get()
        end = int(end)
        # create a thread and start it
        self.thread = MyThread(self.run, end)

    def run(self, end=10):
        """count from 0 to end seconds in 0.1 second increments.
        At each step, compute the location of the Amoebas and update.
        """
        self.running = 1
        start_time = time.time()
        t = 0
        xexpr = self.en_xoft.get()
        yexpr = self.en_yoft.get()
        while self.exists and self.running and t < end:
            for amoeba in self.animals:
                # SECURITY NOTE(review): eval of user-typed expressions runs
                # arbitrary code; acceptable in a teaching tool, not elsewhere.
                x = eval(xexpr)
                y = eval(yexpr)
                print 't = %.1f x = %.1f y = %.1f' % (t, x, y)
                amoeba.redraw(x, y)
            time.sleep(0.1)
            t = time.time() - start_time
class Amoeba(Animal):
    """a soft, round animal that lives in AmoebaWorld"""
    def __init__(self, world, xoft=None, yoft=None):
        self.world = world
        # xoft and yoft are functions that compute the location
        # of the Amoeba as a function of time
        self.xoft = xoft or self.xoft
        self.yoft = yoft or self.yoft
        # size and color
        self.size = 0.5
        self.color1 = 'violet'
        self.color2 = 'medium orchid'
        world.register(self)

    def xoft(self, t):
        """a simple function that computes the Amoeba's x position"""
        return t

    def yoft(self, t):
        """a simple function that computes the Amoeba's y position"""
        return t

    # NOTE: the interfaces for draw and redraw are different from
    # other animals. I pass x and y as parameters because I wanted
    # to avoid using attributes. Students haven't seen attributes
    # yet when they work with AmoebaWorld.
    def redraw(self, x, y):
        """erase the Amoeba and redraw at location x, y"""
        self.undraw()
        self.draw(x, y)

    def draw(self, x, y):
        """draw the Amoeba"""
        # thetas is the sequence of angles used to compute the perimeter
        thetas = range(0, 360, 30)
        coords = self.poly_coords(x, y, thetas, self.size)
        self.tag = 'Amoeba%d' % id(self)
        slime = 'lavender'
        # draw the slime outline which will be left behind
        self.world.canvas.polygon(coords, fill=slime, outline=slime,
                                  tags='slime')
        # draw the outer perimeter
        self.world.canvas.polygon(coords,
            fill=self.color1, outline=self.color2, tags=self.tag)
        # draw the perimeter of the nucleus
        coords = self.poly_coords(x, y, thetas, self.size/2)
        self.world.canvas.polygon(coords,
            fill=self.color2, outline=self.color1, tags=self.tag)

    def poly_coords(self, x, y, thetas, size):
        """compute the coordinates of a polygon centered around x,y,
        with a radius of approximately size, but with random variation
        """
        rs = [size+random.uniform(0, size) for theta in thetas]
        # NOTE(review): self.polar is not defined here — presumably
        # inherited from Animal; confirm.
        coords = [self.polar(x, y, r, theta) for (r, theta) in zip(rs, thetas)]
        return coords
class GuiAmoeba(Amoeba):
    """there are two kinds of Amoebas: for a regular Amoeba, xoft
    and yoft are functions that compute coordinates as a function of
    time. For a GuiAmoeba, xoft and yoft use methods from
    AmoebaWorld to read expressions for x(t) and y(t) from the GUI.
    """
    def xoft(self, t):
        # delegate to the world, which evaluates the GUI's x(t) entry
        return self.world.xoft(t)

    def yoft(self, t):
        # delegate to the world, which evaluates the GUI's y(t) entry
        return self.world.yoft(t)
# Interactive entry point: build the world and one GUI-driven amoeba.
if __name__ == '__main__':
    # create the GUI
    world = AmoebaWorld()
    world.control_panel()
    # create the amoeba
    amoeba = GuiAmoeba(world)
    # wait for the user to do something
    world.mainloop()
|
from orun.http import HttpResponse
from orun.apps import apps
from orun.db import models
class Http(models.Model):
    """HTTP helpers exposed as the abstract 'ir.http' service."""

    @classmethod
    def get_attachment(cls, attachment_id):
        """Serve a stored attachment as an HTTP response.

        Args:
            attachment_id: primary key of a 'content.attachment' record.

        Returns:
            HttpResponse with the attachment content and mimetype; a
            Content-Disposition header is added when a file name is set.
        """
        obj = apps['content.attachment'].objects.get(pk=attachment_id)
        res = HttpResponse(obj.content, content_type=obj.mimetype)
        if obj.file_name:
            res['Content-Disposition'] = 'attachment; filename=' + obj.file_name
        return res

    class Meta:
        name = 'ir.http'
|
# coding: utf-8
"""
把蓝色当成1、白色当成2、红色当成3
i代表1的代表从左到右的位置
k代表3从右到左的位置
"""
def three_flag(x):
    """Partition a list of 1/2/3 values in place, in a single pass.

    Dutch-national-flag scheme: 1s are packed to the front, 3s to the
    back, and 2s end up in the middle.
    """
    low = 0             # next slot for a 1
    cur = 0             # element under inspection
    high = len(x) - 1   # next slot for a 3
    while cur <= high:
        value = x[cur]
        if value == 1:
            if low != cur:
                x[low], x[cur] = x[cur], x[low]
            low += 1
            cur += 1
        elif value == 3:
            # the element swapped in from the back is unexamined,
            # so cur does not advance here
            x[cur], x[high] = x[high], x[cur]
            high -= 1
        else:
            cur += 1
if __name__ == "__main__":
x = [2, 1, 1, 3, 2, 1, 3, 2]
three_flag(x)
print(x)
|
# https://www.reddit.com/r/dailyprogrammer/comments/5aemnn/20161031_challenge_290_easy_kaprekar_numbers/
# NOTE: the reported "ValueError: invalid literal for int() with base 10: ''"
# occurs when i**2 has a single digit: the left half of the split is the
# empty string and int('') raises.
import math
def kaprekarNumbers(start, end):
    """Print and return the Kaprekar numbers in [start, end].

    A number n is a Kaprekar number when the decimal digits of n**2 can
    be split into a left part and a right part (the right part keeping
    the trailing ceil(d/2) digits) that sum back to n.

    Args:
        start: first candidate, inclusive.
        end: last candidate, inclusive.

    Returns:
        List of the Kaprekar numbers found, in ascending order.
    """
    found = []
    for i in range(start, end + 1):
        digits = str(i * i)
        split = len(digits) // 2
        # Original bug: int('') raised ValueError whenever the square had
        # a single digit (empty left half). Treat the empty half as 0.
        left = int(digits[:split]) if split else 0
        right = int(digits[split:])
        if left + right == i:
            found.append(i)
            print(i, "=", digits[:split] or "0", "+", digits[split:])
    return found

kaprekarNumbers(1, 55)
#!/usr/bin/python
class Fred:
    """Tiny demo class holding a name, a free-form attribute and a UUID."""

    def __init__(self,
                 name=None,
                 a="default name",
                 UUID=None
                 ):
        self.name = name
        self.a = a
        self.UUID = UUID

    def PrintShit(self):
        """Print a fixed marker line (debug helper)."""
        print("I am a wibble")

    def UUIDdefined(self):
        """Return True when a UUID has been assigned."""
        # 'is not None' replaces the original '== None' equality test;
        # identity is the correct check for the None sentinel, and the
        # if/return-True/return-False ladder collapses to one expression.
        return self.UUID is not None
b = Fred("wibble")
c = Fred()
b.a = "I am a variable"
b.PrintShit()
# c.UUID = "ABC"
if (c.UUIDdefined()):
print("The UUID is \"{}\"".format (c.UUID))
else:
print ("It has no UUID")
print ("It's ", c.UUIDdefined())
|
# main window (основное окно)
from tkinter import *
import requests
from tkcalendar import *
# Small tkinter app: a calendar picker plus a button that prints a
# text weather report from wttr.in to the console.
root = Tk()
root.geometry('400x250')
root.title('Calendar + weather')

def tell_weather():
    # fetch the one-line plain-text report (?0T = current conditions, no colors)
    url = 'http://wttr.in/?0T'
    response = requests.get(url)
    print(response.text)

# define the window's appearance
# NOTE(review): tell_weather is wired both as command= and via
# bind("<Button-1>") — one click fires two requests, and the bound call
# passes an event argument that tell_weather() does not accept
# (TypeError). Confirm which wiring is intended.
btn_Weather = Button(root, text='Weather', foreground='black', command=tell_weather)
btn_Weather.bind("<Button-1>", tell_weather)
btn_Weather.grid(row=0, column=1)
date_ent = Frame(root, relief=SUNKEN, bd=4, bg='white')
date_ent.grid()
weather_output = Label(root, text="Нажмите 'Weather' чтобы получить прогноз погоды")
weather_output.grid(row=1, column=1)
# configure the calendar widget
cal = DateEntry(date_ent, dateformat=3, width=12, background='white', foreground='black', borderwidth=4, Calendar=2020)
cal.grid(row=1, column=3, sticky='nsew')
# fetch the weather once at startup
url = 'http://wttr.in/?0T'
response = requests.get(url) # perform the HTTP request
print(response.text) # print the HTTP response body
root.mainloop()
|
from utils.urls import github_URL
import requests
from bs4 import BeautifulSoup
from spiders.base import Spider
from utils.headers import HEADERS
from models.HotItem import HotItem
from utils.mongo import hot_collection
from utils.cates import types
class GithubSpider(Spider):
    """Spider for GitHub's trending page: scrapes repo title, URL and
    description and replaces the cached 'github' entries in Mongo."""

    def __init__(self, name='github'):
        super().__init__(name)

    def run(self):
        """Fetch, parse and persist the current trending repositories."""
        super().run()
        res = requests.get(github_URL,headers=HEADERS)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')
        repository = soup.select('article.Box-row')
        for row in repository:
            title = row.select('h1.h3.lh-condensed')[0].text.strip()
            # collapse the "owner / repo" text onto one compact token
            title = title.replace(' ','').replace('\n','')
            url = 'https://github.com' + row.select('h1.h3.lh-condensed')[0].find('a').get('href')
            desc_el = row.select('p.col-9')
            desc = desc_el[0].text.strip() if desc_el else None
            hot_item = HotItem(title, url, cate=types['github'], desc=desc)
            self.arr.append(hot_item)
        # replace the previous snapshot: drop old 'github' docs, insert new
        hot_collection.delete_many({'cate':types['github']})
        hot_collection.insert_many([vars(item) for item in self.arr])
|
# !/usr/bin/python
"""
-----------------------------------------------
Bardel Attribute Assignment
Written By: Colton Fetters
Version: 1.0
First release: 11/09/2017
-----------------------------------------------
"""
# Import module
import os
import json
import maya.cmds as cmds
# Studio module
import bd_lib.bd_reference_lib as ref_lib
import bd_lib.bd_config_lib as config
import bd_attribute_json_generator as gen
reload(gen)
class Core(object):
    """Applies render attributes (from generated JSON sidecar files) to
    referenced or Alembic-cached assets in the current Maya scene.
    Python 2 / Maya-only code.
    """

    def __init__(self):
        self.REF = ref_lib.ReferenceManager()
        self.CONFIG = config.Config()
        self.GEN = gen.Core()
        self._RENDER_CAM = self.CONFIG.get("render_camera_shape")
        self._ATOM = 'atom'
        self._CACHE = 'cache'
        # NOTE(review): the next three lines are bare attribute accesses
        # with no effect — presumably meant as documentation of the GEN
        # members used below; confirm they can be removed.
        self.GEN._CUSTOM_ATTR_DICT
        self.GEN._JSON_PATH
        self.GEN._JSON_NAME

    def _read_config(self, path):
        """[Reads Json File]
        [Reads Json File and returns what is inside the file]
        Args:
            path: [Json File Path]
        Returns: JsonFile
            [Data in file]
            [Dictionary]
        """
        if os.path.exists(path):
            # NOTE(review): the file handle is never closed
            jsonFile = json.loads(open(path).read())
            return jsonFile
        else:
            return None

    def _get_asset_dict(self, asset, referencePath):
        """[Gets Dictionary of Specific Asset]
        [Reads the Assets Dictionary]
        Args:
            asset: [Geo Name]
            referencePath: [Asset Path]
        Returns: assetDict
            [Dictionary of Assets Data]
            [Dictionary]
        """
        cleanList = []
        objectInfoFileList = []
        # Trys to split by the assets name
        try:
            strippedPath = referencePath.split('/{}'.format(asset))[0]
        except IndexError:
            return "Error with Path Split {}".format(referencePath)
        objectInfoConfigPath = '{}{}'.format(strippedPath, self.GEN._JSON_PATH)
        if os.path.exists(objectInfoConfigPath):
            objectInfoFileList = os.listdir(objectInfoConfigPath)
        if objectInfoFileList:
            # Checks for the specific naming convention
            for each in sorted(objectInfoFileList):
                if self.GEN._JSON_NAME in each:
                    cleanList.append(each)
            # Trys to find the latest Json file
            try:
                objectInfoFile = cleanList[-1]
                print objectInfoFile
                objectInfoFilePath = '{}{}'.format(objectInfoConfigPath, objectInfoFile)
                assetDict = self._read_config(objectInfoFilePath)
                if assetDict:
                    return assetDict
            except IndexError:
                return {}
        else:
            return {}

    def _get_asset_attr(self, asset, referencePath):
        """[Gets Assets Dictionary]
        [Gets Asset's Specific Dictionary of Attriubutes and Geo]
        Args:
            asset: [Asset Name]
            referencePath: [Reference Path]
        Returns: assetDict['asset']
            [Attributes and Geo Dictionary]
            [Dictionary]
        """
        assetDict = self._get_asset_dict(asset, referencePath)
        if assetDict:
            # Trys to attribute specific value from asset key
            try:
                return assetDict['asset'][asset]
            except TypeError:
                print('Error Asset: {}'.format(asset))

    def set_asset_attr(self, animFile, asset, assetDict):
        """[Sets Attribute Based on Dictionary]
        [description]
        Args:
            animFile: [description]
            asset: [description]
            assetDict: [description]
        """
        renderAttribute = assetDict.keys()
        for attr in renderAttribute:
            # really ugly way to get key associated with value
            mayaAttr = self.GEN._CUSTOM_ATTR_DICT.keys()[self.GEN._CUSTOM_ATTR_DICT.values().index(attr)]
            geoList = assetDict[attr]
            for geoDict in geoList:
                geoName = geoDict.keys()[0]
                state = geoDict[geoName]
                if animFile == self._CACHE:
                    geoTransform = '{}:{}'.format(asset, str(geoName))
                else:
                    geoTransform = '{}:{}:{}'.format(animFile, asset, str(geoName))
                # NOTE(review): the two bare excepts below silently swallow
                # every error (missing node, bad attr, ...); deliberate
                # best-effort behaviour, but worth narrowing.
                try:
                    if 'vray' in attr:
                        node = cmds.listRelatives(geoTransform, shapes=True)[0]
                        cmds.vray("addAttributesFromGroup", node, attr, True)
                        cmds.setAttr('{}.{}'.format(node, mayaAttr), state)
                except:
                    pass
                try:
                    node = cmds.listRelatives(geoTransform, shapes=True)[0]
                    cmds.setAttr('{}.{}'.format(node, mayaAttr), state)
                except:
                    continue
        return 'complete'

    def reference_assets(self):
        # Collect (namespace, path) pairs for loaded references, skipping
        # TFX rigs and the render camera.
        assetList = []
        referencePathList = []
        referenceList = self.REF.get_loaded_references()
        for each in referenceList:
            assetName = self.REF.get_namespace_from_node(each)
            if assetName == self.REF.get_parent_namespace(each):
                if '_TFX' in assetName:
                    continue
                animFile = assetName
            else:
                if assetName in self._RENDER_CAM:
                    continue
                referencePath = self.REF.get_reference_path(each)
                assetList.append(assetName)
                referencePathList.append(referencePath)
        # NOTE(review): animFile is unbound (NameError) when no reference
        # matches the top-level-namespace branch above — confirm that case
        # cannot occur in production scenes.
        return animFile, assetList, referencePathList

    def config_keys(self, abcConfig, configKeys):
        # NOTE(review): self._ATOM / self._CACHE are non-empty strings, so
        # both conditions are always true and the loop recomputes the same
        # two key lists once per config key.
        for each in configKeys:
            if self._ATOM:
                atomKeys = abcConfig[self._ATOM].keys()
            if self._CACHE:
                abcKeys = abcConfig[self._CACHE].keys()
        return atomKeys, abcKeys

    def cache_info(self, abcConfig, configKey, key):
        # Pull the path (atom) or nested asset info (cache) for one key.
        if configKey in self._ATOM:
            info = abcConfig[configKey][key]['asset_path']
        if configKey in self._CACHE:
            info = abcConfig[configKey][key]['nested_asset_info'][key]
        return info

    def cached_assets(self, abcjson):
        # Build (asset, path) lists from the Alembic cache sidecar JSON.
        assetList = []
        pathList = []
        abcConfig = self._read_config(abcjson)
        configKeys = abcConfig.keys()
        atomKeys, abcKeys = self.config_keys(abcConfig, configKeys)
        for each in atomKeys:
            if each in self._RENDER_CAM:
                continue
            atomInfo = self.cache_info(abcConfig, self._ATOM, each)
            assetList.append(each)
            pathList.append(atomInfo)
        for each in abcKeys:
            abcInfo = self.cache_info(abcConfig, self._CACHE, each)
            assetList.append(each)
            pathList.append(abcInfo)
        return self._CACHE, assetList, pathList

    def find_assets(self):
        # Prefer cached assets (scene has an "infoPath" fileInfo entry),
        # otherwise fall back to loaded references.
        abcjson = cmds.fileInfo("infoPath", query=True)
        if abcjson:
            animFile, assetList, pathList = self.cached_assets(str(abcjson[0]))
        else:
            animFile, assetList, pathList = self.reference_assets()
        return animFile, assetList, pathList

    def run(self):
        # Entry point: resolve assets, then apply each asset's attributes.
        animFile, assetList, referencePathList = self.find_assets()
        for x in range(0, len(assetList)):
            print('\nReference: {},{}'.format(assetList[x], referencePathList[x]))
            assetDict = self._get_asset_attr(assetList[x], referencePathList[x])
            if assetDict:
                print('Asset Dictionary:{}'.format(assetDict))
                status = self.set_asset_attr(animFile, assetList[x], assetDict)
                print(status)
|
from django.db import models
from django.contrib.auth.models import User
from investigator.models import Investigator
from case.models import Case
class StatusUpdate(models.Model):
    """A status change on a Case, with per-role read flags."""

    status = models.CharField(max_length=40)
    timestamp = models.DateTimeField(auto_now_add=True)
    # on_delete is mandatory from Django 2.0; CASCADE matches the
    # pre-2.0 implicit default, so behaviour is unchanged.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    case = models.ForeignKey(Case, on_delete=models.CASCADE)
    read_by_broker = models.BooleanField(default=True)
    read_by_master_broker = models.BooleanField(default=True)
    read_by_investigator = models.BooleanField(default=True)
    extra_info = models.TextField(blank=True, null=True)
    updated_by = models.CharField(max_length=2, default='')
    # meta timestamps
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class CaseAcceptanceUpdate(models.Model):
    """Records an investigator accepting (or declining) a Case."""

    # on_delete is mandatory from Django 2.0; CASCADE matches the
    # pre-2.0 implicit default, so behaviour is unchanged.
    investigator = models.ForeignKey(Investigator, on_delete=models.CASCADE)
    timestamp = models.DateTimeField(auto_now_add=True)
    case = models.ForeignKey(Case, on_delete=models.CASCADE)
    is_accepted = models.BooleanField(default=True)
    read_by_broker = models.BooleanField(default=True)
    read_by_master_broker = models.BooleanField(default=True)
    updated_by = models.CharField(max_length=2, default='')
    # meta timestamps
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
|
print('안녕하세요? 여러분\n저는 파이썬을 무척 좋아합니다.\n9*8은 ', 9*8, " 입니다.\n안녕히 계세요.")
|
# realize a function
def test(a,b,func):
result = func(a,b)
print(result)
func_new = input("please input a funciton:")
print type(func_new)
# func_new = eval(func_new)
test(2,3,func_new)
|
import torch
from torch import nn
class GRUGate(nn.Module):
    """GRU-style gating layer combining a stream with a residual branch.

    Six bias-free linear maps parameterize reset/update/candidate gates;
    a learned gate bias ``bg`` is subtracted inside the update gate.
    """

    def __init__(self, d_model):
        super(GRUGate, self).__init__()
        # one (W*, U*) projection pair per gate: r (reset), z (update),
        # g (candidate); creation order fixes parameter registration order
        self.Wr = nn.Linear(d_model, d_model, bias=False)
        self.Ur = nn.Linear(d_model, d_model, bias=False)
        self.Wz = nn.Linear(d_model, d_model, bias=False)
        self.Uz = nn.Linear(d_model, d_model, bias=False)
        self.Wg = nn.Linear(d_model, d_model, bias=False)
        self.Ug = nn.Linear(d_model, d_model, bias=False)
        # learned gate bias, initialized around 1
        self.bg = nn.Parameter(torch.randn((d_model,), requires_grad=True) + 1,
                               requires_grad=True)

    def forward(self, x, y):
        """Gate the residual output ``y`` into ``x``.

        :param x:
        :param y: output from the residual branch
        :return: (1 - z) * x + z * h
        """
        reset = torch.sigmoid(self.Wr(y) + self.Ur(x))
        update = torch.sigmoid(self.Wz(y) + self.Uz(x) - self.bg)
        candidate = torch.tanh(self.Wg(y) + self.Ug(reset * x))
        return (1 - update) * x + update * candidate
|
import torch
# Training data for a 1-D linear fit: y is approximately 2x - 1.
x_data = torch.tensor([1.0, 2.0, 3.0])
y_data = torch.tensor([1.0, 3.0, 5.0])

# Parameters start at zero; plain gradient descent with manual gradients.
a, b = torch.tensor([0.0]), torch.tensor([0.0])
lr = 0.1
for epoch in range(100):
    # forward pass and pointwise error
    predict = a * x_data + b  # shape : (3)
    error = predict - y_data
    # mean-squared-error loss
    loss = torch.sum(error ** 2) / len(x_data)  # shape : (3) -> ()
    # analytic gradients of the MSE w.r.t. a and b
    gradient_a = 2 * torch.sum(x_data * error) / len(x_data)
    gradient_b = 2 * torch.sum(error) / len(x_data)
    # gradient-descent step
    a -= lr * gradient_a  # shape : (1)
    b -= lr * gradient_b  # shape : (1)
    # report every 10 epochs
    if (epoch + 1) % 10 == 0:
        print(f'Epoch : {epoch+1}')
        print(f'Predict : {predict}')
        print(f'Loss = {loss}')
        print(f'a = {a}, b = {b}\n')
'''
Epoch : 100
Predict : tensor([1.0793, 3.0170, 4.9547])
Loss = 0.00287666660733521
a = tensor([1.9392]), b = tensor([-0.8618])
'''
from django.test import TestCase
from Apps.Estudiante.models import Estudiante
from Apps.Estudiante.views import cargarTareasdeEstudiante
from Apps.Tarea.models import Tarea
from Apps.Curso.models import Curso
from Apps.Grupo.models import Grupo
from Apps.Grado.models import Grado
from Apps.Tema.models import Tema
from Apps.MateriaGrado.models import MateriaGrado
from Apps.Profesor.models import Profesor
from django.contrib.auth.models import User
# Create your tests here.
class Estudiante_ViewsTest(TestCase):
    """Intended to verify cargarTareasdeEstudiante returns a student's tasks.

    NOTE(review): this test cannot run as written — several runtime errors
    are flagged inline below, and the model instances are never saved to
    the database.
    """
    def test_cargarTareasEstudiante(self):
        grado=Grado(idGrado="1")
        matgrado1=MateriaGrado(idMateriaGrado="1", nombreMateria="Matematicas", foraneaGrado_id="1")
        matgrado2=MateriaGrado(idMateriaGrado="2", nombreMateria="Biologia", foraneaGrado_id="1")
        grupo=Grupo(idGrupo="1",foraneaGrado="1")
        grupo2=Grupo(idGrupo="2",foraneaGrado="1")
        usuarios=[]
        profesores=[]
        estudiantes=[]
        for i in range(1,10):
            # NOTE(review): 'L'+i and "L"+i concatenate str with int ->
            # TypeError; should be 'L'+str(i).
            usuarios.append(User(id=i,password=i,last_login='2018-12-05 00:00:00.000000',is_superuser=0,username='L'+i,
                first_name="L"+i,last_name="L"+i,email="L"+i,is_staff=0,is_active=0,date_joined='2018-12-05 00:00:00.000000'))
            if i%2==0:
                # NOTE(review): "Licenciado"+i is also str+int (TypeError).
                profesores.append(Profesor(cedula=1*i,profesion="Licenciado"+i,telefono=i,codigoprofesor=i,usuario_id=usuarios[i].id))
            else:
                # NOTE(review): keyword args (edad=, direccion=, ...) are
                # passed to list.append, not to Estudiante -> TypeError;
                # i+"" is int+str; usuarios[i] is off by one (IndexError at
                # i == 9 since usuarios has indices 0..8).
                estudiantes.append(Estudiante(usuario=usuarios[i].id,codigo=i+"",sexo="M"),edad=i,direccion=i+"",email=i,foraneaGrupo=grupo.idGrupo)
        curso1 = Curso(idCurso="1", foraneaProfesor=profesores[0].codigoprofesor,foraneaMateriaGrado=matgrado1.idMateriaGrado,foraneaGrupo=grupo.idGrupo)
        #curso2 = Curso(idCurso="2", foraneaProfesor=profesores[1].codigoprofesor,foraneaMateriaGrado=matgrado1.idMateriaGrado,foraneaGrupo=grupo.idGrupo)
        curso3 = Curso(idCurso="3", foraneaProfesor=profesores[1].codigoprofesor,foraneaMateriaGrado=matgrado2.idMateriaGrado,foraneaGrupo=grupo2.idGrupo)
        tema1=Tema(idTema="1",nombreTema="Desigualdades",descripcionTema="fekmmvke",foraneaMateriaGrado=matgrado1.idMateriaGrado)
        tema2 = Tema(idTema="2", nombreTema="Ecuaciones", descripcionTema="fekmmvke",
                     foraneaMateriaGrado=matgrado1.idMateriaGrado)
        tema3 = Tema(idTema="3", nombreTema="Celula", descripcionTema="fekmmvke",
                     foraneaMateriaGrado=matgrado2.idMateriaGrado)
        tareas=[]
        t1=Tarea(idTarea="1",tituloTarea="Taller desigualdades 1",descripcionTarea="Resuelva",fechaLimite='2018-12-05 00:00:00.000000',
                 logrosTarea="erbb",foraneaCurso=curso1.idCurso,foraneTema=tema1.idTema)
        t2 = Tarea(idTarea="2", tituloTarea="Taller celula", descripcionTarea="Resuelva",
                   fechaLimite='2018-12-05 00:00:00.000000',
                   logrosTarea="erbb", foraneaCurso=curso3.idCurso, foraneTema=tema3.idTema)
        t3=Tarea(idTarea="3", tituloTarea="Taller ecuaciones", descripcionTarea="Resuelva",
                 fechaLimite='2018-12-05 00:00:00.000000',
                 logrosTarea="erbb", foraneaCurso=curso1.idCurso, foraneTema=tema2.idTema)
        tareas.append(t1)
        tareas.append(t2)
        tareas.append(t3)
        estudiantes[0].tareasEstudiante.add(t1)
        estudiantes[0].tareasEstudiante.add(t2)
        estudiantes[0].tareasEstudiante.add(t3)
        #self.assertIs(future_question.was_published_recently(), True)
        self.assertEqual(cargarTareasdeEstudiante("0"),tareas)
import os
import argparse
import datetime
import re
import numpy as np
import pandas as pd
import geopandas as gpd
import rasterio
import rioxarray
import shapely
import pykrige.kriging_tools as kt
import struct
import dask.array as da
import multiprocessing
from osgeo import ogr, osr, gdal
from rasterio.crs import CRS
from rasterio.warp import reproject, Resampling
from dateutil.relativedelta import *
from data_processing.knmi_interpolation import Knmi_Interpolator
from osgeo.osr import SpatialReference, CoordinateTransformation
class Coordinate_reference():
    """Bidirectional coordinate transformation between two EPSG systems.

    Call define_translator() before using either transform method.
    """

    def __init__(self, from_cs, to_cs):
        # Fix: the spatial references used to be *class* attributes, so every
        # instance mutated the same two shared SpatialReference objects.
        self.epsg_from = SpatialReference()
        self.epsg_to = SpatialReference()
        self.to2from = None
        self.from2to = None
        self.epsg_from.ImportFromEPSG(from_cs)
        self.epsg_to.ImportFromEPSG(to_cs)

    def set_wgs84_from_epsg28992(self):
        # Seven-parameter Helmert datum shift from RD New (EPSG:28992) to WGS84.
        self.epsg_from.SetTOWGS84(565.237, 50.0087, 465.658, -0.406857, 0.350733, -1.87035, 4.0812)

    def define_translator(self):
        """Create the forward and inverse transformation objects."""
        self.from2to = CoordinateTransformation(self.epsg_from, self.epsg_to)
        self.to2from = CoordinateTransformation(self.epsg_to, self.epsg_from)

    def transformPoints_to_target(self, coordinates):
        """Transform an (x, y) pair from the origin CS to the target CS.

        Fix: the original invoked ``self.from2to(...)`` as if the
        CoordinateTransformation object were callable (TypeError at runtime);
        the conversion must go through ``TransformPoint``.
        """
        x, y, _z = self.from2to.TransformPoint(coordinates[0], coordinates[1])
        return (x, y)

    def transformPoints_to_origin(self, coordinates):
        """Transform a (longitude, latitude) pair back to the origin CS."""
        latitude = coordinates[1]
        longitude = coordinates[0]
        # NOTE(review): axis order (northing, easting) depends on the GDAL
        # version's axis-mapping strategy — confirm against the GDAL in use.
        northing, easting, z = self.to2from.TransformPoint(longitude, latitude)
        return (easting, northing)
class Feature_collector(Knmi_Interpolator):
    """Samples rasters in the working directory at fixed coordinates and
    writes the extracted values to a per-feature CSV file."""

    product = None
    transform = False
    coordinates = []
    vars_names = []
    # Rasters whose filenames follow the "<date>_<sat>_<band>.<ext>" scheme.
    aggregated_features = ['EVI', 'NDVI', 'NDWI']

    def __init__(self, parent_folder='./data'):
        # Fix: the original called Knmi_Interpolator.__init__ explicitly AND
        # again via super(), initialising the base class twice.
        super().__init__(parent_folder)

    def activate_transform(self):
        """Enable EPSG:28992 -> EPSG:4326 reprojection of the sample points."""
        self.transform = True

    def set_extraction_coordinates(self, coordinates):
        """Store the coordinates to sample, reprojected when transform is on."""
        if self.transform:
            translate_coordinates = Coordinate_reference(28992, 4326)
            translate_coordinates.set_wgs84_from_epsg28992()
            translate_coordinates.define_translator()
            self.coordinates = [translate_coordinates.transformPoints_to_origin(individual_coordinate) for individual_coordinate in coordinates]
        else:
            self.coordinates = coordinates

    def extract_features_from_folder(self):
        """Walk the current directory, sample every raster found, and dump one
        semicolon-separated CSV of [feature, date, x, y, value] rows."""
        days_timedelta = self.end_date - self.start_date
        days_int = days_timedelta.days
        if days_int < 0:
            raise Exception('Start date should be set before end date')
        os_generator = os.walk(".")
        feature_list = []
        for root, dirs, file_list in os_generator:
            for file_name in file_list:
                extracted_values = self.extract_features_from_raster(file_name)
                if self.target_folder in self.aggregated_features:
                    # "<date>_<sat>_<band>" with a 5-character suffix stripped.
                    filename_info = file_name[:-5].split('_')
                    feature_extracted = filename_info[1] + '-' + filename_info[2]
                    date_extracted = filename_info[0]
                else:
                    # "<feature>_<date>" with a 4-character suffix stripped.
                    filename_info = file_name[:-4].split('_')
                    feature_extracted = filename_info[0]
                    date_extracted = filename_info[1]
                feature_list.extend([[feature_extracted, date_extracted, value[0], value[1], value[2]] for value in extracted_values])
        filename_out = '../{0}_feature.csv'.format(self.target_folder)
        with open(filename_out, 'w') as output_file:
            for line in feature_list:
                print(';'.join([x if isinstance(x, str) else '{:.3f}'.format(x) for x in line]), file=output_file)

    def extract_features_from_raster(self, file_name):
        """Sample band 1 of `file_name` at every stored (northing, easting)
        coordinate; returns a list of [easting, northing, value] rows."""
        raster_load = gdal.Open(file_name)
        gt = raster_load.GetGeoTransform()
        raster_values = raster_load.GetRasterBand(1)
        x_origin = gt[0]
        y_origin = gt[3]
        pixelWidth = gt[1]
        pixelHeight = gt[5]  # negative for north-up rasters
        extracted_values = []
        for northing, easting in self.coordinates:
            # World coordinates -> pixel/line offsets.
            px2 = int((easting - x_origin) / pixelWidth)
            py2 = int((northing - y_origin) / pixelHeight)
            struct_values = raster_values.ReadRaster(px2, py2, 1, 1, buf_type=gdal.GDT_Float32)
            intval = struct.unpack('f', struct_values)
            extracted_values.append([easting, northing, intval[0]])
        return extracted_values
class Feature_merge(Knmi_Interpolator):
    """Placeholder for merging per-feature CSV files into one table."""

    def __init__(self, parent_folder='./data'):
        # Fix: the base class was initialised twice (explicit call + super()).
        super().__init__(parent_folder)

    def feature_merge(self):
        # TODO: implement the merge step.
        pass
|
from .base_setup import BaseSetup
from .categorical_image_flow_setup import CategoricalImageFlowSetup
|
import day4
import unittest
class Day2Tests(unittest.TestCase):
    """Unit tests for the day4 room-checksum puzzle."""

    def test_True(self):
        # Sanity check that the harness itself runs.
        self.assertTrue(True)

    def test_Day4Example(self):
        self.assertTrue(day4.Room('aaaaa-bbb-z-y-x-123[abxyz]').validate())

    def test_Day4Example_2(self):
        self.assertTrue(day4.Room('a-b-c-d-e-f-g-h-987[abcde]').validate())

    def test_Day4Example_3(self):
        self.assertTrue(day4.Room('not-a-real-room-404[oarel]').validate())

    def test_Day4Example_4(self):
        # Checksum letters do not match the name's frequency order.
        self.assertFalse(day4.Room('totally-real-room-200[decoy]').validate())

    def test_Day4Data(self):
        with open('input.txt') as puzzle_input:
            lines = puzzle_input.readlines()
        self.assertEqual(day4.main(lines), 409147)

    def test_Day4_2Example(self):
        decrypted = day4.Room('qzmt-zixmtkozy-ivhz-343[abcde]').translate()
        self.assertEqual(decrypted, 'very encrypted name')

    def test_Day4_2Data(self):
        with open('input.txt') as puzzle_input:
            lines = puzzle_input.readlines()
        self.assertEqual(day4.find_north_pole(lines), 991)
# Run the suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
from colosseum.games import GameTracker
class GTNTracker(GameTracker):
    """Tracker for a guess-the-number game: maintains the [lower, upper)
    interval that still contains the secret number."""

    def __init__(self, n_players:int, upper:int, lower:int=0, playerid:int=-1):
        """
        params:
            n_players:int - Number of players
            upper:int - Exclusive upper bound of the secret number
            lower:int=0 - Inclusive lower bound of the secret number
            playerid:int=-1 - The id of the player that created this tracker.
                If the tracker was created by the host, the id is -1 (default).
        """
        # Fix: this docstring used to be placed *after* the super().__init__()
        # call, where it was a discarded string literal rather than a docstring.
        super().__init__(n_players)
        # NOTE(review): `playerid` is not stored or used — confirm intended.
        self._upper = upper
        self._lower = lower
        self._is_done = False

    def make_move(self, guess, *args, **kwargs):
        """Wrap a guess in the move payload expected by the game host."""
        return {'guess': guess}

    def update(self, player:int, guess:int, higher:bool, correct:bool):
        """
        params:
            guess:int - The guess the bot made
            higher:bool - If True, the true value is higher than the guess
            correct:bool - If True, the guess was correct
        """
        if correct:
            # Collapse the interval onto the answer and credit the guesser.
            self._upper = guess
            self._lower = guess
            self.points[player] = 1
            self._is_done = True
        elif higher:
            self._lower = guess + 1
        else:
            self._upper = guess

    @property
    def upper(self):
        """
        Exclusive upper bound of the secret number
        """
        return self._upper

    @property
    def lower(self):
        """
        Inclusive lower bound of the secret number
        """
        return self._lower

    @property
    def is_done(self):
        # True once a correct guess has been recorded.
        return self._is_done
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 10 12:54:44 2018
@author: Yadnyesh
"""
import pandas as pd
import numpy as np
import collections
import matplotlib.pyplot as plt
# Load the raw datasets.
# NOTE(review): absolute Windows paths — this only runs on the author's machine.
df_1 = pd.read_csv('F:/ZS/data/train.csv')
df_2 = pd.read_csv('F:/ZS/data/artists.csv')
df_3 = pd.read_csv('F:/ZS/data/test.csv')
# Count number of unique Artist_IDs in each of the three datasets
count_1 = df_1.artist_id.nunique()
count_2 = df_2.artist_id.nunique()
count_3 = df_3.artist_id.nunique()
#Check if Artist_IDs are the same in train and test sets
# Merge dataframes on common Artist_IDs in train and test frames
#df_check = pd.merge(df_3, df_1, on="artist_id", how="outer")
#count_4 = df_check.artist_id.nunique() # Check if the number of unique ID's is same
series_1 = df_1['artist_id']
series_2 = df_3['artist_id']
# Counter equality compares the full multiset of ids (values and frequencies).
print(collections.Counter(series_1) == collections.Counter(series_2))
# Merge Train and Test Datasets with artist information from df_2
df_train = pd.merge(df_1, df_2, on="artist_id", how="left")
df_test = pd.merge(df_3, df_2, on="artist_id", how="left")
# Check for NaN's in train and test features (left joins may introduce them)
print(df_train['artist_familiarity'].isnull().values.any())
print(df_train['artist_hotttnesss'].isnull().values.any())
print(df_test['artist_familiarity'].isnull().values.any())
print(df_test['artist_hotttnesss'].isnull().values.any())
# logging.py
# author: IST411 Group 2
# 9/8/2016
from cookielib import logger
import json
# import logging
#
# logging.basicConfig(filename='myapp.log')
# # create logger
# logger = logging.getLogger('example')
# logger.setLevel(logging.DEBUG)
#
# # create console handler and set level to debug
# ch = logging.StreamHandler()
# ch.setLevel(logging.DEBUG)
#
# # create formatter
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
#
# # add formatter to ch
# ch.setFormatter(formatter)
#
# # add ch to logger
# logger.addHandler(ch)
#
# # 'application' code
# logger.debug('debug message')
# logger.info('info message')
# logger.warn('warn message')
# logger.error('error message')
# logger.critical('critical message')
class Log:
    """In-memory representation of one log message exchanged between nodes."""

    def __init__(self, origin_id, log_code, log_subject, log_message, timestamp, time_to_live, checksum, log_type):
        self.origin_id = origin_id
        self.log_code = log_code
        self.log_subject = log_subject
        self.log_message = log_message
        self.log_timestamp = timestamp
        self.log_ttl = time_to_live
        self.log_checksum = checksum
        self.log_type = log_type

    def JSON_To_Python(self, recieved_messaage):
        """Parse a JSON-encoded message into a Python object.

        Fix: the body referenced the undefined name 'recieved_message'
        (NameError); it now uses the actual parameter.
        """
        return json.loads(recieved_messaage)

    def displayLog(self):
        """Print the main fields of this log entry to stdout."""
        print ("ID: ", self.origin_id, "Code: ", self.log_code, "Subject: ", self.log_subject, "Message: ", self.log_message, "Time: ", self.log_timestamp, "Checksum: ", self.log_checksum)

    #This method stores the log in a database for future use
    def WriteToDB(self):
        # Fix: unresolved git merge-conflict markers removed (kept the
        # 'pass' side). TODO: implement the database write.
        pass
|
# Initialize a Tic-Tac-Toe board: flat list of 9 cells, "-" means empty.
board = ["-","-","-",
"-","-","-",
"-","-","-"]
# Game-over flag. NOTE(review): this name is later rebound to the check_win
# *function* by `def check_win()`; play_game's loop relies on that shadowing.
check_win = False
# 0-based positions already played this game
save_inputs = []
# Turn counter: even -> player 1 ("O"), odd -> player 2 ("X")
count = 0
# Make the board with values
def display_board():
    """Print the 3x3 board, one row per line, cells separated by '|'."""
    for row_start in (0, 3, 6):
        print("|".join(board[row_start:row_start + 3]))
# A fucntion to run the game
# Main game loop.
def play_game():
    """Alternate turns until the global name `check_win` becomes True.

    NOTE(review): `check_win` initially refers to the check_win *function*
    (which shadows the module-level False); the function rebinds the global
    to True when the game ends, which terminates this loop.
    """
    while check_win != True:
        # Run the handle turn function
        handle_turn()
        # Run the check_win function
        check_win()
# Decide who's turn the current round
def handle_turn():
global count
decide = True
if count % 2 == 0:
# Display tictactoe board
#display_board()
while decide==True:
try:
print("\n[Player 1's turn]")
position = input("Please choose a position from 1 to 9: ")
position = int(position)
except ValueError:
decide = True
print("String value is not allowed. Please choose from 1 to 9.\n")
if isinstance(position, int):
position = position - 1
if not (position >= 0 and position < 9):
print("Invalid input. Please choose from 1 to 9.\n")
True
else:
print("\n[Player 1's choice]")
if position not in save_inputs:
decide = False
elif position in save_inputs:
decide = True
print("The spot is already taken. Please try other spots.")
save_inputs.append(position)
board[position] = "O"
display_board()
elif count % 2 != 0:
# Display tictactoe board
#display_board()
while decide==True:
try:
print("\n[Player 2's turn]")
position = input("Please choose a position from 1 to 9: ")
position = int(position)
except ValueError:
decide = True
print("String value is not allowed. Please choose from 1 to 9.\n")
if isinstance(position, int):
position = position - 1
if not (position >= 0 and position < 9):
print("Invalid input..... Please choose from 1 to 9.\n")
True
else:
print("\n[Player 2's choice]")
if position not in save_inputs:
decide = False
elif position in save_inputs:
decide = True
print("The spot is already taken. Please try other spots.")
save_inputs.append(position)
board[position] = "X"
display_board()
count+=1
# Check if the player won, lost, or tie
def check_win():
global check_win
if board[0] == "X" and board[1] == "X" and board[2] == "X":
print("Player 2 won the game!")
check_win = True
display_board()
elif board[3] == "X" and board[4] == "X" and board[5] == "X":
print("Player 2 won the game!")
check_win = True
display_board()
elif board[6] == "X" and board[7] == "X" and board[8] == "X":
print("Player 2 won the game!")
check_win = True
display_board()
elif board[0] == "X" and board[3] == "X" and board[6] == "X":
print("Player 2 won the game!")
check_win = True
display_board()
elif board[1] == "X" and board[4] == "X" and board[7] == "X":
print("Player 2 won the game!")
check_win = True
display_board()
elif board[2] == "X" and board[5] == "X" and board[8] == "X":
print("Player 2 won the game!")
check_win = True
display_board()
elif board[0] == "X" and board[4] == "X" and board[8] == "X":
print("Player 2 won the game!")
check_win = True
display_board()
elif board[2] == "X" and board[4] == "X" and board[6] == "X":
print("Player 2 won the game!")
check_win = True
display_board()
elif board[0] == "O" and board[1] == "O" and board[2] == "O":
print("Player 1 won the game!")
check_win = True
display_board()
elif board[3] == "O" and board[4] == "O" and board[5] == "O":
print("Player 1 won the game!")
check_win = True
display_board()
elif board[6] == "O" and board[7] == "O" and board[8] == "O":
print("Player 1 won the game!")
check_win = True
display_board()
elif board[0] == "O" and board[3] == "O" and board[6] == "O":
print("Player 1 won the game!")
check_win = True
display_board()
elif board[1] == "O" and board[4] == "O" and board[7] == "O":
print("Player 1 won the game!")
check_win = True
display_board()
elif board[2] == "O" and board[5] == "O" and board[8] == "O":
print("Player 1 won the game!")
check_win = True
display_board()
elif board[0] == "O" and board[4] == "O" and board[8] == "O":
print("Player 1 won the game!")
check_win = True
display_board()
elif board[2] == "O" and board[4] == "O" and board[6] == "O":
print("Player 1 won the game!")
check_win = True
display_board()
else:
if all(ele != "-" for ele in board):
print("The game ended draw!")
check_win = True
display_board()
# Entry point: start the game loop.
play_game()
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Dependencies: requests, absl-py, lxml
import json
import requests
import csv
import re
from lxml import etree
from absl import app, flags, logging
from datetime import datetime, date
from time import time
FLAGS = flags.FLAGS
# Command-line flags (absl). The Chinese help strings are user-facing
# and kept verbatim.
flags.DEFINE_bool('debug', False, 'Debug mode')
flags.DEFINE_string('save_json', None,
                    'Whether to save the json data to file.')
flags.DEFINE_string('cc', None, '持仓信息csv文件,格式和产生的标的文件一样.')
flags.DEFINE_string('blacklist', None, '黑名单文件,格式和产生的标的文件一样.')
flags.DEFINE_integer('top', 20, 'Number of candidates')
# Load current holdings from the --cc CSV file.
def get_cc():
    """Return {bond code: row} for rows whose action is 建仓 or 持仓."""
    holdings = {}
    with open(FLAGS.cc, 'r', encoding='utf-8') as cc_file:
        for row in csv.DictReader(cc_file, delimiter=','):
            if row['操作'] in ('建仓', '持仓'):
                holdings[row['代 码']] = row
    return holdings
# Fetch the latest convertible-bond data.
def get_dat(t):
    """Return the jisilu bond payload as a dict.

    In --debug mode the payload is read from the --save_json file instead of
    the network; otherwise it is fetched live and optionally saved.
    Fix: file handles were opened without `with` and never closed.
    """
    if FLAGS.debug:
        if not FLAGS.save_json:
            logging.fatal(
                'Need to specify name of the json file with --save_json')
        # Read cached test data.
        with open(FLAGS.save_json, 'r', encoding='utf-8') as jf:
            return json.loads(jf.read())
    else:
        # Exclude bonds that are not listed yet.
        payload = {'listed': 'Y'}
        newUrl = 'https://www.jisilu.cn/data/cbnew/cb_list/?___jsl=LST___t=%s' % int(
            t * 1000)
        logging.info(newUrl)
        # Plain request; a User-Agent header could be added to dodge
        # anti-scraping measures if needed.
        response = requests.post(newUrl, data=payload)
        data = response.content.decode('utf-8')
        if FLAGS.save_json:
            with open(FLAGS.save_json, 'w', encoding='utf-8') as jf:
                jf.write(data)
        return json.loads(data)
def filter_reason(force_redeem, pb, btype, qflag, year_left):
    """Return the human-readable reason why a bond was filtered out."""
    if force_redeem:
        reason = '公布强赎'
    elif float(pb) < 1.0:
        reason = '破净'
    elif btype != 'C':
        reason = '非可转债'
    elif qflag == 'Q':
        reason = '机构可买'
    elif float(year_left) < 1:
        reason = '剩余年限短:%s' % year_left
    else:
        # Callers only invoke this when some filter matched.
        reason = 'Bug'
    return reason
# Build the convertible-bond candidate list.
def process(dat):
    """Filter, rank and annotate convertible bonds from the jisilu payload.

    Keeps listed pure convertible bonds (not called for redemption, PB >= 1,
    retail-buyable, >= 1 year left), ranks them by the "double low" metric,
    and tags each candidate with an action (建仓/持仓/清仓) relative to the
    current holdings from get_cc(). Returns rows sorted by that action.
    """
    blacklist = []
    if FLAGS.blacklist:
        with open(FLAGS.blacklist, 'r', encoding='utf-8') as bl:
            bl_reader = csv.DictReader(bl, delimiter=',')
            for row in bl_reader:
                blacklist.append(row['代 码'])
    # All surviving bonds, keyed by bond id.
    lst_data = {}
    for one in dat['rows']:
        # One output row.
        lst_dat = []
        # Bond id.
        id = one['id']
        if id in blacklist:
            continue
        dat_cell = one['cell']
        # Whether forced redemption has been announced.
        force_redeem = dat_cell['force_redeem']
        # Price-to-book ratio.
        pb = dat_cell['pb']
        # 'Q' means only institutions may buy.
        qflag = dat_cell['qflag']
        # Bond type: 'C' = convertible, 'E' = exchangeable.
        btype = dat_cell['btype']
        # Years left to maturity.
        year_left = dat_cell['year_left']
        # Bond name.
        name = dat_cell['bond_nm']
        # Drop: redemption announced, below book value, exchangeable,
        # institution-only, or short remaining maturity.
        if force_redeem or float(pb) < 1.0 or btype != 'C' or qflag == 'Q' or float(year_left) < 1:
            if FLAGS.debug:
                logging.info('过滤 %s %s: %s' % (id, name, filter_reason(force_redeem, pb, btype, qflag, year_left)))
            continue
        # Current price.
        price = dat_cell['price']
        # Conversion premium rate.
        premium_rt = dat_cell['premium_rt']
        # Credit rating.
        rating_cd = dat_cell['rating_cd']
        # Put trigger price.
        # put_convert_price = dat_cell['put_convert_price']
        # Forced-redemption trigger price.
        # force_redeem_price = dat_cell['force_redeem_price']
        # "Double low" = price + premium rate.
        double_low = dat_cell['dblow']
        # Fetch the redemption price (disabled scraping of the detail page).
        # xiangqing_url = 'https://www.jisilu.cn/data/convert_bond_detail/' + id
        # xiangqing_response = requests.get(xiangqing_url)
        # html = xiangqing_response.content.decode('utf-8')
        # html = etree.HTML(html)
        # lixi = html.xpath('.//td[@id='cpn_desc']/text()')
        # pattern = re.compile(r'\d+\.\d+?') # find numbers
        # lixi = pattern.findall(lixi[0])
        # shuhuijia = html.xpath('.//td[@id='redeem_price']/text()')
        # li_price = 0
        # for li in lixi:
        #     li_price = li_price + float(li)
        # try:
        #     jiancang = float(shuhuijia[0]) + (li_price - float(lixi[-1])) * 0.8
        # except:
        #     jiancang = 0
        lst_dat.append(id)
        lst_dat.append(name)
        lst_dat.append(price)
        lst_dat.append(premium_rt)
        lst_dat.append(pb)
        lst_dat.append(rating_cd)
        lst_dat.append(year_left)
        lst_dat.append(double_low)
        lst_data[id] = lst_dat
    # Rank ascending by double low (index 7) and take the top N.
    candidates = {}
    cc_dict = get_cc()
    for c in sorted(lst_data.values(), key=lambda dat: float(dat[7]))[0:FLAGS.top]:
        if FLAGS.debug:
            logging.info('%s: %s' % (c[7], ','.join(c)))
        if c[0] not in cc_dict:
            c.append('建仓')
        else:
            c.append('持仓')
        candidates[c[0]] = c
    # Holdings that fell out of the top list become sell (清仓) rows.
    for id, value in cc_dict.items():
        if id not in candidates:
            if id in lst_data:
                lst_data[id].append('清仓')
                candidates[id] = lst_data[id]
            else:
                value['操作'] = '清仓(过滤)'
                candidates[id] = list(value.values())
    # Sort by the action column (index 8) for output.
    return sorted(candidates.values(), key=lambda candidate: candidate[8])
# Export the candidate bonds to a dated CSV file.
def write_csv(data, t):
    """Write candidate rows to cbYYYYMMDD.csv in the working directory.

    `t` is unused but kept for interface compatibility with main().
    Fix: the output file is now context-managed, so it is closed even if a
    row fails to serialize.
    """
    filename = 'cb%s.csv' % date.today().strftime('%Y%m%d')
    with open(filename, 'w', encoding='utf-8') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(['代 码', '转债名称', '现 价', '溢价率', '市净率', '评级',
                             '剩余年限', '双低', '操作'])
        for dat in data:
            csv_writer.writerow(dat)
def main(argv):
    """Entry point: fetch the latest data, build candidates, export CSV."""
    # t = datetime.strptime(date.today().strftime('%d/%m/%Y'), '%d/%m/%Y').timestamp() + 1
    now = time()
    candidates = process(get_dat(now))
    write_csv(candidates, now)
if __name__ == '__main__':
    # Fix: mark_flag_as_required must run *before* app.run(); app.run() does
    # not return normally, so the original call after it was dead code and
    # --cc was effectively optional.
    flags.mark_flag_as_required('cc')
    app.run(main)
|
numero = int(input("Digite um valor: "))
# A prime has exactly two divisors: 1 and itself.
divisores = 0
for candidato in range(1, numero + 1):
    if numero % candidato == 0:
        divisores += 1
if divisores == 2:
    print("Ele é primo")
else:
    print("Ele não é primo.")
|
import sys, csv
f = sys.argv[1]
# Fix: the output handle was opened without `with` (never closed on error)
# and read.close() after the `with` block was redundant; both files are now
# context-managed.
with open(f, 'r') as read, open(f + 'replaced.csv', 'w') as write:
    for line in read:
        # Normalize: semicolons -> commas, strip tabs and NUL bytes.
        write.write(line.replace(';', ',').replace('\t', '').replace('\0', ''))
#-----------
# Usage:
# python run_montyhall.py (change_door= y|n) (tries= 1~...)
#-----------
import sys
import os
import random
import subprocess
# which door hides the prize
# NOTE(review): the prize door is drawn once and reused for every trial —
# confirm that re-drawing per trial is not intended.
target = random.randint(0,2)
# whether to change door everytime (argv = 'y') or not
changeDoor = sys.argv[1]
# number of tries
numTries = int(sys.argv[2])
count = {'Correct':0, 'Wrong':0}
for i in range(0,numTries):
    print "%s" % i
    # montyhall.py prints either "Correct" or "Wrong"; tally its output.
    output = os.popen("python montyhall.py %s %s" % (target, changeDoor)).read()
    count[output.strip()] += 1
print count
|
from database.db_objects import User, Teacher
import gitlab
def gitlab_server_connection(username):
    """Return an authenticated python-gitlab client for `username`, or None.

    Looks up the user's teacher record to obtain the stored GitLab API key.
    Fix: the original dereferenced attributes on a missing user/teacher
    record (AttributeError on None); both lookups are now guarded.
    """
    current_user = User.query.filter(User.username == username).first()
    if current_user is None:
        print("Utilisateur inconnu :", username)
        return None
    current_teacher = Teacher.query.filter(Teacher.user_id == current_user.id).first()
    if current_teacher is None:
        print("Aucun enseignant associé à l'utilisateur :", username)
        return None
    gitlab_key = current_teacher.gitlab_key
    try:
        gl = gitlab.Gitlab('https://gitlab.telecomnancy.univ-lorraine.fr', private_token=gitlab_key)
        gl.auth()
    except gitlab.exceptions.GitlabAuthenticationError as authentication_error:
        print("Erreur d'authentification sur gitlab :", authentication_error)
        return None
    return gl
|
from flask import Flask, jsonify, request
from flask_cors import CORS
import random
import string
def get_random_string(length):
    """Return a random string of `length` lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(length))
# Flask application with permissive CORS so the dev frontend can call it.
app = Flask(__name__)
CORS(app)
def makeClass(id):
    """Build a fake class record with the given id."""
    record = dict(
        id=id,
        name='Class ' + get_random_string(4),
        days=[1, 2],                 # weekdays the class meets
        active=True,
        year=2020,
        duration=2,                  # hours
        timeslot='8.00 - 10.00pm',
    )
    return record
def makeTutor():
    """Build a fake tutor record with random id/name/email/school."""
    record = dict(
        id='id' + get_random_string(10),
        name='Name ' + get_random_string(4),
        email=get_random_string(6) + '@gmail.com',
        school=get_random_string(4) + ' School',
        dateOfBirth='2000-02-01',
        dateOfRegistration='2020-01-15',
        gender='m',
        status=1,
        admin=0,
    )
    return record
# In-memory fixture data: 30 tutors sorted by name, 3 classes, the first five
# tutors enrolled as class tutors, the first three of those marked present.
tutors = [makeTutor() for _ in range(30)]
tutors.sort(key=lambda a: a['name'])
classes = [makeClass(i) for i in range(3)]
classTutors = [{
    'id': t['id'],
    'name': t['name'],
    'admin': 1,
    'joinDate': '2020-10-10'
}
    for t in tutors[:5]
]
present = [t['id'] for t in classTutors[:3]]
# Single fake session shared by every session endpoint.
session = {
    'id': 0,
    'date': '2020-05-10',
    'remarks': 'Class',
    'duration': 3
}
@ app.route('/')
def hello_world():
    """Smoke-test endpoint."""
    payload = {
        "a": "Hello",
        "b": "Bye"
    }
    return jsonify(payload)
def getTutorById(id):
    """Return the tutor dict with the given id, or None when unknown."""
    return next((tutor for tutor in tutors if tutor['id'] == id), None)
def inClass(id):
    """True when the tutor id is already enrolled in the class."""
    return any(tutor['id'] == id for tutor in classTutors)
@app.route('/suggestions')
def getSuggestions():
    """Up to 10 tutors matching ?filter= who are not yet in the class."""
    nameFilter = request.args.get('filter')
    matches = [
        t for t in tutors
        if (nameFilter is None or nameFilter in t['name']) and not inClass(t['id'])
    ]
    return jsonify(matches[:10])
@app.route('/tutors')
def getTutors():
    """Paginated tutor listing: ?page=N (0-based), 10 entries per page."""
    # Fix: request.args.get('page') is None when the param is absent; the
    # original crashed in int(None), and its `page is None` check only ran
    # *after* the conversion (dead code).
    page = int(request.args.get('page') or 0)
    perPage = 10
    entries = len(tutors)
    # Ceiling division for the number of pages.
    pages = (entries // perPage) + (0 if entries % perPage == 0 else 1)
    offset = int(perPage * page)
    return jsonify({
        'page': page,
        'perPage': perPage,
        'lastPage': pages,
        'total': entries,
        'data': tutors[offset:offset + perPage]
    })
@app.route('/classes')
def getClasses():
    """All classes in a single-page envelope (pagination not implemented)."""
    entries = len(classes)
    envelope = {
        'page': 0,
        'perPage': 10,
        'lastPage': 1,
        'total': entries,
        'data': classes
    }
    return jsonify(envelope)
@app.route('/class/<cid>')
def getClass(cid):
    """Look a class up by its (stringified) index."""
    index = int(cid)
    return jsonify(classes[index])
@app.route('/class/<cid>/addtutor/', methods=["POST"])
def addTutorToClass(cid):
    """Enroll an existing tutor in the class.

    Expects JSON {'tutorId': ..., 'joinDate': ...}; responds 500 when either
    field is missing or the tutor is unknown.
    Fix: dict.get avoids an unhandled KeyError (stack-trace 500) when the
    request body lacks a field; debug print removed.
    """
    data = request.json
    joinDate = data.get('joinDate')
    t = getTutorById(data.get('tutorId'))
    if joinDate is None or t is None:
        return 'Failed', 500
    classTutors.append({
        'id': t['id'],
        'name': t['name'],
        'admin': t['admin'],
        'joinDate': joinDate
    })
    return 'Ok', 200
@app.route('/class/<cid>/sessions')
def getSessions(cid):
    # Stub: only one fake session exists; cid is ignored.
    return jsonify([session])
@app.route('/class/<cid>/session/<sid>')
def getSessionDetails(cid, sid):
    # Stub: returns the single shared session regardless of ids.
    return jsonify(session)
@app.route('/class/<cid>/session/<sid>/tutors')
def getSessionTutors(cid, sid):
    # Stub: session roster == class roster.
    return jsonify(classTutors)
@app.route('/class/<cid>/session/<sid>/attendance')
def getSessionAttendance(cid, sid):
    # Ids of tutors marked present for the session.
    return jsonify(present)
@ app.route('/class/<cid>/tutors')
def getClassTutors(cid):
    # All tutors enrolled in the class; cid is ignored (single-class stub).
    return jsonify(classTutors)
@ app.route('/tutor/<id>')
def getTutor(id):
    """Fetch a single tutor; 400 when the id is unknown.

    Consistency: reuses getTutorById instead of duplicating its linear scan.
    """
    tutor = getTutorById(id)
    if tutor is not None:
        return jsonify(tutor)
    return "Record not found", 400
@ app.route('/class/<cid>/session/<sid>/present', methods=["POST"])
def markPresent(cid, sid):
    """Mark a tutor present; idempotent (no duplicate entries)."""
    tid = request.data.decode('utf-8')
    # Fix: repeated POSTs used to append duplicate ids.
    if tid not in present:
        present.append(tid)
    return "Ok", 200
@ app.route('/class/<cid>/session/<sid>/absent', methods=["POST"])
def markAbsent(cid, sid):
    """Mark a tutor absent; tolerant of ids not currently present."""
    tid = request.data.decode('utf-8')
    # Fix: list.remove raised ValueError (-> 500) when the id was absent.
    if tid in present:
        present.remove(tid)
    return "Ok", 200
# Dev entry point; use a real WSGI server in production.
if __name__ == '__main__':
    app.run(host='localhost', port=5000)
|
import cv2 # библиотека opencv
import numpy # работа с массивамиpip3 install paho-mqtt
import paho.mqtt.client as mqtt
import math
# Output file named after user input (marker coordinates are written on exit).
handle2 = open(str(input())+'.txt','w')
cap = cv2.VideoCapture(2)  # open the video stream (camera index 2)
# HSV thresholds for the blue marker, one value per line in blue.txt.
handle = open("blue.txt", "r")
h_down_g = int(handle.readline())
s_down_g = int(handle.readline())
v_down_g = int(handle.readline())
h_up_g = int(handle.readline())
s_up_g = int(handle.readline())
v_up_g = int(handle.readline())
#print(h_up, h_down, s_up, s_down, v_up, v_down)
# h_up_g = 139#115
# s_up_g = 84#255
# v_up_g = 185#255
# h_down_g = 0#81
# s_down_g = 0#155
# v_down_g = 0#70
# Top-left / bottom-right markers used for alignment.
# NOTE(review): center1/center2 start as the int 0 — if a marker is never
# detected before Esc is pressed, the indexing below raises TypeError.
center=0
center1=0
center2=0
while True:
    _, image = cap.read()
    img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV_FULL)
    img_hsv = cv2.GaussianBlur(img_hsv, (5, 5), 2)
    mask = cv2.inRange(img_hsv, numpy.array([h_down_g, s_down_g, v_down_g]), numpy.array([h_up_g, s_up_g, v_up_g]))
    # OpenCV 3.x findContours API (3 return values) — confirm OpenCV version.
    _, contours0, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Iterate over every detected contour.
    for cnt in contours0:
        rect = cv2.minAreaRect(cnt)  # fit a minimum-area rectangle
        box = cv2.boxPoints(rect)  # the rectangle's four corner points
        box = numpy.int0(box)  # round coordinates to ints
        area = int(rect[1][0] * rect[1][1])  # rectangle area
        if area > 70:
            center = (int(rect[0][0]), int(rect[0][1]))
            # x > 300 -> right-hand marker, otherwise left-hand marker.
            if center[0]>300:
                center1=center
            else:
                center2=center
            print("center 1 =", center1)
            print("center 2 =", center2)
            x1 = box[0][0]
            y1 = box[0][1]
            x2 = box[1][0]
            y2 = box[1][1]
            x3 = box[2][0]
            y3 = box[2][1]
            x4 = box[3][0]
            y4 = box[3][1]
            cv2.drawContours(image, [box], 0, (255, 0, 0), 2)  # draw the rectangle
            cv2.circle(image, center, 1, (0, 255, 0), 2)
    cv2.imshow("mask", mask)
    cv2.imshow("original", image)
    cv2.imshow("HSV", img_hsv)
    # Esc exits the capture loop.
    if cv2.waitKey(1) == 27:
        # client.disconnect()
        break
# Persist the last seen marker geometry: origin plus width/height between markers.
x=center2[0]
y=center2[1]
h=center1[1]-center2[1]
w=center1[0]-center2[0]
handle2.write(str(x) + '\n')
handle2.write(str(y) + '\n')
handle2.write(str(h) + '\n')
handle2.write(str(w) + '\n')
handle2.close()
from django.contrib import admin
from .models import ClickCount, WeekCount
class ClickCountAdmin(admin.ModelAdmin):
    """Admin list configuration for daily click counts."""
    list_display = [
        'linkid', 'weikefu', 'mobile', 'user_num', 'valid_num',
        'click_num', 'date', 'write_time', 'username',
    ]
    list_display_links = ['linkid', 'username']
    list_filter = ['date', 'username']
    date_hierarchy = 'date'
    # '=' prefix -> exact-match search on these fields.
    search_fields = ['=linkid', '=mobile']
admin.site.register(ClickCount, ClickCountAdmin)
class WeekCountAdmin(admin.ModelAdmin):
    """Admin list configuration for weekly aggregated counts."""
    list_display = [
        'linkid', 'weikefu', 'buyercount', 'user_num', 'valid_num',
        'ordernumcount', 'conversion_rate', 'week_code', 'write_time',
    ]
    list_display_links = ['linkid', 'week_code']
    list_filter = ['week_code']
    # '=' prefix -> exact-match search on these fields.
    search_fields = ['=linkid', '=week_code']
admin.site.register(WeekCount, WeekCountAdmin)
|
# dataframe, numpy
import pandas as pd
import numpy as np
# Scaler
from sklearn.preprocessing import MinMaxScaler
# Model, LSTM
from keras.models import Sequential
from keras.layers import Dense, LSTM
# graph
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
# downloader file
import urllib.request
# common
import os
import time
import math
import sys
from os import path
from datetime import datetime
# source
# https://www.ecdc.europa.eu/en/publications-data/download-todays-data-geographic-distribution-covid-19-cases-worldwide
LINK_SRC = 'https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide.xlsx'
# Bundled fallback copy used when the download fails.
DOWNLOADED_SRC = 'COVID-19-geographic-disbtribution-worldwide.xlsx'
# Sheet name (Excel truncates sheet names to 31 characters).
SHEET = 'COVID-19-geographic-disbtributi'
# Number of look-back days per LSTM input window.
SAMPLE_TRAINED = 20
# Percentage of the series used for training.
PERCENT_TRAINED = 70
# Respected field, CHANGE HERE !!!!!
DATE_FIELD = 'dateRep'
PREDICTED_FIELD = 'cases'
COUNTRY = 'United_Kingdom'
# MAIN PROGRAM
# MAIN PROGRAM
if __name__ == '__main__':
    # Country can be overridden on the command line.
    country = COUNTRY
    if (len(sys.argv) > 1):
        country = ' '.join(sys.argv[1:])
        print('Using argv as country: ' + country)
    else:
        print('Using DEFAULT country: ' + country)
    print('\nReading data…\n')
    srcExcel = f'cov19-worldwide-{datetime.now().strftime("%Y-%m-%d")}.xls'
    # Try read Buffer File
    fileBuffExist = path.exists(srcExcel)
    if fileBuffExist:
        print(f'Reading data from local: {srcExcel}')
    else:
        try:
            print('Downloading…')
            link = LINK_SRC
            urllib.request.urlretrieve(link, srcExcel)
        except urllib.error.HTTPError as ex:
            print('Download FAILED')
            print(ex)
            print(f'\nUsing EMBEDDED SOURCE: {DOWNLOADED_SRC}')
            srcExcel = DOWNLOADED_SRC
    # Reading source file
    df = pd.read_excel(srcExcel, sheet_name=SHEET)
    df[DATE_FIELD] = pd.to_datetime(df[DATE_FIELD], dayfirst=True)
    # Create mask/filter based on country
    mask = df['countriesAndTerritories'] == country
    # Mask by country
    df = df.loc[mask]
    df = df.sort_values(by=DATE_FIELD)
    df = df.reset_index()  # reset Index
    print(df.head())
    print(df.info())
    # prepare dataset and use only field defined value
    dataset = df.filter([PREDICTED_FIELD]).values
    # Create len of percentage training set
    trainingDataLen = math.ceil((len(dataset) * PERCENT_TRAINED) / 100)
    print('Size of trainingSet: ' + str(trainingDataLen))
    # Scale the dataset between 0 - 1
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaledData = scaler.fit_transform(dataset)
    # Scaled trained data
    trainData = scaledData[:trainingDataLen , :]
    # Split into trained x (look-back windows) and y (next value)
    xTrain = []
    yTrain = []
    for i in range(SAMPLE_TRAINED, len(trainData)):
        xTrain.append(trainData[i-SAMPLE_TRAINED:i , 0])
        yTrain.append(trainData[i , 0])
    # Convert trained x and y as numpy array
    xTrain, yTrain = np.array(xTrain), np.array(yTrain)
    print('x - y train shape: ' + str(xTrain.shape) + ' ' + str(yTrain.shape))
    # Reshape x trained data as 3 dimension array (samples, timesteps, features)
    xTrain = np.reshape(xTrain, (xTrain.shape[0], xTrain.shape[1], 1))
    print('Expected x train shape: ' + str(xTrain.shape))
    print('')
    print('Processing the LSTM model...\n')
    # Build LSTM model
    model = Sequential()
    model.add(LSTM(10, input_shape=(xTrain.shape[1], 1)))
    model.add(Dense(1, activation='linear'))
    # Compile model
    model.compile(optimizer='adam', loss='mean_squared_error')
    # Train the model (shuffle=False keeps the time ordering)
    model.fit(xTrain, yTrain, shuffle=False, epochs=300)
    print('\nDone Processing the LSTM model...')
    # Prepare testing dataset (keep SAMPLE_TRAINED look-back rows of overlap)
    testData = scaledData[trainingDataLen - SAMPLE_TRAINED: , :]
    # Create dataset test x and y
    xTest = []
    yTest = dataset[trainingDataLen: , :]
    for i in range(SAMPLE_TRAINED, len(testData)):
        xTest.append(testData[i - SAMPLE_TRAINED:i, 0])
    # Convert test set as numpy array
    xTest = np.array(xTest)
    # Reshape test set as 3 dimension array
    xTest = np.reshape(xTest, (xTest.shape[0], xTest.shape[1], 1))
    # Models predict values (inverse-scaled back to original units)
    predictions = model.predict(xTest)
    predictions = scaler.inverse_transform(predictions)
    # Get root mean square error (RMSE).
    # Fix: the original computed np.sqrt(np.mean(diff) ** 2) == |mean(diff)|;
    # the differences must be squared *before* averaging.
    rmse = np.sqrt(np.mean((predictions - yTest) ** 2))
    print('\nRoot mean square (RMSE):' + str(rmse))
    # Add prediction for Plot
    train = df.loc[:trainingDataLen, [DATE_FIELD, PREDICTED_FIELD] ]
    valid = df.loc[trainingDataLen:, [DATE_FIELD, PREDICTED_FIELD] ]
    print('validLength: {}, predictionLength: {}'.format(len(valid), len(predictions)))
    # Create dataframe prediction
    dfPrediction = pd.DataFrame(predictions, columns = ['predictions'])
    # Reset the index
    valid = valid.reset_index()
    dfPrediction = dfPrediction.reset_index()
    # Merge valid data and prediction data
    valid = pd.concat([valid, dfPrediction], axis=1)
    # Visualize
    fig, ax = plt.subplots(num=f'{country} prediction {PREDICTED_FIELD}')
    plt.subplots_adjust(bottom=0.2)
    # Add graph info
    ax.set_title(f'With RMSE: {rmse:,.2f}')
    ax.set_xlabel(DATE_FIELD, fontsize=14)
    ax.set_ylabel(PREDICTED_FIELD, fontsize=14)
    ax.grid(linestyle='-', linewidth='0.5', color='gray')
    # plot trained data
    ax.plot(train[DATE_FIELD], train[PREDICTED_FIELD])
    # plot actual and predictions
    ax.plot(valid[DATE_FIELD], valid[[PREDICTED_FIELD, 'predictions']])
    # add legend
    ax.legend(['Train', 'Actual', 'Prediction'], loc='lower right')
    # finally show graph
    plt.show()
    print('')
    print('Exiting…')
    print('')
import sys, os, shutil
from datetime import datetime as dt
from reorg import page
from reorg import crawling as cr
def adjust_target_dir_for_page(dir_target):
    """Return *dir_target* with its final path component stripped, keeping
    the trailing separator (path to directory target as if for section).

    Bug fix: the previous ``dir_target.replace(basename, "")`` removed
    *every* occurrence of the basename anywhere in the path (e.g.
    ``"/ab/b"`` -> ``"/a/"``); now only the trailing component is cut.
    """
    basename = os.path.basename(dir_target)
    if not basename:
        # Path already ends with a separator; nothing to strip.
        return dir_target
    return dir_target[:-len(basename)]
def whats_dir_target(dir_target_user):
    """Determine the target directory root path to be used.

    Uses *dir_target_user* when given, otherwise falls back to the
    shell's working directory ($PWD), and appends "reorged".
    """
    if dir_target_user:
        base = dir_target_user
    else:
        base = os.environ["PWD"]
    return os.path.join(base, "reorged")
def prepare_dir_target(dir_target):
    """Make sure *dir_target* exists and starts out empty.

    Any previous target directory is deleted wholesale before the fresh
    one is created; both actions are reported on stdout.
    """
    already_there = os.path.exists(dir_target)
    if already_there:
        # TODO: Need user prompt
        shutil.rmtree(dir_target)
        print('Purged previous target "{0}"'.format(dir_target))
    os.mkdir(dir_target)
    print('Created target "{0}"'.format(dir_target))
def run():
    """CLI entry point: mirror the notes tree at sys.argv[1] into a fresh
    "reorged" target tree (optional target root in sys.argv[2]).

    Walks the source tree; special directories are skipped, "page"
    directories get their _pages/_static content copied plus a generated
    index page, and everything else is mirrored as a plain directory.
    """
    dir_src_root = sys.argv[1].rstrip(os.sep)
    print("Reorganizing \"{0}\"".format(dir_src_root))
    dir_target_root = whats_dir_target(sys.argv[2] if len(sys.argv) > 2 else None)
    prepare_dir_target(dir_target_root)
    # Step is ("/current-directory", ["sub-directory"], ["some-file"])
    for step in os.walk(dir_src_root):
        print(step)
        dir_curr, dirs_in_curr, files_in_curr = step
        if cr.is_path_special(dir_src_root, dir_curr):
            print("Skipping special directory")
        elif cr.should_create_page(step):
            dir_target = cr.whats_target_dir_for_section(dir_curr, dir_src_root,
                                                         dir_target_root)
            # Give up on being slick by trying to avoid creating top-level
            # sectional/group directory. Assume contexts/concepts will most likely
            # have some directory to be copied anyways, code will be simpler..
            # Handling _pages must be first since it requires that the directory
            # doesn't already exist
            # TODO: Inject TOC into index page
            if "_pages" in dirs_in_curr:
                src = os.path.join(dir_curr, "_pages")
                # NOTE: Copying into top-level group directory
                dest = dir_target
                shutil.copytree(src, dest)
            # TODO: Need to adjust the "_static" dir in the note and maybe
            # consolidate all static dirs
            # Straight up copy _static
            # TODO: Should _static dirs be merged in a big global dir?
            if "_static" in dirs_in_curr:
                src = os.path.join(dir_curr, "_static")
                dest = os.path.join(dir_target, "static")
                shutil.copytree(src, dest)
            # Handle index and notes
            if not os.path.exists(dir_target):
                os.mkdir(dir_target)
            page_text = page.create_page(step, order_latest_first=True)
            page_path = os.path.join(dir_target, page.create_page_name(dir_curr))
            with open(page_path, "w+") as f:
                f.write(page_text)
        else:
            # Just create a directory
            dir_target = cr.whats_target_dir_for_section(dir_curr, dir_src_root,
                                                         dir_target_root)
            if not os.path.exists(dir_target):
                os.mkdir(dir_target)
|
# ###리스트
# 리스트는 데이터의 목록을 다루는 자료형
# []대괄호로 명명한다
# 리스트 안에는 어떠한 자료형도 포함시킬수 있음 C는 같은 자료형만 가능
# 변수가 많아지면 관리해야할 사항이 많아지고 실수할 확률이 높아짐
# 리스트는 연속적으로 되어 있어서 데이터 가져오기 편함
# 리스트를 가져올때는 인덱스를 사용 0번부터
# ls = [500, 200, 300, 400]
# Sum = 0
# print("ls:", ls)
# print("ls[0]:", ls[0])
# print("ls[1]:", ls[1])
# print("ls[2]:", ls[2])
# print("ls[3]:", ls[3])
# #맨 오른쪽이 -1 맨 왼쪽은 -n임
# ls = [500, 200, 300, 400]
# Sum = 0
# print("ls:", ls)
# print("ls[0]:", ls[-4])
# print("ls[1]:", ls[-3])
# print("ls[2]:", ls[-2])
# print("ls[3]:", ls[-1])
# ls = [0, 0, 0, 0] #박스를 생성해주는 일, 0이 아닌 다른게 들어가도 상관없음
# Sum = 0
# ls[0] = int(input("1번째 숫자 입력:"))
# ls[1] = int(input("2번째 숫자 입력:"))
# ls[2] = int(input("3번째 숫자 입력:"))
# ls[3] = int(input("4번째 숫자 입력:"))
# Sum = ls[0] + ls[1] + ls[2] + ls[3]
# print("ls[0]:", ls[0])
# print("ls[1]:", ls[1])
# print("ls[2]:", ls[2])
# print("ls[3]:", ls[3])
# print("리스트의 합: %d" % Sum)
# ls = [0, 0, 0, 0]
# Sum = 0
# print("len(ls):", len(ls))
# for i in range (len(ls)):
# ls[i] = int(input(str(i+1)+"번째 숫자 입력:"))
# Sum += ls[i]
# for i in range(len(ls)):
# print("ls[%d]:" % i, ls[i])
# print("리스트의 합:", Sum)
# ls = [10, 20, 30, 40]
# print("ls:", ls)
# print()
# print("ls[1:3] => ls[1]~[2]:", ls[1:3])
# print("ls[0:3] => ls[0]~[2]:", ls[0:3])
# print("ls[2:] => ls[2] ~ [끝까지]", ls[2:]) #비워두나 전체길이보다 큰 숫자를 적으면 끝까지
# print("ls[:2] => ls[0] ~ [1]", ls[:2])
#[:]우측은 그 전까지, 좌측은 그 숫자 포함
#### 중요 ####
# ## 리스트[얕은 복사] 데이터 하나를 공유
# ls = [10, 20, 30, 40]
# arr = ls
# print("ls: {}ls, id: {}".format(ls,id(ls)))
# print("arr: {}arr, id: {}".format(arr,id(arr)))
# #arr = ls = [10,20,30,40] 불리는 이름만 다르지 이 둘은 같은 개체임
# ls = [10, 20, 30, 40]
# arr = ls
# arr[2] = 20000
# print("ls: {}ls, id: {}".format(ls,id(ls)))
# print("arr: {}arr, id: {}".format(arr,id(arr)))
# ## ls arr 이 둘은 서로 동기화됨
# ## 리스트[깊은 복사] 똑같은 데이터 2개
# ls = [10, 20, 30, 40]
# arr = ls[:] # arr = [10, 20, 30, 40] 이거랑 똑같은 개념임
# arr[2] = 20000
# print("ls: {}ls, id: {}".format(ls,id(ls)))
# print("arr: {}arr, id: {}".format(arr,id(arr)))
#입고, 재고, 출고
#출고랑 재고는 동기화가 되야되서 얕은 복사
#입고랑 재고는 동기화가 되면 안됨(재고=입고+재고 라고 해서 입고까지 바뀌면 안됨) 깊은 복사
# import copy # copy 라는 모듈을 가져와라 (모듈: 함수의 모임)
# ls = [10, 20, 30, 40]
# #arr = ls[:]
# arr = copy.deepcopy(ls)
# arr[2] = "deepcopy"
# print("ls: {}ls, id: {}".format(ls,id(ls)))
# print("arr: {}arr, id: {}".format(arr,id(arr)))
##업데이트 연산
# ls = [10, 20, 30]
# arr = [40, 50, 60]
# print("ls:", ls)
# print("arr:", arr)
# Str = ls + arr
# print("ls + arr => Str", Str)
# string = ls * 3
# print("ls * 3 => string", string)
##숫자 연산
# ls = [10, 20, 30]
# arr = [40, 50, 60]
# for i in range(len(ls)):
# ls[i] = ls[i] + arr[i]
# print(ls)
# for i in range(len(ls)):
# ls[i] = ls[i] * 3
# print(ls)
#선생님 방법
# ls = [10, 20, 30]
# arr = [40, 50, 60]
# Str = [0, 0, 0]
# string = [0, 0, 0]
# for i in range(len(ls)):
# Str[i] = ls[i] + arr[i]
# for i in range(len(ls)):
# string[i] = ls[i] * 3
# print(Str)
# print(string) |
# -*- coding:utf-8 -*-
import cv2
import matplotlib.pyplot as plt
import numpy as np
import math
import time
# Accumulators for Hough-line coefficients, split by sign of cos(theta).
# NOTE(review): module-level state — these lists grow on every call to
# ponto_fuga() and are never cleared; confirm that is intentional.
coef_angular_positivo = []
coef_angular_negativo = []
coef_linear_positivo = []
coef_linear_negativo = []
# NOTE(review): mediana_x / mediana_y are defined but never used in the
# visible code.
mediana_x = 0
mediana_y = 0
def ponto_fuga(frame):
    """Estimate the vanishing point ("ponto de fuga") of a BGR frame.

    Thresholds the grayscale frame, runs the standard Hough transform,
    draws the detected lines with clearly positive/negative cos(theta)
    onto *frame* (in-place side effect), intersects segment endpoints
    from the two families, and returns the mean intersection as an
    (x, y) tuple.  Returns (0, 0) when no intersection was computed.
    """
    lista_xi = []
    lista_yi = []
    x_ponto_fuga = []
    y_ponto_fuga = []
    avg_x=0
    avg_y=0
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # NOTE(review): hsv and edges are computed but never used afterwards.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    # Keep only very bright pixels (> 230) before the Hough transform.
    ret, limiarizada = cv2.threshold(gray, 230, 255, cv2.THRESH_BINARY)
    lines = cv2.HoughLines(limiarizada,1, np.pi/180, 200)
    # NOTE(review): cv2.HoughLines returns None when nothing is detected,
    # which would make this loop raise TypeError — confirm callers guard it.
    for line in lines:
        for rho,theta in line:
            # Direction components of the line in normal (rho, theta) form.
            m = np.cos(theta)
            b = np.sin(theta)
            if m > 0.4:
                # "Positive" family: record coefficients at module level
                # and draw a long segment through the detected line.
                coef_angular_positivo.append(m)
                coef_linear_positivo.append(b)
                x0 = m*rho
                y0 = b*rho
                x1 = int(x0 + 1000*(-b))
                y1 = int(y0 + 1000*(m))
                x2 = int(x0 - 1000*(-b))
                y2 = int(y0 - 1000*(m))
                line = cv2.line(frame,(x1,y1),(x2,y2),(0,255,0),3)
            elif m < -0.4:
                # "Negative" family: same treatment with its own endpoints.
                coef_angular_negativo.append(m)
                coef_linear_negativo.append(b)
                x0 = m*rho
                y0 = b*rho
                x3 = int(x0 + 1000*(-b))
                y3 = int(y0 + 1000*(m))
                x4 = int(x0 - 1000*(-b))
                y4 = int(y0 - 1000*(m))
                line = cv2.line(frame,(x3,y3),(x4,y4),(0,255,0),3)
            try:
                # Most recent coefficient of each family.  NOTE(review):
                # h1/m1/h2/m2 are computed but unused — the intersection
                # below works on the segment endpoints instead.
                h1 = coef_linear_positivo[len(coef_linear_positivo)-1]
                m1 = coef_angular_positivo[len(coef_angular_positivo)-1]
                h2 = coef_linear_negativo[len(coef_linear_negativo)-1]
                m2 = coef_angular_negativo[len(coef_angular_negativo)-1]
                # Segment-segment intersection (determinant formula).
                # NOTE(review): x1..y2 / x3..y4 persist from *earlier*
                # iterations; until both families have been seen once, the
                # NameError raised here is swallowed by the bare except
                # below, as is ZeroDivisionError for parallel segments.
                xi = ((x1*y2 - y1*x2)*(x3 - x4) - (x1-x2)*(x3*y4 - y3*x4))/((x1-x2)*(y3-y4) - (y1-y2)*(x3-x4))#((h2-h1)/(m1-m2))
                yi = ((x1*y2 - y1*x2)*(y3 - y4) - (y1-y2)*(x3*y4 - y3*x4))/((x1-x2)*(y3-y4) - (y1-y2)*(x3-x4))#(m1*xi) + h1
                lista_xi.append(xi)
                lista_yi.append(yi)
                x_ponto_fuga.append(xi)
                y_ponto_fuga.append(yi)
            except:
                pass
            else:
                pass
    try:
        # Average all intersections and mark the estimate on the frame.
        avg_x = int(np.mean(x_ponto_fuga))
        avg_y = int(np.mean(y_ponto_fuga))
        cv2.circle(frame, (avg_x,avg_y), 3, (255,0,0), 5)
    except:
        # Empty list -> mean is NaN -> int() raises; keep the (0, 0) default.
        pass
return (avg_x,avg_y) |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from sklearn.model_selection import train_test_split
import time #helper libraries
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from numpy import newaxis
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# Daily Delhi climate data, pre-split into train and test CSV files.
train = pd.read_csv('./data/DailyDelhiClimateTrain.csv')
test = pd.read_csv('./data/DailyDelhiClimateTest.csv')
## MEAN TEMP
# Column index 1 is taken as the mean-temperature column (kept 2-D via the
# 1:2 slice so the scaler receives shape (n, 1)) — TODO confirm the schema.
temp_train = train.iloc[:,1:2]
temp_test = test.iloc[:,1:2]
# Scaling the values between 0 and 1.
# (Duplicate import kept from the original; MinMaxScaler is already
# imported at the top of the file.)
from sklearn.preprocessing import MinMaxScaler
ss = MinMaxScaler(feature_range=(0, 1))
temp_train = ss.fit_transform(temp_train)
# Bug fix: scale the test split with transform(), not fit_transform().
# Re-fitting on the test data leaks test statistics into preprocessing and
# leaves `ss` fitted to the test range, so the later
# ss.inverse_transform(prediction) would un-scale model outputs (produced
# in train-scale space) with the wrong min/max.
temp_test = ss.transform(temp_test)
def create_dataset(dataset, look_back=1):
    """Build supervised-learning pairs from a (n, 1) time-series array.

    Each sample is a window of *look_back* consecutive values from
    column 0, and its target is the value immediately after the window.
    Returns (X, y) as numpy arrays with len(dataset) - look_back - 1 rows.
    """
    samples, targets = [], []
    n_windows = len(dataset) - look_back - 1
    for start in range(n_windows):
        window = dataset[start:(start + look_back), 0]
        samples.append(window)
        targets.append(dataset[start + look_back, 0])
    return np.array(samples), np.array(targets)
# Window size of one previous value per sample.
look_back = 1
trainX, trainY = create_dataset(temp_train, look_back)
testX, testY = create_dataset(temp_test, look_back)
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1],1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1],1))
# create and fit the LSTM network
model_temp = Sequential()
#Adding the first LSTM layer and some Dropout regularisation
model_temp.add(LSTM(units = 100, return_sequences = True, input_shape = (trainX.shape[1], 1)))
model_temp.add(Dropout(0.2))
# Adding a second LSTM layer and some Dropout regularisation
model_temp.add(LSTM(units = 100, return_sequences = True))
model_temp.add(Dropout(0.2))
# Adding a third LSTM layer and some Dropout regularisation
model_temp.add(LSTM(units = 100, return_sequences = True))
model_temp.add(Dropout(0.2))
# Adding a fourth LSTM layer and some Dropout regularisation
# (final LSTM layer: return_sequences defaults to False, collapsing the
# sequence to one vector per sample)
model_temp.add(LSTM(units = 50))
model_temp.add(Dropout(0.2))
# Adding the output layer (single regression value)
model_temp.add(Dense(units = 1))
# Compiling the RNN
# NOTE(review): 'accuracy' is a classification metric and is not
# meaningful for a mean-squared-error regression — consider 'mae'.
model_temp.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics=['accuracy'])
# Fitting the RNN to the Training set
model_temp.fit(trainX, trainY, epochs = 100, batch_size = 32)
# PREDICTION: un-scale both prediction and test series back to degrees.
prediction = model_temp.predict(testX)
prediction = ss.inverse_transform(prediction)
temp_test = ss.inverse_transform(temp_test)
plt.figure(figsize=(20,10))
plt.plot(temp_test, color = 'black', label = 'Delhi Mean Temperature')
plt.plot(prediction, color = 'green', label = 'Predicted Delhi Mean Temperature')
plt.title('Delhi Mean Temp Prediction')
plt.xlabel('Time')
plt.ylabel('Mean Temp')
plt.legend()
plt.show()
|
from rcnn_create_model import *
# from rcnn_feature_stats import *
from rcnn_feature import *
from rcnn_load_model import *
import numpy as np
import caffe
from datasets.factory import get_imdb
from sklearn.linear_model import LogisticRegression
import theanets
import climate
def rcnn_train(imdb):
    """Extract fc7 features for every image in *imdb* and fit classifiers.

    Saves the full feature matrix to feat.npz, then trains a scikit-learn
    multinomial logistic regression and a theanets classifier on it.  The
    commented-out tail is a partial port of the original R-CNN
    hard-negative-mining SVM training loop.
    """
    use_gpu = False
    opts = Opts()
    opts.net_def_file = './model-defs/rcnn_batch_256_output_fc7.prototxt'
    # NOTE(review): conf is assembled here but never used below.
    conf = [imdb.name, 1, 'cachedir', 'cachedir/' + imdb.name]
    rcnn_model = RcnnModel(opts.net_def_file, opts.net_file, opts.cache_name)
    # rcnn_load_model(rcnn_model, use_gpu)
    rcnn_model.detectors.crop_mode = opts.crop_mode
    rcnn_model.detectors.crop_padding = opts.crop_padding
    rcnn_model.classes = imdb.classes
    rcnn_model.opts = opts
    # Full feature-extraction pass; cache the result on disk.
    X, y = rcnn_get_all_feature(imdb, rcnn_model)
    np.savez('feat', X = X, y = y)
    classifier = LogisticRegression(class_weight = 'balanced', solver = 'lbfgs', multi_class = 'multinomial', verbose = 1, n_jobs = -1, max_iter = 1000)
    classifier.fit(X, y)
    # climate.enable_default_logging()
    # 4096-d fc7 features -> 21 outputs.  NOTE(review): presumably the 20
    # VOC classes plus background — confirm against imdb.classes.
    net = theanets.Classifier(layers=[4096, 21])
    net.train((X, y), algo = 'sgd', learning_rate = 0.1, momentum = 0.9, save_every = 60.0, save_progress = 'net.{}.netsave', validate_every = 100)
    # opts.feat_norm_mean = rcnn_feature_stats(imdb, opts.layer, rcnn_model)
    # print 'average norm = %.3f\n' % feat_norm_mean
    #
    # X_pos, keys_pos = get_positive_pool5_features(imdb, opts)
    #
    # caches = []
    # for i in imdb.class_ids:
    #     X_pos[i] = rcnn_pool5_to_fcX(X_pos[i], opts.layer, rcnn_model)
    #     rcnn_scale_features(X_pos[i], opts.feat_norm_mean)
    #     caches.append(Cache(X_pos[i], keys_pos[i]))
    #
    # first_time = True
    # max_hard_epochs = 1
    # for hard_epoch in xrange(max_hard_epochs):
    #     for i in xrange(len(imdb.image_ids)):
    #         [X, keys] = sample_negative_features(first_time, rcnn_model, caches, imdb, i)
class Cache():
    """State container for hard-negative mining of a single detector.

    Holds the fixed positive features/keys, the evolving pool of mined
    negatives, and the thresholds/counters that drive retraining.
    """
    def __init__(self, X_pos, keys_pos):
        # Positives are fixed at construction time.
        self.X_pos = X_pos
        self.keys_pos = keys_pos
        # Negatives are accumulated during mining.
        self.X_neg = []
        self.keys_neg = []
        self.num_added = 0
        # Retrain once this many negatives have been added.
        self.retrain_limit = 2000
        # Score thresholds for evicting easy / keeping hard negatives.
        self.evict_thresh = -1.2
        self.hard_thresh = -1.0001
        # Loss bookkeeping, filled in during training.
        self.pos_loss = []
        self.neg_loss = []
        self.reg_loss = []
        self.tot_loss = []
def sample_negative_features(first_time, rcnn_model, caches, imdb, ind):
    """Mine hard-negative features from image *ind* for every class.

    Unfinished port: the per-class selection logic in both branches is
    still a stub (see the todo markers), so the function currently
    returns None.
    """
    opts = rcnn_model.train_opts
    d = rcnn_load_cached_pool5_features(opts.cache_name, imdb.name, imdb.image_ids[ind])
    class_ids = imdb.class_ids
    # todo: waiting for check
    # if is empty (d['feat'])
    # Lift cached pool5 features to the configured fc layer, then scale.
    d['feat'] = rcnn_pool5_to_fcX(d['feat'], opts.layer, rcnn_model)
    d['feat'] = rcnn_scale_features(d['feat'], opts.feat_norm_mean)
    neg_over_thresh = 0.3
    if first_time:
        for cls_id in class_ids:
            # todo:
            pass
    else:
        # Score every window against the current per-class detectors.
        zs = np.dot(d['feat'], rcnn_model.detectors.W) + rcnn_model.detectors.B
        for cls_id in class_ids:
            z = zs[:, cls_id]
            # todo:
            pass
def get_positive_pool5_features(imdb, opts):
    """Collect cached pool5 features of positive (ground-truth) windows.

    Returns (X_pos, keys): per-class lists indexed by class id, where
    X_pos[j] gathers the feature matrices selected for class j and
    keys[j] the matching (image-index array, window-index array) pairs.

    Bug fixes vs. the original draft:
      * xrange() was called on the image-id *list* instead of its length
        (TypeError on the first call);
      * X_pos[j] was tested before the list had j+1 entries (IndexError);
        the per-class buckets are now grown on demand;
      * `if sel:` on a numpy index array raises ValueError for arrays
        with more than one element; use sel.size instead;
      * features/keys were appended to the flat outer lists instead of
        the per-class buckets the commented-out port intended.
    """
    X_pos = []
    keys = []
    for i in xrange(len(imdb.image_ids)):
        d = rcnn_load_cached_pool5_features(opts.cache_name, imdb.name,
                                            imdb.image_ids[i])
        for j in imdb.class_ids:
            # Grow the per-class buckets so X_pos[j] / keys[j] always exist.
            while len(X_pos) <= j:
                X_pos.append([])
                keys.append([])
            # Windows whose ground-truth class is j.
            sel = np.where(d.c == j)[0]
            if sel.size > 0:
                X_pos[j].append(d.feat[sel, :])
                keys[j].append([i * np.ones(sel.shape[0]), sel])
    return X_pos, keys
if __name__ == "__main__":
    # Train on the PASCAL VOC 2007 trainval split.
    # NOTE(review): VOCdevkit is assigned but never passed to anything
    # visible here — presumably get_imdb reads a configured path instead.
    VOCdevkit = './datasets/VOCdevkit2007'
    imdb_train = get_imdb('voc_2007_trainval')
rcnn_train(imdb_train) |
#import lxml.etree as ET
import xml.etree.ElementTree as ET
import urllib
import sys
if __name__ == "__main__":
    # Python 2 script (print statement, urllib.quote_plus): for each XML
    # file given on the command line, emit one HTML <li> entry wrapped in
    # double quotes with a trailing comma — apparently for pasting into a
    # JavaScript array literal.
    for i in sys.argv[1:]:
        t = ET.parse(i).getroot()
        label = '"<li>'
        url = t.attrib.get('url')
        if url:
            # Normalize then re-quote the URL, keeping ':' and '/' literal.
            label += "<a href='" + urllib.quote_plus(urllib.unquote(url),":/") + "'>"
        # NOTE(review): if the root lacks a 'title' attribute this is None
        # and the concatenation raises TypeError — confirm inputs always
        # carry a title.
        label += t.attrib.get('title')
        if url:
            label += "</a>"
        original_title = t.attrib.get('original_title')
        if original_title:
            label += " (" + original_title + ")"
        label += '</li>",'
        print label.encode('utf-8')
|
from Record import Record
import mysql.connector
from mysql.connector import errorcode
'''
RecordsManager accesses the underlying MySQL database and transforms that data into
Python objects that the script can work with. It requires mysql.connector to be installed
on the machine you run the script. This can be found here: https://dev.mysql.com/downloads/connector/python/.
Much of the code found here was patterned after this example:
https://dev.mysql.com/doc/connector-python/en/connector-python-example-connecting.html.
'''
class RecordsManager():
    """Loads patient note rows from the MySQL database and converts them
    into Record objects.

    Bug fix: ``records`` used to be a *class* attribute, so every
    RecordsManager instance (and repeated getAllRecords calls across
    instances) shared and appended to one list.  It is now per-instance
    state created in __init__.  The connection defaults stay at class
    level for backward compatibility.
    """
    # Class-level connection defaults (readable per instance via self).
    database = 'MFD_MS'
    host = 'localhost'

    def __init__(self, connectionString, username, password):
        # NOTE(review): connectionString is stored but never used below —
        # connections are built from host/database/user/password.
        self.connectionString = connectionString
        self.username = username
        self.password = password
        # Per-instance cache filled by getAllRecords().
        self.records = []

    #runs a custom query against the database and returns record objects
    def getAllRecords(self):
        """Fetch every note for the first 200 patients with grouper != 5
        and append them to self.records as Record objects."""
        #open the connection
        try:
            cnx = mysql.connector.connect(user=self.username,
                                          password=self.password,
                                          host=self.host,
                                          database=self.database)
            cursor = cnx.cursor(prepared=True)
            #Training set pulled out here, just getting the first x patients' records, when grouper = 5 it means patient does not have MS
            #This is a subquery, the inside query makes a list of ruids from the first 200 patients in the DB then the outer query gets every
            #record for each of those ruids
            show_DB = """select ruid, entry_date, content from notes where ruid in
            (Select * from (select distinct ruid from notes where grouper != 5 order by ruid, entry_date Limit 200) as t);"""
            # Bug fix: dropped multi=True — with it, execute() returns an
            # iterator of result cursors (for multi-statement strings) and
            # the following fetchall() breaks; this is a single statement.
            cursor.execute(show_DB)
            results = cursor.fetchall()
            #converts each result to a record object
            for result in results:
                ruid = result[0]
                date = result[1]
                content = result[2]
                record = Record(ruid, date, content)
                #appends to records attribute then to be used elsewhere
                self.records.append(record)
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                print("Something is wrong with your user name or password")
            elif err.errno == errorcode.ER_BAD_DB_ERROR:
                print("Database does not exist")
            else:
                print(err)
        else:
            cnx.close()

    def getDrugRecords(self):
        """Fetch the same 200-patient cohort, excluding problem-list
        documents, and return the rows as a list of Record objects."""
        results = []
        try:
            # Consistency fix: use the shared host/database attributes
            # instead of re-hardcoding 'localhost' / 'MFD_MS'.
            cnx = mysql.connector.connect(user=self.username,
                                          password=self.password,
                                          host=self.host,
                                          database=self.database)
            cursor = cnx.cursor(prepared=True)
            #Training set pulled out here, just getting the first x patients' records
            # sqlStatement = "Select ruid, entry_date, content from notes where doc_type != 'PL' and sub_type not like '%PROBLEM%'"
            sqlStatement = "select ruid, entry_date, content from notes where ruid in (Select * from (select distinct ruid from notes where grouper != 5 and doc_type != 'PL' and sub_type not like '%PROBLEM%' order by ruid, entry_date Limit 200) as t);"
            # Bug fix: dropped multi=True (see getAllRecords).
            cursor.execute(sqlStatement)
            dbResults = cursor.fetchall()
            for result in dbResults:
                record = Record(result[0], result[1], result[2])
                results.append(record)
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                print("Something is wrong with your user name or password")
            elif err.errno == errorcode.ER_BAD_DB_ERROR:
                print("Database does not exist")
            else:
                print(err)
        else:
            cnx.close()
        return results
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.