#!/usr/bin/env python3
#
# A very basic DREAM Python example. This script generates a basic
# DREAM input file which can be passed to 'dreami'.
#
# Run as
#
# $ ./basic.py
#
# ###################################################################
import numpy as np
import sys
sys.path.append('../../py/')
from DREAM.DREAMSettings import DREAMSettings
import DREAM.Settings.Equations.IonSpecies as Ions
import DREAM.Settings.Solver as Solver
import DREAM.Settings.CollisionHandler as Collisions
import DREAM.Settings.Equations.ElectricField as Efield
from DREAM.Settings.Equations.ElectricField import ElectricField
ds = DREAMSettings()
times = [0]
radius = [0, 1]
# Set E_field
efield = 2000*np.ones((len(times), len(radius)))
ds.eqsys.E_field.setPrescribedData(efield=efield, times=times, radius=radius)
# Set self-consistent E-field evolution
#ds.eqsys.E_field.setType(Efield.TYPE_SELFCONSISTENT)
#ds.eqsys.E_field = ElectricField(Efield.TYPE_SELFCONSISTENT, efield=1.0)
# Set n_cold (prescribed; it is automatically calculated self-consistently otherwise)
#density = 1e20 * np.ones((len(times), len(radius)))
#ds.eqsys.n_cold.setPrescribedData(density=density, times=times, radius=radius)
# Set temperature
temperature = 10 * np.ones((len(times), len(radius)))
ds.eqsys.T_cold.setPrescribedData(temperature=temperature, times=times, radius=radius)
# Set ions
ds.eqsys.n_i.addIon(name='D', Z=1, iontype=Ions.IONS_PRESCRIBED_FULLY_IONIZED, n=1e20)
ds.eqsys.n_i.addIon(name='Ar', Z=18, iontype=Ions.IONS_PRESCRIBED_NEUTRAL, n=1e20)
# Hot-tail grid settings
#pmax = 0.1
#ds.hottailgrid.setNxi(30)
#ds.hottailgrid.setNp(500)
pmax = 0.1
ds.hottailgrid.setNxi(5)
ds.hottailgrid.setNp(600)
ds.hottailgrid.setPmax(pmax)
#ds.collisions.collfreq_mode = Collisions.COLLFREQ_MODE_ULTRA_RELATIVISTIC
ds.collisions.collfreq_mode = Collisions.COLLFREQ_MODE_FULL
ds.collisions.collfreq_type = Collisions.COLLFREQ_TYPE_NON_SCREENED
#ds.collisions.collfreq_type = Collisions.COLLFREQ_TYPE_PARTIALLY_SCREENED
#ds.collisions.bremsstrahlung_mode = Collisions.BREMSSTRAHLUNG_MODE_NEGLECT
ds.collisions.bremsstrahlung_mode = Collisions.BREMSSTRAHLUNG_MODE_STOPPING_POWER
ds.collisions.lnlambda = Collisions.LNLAMBDA_ENERGY_DEPENDENT
# Set initial Maxwellian, uniform in radius (here matching the bulk plasma: n = 1e20, T = 10 eV)
#ds.eqsys.f_hot.setInitialProfiles(rn0=0, n0=5e19, rT0=0, T0=1e3)
ds.eqsys.f_hot.setInitialProfiles(rn0=0, n0=1e20, rT0=0, T0=10)
# Disable runaway grid
ds.runawaygrid.setEnabled(False)
# Set up radial grid
ds.radialgrid.setB0(5)
ds.radialgrid.setMinorRadius(0.22)
ds.radialgrid.setWallRadius(0.22)
ds.radialgrid.setNr(3)
# Use the linear solver
ds.solver.setType(Solver.LINEAR_IMPLICIT)
# Also output collision frequencies
# ('nu_s' stores ALL slowing-down frequencies; one can also specify
# each frequency separately:
# hottail/nu_s, hottail/nu_s_fr, hottail/nu_s_f1, hottail/nu_s_f2,
# runaway/nu_s, runaway/nu_s_fr, runaway/nu_s_f1, runaway/nu_s_f2)
#ds.other.include('nu_s')
#ds.other.include('all')
ds.other.include('nu_s','nu_D','fluid')
# Set time stepper
#ds.timestep.setTmax(1e-2)
#ds.timestep.setNt(100)
ds.timestep.setTmax(1e-7)
ds.timestep.setNt(2)
# Save settings to HDF5 file
ds.save('dream_settings.h5')
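# Usage sketch (assuming, per the header comment, that 'dreami' accepts the
# generated settings file as its argument):
#   $ ./basic.py && dreami dream_settings.h5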
|
import requests
from bs4 import BeautifulSoup
import json
import smtplib
from smtplib import SMTPException
import datetime
import mariadb
import sys
def get_wod():
url = "https://www.merriam-webster.com/word-of-the-day"
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
wod = soup.find_all('div', class_="word-and-pronunciation")
wod = wod[0].find('h1').text
wod = str(wod)
return wod
def get_wod_info(wod):
url = f"https://www.dictionaryapi.com/api/v3/references/collegiate/json/{wod}?key=secret"
r = requests.get(url)
r = json.loads(r.text)
short_def = r[0]['shortdef']
pronunciation = str(r[0]['hwi']['prs'][0]['mw'])
word_type = str(r[0]['fl'])
return short_def, pronunciation, word_type
def send_email(wod, word_type, pronunciation, short_def):
def_p_tag = [f'<li style="font-size: 20px; color: white;">{item}</li>' for item in short_def]
def_p_tag = ''.join(def_p_tag)
gmail_user = 'email@gmail.com'
gmail_password = 'secret'
receiver = ['email@gmail.com', 'email@protonmail.com']
message = f"""From: Python Script <email@gmail.com>
To: NAME <email@gmail.com>, NAME <email@protonmail.com>
MIME-Version: 1.0
Content-type: text/html; charset="UTF-8"
Subject: Merriam Webster's Word of the Day

<html>
<head>
<meta http-equiv="Content-Type" content="text/html charset=UTF-8" />
</head>
<body>
<div style="padding-bottom: 50px; padding-top: 50px; background: #375c71;">
<div style="font-size: 50px; color: white; text-align:center;">
<p style="font-size: 20px;">
Word of the day : {datetime.date.today().strftime("%B %d, %Y")}
</p>
<hr style="width: 50%; margin: 0 auto;">
<p style="font-size: 55px; color: white; font-family: Georgia, serif;">{wod}</p>
<p style="font-size: 20px;"><i>{word_type}</i> | {pronunciation}</p>
<hr>
</div>
<p style="font-size: 25px; color: white; padding-left: 10px;">Definitions: </p>
<ol style="max-width: 800px; margin-right: auto; margin-left: auto;">
{def_p_tag}
</ol>
</div>
</body>
</html>"""
try:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(gmail_user, gmail_password)
server.sendmail(gmail_user, receiver, message.encode('utf8'))
server.close()
print('Email sent!')
except SMTPException:
print('Something went wrong...')
return None
def db_conn():
try:
conn = mariadb.connect(
user="pi",
password="secret",
host="localhost",
port=3306,
database="dbname"
)
except mariadb.Error as e:
print(f"Error connecting to MariaDB Platform: {e}")
sys.exit(1)
# Get Cursor
cursor = conn.cursor()
return conn, cursor
def insert_row(conn, cursor, day, word, word_type, pronunciation, definition):
cursor.execute(
"""INSERT INTO word_of_the_day.wod_tbl (Day, Word, WordType, Pronunciation, Definition) VALUES
(?, ?, ?, ?, ?)""", (day, word, word_type, pronunciation, definition))
conn.commit()
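# Hypothetical driver (not in the original file) sketching how these helpers
# are presumably chained together; the call order and date value are assumptions.
if __name__ == '__main__':
    wod = get_wod()
    short_def, pronunciation, word_type = get_wod_info(wod)
    send_email(wod, word_type, pronunciation, short_def)
    conn, cursor = db_conn()
    insert_row(conn, cursor, datetime.date.today(), wod, word_type,
               pronunciation, '; '.join(short_def))
    conn.close()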
|
'''
author: Zitian(Daniel) Tong
date: 14:24 2019-05-05 2019
editor: PyCharm
email: danieltongubc@gmail.com
'''
from models.alert import Alert
alerts = Alert.all()
for alert in alerts:
    price = alert.load_item_price()
    print(price)
    print(alert.price_limit)
    alert.notify_if_price_reached()
if not alerts:
    print("No alerts have been created. Add an item and an Alert to begin!")
|
'''Money earned after one month, if the investment pays 15% interest per year.'''
n = input('Enter the amount invested: ')
n = float(n)
interespormes = 0.15/12
dineroganado = interespormes*n
print(f"The money earned per month is: {round(dineroganado, 2)}")
print(f"The money earned per year is: {n*0.15}")
|
def main():
    theSum = 0
    # Parse comma-separated numbers explicitly rather than with eval(), which
    # executes arbitrary input and fails when a single number is entered.
    numbers = [float(x) for x in input("Enter numbers separated by commas: ").split(",")]
    for i in numbers:
        theSum = theSum + i
    print("The sum is", theSum)
main()
|
#!/usr/bin/python3.4
# -*-coding:Utf-8
class Tableau:
    """Class defining a surface that can be written to, read, and erased
    via its methods. The attribute being modified is 'surface'."""
    def __init__(self):
        self.surface = ""
    def ecrire(self, message_a_ecrire):
        # Separate messages with a newline once something is already written.
        if self.surface != "":
            self.surface += '\n'
        self.surface += message_a_ecrire
    def lire(self):
        print(self.surface)
    def effacer(self):
        self.surface = ""
tab = Tableau()
print(tab.surface)
tab.ecrire("Hello world!")
tab.lire()
print(dir(tab))
|
from os.path import join
from xml.sax import ContentHandler, parseString
from action import Action
import kodi_baselibrary as kodi
class Expression():
def __init__(self):
self.name = ""
self.unit = None
class ExpressionContentHandler(ContentHandler):
def __init__(self, unit):
self.unit = unit
self.definitions = []
self.references = []
self.messages = []
def startElement(self, tag, attributes):
if tag == kodi.EXPRESSION_ELEMENT:
name = attributes['name']
if name:
expression = Expression()
expression.name = name
expression.unit = self.unit
self.definitions.append(expression)
else:
self.messages.append("Nameless expression definition in unit '" + self.unit + "'")
else:
for key, value in attributes.items():
self.parseforexpressionreference(value)
def characters(self, content):
self.parseforexpressionreference(content)
def parseforexpressionreference(self, content):
index = content.find(kodi.EXPRESSION_IDENTIFIER)
while index >= 0:
start = index + len(kodi.EXPRESSION_IDENTIFIER)
end = self.findendofreference(content, start)
parts = content[start + 1:end - 1].split(sep = ',')
name = parts[0]
if name:
expression = Expression()
expression.name = name.strip()
expression.unit = self.unit
self.references.append(expression)
else:
self.messages.append("Nameless expression reference in unit '" + self.unit + "'")
index = content.find(kodi.EXPRESSION_IDENTIFIER, end)
def findendofreference(self, content, start):
index = start
count = 0
while (index == start or count > 0) and index < len(content):
count = count + (1 if content[index] == '[' else 0)
count = count - (1 if content[index] == ']' else 0)
index += 1
return index
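# Worked illustration (hypothetical input; the actual value of
# kodi.EXPRESSION_IDENTIFIER is assumed to be '$EXP' here):
#   content = "visible=$EXP[HasFocus,1]"
#   '$EXP' is found at index 8, so start = 12 points at '['.
#   findendofreference walks the bracket depth and returns 24 (just past ']'),
#   hence content[start + 1:end - 1] == 'HasFocus,1' and the name is 'HasFocus'.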
class CheckExpressionsAction(Action):
def __init__(self):
super().__init__(
name = "Check expressions",
function = self.checkexpressions,
description = "Check expressions, both the definitions and the references for:\n" +
"- *WIP* duplicate expressions (expression definitions with the same name)\n" +
"- *WIP* unused expressions (expression definitions that are never used)\n" +
"- *WIP* missing expressions (expression references that do not exist as an expression definition)",
arguments = ['skin'])
def checkexpressions(self, messagecallback, arguments):
messagecallback("action", "\nChecking variable definitions and references...")
skin = arguments['skin']
for resolution in skin.resolutions:
messagecallback("info", "- Skin resolution: " + resolution.aspect + " (" + resolution.directory + ")")
self.resetexpressions()
self.parseexpressions(resolution, messagecallback)
self.analyzeexpressions(resolution, messagecallback)
def resetexpressions(self):
self.definitions = []
self.references = []
def parseexpressions(self, resolution, messagecallback):
for unit in resolution.units:
contenthandler = ExpressionContentHandler(unit)
parseString("".join(unit.lines), contenthandler)
self.definitions.extend(contenthandler.definitions)
self.references.extend(contenthandler.references)
messages = contenthandler.messages
for message in messages:
messagecallback("warning", "- " + unit.name + ": " + message)
messagecallback("info", "- Number of expressions: " + str(len(self.definitions)))
messagecallback("info", "- Number of references: " + str(len(self.references)))
def analyzeexpressions(self, resolution, messagecallback):
self.findduplicateexpressions(resolution, messagecallback)
self.findunusedexpressions(resolution, messagecallback)
self.findmissingexpressions(resolution, messagecallback)
def findduplicateexpressions(self, resolution, messagecallback):
for startindex, definition in enumerate(self.definitions):
for index in range(startindex + 1, len(self.definitions)):
if (definition.name == self.definitions[index].name):
messagecallback("warning", "- Duplicate expression: " + definition.name + " (" + definition.unit.name + " ~ " + self.definitions[index].unit.name + ")")
def findunusedexpressions(self, resolution, messagecallback):
referencednames = set([ reference.name for reference in self.references ])
for definition in self.definitions:
if definition.name not in referencednames:
messagecallback("message", "- " + definition.unit.name + ": Unused expression: " + definition.name)
def findmissingexpressions(self, resolution, messagecallback):
declarednames = set([ definition.name for definition in self.definitions ])
for reference in self.references:
if reference.name not in declarednames:
messagecallback("warning", "- " + reference.unit.name + ": Reference to non-existing (missing) expression: " + reference.name)
|
import requests
import json
url = 'http://localhost:8888'
url += '/v1/user/registration'
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
data = {
'email': 'm.petrob@list.ru',
# 'email': 'a.anisimov@lab15.ru',
'password': 'Password123',
'first_name': 'Михаил',
'last_name': 'Петров',
}
if __name__ == '__main__':
response = requests.post(url, headers=headers, data=data)
print(json.dumps(json.loads(response.text), indent=4, ensure_ascii=False))
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Utils
import os
import numpy as np
import matplotlib.pyplot as plt
import rasterio
from rasterio.plot import reshape_as_image
import rasterio.mask
from rasterio.windows import Window
def read_image(root, filename):
    """
    Read an image with rasterio and return an array [C, H, W]
    (bands, rows, cols); no schema/georef returned yet.
    root : root directory
    filename : image filename as string
    Returns: raster as an array
    """
    with rasterio.open(os.path.join(root, filename)) as img:
        img_array = img.read()
    return img_array
def get_tile(root, image_file, tile_size, idx):
    """
    image_file : image filename as string
    tile_size : tuple of the dimensions for the tile (width, height)
    idx : index of the tile, int
    Returns: tile of the image file [channel, width, height]
    """
    # Read the image (a rasterio dataset's .shape is (height, width))
    image = rasterio.open(os.path.join(root, image_file))
    height, width = image.shape
    tile_width = tile_size[0]
    tile_height = tile_size[1]
    # Number of tiles that fit across the image width
    nb_tile_w = width // tile_width
    row, col = divmod(idx, nb_tile_w)
    # Window(col_off, row_off, width, height)
    tile = image.read(window=Window(col*tile_width, row*tile_height, tile_width, tile_height))
    image.close()
    return tile
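# Usage sketch (paths and sizes are hypothetical):
#   tile = get_tile('/data', 'scene.tif', (256, 256), idx=3)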
# Visualisation
def view(dataset, idx):
"""
dataset: dataset contains tile & mask
idx : index
Returns : plot tile & mask
"""
item = dataset[idx]
raster_tile = reshape_as_image(np.array(item[0]).astype(np.uint8))
raster_gt = reshape_as_image(np.array(item[1][None,:,:]))
figure, ax = plt.subplots(nrows=1, ncols=2,figsize=(10,6))
ax[0].imshow(raster_tile)
ax[0].set_title('Raster Tile')
ax[0].set_axis_off()
ax[1].imshow(raster_gt)
ax[1].set_title('Raster Gt')
ax[1].set_axis_off()
plt.tight_layout()
plt.show()
def view_batch(tiles, gt, pred=None, size=None, ncols=None):
batch_size = tiles.shape[0]
ncols = batch_size
if size is not None :
ncols = size
if pred is None :
figure, ax = plt.subplots(nrows=2, ncols=ncols, figsize=(20, 8))
else :
figure, ax = plt.subplots(nrows=3, ncols=ncols, figsize=(20, 12))
for idx in range(ncols):
item_tile = tiles[idx]
item_gt = gt[idx]
raster_tile = reshape_as_image(np.array(item_tile).astype(np.uint8))
raster_gt = reshape_as_image(np.array(item_gt[None,:,:]))
ax[0][idx].imshow(raster_tile)
ax[0][idx].set_axis_off()
ax[1][idx].imshow(raster_gt)
ax[1][idx].set_axis_off()
if pred is not None :
item_pred = pred[idx]
raster_pred = reshape_as_image(np.array(item_pred[None,:,:]))
ax[2][idx].imshow(raster_pred)
ax[2][idx].set_axis_off()
plt.tight_layout()
plt.show()
|
#! /usr/bin/env python3
import sys
import subprocess
def print_error(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def run_command(command, input=None):
result = subprocess.run(command, check=True, capture_output=True, text=True, input=input)
return result.stdout.strip("\n").split("\n")
def list_windows():
window_ids = run_command(["bspc", "query", "-N", "-n", ".window"])
window_names = run_command(["xtitle", *window_ids])
return list(zip(window_ids, window_names))
def select_window(dmenu_options):
windows = list_windows()
selections = {
"({}) {}".format(i, wname): wid
for i, (wid, wname) in enumerate(windows)
}
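# dmenu echoes the chosen label(s); each label is mapped back to its window id below.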
selected_windows = run_command(["dmenu", *dmenu_options], input="\n".join(wname for wname in selections.keys()))
print_error("selections:", repr(selected_windows))
yield from (selections[sel] for sel in selected_windows)
if __name__ == "__main__":
dmenu_options = sys.argv[1:]
for w in select_window(dmenu_options):
print(w)
|
# Generated by Django 2.1.7 on 2019-08-28 04:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('level', '0004_auto_20190716_1028'),
]
operations = [
migrations.AddField(
model_name='level',
name='hint1farsi',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='level',
name='hint2farsi',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
#!/usr/bin/python2.7
#coding=utf8
r'''
Function:
Version: 1.0.0
Created: Tuyj
Created date: 2015/4/1
'''
from _env import addPaths
addPaths(".")
import json,datetime,os,re,shutil,copy,threading,unittest
import pyLibs.t_com as t_com
import pyLibs.t_multitask as t_multitask
def geSmartNowTime(): # returns 'yyyymmddHHMMSS.xx' (two leading digits of the microseconds)
timenow = datetime.datetime.now()
return "%04d%02d%02d%02d%02d%02d.%02s" % (timenow.year,timenow.month,timenow.day,timenow.hour,timenow.minute,timenow.second,str(timenow.microsecond)[:2])
class ConfigFile:
def __init__(self, full_filename, indent=2, smart=False, tasker=None, intervals=(3, 2, 2), ensure_ascii=True):
self.filename,self.indent,self.smart,self.tasker = full_filename,indent,smart,tasker
self.autoSaveInv,self.deletelastBad,self.deletelastBak = intervals
self.autoSaveInv = self.autoSaveInv if self.autoSaveInv > 0 else 3
self.is_ext_tasker = True
self.entrys = None
self.changed = False
self.gen_auto_flush = None
self.rlock = threading.RLock()
self.ensure_ascii = ensure_ascii
if os.path.isdir(self.filename):
raise ValueError('cfg file must be a regular file, not a directory')
if not os.path.exists(self.filename):
fp = open(self.filename, 'w')
fp.write('{}')
fp.close()
fp = open(self.filename, 'r')
try:
self.entrys = json.load(fp)
fp.close()
except Exception as ex:
fp.close()
if not self.smart or not self.__auto_fix():
raise ex
fp = open(self.filename, 'r')
self.entrys = json.load(fp)
fp.close()
if smart and self.tasker is None:
self.tasker = t_multitask.tMultitaskMgr('cfg-tasker@'+self.filename)
self.tasker.run(t_com.default_mini_thread_stack_size)
self.is_ext_tasker = False
if self.tasker is not None:
self.gen_auto_flush = self._g_auto_flush()
self.tasker.add(self.gen_auto_flush)
def __del__(self):
self.stop()
def stop(self):
self.rlock.acquire()
self.__flush2file()
self.rlock.release()
if self.gen_auto_flush is not None:
self.gen_auto_flush.close()
self.gen_auto_flush = None
if self.tasker is not None and not self.is_ext_tasker:
self.tasker.stop()
self.tasker = None
def Entrys(self):
self.rlock.acquire()
rout = copy.deepcopy(self.entrys)
self.rlock.release()
return rout
def Save(self, force=False):
self.rlock.acquire()
self.__flush2file(force)
self.rlock.release()
def getValue(self, domains, default=None):
'''e.g. file:
{
'China': {'JiangSu': 2014, 'XiangHai': 2015},
'Japan': {...}
}
domains = ['China', 'XiangHai'] -> return 2015
'''
if not isinstance(domains, list):
raise ValueError('domains MUST be list')
self.rlock.acquire()
found = self.entrys
for domain in domains:
if found.has_key(domain):
found = found[domain]
else:
self.rlock.release()
return default
if isinstance(found, list) or isinstance(found, dict): #TODO:ok?
out = copy.deepcopy(found)
self.rlock.release()
return out
self.rlock.release()
return found
def setValue(self, domains, value, save=True):
if not isinstance(domains, list):
raise ValueError('domains MUST be list')
keys,lastkey = domains[:-1],domains[-1]
self.rlock.acquire()
catched = self.entrys
for domain in keys:
if not catched.has_key(domain):
catched[domain] = {}
catched = catched[domain]
if catched.has_key(lastkey):
if catched[lastkey] == value:
self.rlock.release()
return
if isinstance(value, list) or isinstance(value, dict): #TODO:ok?
catched[lastkey] = copy.deepcopy(value)
else:
catched[lastkey] = value
self.changed = True
if save and not self.smart:
self.__flush2file()
self.rlock.release()
def _g_auto_flush(self):
while True:
yield t_multitask.sleep(self.autoSaveInv)
self.rlock.acquire()
self.__flush2file()
self.rlock.release()
def __flush2file(self, force=False):
if self.changed or force:
self.__auto_bak()
fp = open(self.filename, 'w')
json.dump(self.entrys, fp, indent=self.indent, ensure_ascii=self.ensure_ascii)
fp.close()
self.changed = False
def __auto_bak(self):
path = os.path.dirname(self.filename)
name = os.path.basename(self.filename)
if path == '':
path = './'
files = os.listdir(path)
baks = []
for file_ in files:
if not os.path.isfile(os.path.join(path, file_)):
continue
m = re.match(r'^.%s.(\d+\.\d+)$' % name, file_)
if m is None:
continue
baks.append(m.groups()[0])
baks.sort(reverse=True)
while len(baks) >= self.deletelastBak:
oldest = '.%s.%s' % (name,baks.pop())
oldest = os.path.join(path, oldest)
os.remove(oldest)
bak_name = '.%s.%s' % (name, geSmartNowTime())
shutil.copyfile(self.filename, os.path.join(path, bak_name))
def __auto_fix(self):
path = os.path.dirname(self.filename)
name = os.path.basename(self.filename)
files = os.listdir(path)
baks = []
bads = []
for file_ in files:
if not os.path.isfile(os.path.join(path, file_)):
continue
m = re.match(r'^%s.\d+\.\d+.bad$' % name, file_)
if m is not None:
bads.append(os.path.join(path, file_))
continue
m = re.match(r'^.%s.(\d+\.\d+)$' % name, file_)
if m is None:
continue
baks.append(m.groups()[0])
baks.sort(reverse=True)
last_bak_file = None
if len(baks) > 0:
last_bak_file = '.%s.%s' % (name,baks[0])
last_bak_file = os.path.join(path, last_bak_file)
if last_bak_file is None:
return False
bads.sort(reverse=True)
while len(bads) >= self.deletelastBad:
os.remove(bads.pop())
os.rename(self.filename, '%s.%s.bad' % (self.filename, geSmartNowTime()))
shutil.copyfile(last_bak_file, self.filename)
return True
cfg = ConfigFile('./test.cfg', ensure_ascii=False)
gParams = cfg.getValue(["post"])
gIndex = 0
class LuaTest(unittest.TestCase):
def setUp(self):
print '--------------setUp--------------\n'
def testComm(self):
global gIndex
print '--------------testComm--------------\n'
url,param = gParams[gIndex]
gIndex += 1
print gIndex
print url
print param
def tearDown(self):
print '--------------tearDown--------------\n'
loaded = False
if not loaded:
loaded = True
funname = "test_name"
for i in xrange(0, len(gParams)-1):
fn = funname + str(i)
exec("LuaTest.%s = LuaTest.testComm" % fn)
def funcA(url, gParams):
gParamstr = ""
payload_1 = {'access_token': 'caf099ef-9c34-3af2-9ae1-e7264134e7e4'}
for key,value in gParams.iteritems():
gParamstr += "'%s'='%s'" % (key, value)
for url,info in gParams:
funcA(url, info)
if __name__ == '__main__':
cfg = ConfigFile('test.cfg', ensure_ascii=False)
cfg.setValue(["1", "2"], "啊啊")
|
#Indexing
import numpy as np
#1-D Arrays
"array indexing is the same as acessing an array element"
arr = np.array([1,2,3])
print(arr[0])
#2-D Arrays
# To access elements of a 2-D array, use comma-separated integers giving the
# index along each dimension (row, then column).
arr2 = np.array([[1,2,3],[4,5,6]])
print("2nd element of the 2nd row should be 5:", arr2[1,1])
#3-D Arrays
# The same comma-separated indexing extends to 3-D arrays.
arr3 = np.array([[[1,2,3],[4,5,6]],[[1,2,3],[4,5,6]]])
print("3rd element of the 2nd row of the 2nd matrix should be 6:", arr3[1,1,2])
#Negative indexing
# Negative indexing works the same as with a normal Python sequence.
print("last element of the 1st row of a 2-D array should be 3:", arr2[0,-1])
|
from __future__ import absolute_import
from . import backends
from . import forms
from . import managers
from . import models
from . import urls
from . import views
default_app_config = 'provider.oauth2.apps.Oauth2'
|
import ast
from django.contrib import admin
from user_input.models import DailyUserInputEncouraged,\
DailyUserInputStrong,\
DailyUserInputOptional,\
InputsChangesFromThirdSources,\
UserDailyInput,\
Goals,\
DailyActivity
class DailyUserInputStrongInline(admin.StackedInline):
model = DailyUserInputStrong
class DailyUserInputEncouragedInline(admin.StackedInline):
model = DailyUserInputEncouraged
class DailyUserInputOptionalInline(admin.StackedInline):
model = DailyUserInputOptional
class InputsChangesFromThirdSourcesInline(admin.StackedInline):
model = InputsChangesFromThirdSources
class GoalsInline(admin.StackedInline):
model = Goals
class UserInputAdmin(admin.ModelAdmin):
list_display=('user','report_type','created_at', 'updated_at')
list_filter = ('created_at','updated_at',)
save_on_top = True
search_fields = ('user__username','user__email','user__first_name',
'user__last_name',)
inlines = [
DailyUserInputStrongInline,
DailyUserInputEncouragedInline,
DailyUserInputOptionalInline,
InputsChangesFromThirdSourcesInline,
GoalsInline
]
class DailyActivityAdmin(admin.ModelAdmin):
list_display = ('user','activity_name','created_at','duplicate','deleted')
list_filter = ('created_at',)
save_on_top = True
search_fields = ('user__username', 'created_at',)
def activity_name(self,obj):
activity_data = ast.literal_eval(obj.activity_data)
activity_name = activity_data.get('activityType','-')
return activity_name.lower()
admin.site.register(UserDailyInput,UserInputAdmin)
admin.site.register(DailyActivity, DailyActivityAdmin)
|
import os
import sys
import unittest
import mockings
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../core'))
sys.path.insert(1, path)
from now import Now
import chords
class TestRequiresVirtualEnv(unittest.TestCase):
def testModuleWithRequirements(self):
now = Now(tweak = "2015-01-01 12:00:00")
self.assertTrue(chords.requiresVirtualEnv(
mockings.createModule(
"WithRequirements",
"requirements = ['oauth2']\ndef main():\n\tpass"
)
))
def testModuleWithoutRequirements(self):
now = Now(tweak = "2015-01-01 12:00:00")
self.assertFalse(chords.requiresVirtualEnv(
mockings.createModule(
"WithRequirements",
"def main():\n\tpass"
)
))
def testVirtualEnvSignature(self):
self.assertEqual(
chords.virtualEnvSignature(["oauth2", "dnspython", "boto"]),
"478af6c3556b9c39a2668fe7fbe567bf77a153d3"
)
def testVirtualEnvSignatureDuplicatesDoNotMatter(self):
self.assertEqual(
chords.virtualEnvSignature(["dnspython", "oauth2", "boto", "dnspython", "boto"]),
"478af6c3556b9c39a2668fe7fbe567bf77a153d3"
)
def testVirtualEnvSignatureOrderDoesNotMatter(self):
self.assertEqual(
chords.virtualEnvSignature(["dnspython", "boto", "oauth2"]),
"478af6c3556b9c39a2668fe7fbe567bf77a153d3"
)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
from urllib.error import HTTPError
import requests
import lxml.html as lh
import pandas as pd
import wget
import os
url = 'https://www.slickcharts.com/sp500'
page = requests.get(url)
doc = lh.fromstring(page.content)
tr_elements = doc.xpath('//tr')
# Create empty list
col = []
# For each row, store each first element (header) and an empty list
for t in tr_elements[0]:
name = t.text_content()
col.append((name, []))
for j in range(1, len(tr_elements)):
# T is our j'th row
T = tr_elements[j]
# If row is not of size 7, the //tr data is not from our table
if len(T) != 7:
break
# i is the index of our column
i = 0
# Iterate through each element of the row
for t in T.iterchildren():
data = t.text_content()
# Leave the first column as text; attempt numeric conversion for the rest
if i > 0:
# Convert any numerical value to integers
try:
data = int(data)
except ValueError:
pass
# Append the data to the empty list of the i'th column
col[i][1].append(data)
# Increment i for the next column
i += 1
Dict = {title: column for (title, column) in col}
df = pd.DataFrame(Dict)
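# The scraped table's Symbol column drives the Yahoo Finance download loop below.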
names = df.Symbol.unique()
namesList = list(names)
tickers = namesList
# TODO: check that the list of S&P 500 ticker symbols is correct
for t in tickers:
t = t.replace(".", "-")
url = "https://query1.finance.yahoo.com/v7/finance/download/" + t + \
"?period1=1325376000&period2=1588204800&interval=1wk&events=history"
try:
wget.download(url, out=os.path.join(os.path.dirname(__file__), "datos", "bolsa"))
except HTTPError:
print("No se pudo descargar " + t)
|
from django.conf.urls.defaults import *
from blog.views import *
urlpatterns = patterns('blog.views',
url(r'^$', IndexView.as_view(), name="blog"),
url(r'^post/(?P<post_id>\d+)/$', obsolete_post),
url(r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$', PostView.as_view(), name="post"),
)
|
# -*- coding: utf-8 -*-
from datetime import date
from unittest import TestCase
from mock import patch
import six
from .helpers import example_file
from popolo_data.importer import Popolo
EXAMPLE_EVENT_JSON = b'''
{
"events": [
{
"classification": "legislative period",
"end_date": "2015-03-23",
"id": "term/12",
"identifiers": [
{
"identifier": "Q967549",
"scheme": "wikidata"
}
],
"name": "12th Riigikogu",
"organization_id": "1ba661a9-22ad-4d0f-8a60-fe8e28f2488c",
"start_date": "2011-03-27"
}
],
"organizations": [
{
"classification": "legislature",
"id": "1ba661a9-22ad-4d0f-8a60-fe8e28f2488c",
"identifiers": [
{
"identifier": "Q217799",
"scheme": "wikidata"
}
],
"name": "Riigikogu",
"seats": 101
}
]
}
'''
EXAMPLE_EVENT_NON_ASCII_JSON = b'''
{
"events": [
{
"classification": "legislative period",
"end_date": "2015-03-23",
"id": "2015",
"name": "2015\xe2\x80\x94",
"start_date": "2015-03-01"
}
]
}
'''
EXAMPLE_MULTIPLE_EVENTS = b'''
{
"events": [
{
"classification": "legislative period",
"end_date": "2015-03-23",
"id": "term/12",
"identifiers": [
{
"identifier": "Q967549",
"scheme": "wikidata"
}
],
"name": "12th Riigikogu",
"organization_id": "1ba661a9-22ad-4d0f-8a60-fe8e28f2488c",
"start_date": "2011-03-27"
},
{
"classification": "general election",
"end_date": "2015-03-01",
"id": "Q16412592",
"identifiers": [
{
"identifier": "Q16412592",
"scheme": "wikidata"
}
],
"name": "Estonian parliamentary election, 2015",
"start_date": "2015-03-01"
},
{
"classification": "legislative period",
"id": "term/13",
"identifiers": [
{
"identifier": "Q20530392",
"scheme": "wikidata"
}
],
"name": "13th Riigikogu",
"organization_id": "1ba661a9-22ad-4d0f-8a60-fe8e28f2488c",
"start_date": "2015-03-30"
}
],
"memberships": [
{
"area_id": "area/tartu_linn",
"legislative_period_id": "term/13",
"on_behalf_of_id": "IRL",
"organization_id": "1ba661a9-22ad-4d0f-8a60-fe8e28f2488c",
"person_id": "014f1aac-a694-4538-8b4f-a533233acb60",
"role": "member",
"start_date": "2015-04-09"
},
{
"legislative_period_id": "term/12",
"on_behalf_of_id": "IRL",
"organization_id": "1ba661a9-22ad-4d0f-8a60-fe8e28f2488c",
"person_id": "0259486a-0410-49f3-aef9-8b79c15741a7",
"role": "member"
},
{
"area_id": "area/harju-_ja_raplamaa",
"legislative_period_id": "term/13",
"on_behalf_of_id": "RE",
"organization_id": "1ba661a9-22ad-4d0f-8a60-fe8e28f2488c",
"person_id": "06d37ec8-45bc-44fe-a138-3427ef12c8dc",
"role": "member"
}
]
}
'''
class TestEvents(TestCase):
def test_empty_file_gives_no_events(self):
popolo = Popolo({})
assert len(popolo.events) == 0
def test_single_event_with_label(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
assert len(popolo.events) == 1
event = popolo.events[0]
assert event.name == '12th Riigikogu'
def test_start_and_end_dates(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events.first
assert event.start_date == date(2011, 3, 27)
assert event.end_date == date(2015, 3, 23)
def test_event_id(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events.first
assert event.id == 'term/12'
def test_event_organization_id(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events.first
assert event.organization_id == \
'1ba661a9-22ad-4d0f-8a60-fe8e28f2488c'
def test_event_organization(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events.first
org = popolo.organizations.first
assert event.organization == org
def test_event_classification(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events.first
assert event.classification == 'legislative period'
def test_event_identifiers(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events.first
assert event.identifiers == [
{
"identifier": "Q967549",
"scheme": "wikidata"
}
]
def test_event_repr(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events.first
if six.PY2:
assert repr(event) == b"<Event: 12th Riigikogu>"
else:
assert repr(event) == u"<Event: 12th Riigikogu>"
def test_event_repr_non_ascii(self):
with example_file(EXAMPLE_EVENT_NON_ASCII_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events.first
if six.PY2:
assert repr(event) == b"<Event: 2015\xe2\x80\x94>"
else:
assert repr(event) == u"<Event: 2015—>"
def test_event_identity_equality_and_inequality(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo_a = Popolo.from_filename(fname)
event_a = popolo_a.events.first
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo_b = Popolo.from_filename(fname)
event_b = popolo_b.events.first
assert event_a == event_b
assert not (event_a != event_b)
def test_term_current_at_true(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events[0]
assert event.current_at(date(2013, 1, 1))
def test_term_current_at_false_before(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events[0]
assert not event.current_at(date(1980, 1, 1))
def test_term_current_at_false_after(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events[0]
assert not event.current_at(date(2020, 1, 1))
@patch('popolo_data.base.date')
def test_term_current_true(self, mock_date):
mock_date.today.return_value = date(2013, 1, 1)
mock_date.side_effect = lambda *args, **kw: date(*args, **kw)
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
event = popolo.events[0]
assert event.current
def test_no_elections(self):
with example_file(EXAMPLE_EVENT_JSON) as fname:
popolo = Popolo.from_filename(fname)
assert len(popolo.elections) == 0
def test_elections(self):
with example_file(EXAMPLE_MULTIPLE_EVENTS) as fname:
popolo = Popolo.from_filename(fname)
elections = popolo.elections
assert len(elections) == 1
assert elections.first.id == 'Q16412592'
def test_legislative_periods(self):
with example_file(EXAMPLE_MULTIPLE_EVENTS) as fname:
popolo = Popolo.from_filename(fname)
legislative_periods = popolo.legislative_periods
assert len(legislative_periods) == 2
for lp in legislative_periods:
assert lp.classification == 'legislative period'
assert popolo.terms.first == legislative_periods.first
def test_latest_legislative_period(self):
with example_file(EXAMPLE_MULTIPLE_EVENTS) as fname:
popolo = Popolo.from_filename(fname)
legislative_period = popolo.latest_legislative_period
assert legislative_period.id == 'term/13'
assert legislative_period == popolo.latest_term
def test_event_memberships(self):
with example_file(EXAMPLE_MULTIPLE_EVENTS) as fname:
popolo = Popolo.from_filename(fname)
term = popolo.latest_term
memberships = term.memberships
assert len(memberships) == 2
|
#!/usr/bin/env python3
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Usage: change_mach_o_flags.py [--executable-heap] [--no-pie] <executablepath>
Arranges for the executable at |executable_path| to have its data (heap)
pages protected to prevent execution on Mac OS X 10.7 ("Lion"), and to have
the PIE (position independent executable) bit set to enable ASLR (address
space layout randomization). With --executable-heap or --no-pie, the
respective bits are cleared instead of set, making the heap executable or
disabling PIE/ASLR.
This script is able to operate on thin (single-architecture) Mach-O files
and fat (universal, multi-architecture) files. When operating on fat files,
it will set or clear the bits for each architecture contained therein.
NON-EXECUTABLE HEAP
Traditionally in Mac OS X, 32-bit processes did not have data pages set to
prohibit execution. Although user programs could call mprotect and
mach_vm_protect to deny execution of code in data pages, the kernel would
silently ignore such requests without updating the page tables, and the
hardware would happily execute code on such pages. 64-bit processes were
always given proper hardware protection of data pages. This behavior was
controllable on a system-wide level via the vm.allow_data_exec sysctl, which
is set by default to 1. The bit with value 1 (set by default) allows code
execution on data pages for 32-bit processes, and the bit with value 2
(clear by default) does the same for 64-bit processes.
In Mac OS X 10.7, executables can "opt in" to having hardware protection
against code execution on data pages applied. This is done by setting a new
bit in the |flags| field of an executable's |mach_header|. When
MH_NO_HEAP_EXECUTION is set, proper protections will be applied, regardless
of the setting of vm.allow_data_exec. See xnu-1699.22.73/osfmk/vm/vm_map.c
override_nx and xnu-1699.22.73/bsd/kern/mach_loader.c load_machfile.
The Apple toolchain has been revised to set the MH_NO_HEAP_EXECUTION when
producing executables, provided that -allow_heap_execute is not specified
at link time. Only linkers shipping with Xcode 4.0 and later (ld64-123.2 and
later) have this ability. See ld64-123.2.1/src/ld/Options.cpp
Options::reconfigureDefaults() and
ld64-123.2.1/src/ld/HeaderAndLoadCommands.hpp
HeaderAndLoadCommandsAtom<A>::flags().
This script sets the MH_NO_HEAP_EXECUTION bit on Mach-O executables. It is
intended for use with executables produced by a linker that predates Apple's
modifications to set this bit itself. It is also useful for setting this bit
for non-i386 executables, including x86_64 executables. Apple's linker only
sets it for 32-bit i386 executables, presumably under the assumption that
the value of vm.allow_data_exec is set in stone. However, if someone were to
change vm.allow_data_exec to 2 or 3, 64-bit x86_64 executables would run
without hardware protection against code execution on data pages. This
script can set the bit for x86_64 executables, guaranteeing that they run
with appropriate protection even when vm.allow_data_exec has been tampered
with.
POSITION-INDEPENDENT EXECUTABLES/ADDRESS SPACE LAYOUT RANDOMIZATION
This script sets or clears the MH_PIE bit in an executable's Mach-O header,
enabling or disabling position independence on Mac OS X 10.5 and later.
Processes running position-independent executables have varying levels of
ASLR protection depending on the OS release. The main executable's load
address, shared library load addresses, and the heap and stack base
addresses may be randomized. Position-independent executables are produced
by supplying the -pie flag to the linker (or defeated by supplying -no_pie).
Executables linked with a deployment target of 10.7 or higher have PIE on
by default.
This script is never strictly needed during the build to enable PIE, as all
linkers used are recent enough to support -pie. However, it's used to
disable the PIE bit as needed on already-linked executables.
"""
import optparse
import os
import struct
import sys
# <mach-o/fat.h>
FAT_MAGIC = 0xcafebabe
FAT_CIGAM = 0xbebafeca
# <mach-o/loader.h>
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
MH_EXECUTE = 0x2
MH_PIE = 0x00200000
MH_NO_HEAP_EXECUTION = 0x01000000
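# The flag handling below is plain bit twiddling on the mach_header flags
# word; e.g. with flags == 0x00200085 (MH_PIE set):
#   flags |= MH_NO_HEAP_EXECUTION   # -> 0x01200085 (bit set)
#   flags &= ~MH_PIE                # -> 0x01000085 (bit cleared)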
class MachOError(Exception):
"""A class for exceptions thrown by this module."""
pass
def CheckedSeek(file, offset):
"""Seeks the file-like object at |file| to offset |offset| and raises a
MachOError if anything funny happens."""
file.seek(offset, os.SEEK_SET)
new_offset = file.tell()
if new_offset != offset:
raise MachOError('seek: expected offset %d, observed %d' % (offset, new_offset))
def CheckedRead(file, count):
"""Reads |count| bytes from the file-like |file| object, raising a
MachOError if any other number of bytes is read."""
bytes = file.read(count)
if len(bytes) != count:
raise MachOError('read: expected length %d, observed %d' % (count, len(bytes)))
return bytes
def ReadUInt32(file, endian):
"""Reads an unsigned 32-bit integer from the file-like |file| object,
treating it as having endianness specified by |endian| (per the |struct|
module), and returns it as a number. Raises a MachOError if the proper
length of data can't be read from |file|."""
bytes = CheckedRead(file, 4)
(uint32,) = struct.unpack(endian + 'I', bytes)
return uint32
def ReadMachHeader(file, endian):
"""Reads an entire |mach_header| structure (<mach-o/loader.h>) from the
file-like |file| object, treating it as having endianness specified by
|endian| (per the |struct| module), and returns a 7-tuple of its members
as numbers. Raises a MachOError if the proper length of data can't be read
from |file|."""
bytes = CheckedRead(file, 28)
magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \
struct.unpack(endian + '7I', bytes)
return magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags
def ReadFatArch(file):
"""Reads an entire |fat_arch| structure (<mach-o/fat.h>) from the file-like
|file| object, treating it as having endianness specified by |endian|
(per the |struct| module), and returns a 5-tuple of its members as numbers.
Raises a MachOError if the proper length of data can't be read from
|file|."""
bytes = CheckedRead(file, 20)
cputype, cpusubtype, offset, size, align = struct.unpack('>5I', bytes)
return cputype, cpusubtype, offset, size, align
def WriteUInt32(file, uint32, endian):
"""Writes |uint32| as an unsigned 32-bit integer to the file-like |file|
object, treating it as having endianness specified by |endian| (per the
|struct| module)."""
bytes = struct.pack(endian + 'I', uint32)
assert len(bytes) == 4
file.write(bytes)
def HandleMachOFile(file, options, offset=0):
"""Seeks the file-like |file| object to |offset|, reads its |mach_header|,
and rewrites the header's |flags| field if appropriate. The header's
endianness is detected. Both 32-bit and 64-bit Mach-O headers are supported
(mach_header and mach_header_64). Raises MachOError if used on a header that
does not have a known magic number or is not of type MH_EXECUTE. The
MH_PIE and MH_NO_HEAP_EXECUTION bits are set or cleared in the |flags| field
according to |options| and written to |file| if any changes need to be made.
If already set or clear as specified by |options|, nothing is written."""
CheckedSeek(file, offset)
magic = ReadUInt32(file, '<')
if magic == MH_MAGIC or magic == MH_MAGIC_64:
endian = '<'
elif magic == MH_CIGAM or magic == MH_CIGAM_64:
endian = '>'
else:
raise MachOError('Mach-O file at offset %d has illusion of magic' % offset)
CheckedSeek(file, offset)
magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \
ReadMachHeader(file, endian)
assert magic == MH_MAGIC or magic == MH_MAGIC_64
if filetype != MH_EXECUTE:
raise MachOError('Mach-O file at offset %d is type 0x%x, expected MH_EXECUTE' % (offset, filetype))
original_flags = flags
if options.no_heap_execution:
flags |= MH_NO_HEAP_EXECUTION
else:
flags &= ~MH_NO_HEAP_EXECUTION
if options.pie:
flags |= MH_PIE
else:
flags &= ~MH_PIE
if flags != original_flags:
CheckedSeek(file, offset + 24)
WriteUInt32(file, flags, endian)
def HandleFatFile(file, options, fat_offset=0):
"""Seeks the file-like |file| object to |offset| and loops over its
|fat_header| entries, calling HandleMachOFile for each."""
CheckedSeek(file, fat_offset)
magic = ReadUInt32(file, '>')
assert magic == FAT_MAGIC
nfat_arch = ReadUInt32(file, '>')
for index in range(0, nfat_arch):
cputype, cpusubtype, offset, size, align = ReadFatArch(file)
assert size >= 28
# HandleMachOFile will seek around. Come back here after calling it, in
# case it sought.
fat_arch_offset = file.tell()
HandleMachOFile(file, options, offset)
CheckedSeek(file, fat_arch_offset)
def main(me, args):
parser = optparse.OptionParser('%prog [options] <executable_path>')
parser.add_option(
'--executable-heap',
action='store_false',
dest='no_heap_execution',
default=True,
help='Clear the MH_NO_HEAP_EXECUTION bit')
parser.add_option(
'--no-pie',
action='store_false',
dest='pie',
default=True,
help='Clear the MH_PIE bit')
(options, loose_args) = parser.parse_args(args)
if len(loose_args) != 1:
parser.print_usage()
return 1
executable_path = loose_args[0]
executable_file = open(executable_path, 'rb+')
magic = ReadUInt32(executable_file, '<')
if magic == FAT_CIGAM:
# Check FAT_CIGAM and not FAT_MAGIC because the read was little-endian.
HandleFatFile(executable_file, options)
elif magic == MH_MAGIC or magic == MH_CIGAM or \
magic == MH_MAGIC_64 or magic == MH_CIGAM_64:
HandleMachOFile(executable_file, options)
else:
raise MachOError('%s is not a Mach-O or fat file' % executable_path)
executable_file.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[0], sys.argv[1:]))
|
import sys
import traceback
from six.moves import _thread, queue as Queue
import time
import cx_Oracle
from tc_lib import sub, send
from copy import deepcopy
import wx
from pprint import pprint
e=sys.exit
import itertools
import threading
#----------------------------------------------------------------------
def formatExceptionInfo(maxTBlevel=5):
cla, exc, trbk = sys.exc_info()
excName = cla.__name__
try:
excArgs = exc.__dict__["args"]
except KeyError:
excArgs = "<no args>"
excTb = traceback.format_tb(trbk, maxTBlevel)
#print(excName, excArgs, excTb)
return ', '.join([excName, excArgs, ', '.join(excTb)])
class DbThread2(threading.Thread):
def __init__(self, win, list_id, query, cur, update_evt, exit_evt, log_evt):
threading.Thread.__init__(self)
self.cur = cur
self.win = win
self.list_id=list_id
self.query = query
self.q=self.query[0]
self.running_query=None
if len(self.query)>1:
self.running_query=self.query[1]
self.itemDataMap={}
self.description={}
self.update_evt=update_evt
self.exit_evt=exit_evt
self.log_evt=log_evt
def info(self, msg):
evt = self.log_evt[0](id=0,value = (self.list_id,msg))
wx.PostEvent(self.win, evt)
def run(self):
if 1: #while self.keepGoing:
# We communicate with the UI by sending events to it. There can be
# no manipulation of UI objects from the worker thread.
#print(int(self.val))
#wx.PostEvent(self.win, evt)
#send("start_timer", () )
#time.sleep(2)
try:
self.info('Starting db request')
#evt = self.log_evt[0](id=0,value = (self.list_id, 'Strating db request',))
#wx.PostEvent(self.win, evt)
self.doQuery(self.q)
#send("stop_timer", (28282882,) )
#print ('3131231')
#for d in self.description:
#print (dir(d[1]))
#e(0)
if 0:
wx.CallAfter(send,"update_list", (self.list_id,self.itemDataMap,self.description) )
else:
evt = self.update_evt[0](value = (self.list_id,self.itemDataMap,self.description,'update_list'))
#print (evt)
wx.PostEvent(self.win, evt)
if self.running_query and self.running_ids:
self.info('Extracting running stats.')
self.cur.arraysize=1
#print(self.running_query % '4820')
self.q = self.running_query % ','.join(self.running_ids)
self.cur.execute(self.q)
running_stats={}
for i, row in enumerate(self.cur):
running_stats[row[0]]=row
evt = self.update_evt[0](value = (self.list_id,running_stats,None,'update_running'))
#print (evt)
wx.PostEvent(self.win, evt)
self.cur.close()
self.info('Closing db cursor.')
except cx_Oracle.DatabaseError as e:
error, = e.args
print ('#'*80)
print ('#'*71+type(self).__name__)
if error.code == 955:
print('Table already exists')
if error.code == 1031:
print("Insufficient privileges - are you sure you're using the owner account?")
print(error.code)
print(error.message)
print(error.context)
print(self.q)
print ('#'*80)
print ('#'*80)
print(formatExceptionInfo())
if 0:
try:
pass
except Exception as err:
print ('#'*80)
print ('#'*80)
print(str(err))
print ('#'*80)
print ('#'*80)
print(formatExceptionInfo())
if 0: #kill event
wx.PostEvent(self.win, self.exit_evt[0](1))
#print('Thread is done')
self.running = False
#return 999
def doQuery(self, query):
#connstr= 'oats/manage@jc1lbiorc1:1521/oradb1p'
#cur.execute('SELECt * FROM (%s) WHERE 1=2' % q)
#order by 1, rowid
#print(sel)
#e(0)
#self.cur = con.cursor()
self.info('Extracting query data.')
self.cur.arraysize=1
self.cur.execute(query)
self.running_ids=[]
#return self.cur
#log.info('Starting table list fetch.', extra=d)
clen=len(self.cur.description)
instatus =False
if clen>3 and self.cur.description[3][0] in ['STATUS']:
instatus=True
if 1:
for i, row in enumerate(self.cur):
if 1:
#print(len(row))
#sys.exit()
if row:
#print(clen , self.cur.description[3])
if clen>3 and instatus:
#print(row[3])
if '-RUNNING' in row[3]:
self.running_ids.append(str(row[0]))
#print(row)
if i%100==0:
self.info('%d rows processed.' % i)
self.itemDataMap[i]=[self.nvl(x,i) for i,x in enumerate(row)]
self.description=deepcopy(self.cur.description)
#print(self.running_ids)
#self.cur.close()
#con.close()
def nvl(self,val, col_id):
if val: return val
else:
return ''
def sub(self, val, col_id):
if val: return val
else:
if col_id in self.ints:
return -1
else:
return ''
def stop2(self):
#pprint(dir(self.cur))
#self.cur.close()
del self.cur
#pprint(dir(self))
|
import csv
data = []
with open('multi_school.csv', 'r') as file:
    reader = csv.reader(file)
    for row in reader:
        data.append(row)
for datum in data:
    print(datum)
|
#!/bin/python
from solution import solution
# Regular input
def test_solution_5_3446144():
assert solution(5, [3, 4, 4, 6, 1, 4, 4]) == [3, 2, 2, 4, 2]
|
#!/usr/bin/python
sample_dict={'a':"apple",'b':"ball"}
sample_dict.update({'b':"boy",'c':'cat'})
print(sample_dict['a'],sample_dict.get('b'),sample_dict.get('c'))
|
from operator import itemgetter
from collections import UserDict
from datetime import datetime
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Tuple, Union, cast
import pandas as pd
import pyModeS as pms
from tqdm.autonotebook import tqdm
from traffic.core import Flight, Traffic
from traffic.data.basic.airport import Airport
from traffic.data import airports
class Aircraft(object):
def __init__(self, icao24: str, lat0: float, lon0: float) -> None:
self.icao24 = icao24
self._callsign: Optional[str] = None
self.cumul: List[Dict] = []
self.t0: Optional[datetime] = None
self.t1: Optional[datetime] = None
self.tpos: Optional[datetime] = None
self.m0: Optional[str] = None
self.m1: Optional[str] = None
self.lat: Optional[float] = None
self.lon: Optional[float] = None
self.alt: Optional[float] = None
self.lat0: float = lat0
self.lon0: float = lon0
@property
def callsign(self):
return self._callsign
@callsign.setter
def callsign(self, args):
t, msg = args
callsign = pms.adsb.callsign(msg).strip("_")
if callsign == "":
return
self._callsign = callsign
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, callsign=self._callsign)
)
@property
def speed(self):
pass
@speed.setter
def speed(self, args):
t, msg = args
vdata = pms.adsb.velocity(msg)
if vdata is None:
return
spd, trk, roc, tag = vdata
if tag != "GS":
return
if (spd is None) or (trk is None):
return
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
groundspeed=spd,
track_angle=trk,
vertical_speed=roc,
)
)
@property
def position(self):
pass
@position.setter
def position(self, args):
t, msg = args
oe = pms.adsb.oe_flag(msg)
setattr(self, "m" + str(oe), msg)
setattr(self, "t" + str(oe), t)
if (
self.t0 is not None
and self.t1 is not None
and abs((self.t0 - self.t1).total_seconds()) < 10
):
latlon = pms.adsb.position(
self.m0, self.m1, self.t0, self.t1, self.lat0, self.lon0
)
else:
latlon = None
if latlon is not None:
self.tpos = t
self.lat, self.lon = latlon
self.alt = pms.adsb.altitude(msg)
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
latitude=self.lat,
longitude=self.lon,
altitude=self.alt,
)
)
@property
def surface(self):
pass
@surface.setter
def surface(self, args):
t, msg = args
self.lat, self.lon = pms.adsb.surface_position_with_ref(
msg, self.lat0, self.lon0
)
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
latitude=self.lat,
longitude=self.lon,
)
)
@property
def bs20(self):
pass
@bs20.setter
def bs20(self, args):
t, msg = args
callsign = pms.adsb.callsign(msg).strip("_")
if callsign == "":
return
self._callsign = callsign
self.cumul.append(
dict(timestamp=t, icao24=self.icao24, callsign=self._callsign)
)
@property
def bs40(self):
pass
@bs40.setter
def bs40(self, args):
t, msg = args
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
alt_fms=pms.commb.alt40fms(msg),
alt_mcp=pms.commb.alt40mcp(msg),
p_baro=pms.commb.p40baro(msg),
)
)
@property
def bs44(self):
pass
@bs44.setter
def bs44(self, args):
t, msg = args
wind = pms.commb.wind44(msg)
wind = wind if wind is not None else (None, None)
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
humidity=pms.commb.hum44(msg),
pression=pms.commb.p44(msg),
temperature=pms.commb.temp44(msg),
windspeed=wind[0],
winddirection=wind[1],
)
)
@property
def bs50(self):
pass
@bs50.setter
def bs50(self, args):
t, msg = args
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
gs=pms.commb.gs50(msg),
roll=pms.commb.roll50(msg),
tas=pms.commb.tas50(msg),
track=pms.commb.trk50(msg),
track_rate=pms.commb.rtrk50(msg),
)
)
@property
def bs60(self):
pass
@bs60.setter
def bs60(self, args):
t, msg = args
self.cumul.append(
dict(
timestamp=t,
icao24=self.icao24,
ias=pms.commb.ias60(msg),
heading=pms.commb.hdg60(msg),
mach=pms.commb.mach60(msg),
vrbaro=pms.commb.vr60baro(msg),
vrins=pms.commb.vr60ins(msg),
)
)
class AircraftDict(UserDict):
lat0: float
lon0: float
def __missing__(self, key):
self[key] = value = Aircraft(key, self.lat0, self.lon0)
return value
def set_latlon(self, lat0, lon0):
self.lat0 = lat0
self.lon0 = lon0
for ac in self.values():
ac.lat0 = lat0
ac.lon0 = lon0
class Decoder:
def __init__(
self, reference: Union[str, Airport, Tuple[float, float]]
) -> None:
if isinstance(reference, str):
reference = airports[reference]
if isinstance(reference, Airport):
lat0, lon0 = reference.lat, reference.lon
else:
lat0, lon0 = cast(Tuple[float, float], reference)
self.acs = AircraftDict()
self.acs.set_latlon(lat0, lon0)
@classmethod
def from_file(
cls,
filename: Union[str, Path],
reference: Union[str, Airport, Tuple[float, float]],
):
if isinstance(filename, str):
filename = Path(filename)
with filename.open("r") as fh:
all_lines = fh.readlines()
decoder = cls(reference)
decoder.process(
list(
(
datetime.fromtimestamp(
float(line.strip().split(",")[0])
),
cast(str, line.strip().split(",")[1][18:]),
)
for line in all_lines
)
)
return decoder
def process(self, msgs: Iterable[Tuple[datetime, str]]):
# Materialize msgs up front: computing the total by iterating over the
# argument would exhaust a generator before any message is decoded.
msgs = list(msgs)
for i, (time, msg) in tqdm(enumerate(msgs), total=len(msgs)):
if i & 127 == 127:
# reset the reference lat/lon
pos = list(
(ac.lat, ac.lon)
for ac in self.acs.values()
if ac.alt is not None
and ac.alt < 5000
and ac.tpos is not None
and (time - ac.tpos).total_seconds() < 20 * 60
)
n = len(pos)
if n > 0:
self.acs.set_latlon(
sum(a[0] for a in pos) / n, sum(a[1] for a in pos) / n
)
if int(pms.crc(msg, encode=False), 2) != 0:
continue
icao = pms.icao(msg)
if icao is None:
print(icao)
continue
ac = self.acs[icao.lower()]
df = pms.df(msg)
if df == 17 or df == 18:
# ADS-B
tc = pms.adsb.typecode(msg)
if 1 <= tc <= 4:
ac.callsign = time, msg
if 5 <= tc <= 8:
ac.surface = time, msg
if tc == 19:
ac.speed = time, msg
if 9 <= tc <= 18:
ac.position = time, msg
# if 9 <= tc <= 18:
# ac["nic_bc"] = pms.adsb.nic_b(msg)
# if (5 <= tc <= 8) or (9 <= tc <= 18) or (20 <= tc <= 22):
# ac["HPL"], ac["RCu"], ac["RCv"] = pms.adsb.nuc_p(msg)
# if (ac["ver"] == 1) and ("nic_s" in ac.keys()):
# ac["Rc"], ac["VPL"] = pms.adsb.nic_v1(msg, ac["nic_s"])
# elif (
# (ac["ver"] == 2)
# and ("nic_a" in ac.keys())
# and ("nic_bc" in ac.keys())
# ):
# ac["Rc"] = pms.adsb.nic_v2(msg, ac["nic_a"], ac["nic_bc"])
# if tc == 19:
# ac["HVE"], ac["VVE"] = pms.adsb.nuc_v(msg)
# if ac["ver"] in [1, 2]:
# ac["EPU"], ac["VEPU"] = pms.adsb.nac_v(msg)
# if tc == 29:
# ac["PE_RCu"], ac["PE_VPL"], ac["base"] = pms.adsb.sil(
# msg, ac["ver"]
# )
# ac["HFOMr"], ac["VFOMr"] = pms.adsb.nac_p(msg)
# if tc == 31:
# ac["ver"] = pms.adsb.version(msg)
# ac["HFOMr"], ac["VFOMr"] = pms.adsb.nac_p(msg)
# ac["PE_RCu"], ac["PE_VPL"], ac["sil_base"] = pms.adsb.sil(
# msg, ac["ver"]
# )
# if ac["ver"] == 1:
# ac["nic_s"] = pms.adsb.nic_s(msg)
# elif ac["ver"] == 2:
# ac["nic_a"], ac["nic_bc"] = pms.adsb.nic_a_c(msg)
elif df == 20 or df == 21:
bds = pms.bds.infer(msg)
if bds == "BDS20":
ac.bs20 = time, msg
if bds == "BDS40":
ac.bs40 = time, msg
if bds == "BDS44":
ac.bs40 = time, msg
if bds == "BDS50":
ac.bds50 = time, msg
elif bds == "BDS60":
ac.bds60 = time, msg
@property
def aircraft(self):
return sorted(
(
dict(
icao24=key,
callsign=ac.callsign,
length=len(ac.cumul),
position=ac.lat is not None,
data=ac,
)
for (key, ac) in self.acs.items()
if len(ac.cumul) > 0 and ac.callsign is not None
),
key=itemgetter("length"),
reverse=True,
)
@property
def traffic(self):
return Traffic.from_flights(
[self[elt["icao24"]] for elt in self.aircraft]
)
def __getitem__(self, icao):
df = pd.DataFrame.from_records(self.acs[icao].cumul)
return Flight(
df.assign(
callsign=df.callsign.replace("", None)
.fillna(method="ffill")
.fillna(method="bfill")
)
)
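if __name__ == "__main__":
    # A minimal usage sketch; "recording.csv" and the "LFBO" reference are
    # placeholders. Each line of the file is expected to hold
    # "<unix timestamp>,<18 header characters + raw hex message>", matching
    # the parsing done in from_file above.
    decoder = Decoder.from_file("recording.csv", reference="LFBO")
    for entry in decoder.aircraft[:5]:
        print(entry["icao24"], entry["callsign"], entry["length"])
    t = decoder.traffic  # assemble all decoded flights into a Traffic object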
|
import cv2
import numpy as np
import os.path
path = 'C:\\SDA\\SDA_Crop'
num_files = len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
for file in range(num_files):
#import image
image = cv2.imread('C:\\SDA\\SDA_Crop\\crop'+str(file+1)+'.png')
#cv2.imshow('orig',image)
#cv2.waitKey(0)
#grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #cv2.imshow('gray', gray)
    #cv2.waitKey(0)
#binary
    ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
    #cv2.imshow('second', thresh)
    #cv2.waitKey(0)
#dilation
    kernel = np.ones((5, 10), np.uint8)
    img_dilation = cv2.dilate(thresh, kernel, iterations=1)
    #cv2.imshow('dilated', img_dilation)
    #cv2.waitKey(0)
#find contours
    im2, ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#sort contours
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
for i, ctr in enumerate(sorted_ctrs):
# Get bounding box
x, y, w, h = cv2.boundingRect(ctr)
# Getting ROI
roi = image[y:y+h, x:x+w]
        if h < 50:
            # Mask out small detections (height < 50 px) with a filled white rectangle
            #cv2.imwrite('segment no:'+str(i)+".jpg", roi)
            cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 255), thickness=cv2.FILLED)
            # cv2.drawContours(roi, ctr, -1, (90, 0, 255), -1)
            # cv2.imwrite('D:\\pythonScripts\\temp\\segment no'+str(i)+".jpg", image)
            #emp = np.empty(roi.shape)
            #image[roi] = emp
            # cv2.imshow('roi', roi)
            # print(roi)
            #cv2.waitKey(0)
cv2.imwrite("C:\\SDA\\SDA_Clean\\expsign"+str(file+1)+".png",image)
#cv2.imshow('marked areas',image)
#cv2.waitKey(0)
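# Note (assumption about the environment): the three-value unpacking of
# cv2.findContours above matches OpenCV 3.x. OpenCV 2.x and 4.x return only
# (contours, hierarchy); a version-agnostic variant would be:
#
#   out = cv2.findContours(img_dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#   ctrs = out[0] if len(out) == 2 else out[1]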
|
#!/usr/bin/env python3
# After running SNP_Utils (https://github.com/mojaveazure/SNP_Utils), some
# SNPs will fail and not have any BLAST hits (sometimes because our identity
# threshold is very high). So, we use IPK BLAST server (with the latest morex
# reference genome) to identify the best possible hit and the associated
# physical positions.
# IMPORTANT CAVEAT: Currently, this script uses contextual fasta sequences and
# assumes the SNP is in the middle (we have the same number of bases on
# both sides of the SNP)
import os
import sys
import math
from Bio import SeqIO
from Bio.Seq import Seq
# NOTE: Bio.Alphabet (generic_dna) was removed in Biopython 1.78+; Seq no longer
# takes an alphabet argument, so it is not imported here.
from bs4 import BeautifulSoup
# IUPAC nucleotide base code: [Base, Reverse complement]
IUPAC_TABLE = {
'R': [['A', 'G'], 'Y'],
'Y': [['C', 'T'], 'R'],
'S': [['G', 'C'], 'S'],
'W': [['A', 'T'], 'W'],
'K': [['T', 'G'], 'M'],
'M': [['A', 'C'], 'K'],
'B': [['C', 'G', 'T'], 'V'],
'D': [['A', 'G', 'T'], 'H'],
'H': [['A', 'C', 'T'], 'D'],
'V': [['A', 'C', 'G'], 'B'],
'N': [['A', 'C', 'G', 'T'], 'N']
}
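# For example, IUPAC_TABLE['Y'] encodes the ambiguity code Y: it stands for 'C'
# or 'T', and its reverse complement is the code 'R':
#
#   bases, complement = IUPAC_TABLE['Y']
#   # bases == ['C', 'T'], complement == 'R'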
def split(seq_str):
    """Split a string into a list of single characters."""
    return list(seq_str)
def read_fasta(fasta_file):
"""Parse the FASTA file and keep only the sequence identifiers and
sequences in a dictionary."""
fasta_dict = {}
with open(fasta_file, "rt") as handle:
for record in SeqIO.parse(handle, "fasta"):
fasta_dict[record.id] = split(record.seq)
return fasta_dict
def read_html(blast_results_html):
"""Read in html file containing IPK BLAST search results.
Multiple SNP searches can be stored in a single HTML file."""
with open(blast_results_html) as fp:
soup = BeautifulSoup(''.join(fp), features="lxml")
return soup
def closest(idx_lst, K):
"""Identify closest position to the middle of the sequence. For the BOPA and 9K
SNPs, many of the SNPs are located in the middle of the contextual sequence, however
this is not always the case. When the SNP is not in the center of the contextual
sequence, pick the closest SNP."""
return idx_lst[min(range(len(idx_lst)), key = lambda i: abs(idx_lst[i] - K))]
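# Example: with candidate indices [48, 53] and midpoint K=50, closest() returns
# 48 (distance 2) rather than 53 (distance 3).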
def context_snp_win(fasta_dict, snp_name):
"""Generate a 7bp window for the contextual sequence that contains the
SNP. For BOPA and 9K SNPs, most of the time the SNP is in the middle
of the contextual fasta sequence. If it is not in the middle, move
left and right until we find the SNP."""
# Identify the middle of the contextual sequence
mid_context_snpidx = math.floor(len(fasta_dict[snp_name])/2)
# Generate 7bp window containing SNP
# This is case insensitive
if fasta_dict[snp_name][mid_context_snpidx].upper() not in IUPAC_TABLE.keys():
print(snp_name)
print("Middle of sequence is not the SNP, move left/right until we identify the SNP")
        # Move left from center until we find SNP
        for i in range(mid_context_snpidx, -1, -1):
            if fasta_dict[snp_name][i].upper() not in IUPAC_TABLE.keys():
                continue
            else:
                # We found the SNP, save SNP index
                lsnp_idx = i
                break
# Move right from the center until we find SNP
for i in range(mid_context_snpidx, len(fasta_dict[snp_name])):
if fasta_dict[snp_name][i].upper() not in IUPAC_TABLE.keys():
continue
else:
# We found the SNP
rsnp_idx = i
break
# Identify the position closest to center to use as SNP
# It's possible there is no SNP left of the midpoint, check this
try:
lsnp_idx
except NameError:
print("No SNP left of midpoint")
# Since we only have a SNP right of the midpoint, use that as SNP
context_snpidx = rsnp_idx
context_seq = [''.join(fasta_dict[snp_name][context_snpidx-3:context_snpidx]), fasta_dict[snp_name][context_snpidx], ''.join(fasta_dict[snp_name][context_snpidx+1:context_snpidx+4])]
else:
try:
rsnp_idx
except NameError:
print("No SNP right of midpoint")
# Since we only have a SNP left of the midpoint, use that as SNP
context_snpidx = lsnp_idx
context_seq = [''.join(fasta_dict[snp_name][context_snpidx-3:context_snpidx]), fasta_dict[snp_name][context_snpidx], ''.join(fasta_dict[snp_name][context_snpidx+1:context_snpidx+4])]
else:
cand_snp_idx = [lsnp_idx, rsnp_idx]
context_snpidx = closest(cand_snp_idx, mid_context_snpidx)
context_seq = [''.join(fasta_dict[snp_name][context_snpidx-3:context_snpidx]), fasta_dict[snp_name][context_snpidx], ''.join(fasta_dict[snp_name][context_snpidx+1:context_snpidx+4])]
else:
# Center position is the SNP
context_snpidx = mid_context_snpidx
context_seq = [''.join(fasta_dict[snp_name][context_snpidx-3:context_snpidx]), fasta_dict[snp_name][context_snpidx], ''.join(fasta_dict[snp_name][context_snpidx+1:context_snpidx+4])]
return (context_snpidx, context_seq)
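# Example of the returned value (illustrative sequence): for a contextual
# sequence ...TATYGTG... with the ambiguity code at index i, the function
# returns (i, ['TAT', 'Y', 'GTG']): three bases on each side of the SNP.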
def cat_query_seq(current_hit):
"""Takes in query sequence and concatenates sequence for
easier search for SNP."""
query_idx = []
query_seq_list = []
for i, elem in enumerate(current_hit):
if elem.startswith('Query'):
# Add index to query_idx
query_idx.append(i)
# Save query sequence
query_seq_list.append(",".join(elem.split()).split(',')[2])
# Concatenate the query sequence
query_seq = "".join(query_seq_list)
return query_seq
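# Example (illustrative BLAST text): given alignment lines such as
#   Query  1   ACGTACGT  8
#   Sbjct  10  ACGTACGT  17
#   Query  9   TTGCA     13
# the third whitespace-separated token of each 'Query' line is kept, and the
# function returns "ACGTACGTTTGCA".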
def pick_best_hit(current_snp, context_snp_seq):
"""Takes in a single SNP, picks the best BLAST hit that also contains
the SNP in the contextual sequence, and returns data for the best
BLAST hit for the current SNP."""
# Step 1: create a list containing summaries of hits for current snp
summary = []
for counter, h in enumerate(current_snp):
# This pattern depends on the html summary table and may need to
# be modified accordingly
if h.startswith('chr'):
summary.append([counter, h.split()])
# Pick the best hit
# BLAST hits are always sorted by the best hit (highest bit score, lowest
# E-value) first. We will use this assumption to pick the best SNP
# IMPORTANT note to self: Double check this assumption!!!! And make sure
# to link to documentation that specifies this!
best_hit = summary[0]
# Step 2: split hits for current snp into sublists for easier processing
# Each SNP can have multiple "hits". "Hits" usually start with '>lcl'
# and can have multiple matches (alignments)
# Get indices for lines that start with '>lcl'
hit_idx = []
for counter, h in enumerate(current_snp):
# This '>lcl' pattern will depend on the html file and may need to
# be modified accordingly
# Feature: Add check if no pattern is found, then exit with message
#if h.startswith('>lcl'):
if h.startswith('>chr'):
hit_idx.append(counter)
# Identify ending index of current SNP
# Each SNP search ends with a few lines that starts with 'Lambda'
tmp_end = []
for i, elem in enumerate(current_snp):
if "Lambda" in elem:
tmp_end.append(i)
# There are two 'Lambda' patterns that usually show up, we only need
# the index of the first occurrence of the pattern
snp_idx_end = tmp_end[0]
# For current SNP, split into sublists based on lines starting with '>lcl'
last_idx = len(hit_idx) - 1
snp_hit_split = []
for i in range(0, len(hit_idx)):
if i < last_idx:
current_hit = current_snp[hit_idx[i]:hit_idx[i+1]]
snp_hit_split.append(current_hit)
else:
# If last index, that means this is the last match for this
# SNP, use an ending index for the pattern 'Lambda'
current_hit = current_snp[hit_idx[i]:snp_idx_end]
snp_hit_split.append(current_hit)
# Step 3: Split multiple matches for each "hit" and store in list of lists
score_idx = []
for i, elem in enumerate(snp_hit_split[0]):
if elem.startswith(' Score'):
score_idx.append(i)
# For each chr, the best hits are also sorted by E-value by default
# So, we can create a range of indices to process
# We will pick the best hit that contains the SNP
log = []
for i, elem in enumerate(score_idx):
# If elem is not the last index
if elem != score_idx[-1]:
tmp_query_seq = cat_query_seq(snp_hit_split[0][score_idx[i]:score_idx[i+1]])
log.append(''.join(context_snp_seq) in tmp_query_seq.upper())
# Exit out of loop as soon as we find first query seq that contains the context seq
# again, the BLAST results are sorted from best hit to worst hit so this works
if ''.join(context_snp_seq) in tmp_query_seq.upper():
score_start_idx = score_idx[i]
score_end_idx = score_idx[i+1]
break
else:
# We are at the last element or have only one hit
# for this chromosome. Go until the end
tmp_query_seq = cat_query_seq(snp_hit_split[0][score_idx[i]:])
log.append(''.join(context_snp_seq) in tmp_query_seq.upper())
# Exit out of loop as soon as we find first query seq that contains the context seq
# again, the BLAST results are sorted from best hit to worst hit so this works
if ''.join(context_snp_seq) in tmp_query_seq.upper():
score_start_idx = score_idx[i]
score_end_idx = len(snp_hit_split[0]) - 1
break
# Check if we found the context_snp_seq in at least one of the tmp_query_seq
if True in log:
# Save chr and length info first
best_hit = snp_hit_split[0][0:2]
# Then add best hit containing SNP
best_hit.extend(snp_hit_split[0][score_start_idx:score_end_idx])
else:
print("Cannot resolve SNP:", current_snp[0].split()[1])
unresolved = ["NoHit", current_snp[0].split()[1]]
best_hit = '\t'.join(unresolved)
# if len(score_idx) > 1:
# best_hit.append(snp_hit_split[0][score_start_idx:score_end_idx])
# else:
# # If we only have one hit for this chromosome,
# # # go until the end
# best_hit.append(snp_hit_split[0][score_idx[0]:])
return best_hit
def plus_plus_strand(query_snp, ref_allele, iupac_table):
if query_snp not in ['A', 'C', 'T', 'G']:
if query_snp in ['B', 'D', 'H', 'V']:
print("We have more than 1 alternate alleles")
# For alt allele, use the one that is not the ref allele
alt_allele = []
for i, elem in enumerate(iupac_table[query_snp][0]):
# We only consider 2 nucleotides case
if elem != ref_allele:
alt_allele.append(elem)
else:
# For alt allele, use the one that is not the ref allele
for i, elem in enumerate(iupac_table[query_snp][0]):
# We only consider 2 nucleotides case
if elem != ref_allele:
alt_allele = elem
return alt_allele
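# Example: plus_plus_strand('Y', 'C', IUPAC_TABLE) returns 'T', since Y stands
# for C/T and the reference allele C is excluded. Note the function assumes the
# query base is an ambiguity code; a plain A/C/G/T would leave alt_allele unset.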
def plus_minus_strand(query_snp, ref_allele, iupac_table):
if query_snp not in ['A', 'C', 'T', 'G']:
if query_snp in ['B', 'D', 'H', 'V']:
print("We have more than 1 alternate alleles")
# For alt allele, use the one that is not the ref allele
alt_allele = []
for i, elem in enumerate(iupac_table[query_snp][0]):
# We only consider 2 nucleotides case
if elem != ref_allele:
alt_allele.append(elem)
# print("Ref allele:", ref_allele)
# print("Alt allele:", alt_allele)
else:
# For alt allele, use the one that is not the ref allele
for i, elem in enumerate(iupac_table[query_snp][0]):
# We only consider 2 nucleotides case
if elem != ref_allele:
alt_allele = elem
# print("Ref allele:", ref_allele)
# print("Alt allele:", alt_allele)
# Now, take the reverse complement
    rc_ref_allele = Seq(ref_allele).reverse_complement()[0]
    # print("Rev comp ref allele:", rc_ref_allele)
    rc_alt_allele = Seq(alt_allele).reverse_complement()[0]
# print("Rev comp alt allele:", rc_alt_allele)
return (rc_ref_allele, rc_alt_allele)
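# Example: plus_minus_strand('Y', 'C', IUPAC_TABLE) first resolves the
# alternate allele 'T' as above, then reverse-complements both alleles and
# returns ('G', 'A').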
def extract_info(snp_name, current_best_hit, fasta_dict):
    """Extract the chromosome, physical position, and ref/alt alleles from the
    best BLAST hit and return them as the fields of a VCF line."""
    print(snp_name)
# Save percent identity info
#identity = current_best_hit[3].strip().split(',')[0].split(' ')[3]
# Let's get a list of indices in best hit where the line start with
# "Query" or "Sbjct"
query_idx = []
sbjct_idx = []
query_seq_list = []
sbjct_seq_list = []
for i, elem in enumerate(current_best_hit):
# Let's store info about the % identity and strand
if elem.startswith(' Identities'):
tmp_idt = elem
if elem.startswith(' Strand'):
tmp_strand = elem
if elem.startswith('Query'):
# Add index to query_idx
query_idx.append(i)
# Save query sequence
query_seq_list.append(",".join(elem.split()).split(',')[2])
if elem.startswith('Sbjct'):
# Add index to sbjct_idx
sbjct_idx.append(i)
# Save subject sequence
sbjct_seq_list.append(",".join(elem.split()).split(',')[2])
# Pull out relevant parts and save for later
chrom = current_best_hit[0].strip('>').strip()
idt = tmp_idt.strip().split(',')[0].replace(" ", "")
strand = tmp_strand.split('=')[1]
info_field = "".join(["B;", idt, ",failed"])
qual = str('.')
filter_field = str('.')
# Concatenate the sequences
query_seq = "".join(query_seq_list).upper()
sbjct_seq = "".join(sbjct_seq_list).upper()
# Keep starting position of subject (ref)
sbjct_start_pos = ",".join(current_best_hit[sbjct_idx[0]].split()).split(',')[1]
# Identify the contextual sequence surrounding SNP
# Example format is: ['TAT', 'Y', 'GTG']
# where middle element in list is the SNP
context_seq = fasta_dict[snp_name][1]
# Find the physical position of the SNP in the reference
# This assumes Plus/Plus strand
if strand == "Plus/Plus":
if query_seq.find(''.join(context_seq)) != -1:
# Return the leftmost index of the context_seq + context_seq left of SNP
qsnp_idx = query_seq.find(''.join(context_seq)) + len(context_seq[0])
query_snp = query_seq[qsnp_idx]
# Now, we have the index for the SNP
# Let's get the associated position in the reference (Sbjct)
ref_allele = sbjct_seq[qsnp_idx]
if ref_allele == "-":
print("Reference has an insertion, please manually remove this SNP:", snp_name, "\n")
print("Contextual sequence: ", context_seq)
# Count number of indels that occur prior to SNP
num_indels = sbjct_seq[:qsnp_idx].count('-')
# To get the correct reference position, we need to subtract the number of indels
# that occur prior to the SNP
subject_pos = int(sbjct_start_pos) + qsnp_idx - num_indels
# Identify alternate allele
alt_allele = plus_plus_strand(query_snp, ref_allele, IUPAC_TABLE)
else:
print("Could not resolve position for SNP", snp_name, "\n")
# Add code here to save SNP to log file
elif strand == "Plus/Minus":
# print("coordinates are reversed")
if query_seq.find(''.join(context_seq)) != -1:
# Return the leftmost index of the context_seq + context_seq left of SNP
qsnp_idx = query_seq.find(''.join(context_seq)) + len(context_seq[0])
query_snp = query_seq[qsnp_idx]
# Now, we have the index for the SNP
# Let's get the associated position in the reference (Sbjct)
ref_allele = sbjct_seq[qsnp_idx]
# print("Ref", ref_allele)
# Count number of indels that occur prior to SNP
num_indels = sbjct_seq[:qsnp_idx].count('-')
# To get the correct reference position, we need to add the number of indels
# due to Plus/Minus strand
subject_pos = int(sbjct_start_pos) - qsnp_idx + num_indels
# Identify alternate allele and take reverse complement
ref_allele, alt_allele = plus_minus_strand(query_snp, ref_allele, IUPAC_TABLE)
# print("RC Ref", ref_allele)
# print("RC Alt", alt_allele)
else:
print("Strand is Minus/Plus for SNP", snp_name, "\n")
# Save VCF line
return [chrom, str(subject_pos), snp_name, ref_allele, alt_allele, qual, filter_field, info_field]
def main(FASTA, BLAST_RESULTS_HTML, OUT_FILE):
"""Driver function."""
# Read in fasta file
fasta_dict = read_fasta(os.path.expanduser(FASTA))
# Read in HTML file that contains IPK BLAST search results
# where the HTML contains search results for one or more SNP searches
soup = read_html(os.path.expanduser(BLAST_RESULTS_HTML))
# Process soup object and get it into a workable data structure.
# Extract text
text = soup.get_text()
# Split by newline delimiter
content = text.split('\n')
# Identify start and end indices of SNPs (elements starting with 'Query=')
start_idx = [i for i, j in enumerate(content) if j.startswith('Query=')]
end_idx = [i for i, j in enumerate(content) if j.startswith('Effective search space used:')]
# Create list of lists for each SNP
l = []
for i in range(0, len(start_idx)):
tmp = content[start_idx[i]:end_idx[i]+1]
l.append(tmp)
# Generate 7bp windows of contextual sequence containing the SNP
fasta_win_dict = {}
for i, elem in enumerate(fasta_dict.keys()):
tmp_context_snpidx, tmp_context_seq = context_snp_win(fasta_dict, elem)
fasta_win_dict[elem] = [tmp_context_snpidx, tmp_context_seq]
print("Picking best hit for each SNP...")
# Start from clean log file, check if file exists
log_dir = os.path.dirname(OUT_FILE)
temp = [log_dir, "/unresolved_snps.log"]
log_filename = ''.join(temp)
if os.path.exists(os.path.expanduser(log_filename)):
os.remove(os.path.expanduser(log_filename))
# Pick the best hit for each SNP using the pick_best_hit function
bhs = {}
for i, elem in enumerate(l):
# Save current snp name
csnp_name = elem[0].split()[1]
print(csnp_name)
# "No hits found" issue during IPK BLAST search
if "No hits found" in elem[5]:
print("No hits found in IPK BLAST search for SNP: " + csnp_name)
# Save SNP to log file
with open(os.path.expanduser(log_filename), 'a') as f:
f.write(csnp_name + " - No hits found in IPK BLAST search" + "\n")
else:
            # Store the output from pick_best_hit for this SNP
cbhs = pick_best_hit(current_snp=elem, context_snp_seq=fasta_win_dict[csnp_name][1])
# "NoHit" from not being able to resolve SNP, but there was an IPK BLAST search result
if "NoHit" in cbhs:
print("NoHit")
# Save SNP to log file
with open(os.path.expanduser(log_filename), 'a') as f:
f.write(cbhs + "\n")
else:
# Add new dictionary key,value pair
bhs[csnp_name] = cbhs
# Start from clean file, check if file exists
if os.path.exists(os.path.expanduser(OUT_FILE)):
os.remove(os.path.expanduser(OUT_FILE))
# Extract info from best hit and save to file
print("Extracting relevant info...")
with open(os.path.expanduser(OUT_FILE), 'a') as f:
# Add header line
f.write("\t".join(["#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO"]) + "\n")
for i, elem in enumerate(bhs):
vcf_line = extract_info(snp_name=elem, current_best_hit=bhs[elem], fasta_dict=fasta_win_dict)
# Save VCF line to file
f.write("\t".join(vcf_line) + "\n")
print("Done.")
return
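# Usage (positional arguments, matching main() above):
#   python <this_script>.py <contextual_fasta> <ipk_blast_results.html> <output_vcf>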
main(sys.argv[1], sys.argv[2], sys.argv[3]) # Run the program
|
import sys
import math
print(type(1))
print(isinstance(1, int))
print(1+1)
print(1+1.)
print(float(2))
print(int(2.5))
print(int(-2.5))
print(11/2)
print(11//2)
print(-11//2)
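# Note: floor division rounds toward negative infinity, so -11 // 2 == -6.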
print(11.//2)
print(11**2)
print(11%2)
print(math.pi)
print(math.sin(5))
print(math.tan(math.pi/4))
def is_it_true(anything):
    if anything:
        print("yes, it's true")
    else:
        print("no, it's false")
is_it_true(1)
aList = [5,6,7,8,9,20]
print(aList)
aList += [2,200]
print(aList)
aList.append(True)
print(aList)
aList.extend(["ddd","sax"])
print(aList)
aList.insert(0,"sdf")
print(aList)
del aList[0]
print(aList)
aList.append(6)
aList.remove(6)
print(aList)
aList.remove(6)
print(aList)
print(aList.pop())
a_tuple = (1,2,3,4,5)
print(a_tuple)
print(a_tuple[-1])
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
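# Tuple unpacking against range(7) binds MONDAY..SUNDAY to 0..6.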
print(MONDAY)
|
userAgent = ''
cID = ''
cSC = ''
userN = ''
userP = ''
|
from __future__ import division
import serial
import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import cPickle as pickle
from os.path import join
import os
import csv
from hercubit import settings
from hercubit.device import sensor_stream
from ast import literal_eval
# filename=join("labeled_data",raw_input("name this training set: "))
samples=[]
# else: yield data_gen()
# data_gen.t = 0
userName="Morgan"
# userName= raw_input("userName: ")
# for i in sensors:
exerciseType="bicep curls"
#set up maximum allowed time for exercise
max_time=25
fig, ax = plt.subplots(3, sharex=True)
# fig.set_size_inches(10.5,10.5)
fig.subplots_adjust(hspace=.10)
plots={'accel':ax[0],'gyro':ax[1],'magnet':ax[2]}
#Setup lines in graph
sensors=('accel','gyro','magnet')
axes=('x','y','z')
colors=('-r','-g','-b')
lines={}
for sensor in sensors:
lines[sensor]={}
for axis in axes:
i=axes.index(axis)
# print sensor+"("+axis+") : "+colors[i]
lines[sensor][axis]=ax[sensors.index(sensor)].plot([], [],colors[i], lw=1)
for i in range(len(axes)):
ax[i].grid()
ax[i].set_xlim(0, max_time)
ax[0].set_ylim(-2, 2)
ax[1].set_ylim(-30000, 30000)
ax[2].set_ylim(-2000, 2000)
ax[0].set_ylabel('acceleration (g)')
ax[1].set_ylabel('gyro (degrees/sec)')
ax[2].set_ylabel('magnetometer')
ax[2].set_xlabel('time (s)')
tdata=[]
all_data={}
for sensor in sensors:
all_data[sensor]={'x':[],'y':[],'z':[]}
def save(all_data,tdata):
'''Save csv and png of sampled data
'''
global dirname, sensors, axes, userName,ser
global filename
# variables for saving
now = time.strftime("%Y-%m-%d__%H-%M-%S")
dirname=join("saved_animations_and_data",now+"_"+exerciseType+"_"+userName)
filename=now+"_"+exerciseType
os.mkdir(dirname)
time.sleep(.5)
#samples should equal list of tuples of the data
samples=[]
for i in range(len(tdata)):
row = [userName,exerciseType,0]
row.append(tdata[i])
for sensor in sensors:
for axis in axes:
row.append(all_data[sensor][axis][i])
samples.append(row)
# try:
# except:
# pass
picpath=join(dirname,exerciseType+".png")
plt.savefig(picpath,dpi=200)
pickle.dump(samples,open(os.path.join(dirname,filename+".p"),"wb"))
with open(join(dirname,filename+".csv"),"wb") as csvFile:
writer=csv.writer(csvFile)
writer.writerow(['User','exerciseType','rep_count',"t (sec)","acc_x","acc_y","acc_z","gyro_x","gyro_y","gyro_z","magnet_x","magnet_y","magnet_z"]) #header
for i in samples:
writer.writerow((i))
#save in data directory for fast adding to training set
with open(join('data',"new_training.csv"),"wb") as csvFile:
writer=csv.writer(csvFile)
writer.writerow(['User','exerciseType','rep_count',"t (sec)","acc_x","acc_y","acc_z","gyro_x","gyro_y","gyro_z","magnet_x","magnet_y","magnet_z"]) #header
for i in samples:
writer.writerow((i))
# ser.close()
quit()
t0=0
def run(data):
global t0, lines, test
if t0==0: t0=time.time()
# data=literal_eval(data)
print data
#override t to be count of seconds
t=time.time()-t0
tdata.append(t)
for sensor in all_data:
for axis in all_data[sensor]:
if axis=="x":i=0
if axis=="y":i=1
if axis=="z":i=2
# print sensor+" ("+axis+")"
all_data[sensor][axis].append(data[sensor][i])
lines[sensor][axis][0].set_data(tdata, all_data[sensor][axis])
# all_lines=[[axis for axis in sensor.values()] for sensor in lines.values()]
# test=1
#MOVING WINDOW
xmin, xmax = ax[0].get_xlim()
    if t>=max_time:
        # Save the recording and stop once the maximum time is reached
        save(all_data,tdata)
        # quit()
return None
# print list(plt.xticks()[0])
# new_ticks=range(max_time*2)
# for i in range(len(new_ticks)):
# new_ticks[i]= new_ticks[i]/2
# plt.xticks(list(plt.xticks()[0]) + new_ticks)
from hercubit.device import connect
ser,conn_type=connect()
def gen():
global ser, conn_type
g=sensor_stream(ser,conn_type)
while True:
yield g.next()
ani = animation.FuncAnimation(fig, run, gen, blit=False, interval=100, repeat=False)
plt.show()
|
#!/usr/bin/env python
"""
An example client / server of xmlrpc transport with python.
You could have the server using one version of Python and the client using another version of Python (within reason: maybe not with Python 1.0 and Python 3000...).
To use in its current simple form:
1) start server by having "if 1:" under __main__
2) start client by having "if 0:" under __main__
"""
import os, sys
server_hostname = "192.168.1.25"
server_port = 23459
class Some_Class_We_Want_Remotely_Accessible:
""" Awesome Class which does awesome stuff.
"""
def __init__(self, important_parameter=123):
self.important_parameter = important_parameter
def some_method(self, passed_value):
print 'important_parameter=', self.important_parameter
print 'passed_value=', passed_value
if __name__ == '__main__':
if 1:
# server:
import SimpleXMLRPCServer
server = SimpleXMLRPCServer.SimpleXMLRPCServer( \
(server_hostname, server_port), \
allow_none=True)
server.register_instance( \
Some_Class_We_Want_Remotely_Accessible(important_parameter=1))
server.register_multicall_functions()
server.register_introspection_functions()
print 'XMLRPC Server is starting at:', server_hostname, server_port
server.serve_forever()
else:
# client:
import xmlrpclib
server = xmlrpclib.ServerProxy("http://%s:%d" % \
(server_hostname, server_port))
try:
print server.system.listMethods()
except:
print 'EXCEPT at server.system.listMethods() : Probably XMLRPC server is down!'
sys.exit()
print server.system.methodHelp("some_method")
#src_list = server.get_sources_for_radec(ra, dec, box_range)
src_list = server.some_method('hello')
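        # Python 3 note: SimpleXMLRPCServer lives in xmlrpc.server and
        # xmlrpclib's ServerProxy in xmlrpc.client; the print statements in
        # this file are Python 2 syntax.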
|
# 1
#
# a = float(input('Enter the length of side A: '))
# b = float(input('Enter the length of side B: '))
# c = float(input('Enter the length of side C: '))
#
# if a < b + c and b < a + c and c < a + b:
#     print('The values A, B and C can form a triangle.')
#
#     if a != b != c != a:
#         print('These values form a SCALENE triangle')
#     elif a == b == c:
#         print('These values form an EQUILATERAL triangle')
#     else:
#         print('These values form an ISOSCELES triangle')
#
# else:
#     print('It is not possible to form a triangle with these values.')
#
# 2
#
# ano = int(input('Enter the year you would like to check for a leap year: '))
#
# if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
#     print(f'The year {ano} is a leap year.')
# else:
#     print(f'The year {ano} is not a leap year.')
#
# 3
#
# peso = float(input('Enter the weight of the fish in kilograms: '))
#
# if peso > 50:
#     excesso = peso - 50
#     multa = excesso * 4
# else:
#     excesso = 0
#     multa = 0
#
# print(f'João Papo-de-Pescador exceeded the limit by {excesso} kg; the fine will be R${multa:.2f}')
#
# 4
#
# n1 = int(input('Enter the first number: '))
# n2 = int(input('Enter the second number: '))
# n3 = int(input('Enter the third number: '))
#
# if n1 > n2 and n1 > n3:
#     print(f'The largest is: {n1}')
#
# if n2 > n1 and n2 > n3:
#     print(f'The largest is: {n2}')
#
# if n3 > n1 and n3 > n2:
#     print(f'The largest is: {n3}')
#
# 5
#
# n1 = int(input('Enter the first number: '))
# n2 = int(input('Enter the second number: '))
# n3 = int(input('Enter the third number: '))
# menor = n1
# maior = n1
#
# if n2 < n1 and n2 < n3:
#     menor = n2
#
# if n3 < n1 and n3 < n2:
#     menor = n3
#
# if n2 > n1 and n2 > n3:
#     maior = n2
#
# if n3 > n1 and n3 > n2:
#     maior = n3
#
# print(f'The smallest value entered was {menor}')
# print(f'The largest value entered was {maior}')
#
# 6
#
# salario_hora = float(input('How much do you earn per hour: '))
# hora_mes = float(input('How many hours did you work this month: '))
#
# salario_bruto = salario_hora * hora_mes
# imposto_renda = salario_bruto * 0.11
# inss = salario_bruto * 0.08
# sindicato = salario_bruto * 0.05
# descontos = imposto_renda + inss + sindicato
# salario_liquido = salario_bruto - descontos
#
# print(f'A) + Gross salary: R${salario_bruto:.2f}')
# print(f'B) - Income tax (11%): R${imposto_renda:.2f}')
# print(f'C) - INSS (8%): R${inss:.2f}')
# print(f'D) - Union dues (5%): R${sindicato:.2f}')
# print(f'E) = Net salary: R${salario_liquido:.2f}')
#
# 7
#
# area = int(input('What is the size in m² of the area to be painted: '))
# tinta = area / 3
#
# if area % 54 != 0:
#     latas = int(area / 54) + 1
# else:
#     latas = area / 54
#
# custo = latas * 80
#
# print(f'You will need {latas} can(s); it will cost R${custo:.2f}')
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class NfsExportCreateParams(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
NfsExportCreateParams - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'all_dirs': 'bool',
'block_size': 'int',
'can_set_time': 'bool',
'case_insensitive': 'bool',
'case_preserving': 'bool',
'chown_restricted': 'bool',
'clients': 'list[str]',
'commit_asynchronous': 'bool',
'description': 'str',
'directory_transfer_size': 'int',
'encoding': 'str',
'link_max': 'int',
'map_all': 'NfsExportMapAll',
'map_failure': 'NfsExportMapAll',
'map_full': 'bool',
'map_lookup_uid': 'bool',
'map_non_root': 'NfsExportMapAll',
'map_retry': 'bool',
'map_root': 'NfsExportMapAll',
'max_file_size': 'int',
'name_max_size': 'int',
'no_truncate': 'bool',
'paths': 'list[str]',
'read_only': 'bool',
'read_only_clients': 'list[str]',
'read_transfer_max_size': 'int',
'read_transfer_multiple': 'int',
'read_transfer_size': 'int',
'read_write_clients': 'list[str]',
'readdirplus': 'bool',
'readdirplus_prefetch': 'int',
'return_32bit_file_ids': 'bool',
'root_clients': 'list[str]',
'security_flavors': 'list[str]',
'setattr_asynchronous': 'bool',
'snapshot': 'str',
'symlinks': 'bool',
'time_delta': 'float',
'write_datasync_action': 'str',
'write_datasync_reply': 'str',
'write_filesync_action': 'str',
'write_filesync_reply': 'str',
'write_transfer_max_size': 'int',
'write_transfer_multiple': 'int',
'write_transfer_size': 'int',
'write_unstable_action': 'str',
'write_unstable_reply': 'str',
'zone': 'str'
}
self.attribute_map = {
'all_dirs': 'all_dirs',
'block_size': 'block_size',
'can_set_time': 'can_set_time',
'case_insensitive': 'case_insensitive',
'case_preserving': 'case_preserving',
'chown_restricted': 'chown_restricted',
'clients': 'clients',
'commit_asynchronous': 'commit_asynchronous',
'description': 'description',
'directory_transfer_size': 'directory_transfer_size',
'encoding': 'encoding',
'link_max': 'link_max',
'map_all': 'map_all',
'map_failure': 'map_failure',
'map_full': 'map_full',
'map_lookup_uid': 'map_lookup_uid',
'map_non_root': 'map_non_root',
'map_retry': 'map_retry',
'map_root': 'map_root',
'max_file_size': 'max_file_size',
'name_max_size': 'name_max_size',
'no_truncate': 'no_truncate',
'paths': 'paths',
'read_only': 'read_only',
'read_only_clients': 'read_only_clients',
'read_transfer_max_size': 'read_transfer_max_size',
'read_transfer_multiple': 'read_transfer_multiple',
'read_transfer_size': 'read_transfer_size',
'read_write_clients': 'read_write_clients',
'readdirplus': 'readdirplus',
'readdirplus_prefetch': 'readdirplus_prefetch',
'return_32bit_file_ids': 'return_32bit_file_ids',
'root_clients': 'root_clients',
'security_flavors': 'security_flavors',
'setattr_asynchronous': 'setattr_asynchronous',
'snapshot': 'snapshot',
'symlinks': 'symlinks',
'time_delta': 'time_delta',
'write_datasync_action': 'write_datasync_action',
'write_datasync_reply': 'write_datasync_reply',
'write_filesync_action': 'write_filesync_action',
'write_filesync_reply': 'write_filesync_reply',
'write_transfer_max_size': 'write_transfer_max_size',
'write_transfer_multiple': 'write_transfer_multiple',
'write_transfer_size': 'write_transfer_size',
'write_unstable_action': 'write_unstable_action',
'write_unstable_reply': 'write_unstable_reply',
'zone': 'zone'
}
self._all_dirs = None
self._block_size = None
self._can_set_time = None
self._case_insensitive = None
self._case_preserving = None
self._chown_restricted = None
self._clients = None
self._commit_asynchronous = None
self._description = None
self._directory_transfer_size = None
self._encoding = None
self._link_max = None
self._map_all = None
self._map_failure = None
self._map_full = None
self._map_lookup_uid = None
self._map_non_root = None
self._map_retry = None
self._map_root = None
self._max_file_size = None
self._name_max_size = None
self._no_truncate = None
self._paths = None
self._read_only = None
self._read_only_clients = None
self._read_transfer_max_size = None
self._read_transfer_multiple = None
self._read_transfer_size = None
self._read_write_clients = None
self._readdirplus = None
self._readdirplus_prefetch = None
self._return_32bit_file_ids = None
self._root_clients = None
self._security_flavors = None
self._setattr_asynchronous = None
self._snapshot = None
self._symlinks = None
self._time_delta = None
self._write_datasync_action = None
self._write_datasync_reply = None
self._write_filesync_action = None
self._write_filesync_reply = None
self._write_transfer_max_size = None
self._write_transfer_multiple = None
self._write_transfer_size = None
self._write_unstable_action = None
self._write_unstable_reply = None
self._zone = None
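    # A minimal usage sketch (illustrative, not part of the generated file):
    #
    #   params = NfsExportCreateParams()
    #   params.paths = ["/ifs/data/export1"]        # hypothetical export path
    #   params.read_only = True
    #   params.write_datasync_action = "DATASYNC"   # validated against allowed_values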
@property
def all_dirs(self):
"""
Gets the all_dirs of this NfsExportCreateParams.
If true, all directories under the specified paths are mountable.
:return: The all_dirs of this NfsExportCreateParams.
:rtype: bool
"""
return self._all_dirs
@all_dirs.setter
def all_dirs(self, all_dirs):
"""
Sets the all_dirs of this NfsExportCreateParams.
If true, all directories under the specified paths are mountable.
:param all_dirs: The all_dirs of this NfsExportCreateParams.
:type: bool
"""
self._all_dirs = all_dirs
@property
def block_size(self):
"""
Gets the block_size of this NfsExportCreateParams.
The block size returned by the NFS STATFS procedure.
:return: The block_size of this NfsExportCreateParams.
:rtype: int
"""
return self._block_size
@block_size.setter
def block_size(self, block_size):
"""
Sets the block_size of this NfsExportCreateParams.
The block size returned by the NFS STATFS procedure.
:param block_size: The block_size of this NfsExportCreateParams.
:type: int
"""
self._block_size = block_size
@property
def can_set_time(self):
"""
Gets the can_set_time of this NfsExportCreateParams.
If true, the client may set file times using the NFS SETATTR request. This option is advisory and the server always behaves as if it is true.
:return: The can_set_time of this NfsExportCreateParams.
:rtype: bool
"""
return self._can_set_time
@can_set_time.setter
def can_set_time(self, can_set_time):
"""
Sets the can_set_time of this NfsExportCreateParams.
If true, the client may set file times using the NFS SETATTR request. This option is advisory and the server always behaves as if it is true.
:param can_set_time: The can_set_time of this NfsExportCreateParams.
:type: bool
"""
self._can_set_time = can_set_time
@property
def case_insensitive(self):
"""
Gets the case_insensitive of this NfsExportCreateParams.
If true, the server will report that it ignores case for file names.
:return: The case_insensitive of this NfsExportCreateParams.
:rtype: bool
"""
return self._case_insensitive
@case_insensitive.setter
def case_insensitive(self, case_insensitive):
"""
Sets the case_insensitive of this NfsExportCreateParams.
If true, the server will report that it ignores case for file names.
:param case_insensitive: The case_insensitive of this NfsExportCreateParams.
:type: bool
"""
self._case_insensitive = case_insensitive
@property
def case_preserving(self):
"""
Gets the case_preserving of this NfsExportCreateParams.
If true, the server will report that it always preserves case for file names.
:return: The case_preserving of this NfsExportCreateParams.
:rtype: bool
"""
return self._case_preserving
@case_preserving.setter
def case_preserving(self, case_preserving):
"""
Sets the case_preserving of this NfsExportCreateParams.
If true, the server will report that it always preserves case for file names.
:param case_preserving: The case_preserving of this NfsExportCreateParams.
:type: bool
"""
self._case_preserving = case_preserving
@property
def chown_restricted(self):
"""
Gets the chown_restricted of this NfsExportCreateParams.
If true, the server will report that only the superuser may change file ownership.
:return: The chown_restricted of this NfsExportCreateParams.
:rtype: bool
"""
return self._chown_restricted
@chown_restricted.setter
def chown_restricted(self, chown_restricted):
"""
Sets the chown_restricted of this NfsExportCreateParams.
If true, the server will report that only the superuser may change file ownership.
:param chown_restricted: The chown_restricted of this NfsExportCreateParams.
:type: bool
"""
self._chown_restricted = chown_restricted
@property
def clients(self):
"""
Gets the clients of this NfsExportCreateParams.
Clients that have access to the export.
:return: The clients of this NfsExportCreateParams.
:rtype: list[str]
"""
return self._clients
@clients.setter
def clients(self, clients):
"""
Sets the clients of this NfsExportCreateParams.
Clients that have access to the export.
:param clients: The clients of this NfsExportCreateParams.
:type: list[str]
"""
self._clients = clients
@property
def commit_asynchronous(self):
"""
Gets the commit_asynchronous of this NfsExportCreateParams.
If true, allows NFS commit requests to execute asynchronously.
:return: The commit_asynchronous of this NfsExportCreateParams.
:rtype: bool
"""
return self._commit_asynchronous
@commit_asynchronous.setter
def commit_asynchronous(self, commit_asynchronous):
"""
Sets the commit_asynchronous of this NfsExportCreateParams.
If true, allows NFS commit requests to execute asynchronously.
:param commit_asynchronous: The commit_asynchronous of this NfsExportCreateParams.
:type: bool
"""
self._commit_asynchronous = commit_asynchronous
@property
def description(self):
"""
Gets the description of this NfsExportCreateParams.
A human readable description of the export.
:return: The description of this NfsExportCreateParams.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this NfsExportCreateParams.
A human readable description of the export.
:param description: The description of this NfsExportCreateParams.
:type: str
"""
self._description = description
@property
def directory_transfer_size(self):
"""
Gets the directory_transfer_size of this NfsExportCreateParams.
The preferred size for directory read operations. This option is advisory.
:return: The directory_transfer_size of this NfsExportCreateParams.
:rtype: int
"""
return self._directory_transfer_size
@directory_transfer_size.setter
def directory_transfer_size(self, directory_transfer_size):
"""
Sets the directory_transfer_size of this NfsExportCreateParams.
The preferred size for directory read operations. This option is advisory.
:param directory_transfer_size: The directory_transfer_size of this NfsExportCreateParams.
:type: int
"""
self._directory_transfer_size = directory_transfer_size
@property
def encoding(self):
"""
Gets the encoding of this NfsExportCreateParams.
The character encoding of clients connecting to the export.
:return: The encoding of this NfsExportCreateParams.
:rtype: str
"""
return self._encoding
@encoding.setter
def encoding(self, encoding):
"""
Sets the encoding of this NfsExportCreateParams.
The character encoding of clients connecting to the export.
:param encoding: The encoding of this NfsExportCreateParams.
:type: str
"""
self._encoding = encoding
@property
def link_max(self):
"""
Gets the link_max of this NfsExportCreateParams.
The reported maximum number of links to a file.
:return: The link_max of this NfsExportCreateParams.
:rtype: int
"""
return self._link_max
@link_max.setter
def link_max(self, link_max):
"""
Sets the link_max of this NfsExportCreateParams.
The reported maximum number of links to a file.
:param link_max: The link_max of this NfsExportCreateParams.
:type: int
"""
self._link_max = link_max
@property
def map_all(self):
"""
Gets the map_all of this NfsExportCreateParams.
User and group mapping.
:return: The map_all of this NfsExportCreateParams.
:rtype: NfsExportMapAll
"""
return self._map_all
@map_all.setter
def map_all(self, map_all):
"""
Sets the map_all of this NfsExportCreateParams.
User and group mapping.
:param map_all: The map_all of this NfsExportCreateParams.
:type: NfsExportMapAll
"""
self._map_all = map_all
@property
def map_failure(self):
"""
Gets the map_failure of this NfsExportCreateParams.
User and group mapping.
:return: The map_failure of this NfsExportCreateParams.
:rtype: NfsExportMapAll
"""
return self._map_failure
@map_failure.setter
def map_failure(self, map_failure):
"""
Sets the map_failure of this NfsExportCreateParams.
User and group mapping.
:param map_failure: The map_failure of this NfsExportCreateParams.
:type: NfsExportMapAll
"""
self._map_failure = map_failure
@property
def map_full(self):
"""
Gets the map_full of this NfsExportCreateParams.
        If true, user mapping queries the OneFS user database. If false, only local authentication is queried.
:return: The map_full of this NfsExportCreateParams.
:rtype: bool
"""
return self._map_full
@map_full.setter
def map_full(self, map_full):
"""
Sets the map_full of this NfsExportCreateParams.
        If true, user mapping queries the OneFS user database. If false, only local authentication is queried.
:param map_full: The map_full of this NfsExportCreateParams.
:type: bool
"""
self._map_full = map_full
@property
def map_lookup_uid(self):
"""
Gets the map_lookup_uid of this NfsExportCreateParams.
If true, incoming UIDs are mapped to users in the OneFS user database. If false, incoming UIDs are applied directly to file operations.
:return: The map_lookup_uid of this NfsExportCreateParams.
:rtype: bool
"""
return self._map_lookup_uid
@map_lookup_uid.setter
def map_lookup_uid(self, map_lookup_uid):
"""
Sets the map_lookup_uid of this NfsExportCreateParams.
If true, incoming UIDs are mapped to users in the OneFS user database. If false, incoming UIDs are applied directly to file operations.
:param map_lookup_uid: The map_lookup_uid of this NfsExportCreateParams.
:type: bool
"""
self._map_lookup_uid = map_lookup_uid
@property
def map_non_root(self):
"""
Gets the map_non_root of this NfsExportCreateParams.
User and group mapping.
:return: The map_non_root of this NfsExportCreateParams.
:rtype: NfsExportMapAll
"""
return self._map_non_root
@map_non_root.setter
def map_non_root(self, map_non_root):
"""
Sets the map_non_root of this NfsExportCreateParams.
User and group mapping.
:param map_non_root: The map_non_root of this NfsExportCreateParams.
:type: NfsExportMapAll
"""
self._map_non_root = map_non_root
@property
def map_retry(self):
"""
Gets the map_retry of this NfsExportCreateParams.
        Determines whether lookups for users specified in map_all, map_root or map_non_root are retried if the lookup fails.
:return: The map_retry of this NfsExportCreateParams.
:rtype: bool
"""
return self._map_retry
@map_retry.setter
def map_retry(self, map_retry):
"""
Sets the map_retry of this NfsExportCreateParams.
        Determines whether lookups for users specified in map_all, map_root or map_non_root are retried if the lookup fails.
:param map_retry: The map_retry of this NfsExportCreateParams.
:type: bool
"""
self._map_retry = map_retry
@property
def map_root(self):
"""
Gets the map_root of this NfsExportCreateParams.
User and group mapping.
:return: The map_root of this NfsExportCreateParams.
:rtype: NfsExportMapAll
"""
return self._map_root
@map_root.setter
def map_root(self, map_root):
"""
Sets the map_root of this NfsExportCreateParams.
User and group mapping.
:param map_root: The map_root of this NfsExportCreateParams.
:type: NfsExportMapAll
"""
self._map_root = map_root
@property
def max_file_size(self):
"""
Gets the max_file_size of this NfsExportCreateParams.
The maximum file size in the export.
:return: The max_file_size of this NfsExportCreateParams.
:rtype: int
"""
return self._max_file_size
@max_file_size.setter
def max_file_size(self, max_file_size):
"""
Sets the max_file_size of this NfsExportCreateParams.
The maximum file size in the export.
:param max_file_size: The max_file_size of this NfsExportCreateParams.
:type: int
"""
self._max_file_size = max_file_size
@property
def name_max_size(self):
"""
Gets the name_max_size of this NfsExportCreateParams.
The reported maximum length of a file name.
:return: The name_max_size of this NfsExportCreateParams.
:rtype: int
"""
return self._name_max_size
@name_max_size.setter
def name_max_size(self, name_max_size):
"""
Sets the name_max_size of this NfsExportCreateParams.
The reported maximum length of a file name.
:param name_max_size: The name_max_size of this NfsExportCreateParams.
:type: int
"""
self._name_max_size = name_max_size
@property
def no_truncate(self):
"""
Gets the no_truncate of this NfsExportCreateParams.
        If true, report that too-long file names result in an error.
:return: The no_truncate of this NfsExportCreateParams.
:rtype: bool
"""
return self._no_truncate
@no_truncate.setter
def no_truncate(self, no_truncate):
"""
Sets the no_truncate of this NfsExportCreateParams.
        If true, report that too-long file names result in an error.
:param no_truncate: The no_truncate of this NfsExportCreateParams.
:type: bool
"""
self._no_truncate = no_truncate
@property
def paths(self):
"""
Gets the paths of this NfsExportCreateParams.
The paths under /ifs that are exported.
:return: The paths of this NfsExportCreateParams.
:rtype: list[str]
"""
return self._paths
@paths.setter
def paths(self, paths):
"""
Sets the paths of this NfsExportCreateParams.
The paths under /ifs that are exported.
:param paths: The paths of this NfsExportCreateParams.
:type: list[str]
"""
self._paths = paths
@property
def read_only(self):
"""
Gets the read_only of this NfsExportCreateParams.
If true, the export is read-only.
:return: The read_only of this NfsExportCreateParams.
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""
Sets the read_only of this NfsExportCreateParams.
If true, the export is read-only.
:param read_only: The read_only of this NfsExportCreateParams.
:type: bool
"""
self._read_only = read_only
@property
def read_only_clients(self):
"""
Gets the read_only_clients of this NfsExportCreateParams.
Clients that have read only access to the export.
:return: The read_only_clients of this NfsExportCreateParams.
:rtype: list[str]
"""
return self._read_only_clients
@read_only_clients.setter
def read_only_clients(self, read_only_clients):
"""
Sets the read_only_clients of this NfsExportCreateParams.
Clients that have read only access to the export.
:param read_only_clients: The read_only_clients of this NfsExportCreateParams.
:type: list[str]
"""
self._read_only_clients = read_only_clients
@property
def read_transfer_max_size(self):
"""
Gets the read_transfer_max_size of this NfsExportCreateParams.
The maximum buffer size that clients should use on NFS read requests. This option is advisory.
:return: The read_transfer_max_size of this NfsExportCreateParams.
:rtype: int
"""
return self._read_transfer_max_size
@read_transfer_max_size.setter
def read_transfer_max_size(self, read_transfer_max_size):
"""
Sets the read_transfer_max_size of this NfsExportCreateParams.
The maximum buffer size that clients should use on NFS read requests. This option is advisory.
:param read_transfer_max_size: The read_transfer_max_size of this NfsExportCreateParams.
:type: int
"""
self._read_transfer_max_size = read_transfer_max_size
@property
def read_transfer_multiple(self):
"""
Gets the read_transfer_multiple of this NfsExportCreateParams.
The preferred multiple size for NFS read requests. This option is advisory.
:return: The read_transfer_multiple of this NfsExportCreateParams.
:rtype: int
"""
return self._read_transfer_multiple
@read_transfer_multiple.setter
def read_transfer_multiple(self, read_transfer_multiple):
"""
Sets the read_transfer_multiple of this NfsExportCreateParams.
The preferred multiple size for NFS read requests. This option is advisory.
:param read_transfer_multiple: The read_transfer_multiple of this NfsExportCreateParams.
:type: int
"""
self._read_transfer_multiple = read_transfer_multiple
@property
def read_transfer_size(self):
"""
Gets the read_transfer_size of this NfsExportCreateParams.
The optimal size for NFS read requests. This option is advisory.
:return: The read_transfer_size of this NfsExportCreateParams.
:rtype: int
"""
return self._read_transfer_size
@read_transfer_size.setter
def read_transfer_size(self, read_transfer_size):
"""
Sets the read_transfer_size of this NfsExportCreateParams.
The optimal size for NFS read requests. This option is advisory.
:param read_transfer_size: The read_transfer_size of this NfsExportCreateParams.
:type: int
"""
self._read_transfer_size = read_transfer_size
@property
def read_write_clients(self):
"""
Gets the read_write_clients of this NfsExportCreateParams.
Clients that have read and write access to the export, even if the export is read-only.
:return: The read_write_clients of this NfsExportCreateParams.
:rtype: list[str]
"""
return self._read_write_clients
@read_write_clients.setter
def read_write_clients(self, read_write_clients):
"""
Sets the read_write_clients of this NfsExportCreateParams.
Clients that have read and write access to the export, even if the export is read-only.
:param read_write_clients: The read_write_clients of this NfsExportCreateParams.
:type: list[str]
"""
self._read_write_clients = read_write_clients
@property
def readdirplus(self):
"""
Gets the readdirplus of this NfsExportCreateParams.
If true, readdirplus requests are enabled.
:return: The readdirplus of this NfsExportCreateParams.
:rtype: bool
"""
return self._readdirplus
@readdirplus.setter
def readdirplus(self, readdirplus):
"""
Sets the readdirplus of this NfsExportCreateParams.
If true, readdirplus requests are enabled.
:param readdirplus: The readdirplus of this NfsExportCreateParams.
:type: bool
"""
self._readdirplus = readdirplus
@property
def readdirplus_prefetch(self):
"""
Gets the readdirplus_prefetch of this NfsExportCreateParams.
This field is deprecated and does not do anything.
:return: The readdirplus_prefetch of this NfsExportCreateParams.
:rtype: int
"""
return self._readdirplus_prefetch
@readdirplus_prefetch.setter
def readdirplus_prefetch(self, readdirplus_prefetch):
"""
Sets the readdirplus_prefetch of this NfsExportCreateParams.
This field is deprecated and does not do anything.
:param readdirplus_prefetch: The readdirplus_prefetch of this NfsExportCreateParams.
:type: int
"""
self._readdirplus_prefetch = readdirplus_prefetch
@property
def return_32bit_file_ids(self):
"""
Gets the return_32bit_file_ids of this NfsExportCreateParams.
Limits the size of file identifiers returned by NFSv3+ to 32-bit values.
:return: The return_32bit_file_ids of this NfsExportCreateParams.
:rtype: bool
"""
return self._return_32bit_file_ids
@return_32bit_file_ids.setter
def return_32bit_file_ids(self, return_32bit_file_ids):
"""
Sets the return_32bit_file_ids of this NfsExportCreateParams.
Limits the size of file identifiers returned by NFSv3+ to 32-bit values.
:param return_32bit_file_ids: The return_32bit_file_ids of this NfsExportCreateParams.
:type: bool
"""
self._return_32bit_file_ids = return_32bit_file_ids
@property
def root_clients(self):
"""
Gets the root_clients of this NfsExportCreateParams.
Clients that have root access to the export.
:return: The root_clients of this NfsExportCreateParams.
:rtype: list[str]
"""
return self._root_clients
@root_clients.setter
def root_clients(self, root_clients):
"""
Sets the root_clients of this NfsExportCreateParams.
Clients that have root access to the export.
:param root_clients: The root_clients of this NfsExportCreateParams.
:type: list[str]
"""
self._root_clients = root_clients
@property
def security_flavors(self):
"""
Gets the security_flavors of this NfsExportCreateParams.
The authentication flavors that are supported for this export.
:return: The security_flavors of this NfsExportCreateParams.
:rtype: list[str]
"""
return self._security_flavors
@security_flavors.setter
def security_flavors(self, security_flavors):
"""
Sets the security_flavors of this NfsExportCreateParams.
The authentication flavors that are supported for this export.
:param security_flavors: The security_flavors of this NfsExportCreateParams.
:type: list[str]
"""
self._security_flavors = security_flavors
@property
def setattr_asynchronous(self):
"""
Gets the setattr_asynchronous of this NfsExportCreateParams.
If true, allows setattr operations to execute asynchronously.
:return: The setattr_asynchronous of this NfsExportCreateParams.
:rtype: bool
"""
return self._setattr_asynchronous
@setattr_asynchronous.setter
def setattr_asynchronous(self, setattr_asynchronous):
"""
Sets the setattr_asynchronous of this NfsExportCreateParams.
If true, allows setattr operations to execute asynchronously.
:param setattr_asynchronous: The setattr_asynchronous of this NfsExportCreateParams.
:type: bool
"""
self._setattr_asynchronous = setattr_asynchronous
@property
def snapshot(self):
"""
Gets the snapshot of this NfsExportCreateParams.
Use this snapshot for all mounts.
:return: The snapshot of this NfsExportCreateParams.
:rtype: str
"""
return self._snapshot
@snapshot.setter
def snapshot(self, snapshot):
"""
Sets the snapshot of this NfsExportCreateParams.
Use this snapshot for all mounts.
:param snapshot: The snapshot of this NfsExportCreateParams.
:type: str
"""
self._snapshot = snapshot
@property
def symlinks(self):
"""
Gets the symlinks of this NfsExportCreateParams.
If true, paths reachable by symlinks are exported.
:return: The symlinks of this NfsExportCreateParams.
:rtype: bool
"""
return self._symlinks
@symlinks.setter
def symlinks(self, symlinks):
"""
Sets the symlinks of this NfsExportCreateParams.
If true, paths reachable by symlinks are exported.
:param symlinks: The symlinks of this NfsExportCreateParams.
:type: bool
"""
self._symlinks = symlinks
@property
def time_delta(self):
"""
Gets the time_delta of this NfsExportCreateParams.
The resolution of all time values that are returned to clients.
:return: The time_delta of this NfsExportCreateParams.
:rtype: float
"""
return self._time_delta
@time_delta.setter
def time_delta(self, time_delta):
"""
Sets the time_delta of this NfsExportCreateParams.
The resolution of all time values that are returned to clients.
:param time_delta: The time_delta of this NfsExportCreateParams.
:type: float
"""
self._time_delta = time_delta
@property
def write_datasync_action(self):
"""
Gets the write_datasync_action of this NfsExportCreateParams.
The synchronization type.
:return: The write_datasync_action of this NfsExportCreateParams.
:rtype: str
"""
return self._write_datasync_action
@write_datasync_action.setter
def write_datasync_action(self, write_datasync_action):
"""
Sets the write_datasync_action of this NfsExportCreateParams.
The synchronization type.
:param write_datasync_action: The write_datasync_action of this NfsExportCreateParams.
:type: str
"""
allowed_values = ["DATASYNC", "FILESYNC", "UNSTABLE"]
if write_datasync_action is not None and write_datasync_action not in allowed_values:
raise ValueError(
"Invalid value for `write_datasync_action`, must be one of {0}"
.format(allowed_values)
)
self._write_datasync_action = write_datasync_action
@property
def write_datasync_reply(self):
"""
Gets the write_datasync_reply of this NfsExportCreateParams.
The synchronization type.
:return: The write_datasync_reply of this NfsExportCreateParams.
:rtype: str
"""
return self._write_datasync_reply
@write_datasync_reply.setter
def write_datasync_reply(self, write_datasync_reply):
"""
Sets the write_datasync_reply of this NfsExportCreateParams.
The synchronization type.
:param write_datasync_reply: The write_datasync_reply of this NfsExportCreateParams.
:type: str
"""
allowed_values = ["DATASYNC", "FILESYNC", "UNSTABLE"]
if write_datasync_reply is not None and write_datasync_reply not in allowed_values:
raise ValueError(
"Invalid value for `write_datasync_reply`, must be one of {0}"
.format(allowed_values)
)
self._write_datasync_reply = write_datasync_reply
@property
def write_filesync_action(self):
"""
Gets the write_filesync_action of this NfsExportCreateParams.
The synchronization type.
:return: The write_filesync_action of this NfsExportCreateParams.
:rtype: str
"""
return self._write_filesync_action
@write_filesync_action.setter
def write_filesync_action(self, write_filesync_action):
"""
Sets the write_filesync_action of this NfsExportCreateParams.
The synchronization type.
:param write_filesync_action: The write_filesync_action of this NfsExportCreateParams.
:type: str
"""
allowed_values = ["DATASYNC", "FILESYNC", "UNSTABLE"]
if write_filesync_action is not None and write_filesync_action not in allowed_values:
raise ValueError(
"Invalid value for `write_filesync_action`, must be one of {0}"
.format(allowed_values)
)
self._write_filesync_action = write_filesync_action
@property
def write_filesync_reply(self):
"""
Gets the write_filesync_reply of this NfsExportCreateParams.
The synchronization type.
:return: The write_filesync_reply of this NfsExportCreateParams.
:rtype: str
"""
return self._write_filesync_reply
@write_filesync_reply.setter
def write_filesync_reply(self, write_filesync_reply):
"""
Sets the write_filesync_reply of this NfsExportCreateParams.
The synchronization type.
:param write_filesync_reply: The write_filesync_reply of this NfsExportCreateParams.
:type: str
"""
allowed_values = ["DATASYNC", "FILESYNC", "UNSTABLE"]
if write_filesync_reply is not None and write_filesync_reply not in allowed_values:
raise ValueError(
"Invalid value for `write_filesync_reply`, must be one of {0}"
.format(allowed_values)
)
self._write_filesync_reply = write_filesync_reply
@property
def write_transfer_max_size(self):
"""
Gets the write_transfer_max_size of this NfsExportCreateParams.
The maximum buffer size that clients should use on NFS write requests. This option is advisory.
:return: The write_transfer_max_size of this NfsExportCreateParams.
:rtype: int
"""
return self._write_transfer_max_size
@write_transfer_max_size.setter
def write_transfer_max_size(self, write_transfer_max_size):
"""
Sets the write_transfer_max_size of this NfsExportCreateParams.
The maximum buffer size that clients should use on NFS write requests. This option is advisory.
:param write_transfer_max_size: The write_transfer_max_size of this NfsExportCreateParams.
:type: int
"""
self._write_transfer_max_size = write_transfer_max_size
@property
def write_transfer_multiple(self):
"""
Gets the write_transfer_multiple of this NfsExportCreateParams.
The preferred multiple size for NFS write requests. This option is advisory.
:return: The write_transfer_multiple of this NfsExportCreateParams.
:rtype: int
"""
return self._write_transfer_multiple
@write_transfer_multiple.setter
def write_transfer_multiple(self, write_transfer_multiple):
"""
Sets the write_transfer_multiple of this NfsExportCreateParams.
The preferred multiple size for NFS write requests. This option is advisory.
:param write_transfer_multiple: The write_transfer_multiple of this NfsExportCreateParams.
:type: int
"""
self._write_transfer_multiple = write_transfer_multiple
@property
def write_transfer_size(self):
"""
Gets the write_transfer_size of this NfsExportCreateParams.
        The optimal size for NFS write requests. This option is advisory.
:return: The write_transfer_size of this NfsExportCreateParams.
:rtype: int
"""
return self._write_transfer_size
@write_transfer_size.setter
def write_transfer_size(self, write_transfer_size):
"""
Sets the write_transfer_size of this NfsExportCreateParams.
        The optimal size for NFS write requests. This option is advisory.
:param write_transfer_size: The write_transfer_size of this NfsExportCreateParams.
:type: int
"""
self._write_transfer_size = write_transfer_size
@property
def write_unstable_action(self):
"""
Gets the write_unstable_action of this NfsExportCreateParams.
The synchronization type.
:return: The write_unstable_action of this NfsExportCreateParams.
:rtype: str
"""
return self._write_unstable_action
@write_unstable_action.setter
def write_unstable_action(self, write_unstable_action):
"""
Sets the write_unstable_action of this NfsExportCreateParams.
The synchronization type.
:param write_unstable_action: The write_unstable_action of this NfsExportCreateParams.
:type: str
"""
allowed_values = ["DATASYNC", "FILESYNC", "UNSTABLE"]
if write_unstable_action is not None and write_unstable_action not in allowed_values:
raise ValueError(
"Invalid value for `write_unstable_action`, must be one of {0}"
.format(allowed_values)
)
self._write_unstable_action = write_unstable_action
@property
def write_unstable_reply(self):
"""
Gets the write_unstable_reply of this NfsExportCreateParams.
The synchronization type.
:return: The write_unstable_reply of this NfsExportCreateParams.
:rtype: str
"""
return self._write_unstable_reply
@write_unstable_reply.setter
def write_unstable_reply(self, write_unstable_reply):
"""
Sets the write_unstable_reply of this NfsExportCreateParams.
The synchronization type.
:param write_unstable_reply: The write_unstable_reply of this NfsExportCreateParams.
:type: str
"""
allowed_values = ["DATASYNC", "FILESYNC", "UNSTABLE"]
if write_unstable_reply is not None and write_unstable_reply not in allowed_values:
raise ValueError(
"Invalid value for `write_unstable_reply`, must be one of {0}"
.format(allowed_values)
)
self._write_unstable_reply = write_unstable_reply
@property
def zone(self):
"""
Gets the zone of this NfsExportCreateParams.
        The zone in which the export is valid.
:return: The zone of this NfsExportCreateParams.
:rtype: str
"""
return self._zone
@zone.setter
def zone(self, zone):
"""
Sets the zone of this NfsExportCreateParams.
        The zone in which the export is valid.
:param zone: The zone of this NfsExportCreateParams.
:type: str
"""
self._zone = zone
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, NfsExportCreateParams):
            return False
        return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
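# Hedged usage sketch (assumption: the generated constructor accepts keyword
# arguments and defaults them to None, as standard swagger-codegen models do;
# the constructor itself is outside this excerpt):
#
#     params = NfsExportCreateParams()
#     params.symlinks = True
#     params.write_datasync_action = "FILESYNC"   # accepted
#     params.write_datasync_action = "BOGUS"      # raises ValueError
#     print(params.to_dict())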
|
import tensorflow as tf
import numpy as np
def choose_best(neural_networks, fitnesses):
with tf.name_scope('Choose_best') as scope:
print("fitness totais")
print(fitnesses)
top_2_idx = np.argsort(fitnesses)[-2:]
print(top_2_idx)
top_2_values = [neural_networks[i] for i in top_2_idx]
print("fitness escolhidos")
print([fitnesses[i] for i in top_2_idx])
sess = tf.Session()
sess.run(tf.global_variables_initializer());
neural_networks_selected = [];
neural_networks_selected.append(sess.run(top_2_values[0]));
neural_networks_selected.append(sess.run(top_2_values[1]));
print("fim choose best")
#tf.reset_default_graph;
return neural_networks_selected;
#neural_networs_output = []
#for neural_network in neural_networks_selected:
# temp_neural_network = []
# for weight in neural_network:
# temp_neural_network.append(tf.constant(weight))
# neural_networs_output.append(temp_neural_network[:])
#return neural_networs_output;
def create_constants(neural_networks):
neural_networs_output = []
for current_neural_network in neural_networks:
temp_neural_network = []
#print("NEURAL NETWORK")
i = 0
for weight in current_neural_network:
#if (type(weight) != tf.Tensor):
#print(type(weight))
temp_neural_network.append(tf.constant(weight))
i += 1
neural_networs_output.append(temp_neural_network[:])
    return neural_networs_output
def choose_best_tensor(neural_networks, fitnesses):
with tf.name_scope('Choose_best') as scope:
        top_values, top_indices = tf.nn.top_k(tf.reshape(fitnesses, (-1,)), 2)
        # tf.gather cannot index a Python list of per-network weight tensors,
        # so the selection below falls back to numpy argsort + list indexing.
        print("total fitnesses")
        print(fitnesses)
        top_2_idx = np.argsort(fitnesses)[-2:]
        print(top_2_idx)
        top_2_values = [neural_networks[i] for i in top_2_idx]
        print("selected fitnesses")
        print([fitnesses[i] for i in top_2_idx])
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        neural_networks_selected = sess.run(top_2_values)
        tf.reset_default_graph()
neural_networs_output = []
for neural_network in neural_networks_selected:
temp_neural_network = []
print("NEURAL NETOWORK")
for weight in neural_network:
print(weight)
temp_neural_network.append(tf.constant(weight))
neural_networs_output.append(temp_neural_network[:])
        return neural_networs_output
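# Hedged alternative sketch (added, not from the original): the same elitist
# selection without building a TF session, assuming each network is already a
# list of numpy weight arrays. Names here are illustrative only.
def choose_best_numpy(neural_networks, fitnesses, k=2):
    top_idx = np.argsort(fitnesses)[-k:]  # indices of the k highest fitnesses
    return [neural_networks[i] for i in top_idx]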
|
from __future__ import division
import theano.tensor as T
import theano
import numpy
class LRTuner:
def __init__(self, low, high, inc):
self.low = low
self.high = high
self.inc = inc
self.prev_error = numpy.inf
def adapt_lr(self, curr_error, curr_lr):
if curr_error >= self.prev_error:
lr = max(curr_lr / 2, self.low)
else:
lr = min(curr_lr + self.inc, self.high)
self.prev_error = curr_error
return lr
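# Hedged usage sketch (added; illustrative values): halve the learning rate
# when the error stops improving, otherwise grow it additively toward `high`.
if __name__ == '__main__':
    tuner = LRTuner(low=1e-4, high=1e-1, inc=1e-3)
    lr = 1e-2
    for err in [0.9, 0.7, 0.8, 0.6]:  # mock per-epoch validation errors
        lr = tuner.adapt_lr(err, lr)
        print(err, lr)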
|
a = int(input())
b = int(input())
power = 1
if b > 0:
    for i in range(0, b, 1):
        power *= a
else:
    for i in range(0, b, -1):
        power /= a
print(power)
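# Note (added): Python's built-in exponentiation gives the same result,
# including for negative exponents (e.g. 2 ** -3 == 0.125); the loops above
# spell the computation out step by step.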
|
# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
import cunumeric as num
def test_basic():
x = np.linspace(2.0, 3.0, num=5)
y = num.linspace(2.0, 3.0, num=5)
assert np.array_equal(x, y)
def test_endpoint():
x = np.linspace(2.0, 3.0, num=5, endpoint=False)
y = num.linspace(2.0, 3.0, num=5, endpoint=False)
assert np.array_equal(x, y)
def test_retstep():
x = np.linspace(2.0, 3.0, num=5, retstep=True)
    y = num.linspace(2.0, 3.0, num=5, retstep=True)
assert np.array_equal(x[0], y[0])
assert x[1] == y[1]
def test_axis():
x = np.array([[0, 1], [2, 3]])
y = np.array([[4, 5], [6, 7]])
xp = num.array(x)
yp = num.array(y)
z = np.linspace(x, y, num=5, axis=0)
w = num.linspace(xp, yp, num=5, axis=0)
assert np.array_equal(z, w)
z = np.linspace(x, y, num=5, axis=1)
w = num.linspace(xp, yp, num=5, axis=1)
assert np.array_equal(z, w)
z = np.linspace(x, y, num=5, axis=2)
w = num.linspace(xp, yp, num=5, axis=2)
assert np.array_equal(z, w)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv))
|
from django.conf import settings
from django.conf.urls.defaults import *
from basket import admin
urlpatterns = patterns('',
('^subscriptions/', include('subscriptions.urls')),
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
(r'^admin/', include(admin.site.urls)),
('^nagios/', include('nagios.urls')),
)
if settings.DEBUG:
# Remove leading and trailing slashes so the regex matches.
    media_url = settings.MEDIA_URL.strip('/')
urlpatterns += patterns('',
(r'^%s/(?P<path>.*)$' % media_url, 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 13 08:12:15 2020
@author: TOP Artes
"""
# Import the data-structuring and visualization libraries
import pandas as pd
import numpy as np
# Import the control classes for each object
from control.control_estado import ControlEstado
from control.control_municipio import ControlMunicipio
from control.control_analise import ControlAnalise
from control.control_regressor import ControlRegressor
class Inscricao(object):
def __init__(self, regiao):
self.regiao = str(regiao)
        self.control_estado = ControlEstado() # Instantiates the control class for state (UF) data
        self.control_municipio = ControlMunicipio() # Instantiates the control class for municipal data
        self.control_analise = ControlAnalise() # Instantiates the exploratory-analysis control class (UF)
        self.control_regressor = ControlRegressor()
self.df = pd.DataFrame()
self.df_balanced = pd.DataFrame()
self.df_metrics = pd.DataFrame()
self.target = int
self.dct_baseline = dict({})
self.dct_baseline_sc = dict({})
self.dct_balanced_base = dict({})
self.dct_balanced_base_sc = dict({})
self.dct_basetunned = dict({})
self.dct_basetunned_sc = dict({})
self.dct_balanced_tunned = dict({})
self.dct_balanced_tunned_sc = dict({})
self.dct_compare = dict({})
self.modelo = tuple()
if self.regiao == 'estadual':
self.control = self.control_estado
elif self.regiao == 'municipio':
self.control = self.control_municipio
def set_dataframe(self):
        self.df = self.control.get_raw() # Invokes the CSV-reading function
        ## Receives the DataFrame with the original data
        self.df_balanced = self.control.balance_data(self.df) # Invokes the balancing function with the original DataFrame
        ## Receives the balanced DataFrame
        self.target = self.control.set_target(self.df) # Selects the target
        return
    def set_predicted(self, predicted=False):
        self.df_balanced = self.control.balance_data() # Invokes the balancing function
        return
def set_metrics(self, test_size=0.15, random_state=0, base=False, balanced=False, scaled=False, balance=[], baseline=[], scale=[], plot=True, validation=False):
Dataframe = [self.df, self.df_balanced]
dct_metrics = self.control.pre_process(
Dataframe, test_size, random_state, base, balanced, scaled, balance, baseline, scale, plot, validation)
if validation:
self.modelo = dct_metrics
return self.modelo
if len(balance) > 0:
self.dct_compare = dct_metrics
return
if base:
if balanced:
if scaled:
self.dct_balanced_base_sc = dct_metrics
return
self.dct_balanced_base = dct_metrics
return
else:
if scaled:
self.dct_baseline_sc = dct_metrics
return
self.dct_baseline = dct_metrics
return
else:
if balanced:
if scaled:
self.dct_balanced_tunned_sc = dct_metrics
return
self.dct_balanced_tunned = dct_metrics
return
else:
if scaled:
self.dct_basetunned_sc = dct_metrics
return
self.dct_basetunned = dct_metrics
return
        return ## Receives the DataFrame with the original data
    def set_predict(self, modelo, balanced=False):
        previsores = self.control.get_previsores(self.df_balanced, balanced, modelo) # Invokes the function with the pre-processed DataFrame
        ## Receives the 2019 predictors
        previsoes = self.control_regressor.get_predict(previsores, modelo) # Invokes the function with the predictors and the selected model
        self.df_balanced = self.control.set_previsoes(
            self.df_balanced, 'ano', 2019, self.control.lst_targets[0], previsoes) # Inserts the predictions into the DataFrame: DataFrame, index column, query value, target column, predictions
        ## Receives the DataFrame with the predictions
        return previsoes
def view_metrics(self):
"""
        Builds the metrics DataFrame structure for model comparison"""
        self.df_metrics['model'] = ['arvore',
                                    'linear_poly',
                                    'rede_neural',
                                    'support_vector']
self.df_metrics['baseline(mae)'] = [model[1][1] for model in list(self.dct_baseline.items())]
self.df_metrics['baseline_sc(mae)'] = [model[1][1] for model in list(self.dct_baseline_sc.items())]
self.df_metrics['balanced(mae)'] = [model[1][1] for model in list(self.dct_balanced_base.items())]
self.df_metrics['balanced_sc(mae)'] = [model[1][1] for model in list(self.dct_balanced_base_sc.items())]
self.df_metrics['tunned(mae)'] = [model[1][1] for model in list(self.dct_basetunned.items())]
self.df_metrics['tunned_sc(mae)'] = [model[1][1] for model in list(self.dct_basetunned_sc.items())]
self.df_metrics['balanced_tunned(mae)'] = [model[1][1] for model in list(self.dct_balanced_tunned.items())]
self.df_metrics['balanced_tunned_sc(mae)'] = [model[1][1] for model in list(self.dct_balanced_tunned_sc.items())]
self.df_metrics['compare(mae)'] = [model[1][1] for model in list(self.dct_compare.items())]
self.df_metrics['baseline(score)'] = [model[1][0] for model in list(self.dct_baseline.items())]
self.df_metrics['baseline_sc(score)'] = [model[1][0] for model in list(self.dct_baseline_sc.items())]
self.df_metrics['balanced(score)'] = [model[1][0] for model in list(self.dct_balanced_base.items())]
self.df_metrics['balanced_sc(score)'] = [model[1][0] for model in list(self.dct_balanced_base_sc.items())]
self.df_metrics['tunned(score)'] = [model[1][0] for model in list(self.dct_basetunned.items())]
self.df_metrics['tunned_sc(score)'] = [model[1][0] for model in list(self.dct_basetunned_sc.items())]
self.df_metrics['balanced_tunned(score)'] = [model[1][0] for model in list(self.dct_balanced_tunned.items())]
self.df_metrics['balanced_tunned_sc(score)']= [model[1][0] for model in list(self.dct_balanced_tunned_sc.items())]
self.df_metrics['compare(score)'] = [model[1][0] for model in list(self.dct_compare.items())]
return
def view_plot(self, data, predicted=False):
if predicted or data == 'estrutura':
df = self.df_balanced
else:
df = self.df
if data == 'inscricao':
            self.control_analise.inscritos_ano(df, predicted) # Plots the distribution of ENEM registrations by YEAR (2010-2019) per state
            return
        if data == 'estrutura':
            self.control_analise.estrutura_ano(df, predicted) # Plots the distribution of the educational infrastructure by YEAR (2010-2019) per state
            return
        print("Please specify which data to plot ('inscricao' or 'estrutura')")
return
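# Hedged usage sketch (added, not from the original; assumes the control
# classes and their data files are available in this project layout):
#
#     insc = Inscricao('estadual')
#     insc.set_dataframe()
#     insc.view_plot('inscricao')   # registrations per year
#     insc.set_metrics(base=True)   # baseline metrics
#     insc.view_metrics()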
|
import os
import glob
import sqlite3
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
params = {
'font.size': 14,
'figure.constrained_layout.use': True,
'savefig.dpi': 200.0,
}
plt.rcParams.update(params)
def mkdir_if_not_exists(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
print("Created dir:", dirname)
def read_db_file(dbname):
""" Reads .db files
:type dbname: str
:param str dbname: .db file path
:returns: contents of Photons and Surfaces tables
"""
conn = sqlite3.connect(dbname)
df = pd.read_sql_query("SELECT * FROM Photons", con=conn)
ids = pd.read_sql_query("SELECT * FROM Surfaces", con=conn)
conn.close()
return df, ids
def read_db_files(dbfiles):
records = []
for dbfile in tqdm(dbfiles):
angle = float(os.path.basename(dbfile)[:-3])
# pos = float(pos) * 1000 # converts abs position to mm
photons, surfaces = read_db_file(dbfile)
try:
absorber_id = surfaces['id'][surfaces["Path"].str.contains("Cyl_abs")].values[0] # Finds absorber id
aux_id = surfaces['id'][surfaces["Path"].str.contains("aux")].values[0] # Finds auxiliary surface id
abs_hits = photons['surfaceID'].value_counts()[absorber_id]
aux_hits = photons['surfaceID'].value_counts()[aux_id]
nj = abs_hits/aux_hits
records.append({'angle': angle, 'intercept factor': nj})
except IndexError as e:
print(dbfile, e)
df = pd.DataFrame.from_records(records)
return df
def smooth(y, box_pts):
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
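    """Smooth (and optionally differentiate) noisy data with a Savitzky-Golay
    filter: a degree-`order` polynomial is least-squares fitted over a sliding
    window of `window_size` points (window_size must be odd and > order + 1).
    """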
from math import factorial
try:
window_size = np.abs(int(window_size))
order = np.abs(int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
    b = np.array([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
def plot_equation_test(df):
x = df["angle"].values
y = df['intercept factor'].values
plt.plot(x,y, linewidth=4)
# yhat = savitzky_golay(y, 51, 3) # window size 51, polynomial order 3
# plt.plot(x[8:-8], smooth(y,6)[8:-8], linewidth=3, label="Discrete Linear Convolution")
# plt.plot(x,yhat, color='red', linewidth=3, label="Savitzky-Golay filter")
# plt.legend()
    plt.xlabel(r'$\theta_{az} \ (\degree)$')
    plt.ylabel(r'$\gamma$')
plt.savefig("plots/equation_test.png")
plt.show()
# dbfiles = glob.glob(os.getcwd() + '/raweq/*.db')
# df = read_db_files(dbfiles)
# df.to_csv("data/equation_test.csv")
df = pd.read_csv("data/equation_test.csv")
df = df.groupby(df.index // 10).mean()
plot_equation_test(df)
|
#!/usr/bin/env python3
import os, sys
import argparse
import csv
import wave
def LoadKaldiArk(path):
d = {}
with open(path, 'r', encoding = 'utf-8') as f:
for line in [ l.strip() for l in f if l.strip() ]:
key, content = line.split(maxsplit=1)
            if d.get(key) is None:
d[key] = content
else:
print(F'ERROR: found duplicated key {key}', file = sys.stderr)
raise RuntimeError
return d
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
args = parser.parse_args()
print(args, file = sys.stderr)
# load wav.scp
if os.path.isfile(os.path.join(args.dir, 'wav.scp')):
wavscp = LoadKaldiArk(os.path.join(args.dir, 'wav.scp'))
else:
raise RuntimeError(F"Cannot find scp file {os.path.join(args.dir, 'wav.scp')}")
# load text/trans.txt
if os.path.isfile(os.path.join(args.dir, 'text')):
utt2text = LoadKaldiArk(os.path.join(args.dir, 'text'))
elif os.path.isfile(os.path.join(args.dir, 'trans.txt')):
utt2text = LoadKaldiArk(os.path.join(args.dir, 'trans.txt'))
    else:
        utt2text = {}
# load utt2spk
if os.path.isfile(os.path.join(args.dir, 'utt2spk')):
utt2spk = LoadKaldiArk(os.path.join(args.dir, 'utt2spk'))
else:
utt2spk = {}
# load utt2dur
if os.path.isfile(os.path.join(args.dir, 'utt2dur')):
utt2dur = LoadKaldiArk(os.path.join(args.dir, 'utt2dur'))
else:
utt2dur = {}
utts = []
k = 0
for uttid in wavscp.keys():
audio = wavscp[uttid]
        if utt2dur:
            duration = float(utt2dur.get(uttid))
        else:
            with wave.open(os.path.join(args.dir, audio), 'r') as wav:
                duration = wav.getnframes() / wav.getframerate()
utt = {
'ID' : uttid,
'AUDIO' : audio,
'DURATION' : F'{duration:.3f}',
}
if utt2spk:
utt['SPEAKER'] = utt2spk.get(uttid)
if utt2text:
utt['TEXT'] = utt2text.get(uttid)
utts.append(utt)
k += 1
if k % 10000 == 0:
print(F'Processed {k} utts', file = sys.stderr)
utts.sort(key = lambda e: e['ID'])
with open(os.path.join(args.dir, 'metadata.tsv'), 'w+', encoding = 'utf-8') as fo:
csv_header_fields = ['ID', 'AUDIO', 'DURATION']
if utt2spk:
csv_header_fields.append('SPEAKER')
if utt2text:
csv_header_fields.append('TEXT')
csv_writer = csv.DictWriter(fo, fieldnames=csv_header_fields, delimiter='\t', lineterminator='\n')
csv_writer.writeheader()
for audio in utts:
csv_writer.writerow(audio)
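# Usage sketch (added): run against a Kaldi-style data directory, e.g.
#   python3 make_metadata.py data/train
# The directory must contain wav.scp; text/trans.txt, utt2spk and utt2dur are
# optional and add TEXT/SPEAKER/DURATION columns when present. The script name
# here is illustrative; the original filename is outside this excerpt.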
|
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Progress(Component):
"""A Progress component.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional): The children of this component
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- style (dict; optional): Defines CSS styles which will override styles previously set.
- className (string; optional): Often used with CSS to style elements with common properties.
- key (string; optional): A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info
- tag (string; optional): HTML tag to use for the progress bar, default: div
- bar (boolean; optional): Apply progress-bar class, for use inside a multi progress bar.
- multi (boolean; optional): Create container for multiple progress bars
- max (string | number; optional): Upper limit for value, default: 100
- value (string | number; optional): Specify progress, value from 0 to max inclusive.
- animated (boolean; optional): Animate the bar, must have striped set to True to work.
- striped (boolean; optional): Use striped progress bar
- color (string; optional): Set color of the progress bar, options: primary, secondary, success,
warning, danger, info.
- barClassName (string; optional): CSS classes to apply to the bar."""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, style=Component.UNDEFINED, className=Component.UNDEFINED, key=Component.UNDEFINED, tag=Component.UNDEFINED, bar=Component.UNDEFINED, multi=Component.UNDEFINED, max=Component.UNDEFINED, value=Component.UNDEFINED, animated=Component.UNDEFINED, striped=Component.UNDEFINED, color=Component.UNDEFINED, barClassName=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'style', 'className', 'key', 'tag', 'bar', 'multi', 'max', 'value', 'animated', 'striped', 'color', 'barClassName']
self._type = 'Progress'
self._namespace = 'dash_bootstrap_components/_components'
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'style', 'className', 'key', 'tag', 'bar', 'multi', 'max', 'value', 'animated', 'striped', 'color', 'barClassName']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Progress, self).__init__(children=children, **args)
def __repr__(self):
if(any(getattr(self, c, None) is not None
for c in self._prop_names
if c is not self._prop_names[0])
or any(getattr(self, c, None) is not None
for c in self.__dict__.keys()
if any(c.startswith(wc_attr)
for wc_attr in self._valid_wildcard_attributes))):
props_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self._prop_names
if getattr(self, c, None) is not None])
wilds_string = ', '.join([c+'='+repr(getattr(self, c, None))
for c in self.__dict__.keys()
if any([c.startswith(wc_attr)
for wc_attr in
self._valid_wildcard_attributes])])
return ('Progress(' + props_string +
(', ' + wilds_string if wilds_string != '' else '') + ')')
else:
return (
'Progress(' +
repr(getattr(self, self._prop_names[0], None)) + ')')
|
from rubicon_ml.client import Base, TagMixin
from rubicon_ml.client.utils.exception_handling import failsafe
from rubicon_ml.exceptions import RubiconException
class Dataframe(Base, TagMixin):
"""A client dataframe.
A `dataframe` is a two-dimensional, tabular dataset with
labeled axes (rows and columns) that provides value to the
model developer and/or reviewer when visualized.
For example, confusion matrices, feature importance tables
and marginal residuals can all be logged as a `dataframe`.
A `dataframe` is logged to a `project` or an `experiment`.
Parameters
----------
domain : rubicon.domain.Dataframe
The dataframe domain model.
parent : rubicon.client.Project or rubicon.client.Experiment
        The project or experiment that the dataframe is
        logged to.
"""
def __init__(self, domain, parent):
super().__init__(domain, parent._config)
self._data = None
self._parent = parent
@failsafe
def get_data(self, df_type="pandas"):
"""Loads the data associated with this Dataframe
into a `pandas` or `dask` dataframe.
Parameters
----------
df_type : str, optional
The type of dataframe to return. Valid options include
["dask", "pandas"]. Defaults to "pandas".
"""
project_name, experiment_id = self.parent._get_identifiers()
self._data = self.repository.get_dataframe_data(
project_name,
self.id,
experiment_id=experiment_id,
df_type=df_type,
)
return self._data
@failsafe
def plot(self, df_type="pandas", plotting_func=None, **kwargs):
"""Render the dataframe using `plotly.express`.
Parameters
----------
df_type : str, optional
The type of dataframe. Can be either `pandas` or `dask`.
Defaults to 'pandas'.
plotting_func : function, optional
The `plotly.express` plotting function used to visualize the
dataframes. Available options can be found at
https://plotly.com/python-api-reference/plotly.express.html.
Defaults to `plotly.express.line`.
kwargs : dict, optional
Keyword arguments to be passed to `plotting_func`. Available options
can be found in the documentation of the individual functions at the
URL above.
Examples
--------
>>> # Log a line plot
>>> dataframe.plot(x='Year', y='Number of Subscriptions')
>>> # Log a bar plot
>>> import plotly.express as px
>>> dataframe.plot(plotting_func=px.bar, x='Year', y='Number of Subscriptions')
"""
try:
import plotly.express as px
if plotting_func is None:
plotting_func = px.line
except ImportError:
raise RubiconException(
"`ui` extras are required for plotting. Install with `pip install rubicon-ml[ui]`."
)
return plotting_func(self.get_data(df_type=df_type), **kwargs)
@property
def id(self):
"""Get the dataframe's id."""
return self._domain.id
@property
def name(self):
"""Get the dataframe's name."""
return self._domain.name
@property
def description(self):
"""Get the dataframe's description."""
return self._domain.description
@property
def created_at(self):
"""Get the time this dataframe was created."""
return self._domain.created_at
@property
def parent(self):
"""Get the dataframe's parent client object."""
return self._parent
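# Hedged usage sketch (added; assumes a rubicon_ml project with a logged
# dataframe, retrieved through the client's standard accessors):
#
#     dataframe = project.dataframes()[0]
#     df = dataframe.get_data()   # pandas by default
#     dataframe.plot(x="Year", y="Number of Subscriptions")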
|
###################################################################
#
# CSSE1001 - Assignment 2
#
# Student Number: 43034002
#
# Student Name: Jiefeng Hou(Nick)
#
###################################################################
####################################################################
#
# Do not change the following code
#
####################################################################
from Tkinter import *
import tkMessageBox
import tkFileDialog
# Formatting for use in the __str__ methods
PART_FORMAT = "{0:10}{1:30}{2:>10}"
COMPOUND_FORMAT = "{0:10}{1:30}{2:>10} {3}"
# Note: some of the supplied definitions below rely on the classes
# you need to write.
def load_items_from_file(products, filename):
"""Add the items in the supplied file to the products object.
load_items_from_file(Products, str) -> None
Precondition: Assumes the supplied is of the correct format
"""
fid = open(filename, 'U')
for line in fid:
item_info = line.split(',')
if len(item_info) > 2: # ignores blank lines
item_id = item_info[0].strip()
item_name = item_info[1].strip()
if ':' in item_info[2]: # compound
items = item_info[2:]
products.add_item(item_id,
Compound(item_id, item_name, products,
get_components(items)))
else: # part
item_cost = int(item_info[2].strip())
products.add_item(item_id, Part(item_id, item_name, item_cost))
fid.close()
def get_components(items):
"""Return a list of pairs of IDs and numbers in items.
get_components(list(str)) -> list((str, int))
"""
components = []
for item in items:
item = item.strip()
itemid, _, itemnumstr = item.partition(':')
itemid = itemid.strip()
itemnumstr = itemnumstr.strip()
components.append((itemid, int(itemnumstr)))
return components
def save_items_to_file(products, filename):
"""Save the items in products to the given file.
save_items_to_file(Products, str) -> None
"""
f = open(filename, 'w')
keys = products.get_keys()
for key in keys:
f.write("{0}\n".format(repr(products.get_item(key))))
f.close()
def items_string(items_list):
"""Convert a list of Id, number pairs into a string representation.
items_string(list((str, int))) -> str
"""
result = []
for itemid, num in items_list:
result.append("{0}:{1}".format(itemid, num))
return ','.join(result)
####################################################################
#
# Insert your code below
#
####################################################################
class Item(object):
"""takes a strings representing the item ID and name of the item"""
def __init__(self,itemID,name):
"""Constructor: Item(str,str)
"""
self._itemID=itemID
self._name=name
self._depend=None
def get_name(self):
"""returns the name of the item
get_name()->str
"""
return self._name
def get_ID(self):
"""returns the ID of the item
get_ID()->str
"""
return self._itemID
def set_name(self,name):
"""updates the name of the item
set_name(name)->None
"""
self._name=name
def get_depend(self):
"""returns the empty list
get_depend()->list
"""
return []
class Part(Item):
"""takes a strings representing the item ID and name of the item as well as the cost of the part"""
def __init__(self,itemID,name,cost):
"""Constructor: Part(str,str,int)
"""
Item.__init__(self,itemID,name)
self._cost=cost
def get_cost(self):
"""returns the cost of the item
get_cost()->int
"""
return self._cost
def set_cost(self,cost):
"""updates the cost of the item
set_cost(int)->None
"""
self._cost=cost
def __repr__(self):
"""return a string representation of the item(used to write the item data to a file)
"""
return '{0},{1},{2}'.format(self._itemID,self._name,self._cost)
def __str__(self):
"""return a more detailed representation of the item(used to write the item data to the listbox)
"""
return PART_FORMAT.format(self._itemID,self._name,self._cost)
class Compound(Item):
"""takes a strings representing the item ID and name of the item, the products and a list of
pairs of IDs and numbers representing the components of the compound item
"""
def __init__(self,itemID,name,products,itemlist):
"""Constructor: Compound(str,str,str,list((str,int)))
"""
Item.__init__(self,itemID,name)
self._products=products
self._itemlist=itemlist
def get_cost(self):
"""returns the cost of the item
get_cost()->int
"""
self.get_items_list()
cost=0
if self._itemlist is not None:
for ID in self._itemlist:
for key in self._products.get_keys():
if ID[0]==key:
cost +=int(ID[1])*int(self._products.get_item(ID[0]).get_cost())
self._cost=cost
return self._cost
def get_items_list(self):
"""returns the items list
get_items_list()->list((str,int))
"""
        if self._itemlist is None:
return 'None'
if ':' in self._itemlist:
self._itemlist=get_components(self._itemlist.split(','))
return self._itemlist
else:
return self._itemlist
def get_items_str(self):
"""returns a string representation items list
get_items_str()->str
"""
        if self._itemlist is None:
return 'None'
if ':' not in self._itemlist:
self._itemlist=items_string(self._itemlist)
return self._itemlist
else:
return self._itemlist
def set_items(self,itemlist):
"""updates the items list
set_items(list((str,int))->None
"""
if ':' in itemlist:
itemlist=itemlist.split(',')
self._itemlist=get_components(itemlist)
else:
self._itemlist=itemlist
def get_depend(self):
"""overrides the method in the super class and returns the list of all the item IDs in the items list
get_depend()->list(str)
"""
self._depend=[]
self.set_items(self._itemlist)
for c in self._itemlist:
self._depend.append(c[0])
return self._depend
def __repr__(self):
"""return a string representation of the item(used to write the item data to a file)
"""
return '{0},{1},{2}'.format(self._itemID,self._name,self.get_items_str())
def __str__(self):
"""return a more detailed representation of the item (used to write the item data to the listbox)
"""
return COMPOUND_FORMAT.format(self._itemID,self._name,self.get_cost(),self.get_items_str())
class Products():
"""This is the model for the program and is used to keep track of all the items
using a dictionary whose keys are item IDs and whose values are item objects.
"""
def __init__(self,dic=None):
"""Constructor: Products()
"""
if dic is None:
dic={}
self._dic=dic
def load_items(self, filename):
"""loads the items from a file
load_items(str)->None
"""
self.delete_all()
load_items_from_file(self, filename)
def save_items(self, filename):
"""saves the items to a file
save_items(str)->None
"""
save_items_to_file(self, filename)
def get_item(self,itemID):
"""returns the item for a given item ID
get_item(str)->list(str)
"""
for c in self._dic.keys():
if c==itemID:
return self._dic[c]
def add_item(self,itemID,value):
"""adds a new item to the dictionary
add_item(str,Parts)->None
"""
self._itemID=itemID
self._value=value
self._dic[itemID]=value
def get_keys(self):
"""returns all the keys in the dictionary in sorted order
get_keys()->list(str)
"""
a=self._dic.keys()
a.sort()
return a
def remove_item(self,itemID):
"""removes a given item from the dictionary
remove_item(str)->None
"""
for c in self._dic.keys():
if c==itemID:
del self._dic[c]
def delete_all(self):
"""resets the dictionary to be empty
delete_all()->None
"""
self._dic.clear()
def check_depend(self,itemID):
"""checks if any item in the dictionary depends on the item with the given ID
check_depend(str)->bool
"""
for key in self.get_keys():
a=self.get_item(key).get_depend()
if itemID in a:
return True
return False
class View(Listbox):
"""This class provides the view of the item information list and should inherit form the listbox class
"""
def __init__(self,master,font=('Courier',10)):
"""Constructor: View(Listbox)
"""
Listbox.__init__(self,master,font=('Courier',10))
def update(self, products):
"""delete all the items in the view, and then (re)display them
update(list(str))->None
"""
self.delete(0,END)
for ID in products.get_keys():
self.insert(END,str(products.get_item(ID)))
class Controller(object):
"""This class is responsible for creating all the GUI components and interacting with the user
"""
def __init__(self,master):
"""Constructor:Controller(object)
"""
self._master = master
self._master.title("Bikes R Us: Products")
self._frame1=Frame(self._master)
self._frame1.pack(side=TOP,expand=True,fill=BOTH)
self._frame2=Frame(self._master)
self._frame2.pack(side=BOTTOM,padx=10, pady=5)
self._frame3=Frame(self._master)
self._frame3.pack(side=BOTTOM,padx=10, pady=5)
self._products=Products()
self._var=StringVar()
self._command=''
self._ID=''
"""three menu items in file menu
"""
menubar = Menu(self._master)
master.config(menu=menubar)
filemenu = Menu(menubar)
menubar.add_cascade(label="File", menu=filemenu)
filemenu.add_command(label="Open Products File", command=self.open_file)
filemenu.add_command(label="Save Products File", command=self.save_file)
filemenu.add_command(label="Exit", command=self.close)
"""Listbox - display the required item information in alphabetic order on item IDs
"""
self._Listbox=View(self._frame1)
self._Listbox.pack(side=TOP, expand=True, fill=BOTH, pady=20)
"""Entry Widget - all other interactions require data to be entered/modified.
"""
"""Label"""
label=Label(self._frame2, textvariable=self._var)
label.pack(side=LEFT,padx=10, pady=5)
"""Entry widget"""
self._entry=Entry(self._frame2,width=80)
self._entry.pack(side=LEFT,padx=10, pady=5)
"""OK button"""
button=Button(self._frame2, text="OK", command=self.ok)
button.pack(side=LEFT,padx=10, pady=5)
"""Buttons"""
button1=Button(self._frame3, text="Add Part", command=self.add_part)
button1.pack(side=LEFT,padx=10, pady=5)
button2=Button(self._frame3, text="Add Compound", command=self.add_compound)
button2.pack(side=LEFT,padx=10, pady=5)
button3=Button(self._frame3, text="Update Name",command=self.update_name)
button3.pack(side=LEFT,padx=10, pady=5)
button4=Button(self._frame3, text="Update Cost", command=self.update_cost)
button4.pack(side=LEFT,padx=10, pady=5)
button5=Button(self._frame3, text="Update Items", command=self.update_items)
button5.pack(side=LEFT,padx=10, pady=5)
button6=Button(self._frame3, text="Remove Item", command=self.remove_item)
button6.pack(side=LEFT,padx=10, pady=5)
def open_file(self):
"""open a le containing items information using the askopenfilename widget
"""
self._filename = tkFileDialog.askopenfilename()
if self._filename:
self._products.load_items(self._filename)
self._Listbox.delete(0,END)
for key in self._products.get_keys():
self._Listbox.insert(END,str(self._products.get_item(key)))
def save_file(self):
"""save all the current items information using the asksaveasfilename widget
"""
self._filename = tkFileDialog.asksaveasfilename()
if self._filename:
self._products.save_items(self._filename)
def close(self):
"""exit the program
"""
self._master.destroy()
"""OK Button"""
def ok(self):
"""Add Part Button
"""
if self._command=='add_part':
if self._entry.get() =='':
tkMessageBox.showerror("Add Part Error", "ID cannot be empty")
else:
if self._entry.get() in self._products.get_keys():
tkMessageBox.showerror("Add Part Error", "ID already exists")
else:
self._products.add_item(self._entry.get(),Part(self._entry.get(),'No Name',0))
"""Add Compound Button
"""
if self._command=='add_compound':
if self._entry.get() =='':
tkMessageBox.showerror("Add Compound Error", "ID cannot be empty")
else:
if self._entry.get() in self._products.get_keys():
tkMessageBox.showerror("Add Compound Error", "ID already exists")
else:
self._products.add_item(self._entry.get(),Compound(self._entry.get(),'No Name',self._products,None))
"""Update Name Button
"""
if self._command=='update_name':
if self.select_item().isdigit()==False:
tkMessageBox.showerror("Selection Error", "No item selected")
else:
self._ID=self._products.get_keys()[int(self._selection)]
self._products.get_item(self._ID).set_name(self._entry.get())
"""Update Cost Button
"""
if self._command=='update_cost':
if self.select_item().isdigit()==False:
tkMessageBox.showerror("Selection Error", "No item selected")
else:
if self._entry.get().isdigit()==False:
tkMessageBox.showerror("Update cost", "Cost must be a number")
else:
"""Test you select a Compound or not"""
if isinstance(self._products.get_item(self._ID), Compound):
tkMessageBox.showerror("Part Error", "This item is not a part")
else:
self._ID=self._products.get_keys()[int(self._selection)]
self._products.get_item(self._ID).set_cost(self._entry.get())
"""Update Item Button
"""
"""Test itemlist which you enter is valid or not """
if self._command=='update_item':
try:
itemlist=self._entry.get()
get_components(itemlist.split(','))
except:
tkMessageBox.showerror("Compound Item", "Invalid item list")
else:
"""Test you select a Part or not"""
if isinstance(self._products.get_item(self._ID), Part):
tkMessageBox.showerror("Commpound Error", "This item is not a compound")
else:
"""Test the itemID in itemlist which you enter exist in PartID"""
partID=[]
a=0
for key in self._products.get_keys():
if isinstance(self._products.get_item(key), Part):
partID.append(key)
for item in get_components(itemlist.split(',')):
if item[0] not in partID:
tkMessageBox.showerror("Compound Item", "Invalid item list")
a=1
if a==0:
self._ID=self._products.get_keys()[int(self._selection)]
self._products.get_item(self._ID).set_items(self._entry.get())
self._Listbox.update(self._products)
self._command=''
self._var.set('')
self._entry.delete(0,END)
def add_part(self):
"""The "Add Part" button is used when the user wishes to add a new part
"""
self._var.set('Add Part ID:')
self._command='add_part'
def add_compound(self):
"""The "Add Part" button is used when the user wishes to add a new compound
"""
self._var.set('Add Compound ID:')
self._command='add_compound'
def update_name(self):
"""The "Update Name" button is used to edit the name of the selected item
"""
self._entry.delete(0,END)
"""Test you select a item from the listbox or not"""
if self.select_item().isdigit()==False:
tkMessageBox.showerror("Selection Error", "No item selected")
else:
self._var.set('Update Name:')
self._ID=self._products.get_keys()[int(self._selection)]
self._entry.insert(0,self._products.get_item(self._ID).get_name())
self._command='update_name'
def update_cost(self):
"""The "Update Cost" button is used to edit the cost of the selected part(not used for compound items)
"""
self._entry.delete(0,END)
"""Test you select a item from the listbox or not"""
if self.select_item().isdigit()==False:
tkMessageBox.showerror("Selection Error", "No item selected")
else:
self._ID=self._products.get_keys()[int(self._selection)]
if isinstance(self._products.get_item(self._ID), Compound):
tkMessageBox.showerror("Part Error", "This item is not a part")
else:
self._var.set('Update Cost:')
self._entry.insert(0,self._products.get_item(self._ID).get_cost())
self._command='update_cost'
def update_items(self):
"""The "Update Items" button is used to edit the items that make up the the selected compound item
"""
self._entry.delete(0,END)
"""Test you select a item from the listbox or not"""
if self.select_item().isdigit()==False:
tkMessageBox.showerror("Selection Error", "No item selected")
else:
self._ID=self._products.get_keys()[int(self._selection)]
if isinstance(self._products.get_item(self._ID), Compound):
self._var.set('Update Compound Items:')
self._entry.insert(0,self._products.get_item(self._ID).get_items_str())
else:
tkMessageBox.showerror("Commpound Error", "This item is not a compound")
self._var.set('')
self._entry.delete(0,END)
self._command='update_item'
def remove_item(self):
"""The "Remove Item" button removes the selected item
"""
self._var.set('')
self._entry.delete(0,END)
"""Test you select a item from the listbox or not"""
if self.select_item().isdigit()==False:
tkMessageBox.showerror("Selection Error", "No item selected")
else:
self._ID=self._products.get_keys()[int(self._selection)]
            if self._products.check_depend(self._ID):
tkMessageBox.showerror("Remove Error", "At least one compound item refers to this item")
else:
self._products.remove_item(self._ID)
self._Listbox.update(self._products)
self._selection=''
def select_item(self):
"""select one item which you want to edit
"""
try:
self._Listbox.curselection()[0]
except:
self._selection=''
return self._selection
else:
self._selection=self._Listbox.curselection()[0]
return self._selection
####################################################################
#
# WARNING: Leave the following code at the end of your code
#
# DO NOT CHANGE ANYTHING BELOW
#
####################################################################
class StoreApp():
def __init__(self, master=None):
master.title("Bikes R Us: Products")
self.controller = Controller(master)
def main():
root = Tk()
app = StoreApp(root)
root.mainloop()
if __name__ == '__main__':
main()
|
# Generated by Django 2.2 on 2020-05-18 22:02
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hotel', '0005_auto_20200518_2202'),
]
operations = [
migrations.RemoveField(
model_name='room',
name='last_modification',
),
migrations.AddField(
model_name='room',
name='last_modified',
field=models.DateTimeField(default=datetime.datetime(2020, 5, 18, 22, 2, 57, 713471)),
),
migrations.AlterField(
model_name='booking',
name='end_datetime',
field=models.DateTimeField(default=datetime.datetime(2020, 5, 18, 22, 2, 57, 713915)),
),
migrations.AlterField(
model_name='booking',
name='start_datetime',
field=models.DateTimeField(default=datetime.datetime(2020, 5, 18, 22, 2, 57, 713895)),
),
migrations.AlterField(
model_name='checkin',
name='end_datetime',
field=models.DateTimeField(default=datetime.datetime(2020, 5, 18, 22, 2, 57, 714378)),
),
migrations.AlterField(
model_name='checkin',
name='start_datetime',
field=models.DateTimeField(default=datetime.datetime(2020, 5, 18, 22, 2, 57, 714357)),
),
]
|
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import cv2
from flask import Flask, Response
import time
import threading
import multiprocessing
import urllib.request
import json
camMult = 2
flaskServer = Flask(__name__)
camera = cv2.VideoCapture(0)
img = None
def video_thread(camera):
while True:
global img
ret, img = camera.read()
def start_flask_server():
camThread = threading.Thread(target=video_thread, args=(camera,))
    camThread.daemon = True
camThread.start()
flaskServer.run(host='0.0.0.0', port="5510")
@flaskServer.route('/')
def index():
return "Camera Server Active."
def generate_frame():
while True:
time.sleep(0.02)
        ret, jpg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 50])
frame = jpg.tobytes()
yield (b'--frame\r\n'
b'Content-Type:image/jpeg\r\n'
b'Content-Length: ' + f"{len(frame)}".encode() + b'\r\n'
b'\r\n' + frame + b'\r\n')
@flaskServer.route('/status')
def status_check():
global img
if img is not None:
return Response(status=200)
else:
return Response(status=503)
@flaskServer.route('/stream.mjpg')
def video_feed():
return Response(generate_frame(), mimetype='multipart/x-mixed-replace; boundary=frame')
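# The stream served above can be consumed by any MJPEG-capable client, e.g. a
# browser or OpenCV opened on http://<host>:5510/stream.mjpg (port 5510 is set
# in start_flask_server).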
class videoThread(QThread):
changePixmap = pyqtSignal(QImage)
def __init__(self,_,address):
super(videoThread,self).__init__()
self.address = address
def run(self):
cap = cv2.VideoCapture(self.address)
while True:
ret, frame = cap.read()
if ret:
rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
h, w, ch = rgbImage.shape
bytesPerLine = ch * w
convertToQtFormat = QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)
p = convertToQtFormat.scaled(640*camMult, 480*camMult, Qt.KeepAspectRatio)
self.changePixmap.emit(p)
class CameraDisplay(QWidget):
def __init__(self):
super().__init__()
@pyqtSlot(QImage)
def setImage(self, image):
self.label.setPixmap(QPixmap.fromImage(image))
def initUI(self, cameraIp, statusText = None):
self.statusText = statusText
self.setWindowTitle("AI Camera Stream")
self.resize(1800, 1200)
# create a label
self.label = QLabel(self)
# self.label.move(0,0)
self.label.resize(640*camMult, 480*camMult)
self.label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.label.setAlignment(Qt.AlignCenter)
self.layout = QGridLayout()
self.layout.addWidget(self.label, 0, 0)
self.setLayout(self.layout)
th = videoThread(self, cameraIp + "/video_feed")
th.changePixmap.connect(self.setImage)
th.start()
self.show()
def closeEvent(self, event):
if self.statusText:
self.statusText.setText("VIDEO DISPLAY: EXITED")
self.statusText.setStyleSheet("color: red")
event.accept()
class ControlPanel(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setWindowTitle("Control Panel")
self.setMinimumWidth(250)
self.titleText = QLabel("MaskPass Control Pannel")
self.serverIp = QLineEdit("http://mc.ai1to1.com:5000")
self.cameraToggle = QPushButton("Start Camera Server")
self.cameraToggle.clicked.connect(self.toggleCamera)
self.cameraStatus = QLabel("CAMERA: OFFLINE")
self.cameraStatus.setStyleSheet("color: red")
self.aiToggle = QPushButton("Send Start Command to AI Server")
self.aiToggle.clicked.connect(self.toggleAi)
self.aiStatus = QLabel("AI SERVER: OFFLINE")
self.aiStatus.setStyleSheet("color: red")
self.arduinoToggle = QPushButton("Start Arduino Service")
self.arduinoToggle.clicked.connect(self.toggleArduino)
self.arduinoStatus = QLabel("ARDUINO SERVICE: OFFLINE")
self.arduinoStatus.setStyleSheet("color: red")
self.videoToggle = QPushButton("Start Video Service")
self.videoToggle.clicked.connect(self.toggleVideo)
self.videoStatus = QLabel("VIDEO DISPLAY: OFFLINE")
self.videoStatus.setStyleSheet("color: red")
self.stopServerToggle = QPushButton("Stop AI Server")
self.stopServerToggle.clicked.connect(self.stopServer)
self.stopServerToggle.setStyleSheet("background-color: red")
self.exitToggle = QPushButton("EXIT")
self.exitToggle.clicked.connect(self.toggleExit)
self.exitToggle.setStyleSheet("background-color: red")
self.layout = QVBoxLayout()
self.layout.addWidget(self.titleText)
self.layout.addWidget(self.serverIp)
self.layout.addWidget(self.cameraToggle)
self.layout.addWidget(self.cameraStatus)
self.layout.addWidget(self.aiToggle)
self.layout.addWidget(self.aiStatus)
self.layout.addWidget(self.arduinoToggle)
self.layout.addWidget(self.arduinoStatus)
self.layout.addWidget(self.videoToggle)
self.layout.addWidget(self.videoStatus)
self.layout.addWidget(self.stopServerToggle)
self.layout.addWidget(self.exitToggle)
self.setLayout(self.layout)
self.show()
def toggleCamera(self):
self.cameraStatus.setText("CAMERA: LOADING")
self.cameraStatus.setStyleSheet("color: orange")
self.repaint()
try:
flaskThread = multiprocessing.Process(target=start_flask_server)
flaskThread.start()
            while True:
                try:
                    with urllib.request.urlopen("http://localhost:5510/status") as url:
                        print(url)
                        if url.status == 200:
                            break
                except Exception as e:
                    print(e)
                    time.sleep(0.5)  # back off instead of busy-waiting while the server starts
self.cameraStatus.setText("CAMERA: ONLINE")
self.cameraStatus.setStyleSheet("color: green")
except Exception as e:
print(e)
self.cameraStatus.setText("CAMERA: FAILED")
self.cameraStatus.setStyleSheet("color: red")
def toggleAi(self):
self.aiStatus.setText("AI SERVER: LOADING")
self.aiStatus.setStyleSheet("color: orange")
self.repaint()
try:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
req = urllib.request.Request(self.serverIp.text() + "/start_ai")
req.add_header('Content-Type', 'application/json; charset=utf-8')
jsondata = json.dumps({'camera': "http://" + external_ip + ":5510" + "/stream.mjpg"})
jsondataasbytes = jsondata.encode('utf-8') # needs to be bytes
req.add_header('Content-Length', len(jsondataasbytes))
with urllib.request.urlopen(req, jsondataasbytes) as url:
if url.status != 200:
raise Exception(url.status)
self.aiStatus.setText("AI SERVER: ONLINE")
self.aiStatus.setStyleSheet("color: green")
except Exception as e:
print(e)
self.aiStatus.setText("AI SERVER: FAILED")
self.aiStatus.setStyleSheet("color: red")
def toggleArduino(self):
self.arduinoStatus.setText("ARDUINO SERVICE: LOADING")
self.arduinoStatus.setStyleSheet("color: orange")
self.repaint()
arduinoThread = threading.Thread(target=arduinoHandler, args=(self.serverIp.text(),))
        arduinoThread.daemon = True
arduinoThread.start()
self.arduinoStatus.setText("ARDUINO SERVICE: ONLINE")
self.arduinoStatus.setStyleSheet("color: green")
def toggleVideo(self):
self.videoStatus.setText("VIDEO DISPLAY: LOADING")
self.videoStatus.setStyleSheet("color: orange")
self.repaint()
self.cameraService = CameraDisplay()
self.cameraService.initUI(self.serverIp.text(), statusText = self.videoStatus)
self.cameraService.show()
self.videoStatus.setText("VIDEO DISPLAY: ONLINE")
self.videoStatus.setStyleSheet("color: green")
def stopServer(self):
with urllib.request.urlopen(self.serverIp.text() + "/stop") as url:
pass
self.aiStatus.setText("AI SERVER: STOPPED")
self.aiStatus.setStyleSheet("color: red")
def toggleExit(self):
global app
del app
time.sleep(1)
sys.exit(0)
# FILL IN THE CODE HERE!
def arduino_open_door():
print("DOOR OPEN")
time.sleep(5) # remove this. this is to emulate a door opening
pass
def arduino_close_door():
print("DOOR CLOSE")
time.sleep(5) # remove this. this is to emulate a door closing
pass
def arduinoHandler(serverIp):
while True:
try:
time.sleep(1)
with urllib.request.urlopen(serverIp + "/open_door") as url:
if url.status == 200:
res = url.read().decode('utf-8')
if res == "True":
arduino_open_door()
time.sleep(5)
arduino_close_door()
else:
pass
else:
raise Exception(url.status)
except Exception as e:
print(e)
if __name__ == '__main__':
import sys
global app
app = QApplication(sys.argv)
runApp = ControlPanel()
runApp.show()
sys.exit(app.exec_())
|
import torch
import torch.nn as nn
from torch import hub
# PCA_PARAMS = "https://github.com/harritaylor/torchvggish/" \
# "releases/download/v0.1/vggish_pca_params-970ea276.pth"
class VGG(nn.Module):
def __init__(self, features, postprocess):
super(VGG, self).__init__()
self.postprocess = postprocess
self.features = features
self.embeddings = nn.Sequential(
nn.Linear(512 * 4 * 6, 4096),
nn.ReLU(True),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Linear(4096, 128),
nn.ReLU(True),
)
if postprocess:
self.pproc = Postprocessor()
def forward(self, x):
x = self.features(x)
# Transpose the output from features to
# remain compatible with vggish embeddings
x = torch.transpose(x, 1, 3)
x = torch.transpose(x, 1, 2)
x = x.contiguous()
x = x.view(x.size(0), -1)
x = self.embeddings(x)
x = self.pproc.postprocess(x) if self.postprocess else x
return x
class Postprocessor(object):
"""Post-processes VGGish embeddings. Returns a torch.Tensor instead of a
numpy array in order to preserve the gradient.
"The initial release of AudioSet included 128-D VGGish embeddings for each
segment of AudioSet. These released embeddings were produced by applying
a PCA transformation (technically, a whitening transform is included as well)
and 8-bit quantization to the raw embedding output from VGGish, in order to
stay compatible with the YouTube-8M project which provides visual embeddings
in the same format for a large set of YouTube videos. This class implements
the same PCA (with whitening) and quantization transformations."
"""
def __init__(self):
"""Constructs a postprocessor."""
params = hub.load_state_dict_from_url(PCA_PARAMS)
self._pca_matrix = torch.as_tensor(params["pca_eigen_vectors"]).float()
self._pca_means = torch.as_tensor(params["pca_means"].reshape(-1, 1)).float()
def postprocess(self, embeddings_batch):
"""Applies tensor postprocessing to a batch of embeddings.
Args:
          embeddings_batch: A tensor of shape [batch_size, embedding_size]
containing output from the embedding layer of VGGish.
Returns:
A tensor of the same shape as the input, containing the PCA-transformed,
quantized, and clipped version of the input.
"""
pca_applied = torch.mm(
self._pca_matrix, (embeddings_batch.t() - self._pca_means)
).t()
clipped_embeddings = torch.clamp(pca_applied, -2.0, +2.0)
quantized_embeddings = torch.round(
(clipped_embeddings - -2.0) * (255.0 / (+2.0 - -2.0))
)
return torch.squeeze(quantized_embeddings)
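# Usage sketch (constructing Postprocessor downloads the PCA parameters
# from PCA_PARAMS on first use):
#   pp = Postprocessor()
#   out = pp.postprocess(torch.randn(8, 128))  # quantized values in [0, 255]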
def make_layers():
layers = []
in_channels = 1
for v in [64, "M", 128, "M", 256, 256, "M", 512, 512, "M"]:
if v == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def _vgg(postprocess=False):
return VGG(make_layers(), postprocess)
def vggish(state_dict, postprocess=False):
"""
VGGish is a PyTorch port of Tensorflow's VGGish architecture
used to create embeddings for Audioset. It produces a 128-d
embedding of a 96ms slice of audio.
"""
model = _vgg(postprocess)
model.load_state_dict(torch.load(state_dict))
return model
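# Usage sketch ('vggish_weights.pth' is a hypothetical local checkpoint path):
#   model = vggish('vggish_weights.pth', postprocess=True)
#   model.eval()
#   emb = model(torch.randn(8, 1, 96, 64))  # [N, 1, 96, 64] matches the 512*4*6 flatten above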
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given a 2d grid map of '1's (land) and '0's (water), count the number of islands.
# An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically.
# You may assume all four edges of the grid are all surrounded by water.
#
# Example 1:
# 11110
# 11010
# 11000
# 00000
# Answer: 1
#
# Example 2:
# 11000
# 11000
# 00100
# 00011
# Answer: 3
class Solution(object):
# def numIslands(self, grid):
# """
# :type grid: List[List[str]]
# :rtype: int
# """
# if not grid:
# return 0
#
# def dfs(row ,col, i, j):
# if i < 0 or j < 0 or i >= row or j >= col or grid[i][j] != '1':
# return
# grid[i][j] = '0'
# dfs(row, col, i - 1, j)
# dfs(row, col, i + 1, j)
# dfs(row, col, i, j - 1)
# dfs(row, col, i, j + 1)
#
# grid = [list(r) for r in grid]
# row = len(grid)
# col = len(grid[0])
# count = 0
# for i in range(row):
# for j in range(col):
# if grid[i][j] == '1':
# dfs(row, col, i, j)
# count += 1
# return count
    # This approach fails in Python 3.x but works in Python 2.x.
# def numIslands(self, grid):
# grid = [list(r) for r in grid]
# def sink(i, j):
# if 0 <= i < len(grid) and 0 <= j < len(grid[i]) and grid[i][j] == '1':
# grid[i][j] = '0'
# map(sink, (i + 1, i - 1, i, i), (j, j, j + 1, j - 1))
# return 1
# return 0
# return [sink(i, j) for i in range(len(grid)) for j in range(len(grid[i]))]
    # map() behaves differently in Python 3.x and Python 2.x.
    # In Python 3.x map() returns a lazy iterator like <map object at 0x10f4923c8>,
    # which does not execute until it is consumed.
    # In Python 2.x map() returns a list like [0, 0, 0, 0], executed immediately.
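    # A quick demonstration of the difference:
    #   >>> m = map(lambda v: v + 1, [1, 2, 3])
    #   >>> m          # Python 3: a lazy map object, nothing has run yet
    #   <map object at 0x...>
    #   >>> list(m)    # consuming it executes the calls
    #   [2, 3, 4]
    # In Python 2, the same map() call would have returned [2, 3, 4] immediately.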
    # The version below works in Python 3.x.
def numIslands(self, grid):
grid = [list(r) for r in grid]
def sink(i, j):
if 0 <= i < len(grid) and 0 <= j < len(grid[i]) and grid[i][j] == '1':
grid[i][j] = '0'
list(map(sink, (i + 1, i - 1, i, i), (j, j, j + 1, j - 1)))
return 1
return 0
return sum(sink(i, j) for i in range(len(grid)) for j in range(len(grid[i])))
if __name__ == '__main__':
# print(Solution().numIslands(["11110","11010","11000","00000"]))
# print(Solution().numIslands(["11010","11010","11000","00001"]))
print(Solution().numIslands(["111","010","111"]))
# 47 / 47 test cases passed.
# Status: Accepted
# Runtime: 102 ms
# Your runtime beats 82.81 % of python submissions.
# Python 2 variant kept for reference: it relies on map() executing eagerly.
# Note it is defined at module level, so the `self` parameter is vestigial.
def numIslands(self, grid):
def sink(i, j):
if 0 <= i < len(grid) and 0 <= j < len(grid[i]) and grid[i][j] == '1':
grid[i][j] = '0'
map(sink, (i+1, i-1, i, i), (j, j, j+1, j-1))
return 1
return 0
return sum(sink(i, j) for i in range(len(grid)) for j in range(len(grid[i])))
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if not grid or not grid[0]:
return 0
root = []
m, n = len(grid), len(grid[0])
res = 0
grid = [list(r) for r in grid]
for i in range(m):
for j in range(n):
if grid[i][j] == '1':
                    # print(i, j)   # debug output
                    # print(grid)   # debug output
flag = True
if i > 0 and grid[i - 1][j] != '0':
flag = False
grid[i][j] = root[grid[i - 1][j]]
if j > 0 and grid[i][j - 1] != '0':
if flag:
grid[i][j] = root[grid[i][j - 1]]
else:
root[grid[i][j - 1]] = grid[i][j]
flag = False
if flag:
                        # print(i, j)   # debug output
grid[i][j] = res
root.append(res)
res += 1
return sum(root[i] == i for i in range(len(root)))
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login
from django.contrib import messages
# Create your views here.
def user_login(request):
    if request.user.is_authenticated():
        messages.info(request, 'You have already logged in')
        return HttpResponseRedirect('/')
    if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/')
return render(request, 'registration/login.html')
def home(request):
return render(request, 'users/home.html')
|
from django.http import HttpResponse, Http404, HttpResponseRedirect
from ChitChat.models import Message
from django.shortcuts import render
from ChitChat.forms import CreateForm, SearchForm
from django.db.models.base import ObjectDoesNotExist
#Routes Views based on GET, POST, PUT, or DELETE
#Views Specified must be set to the correct Action in KWARGS
def router(request, *args, **kwargs):
get_view = kwargs.pop('GET',None)
post_view = kwargs.pop('POST',None)
put_view = kwargs.pop('PUT',None)
delete_view = kwargs.pop('DELETE',None)
if request.method == 'GET' and get_view is not None:
return get_view(request, *args, **kwargs)
elif request.method == 'POST' and post_view is not None:
return post_view(request, *args, **kwargs)
elif request.method == 'PUT' and put_view is not None:
return put_view(request, *args, **kwargs)
elif request.method == 'DELETE' and delete_view is not None:
return delete_view(request, *args, **kwargs)
raise Http404
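# Usage sketch (a hypothetical urls.py entry): one URL pattern dispatching
# to different views by HTTP method.
#   url(r'^msg/new/$', router, {'GET': new_message, 'POST': message_post}),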
def message_get(request, *args, **kwargs):
try:
msg = Message.objects.get(id=int(args[0]))
try:
kids = Message.objects.filter(parent_id=msg.id)
except ObjectDoesNotExist:
kids = None
try:
parent = Message.objects.get(id=msg.parent_id)
except ObjectDoesNotExist:
parent = None
return render(request,'singleMessage.html',{'message':msg,'children':kids,'parent':parent})
except ObjectDoesNotExist:
return HttpResponse("No Message Found")
def message_getall(request):
try:
        all_msg = Message.objects.all()[:8]  # limit to 8
    except Exception:
all_msg = ({'user':'-','text':'N/A','id':'-1'},)
return render(request,'allMessages.html',{'messages':all_msg})
def message_search(request,*args,**kwargs):
if request.method == 'GET':
try:
search_msg = Message.objects.filter(text__icontains=args[0])
        except Exception:
search_msg = ({'user':'-','text':'N/A','id':'-1'},)
elif request.method == 'POST':
try:
search_msg = Message.objects.filter(text__icontains=request.POST.get('search'))
        except Exception:
search_msg = ({'user':'-','text':'N/A','id':'-1'},)
else:
        raise Http404
return render(request,'allMessages.html',{'messages':search_msg})
def message_post(request, *args, **kwargs):
#Post a new Message (via POST)
assert request.method == 'POST'
form = CreateForm(request.POST)
    if form.is_valid():
nm = request.POST.get("user")
ms = request.POST.get("message")
new_msg = Message(user=nm,text=ms)
if (request.POST.get("parent")):
try:
new_msg.parent = Message.objects.get(id=int(request.POST.get("parent")))
new_msg.save()
return HttpResponseRedirect('/msg/'+str(new_msg.id)+'/')
except ObjectDoesNotExist:
return HttpResponse("Invalid Parent ID!")
new_msg.save()
return HttpResponseRedirect('/msg/')
else:
raise Http404
def new_message(request, *args, **kwargs):
assert request.method == 'GET'
form = CreateForm()
if args:
if int(args[0]) > -1:
#Check that the message object exists for that ID and set the return form to that.
try:
msg_test = Message.objects.get(id=int(args[0]))
if msg_test:
form = CreateForm(initial={'parent':args[0]})
except Message.DoesNotExist:
pass
return render(request,'newMessage.html',{'form':form})
def search_message(request):
assert request.method == 'GET'
form = SearchForm()
return render(request,'searchMessage.html',{'form':form})
def show_single(request, *args, **kwargs):
assert request.method == 'GET'
try:
mess = Message.objects.get(id = int(args[0]))
except Message.DoesNotExist:
        raise Http404
return render(request,'message.html',{'msg':mess},content_type='text/plain')
|
from django.contrib import admin
from .models import Post  # whenever you create models, register them here
# adds posts section into admin page
admin.site.register(Post)
|
# 15-112, Summer 1, Homework 1.2
######################################
# Full name: Joyce Moon
# Andrew ID: seojinm
# Section: B
######################################
######### IMPORTANT NOTE #############
# You are not allowed to import any modules, or use loops, strings, lists, or recursion.
# Given an integer n, return the ones-digit of n,
# i.e. the first digit of n from the right.
def onesDigit(n):
return abs(n)%10
# Given an integer n, return the tens-digit of n,
# i.e. the 2nd digit of n from the right.
def tensDigit(n):
return (abs(n)//10)%10
# Given an integer n and k, return the (k+1)'th digit of n from the right.
# So k = 0 refers to the ones-digit, k = 1 refers to the tens-digit, etc.
# You can assume k is non-negative.
def kthDigit(n, k):
return (abs(n)//10**k)%10
# Given integers n, k and d, replace the kthDigit of n with d.
# You can assume k is non-negative, and d is an integer between 0 and 9.
def setKthDigit(n, k, d):
if n<0:
return n-(d-kthDigit(n,k))*(10**k)
return n+(d-kthDigit(n,k))*(10**k)
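# Worked example: setKthDigit(468, 1, 1) = 468 + (1 - 6)*10**1 = 418;
# the tens digit 6 is replaced by 1.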
# Given as input four int or float values representing the (x,y)
# coordinates of two points, return the distance between those points.
def distance(x1, y1, x2, y2):
return ((abs(x2-x1)**2)+(abs(y2-y1)**2))**0.5
# Given an integer n, round it to the nearest integer.
# You are not allowed to use the built-in round function.
def myRound(n):
if n%1==0:
return n
elif (n>0 and (abs(n)%1<0.5)) or (n<0 and (abs(n)%1>0.5)):
return (n//1)
elif (n>0 and (abs(n)%1>=0.5)) or (n<0 and (abs(n)%1<=0.5)):
return (n//1)+1
# Given an integer n, round it down to the nearest integer.
# You are not allowed to use anything from the math module.
def floor(n):
return n//1
# Given an integer n, round it to the nearest odd integer.
# In case of a tie, round down.
def nearestOdd(n):
if n%2==0:
return n-1
else:
return 2*(n//2)+1
# See here for the description of the function:
# https://www.cs.cmu.edu/~112/notes/hw1.html
def nearestBusStop(street):
if street%8<=4:
return 8*(street//8)
else:
return 8*(street//8+1)
# If you have written the functions correctly, you should not get any errors
# when you run this file, i.e., you should pass all the tests.
######################################################################
# ignore_rest: The autograder will ignore all code below here
######################################################################
import math
def almostEqual(d1, d2, epsilon=10**-7):
return abs(d1 - d2) < epsilon
def testOnesDigit():
print("Testing onesDigit()...", end="")
assert(onesDigit(0) == 0)
assert(onesDigit(789) == 9)
assert(onesDigit(7) == 7)
assert(onesDigit(-1234) == 4)
assert(onesDigit(-3) == 3)
print("Passed.")
def testTensDigit():
print("Testing tensDigit()...", end="")
assert(tensDigit(0) == 0)
assert(tensDigit(1) == 0)
assert(tensDigit(10) == 1)
assert(tensDigit(21) == 2)
assert(tensDigit(-1234) == 3)
assert(tensDigit(-3) == 0)
assert(tensDigit(-10) == 1)
print("Passed.")
def testKthDigit():
print("Testing kthDigit()...", end="")
assert(kthDigit(0,0) == 0)
assert(kthDigit(789, 0) == 9)
assert(kthDigit(789, 1) == 8)
assert(kthDigit(789, 2) == 7)
assert(kthDigit(789, 3) == 0)
assert(kthDigit(-1234, 3) == 1)
assert(kthDigit(-3, 1) == 0)
print("Passed.")
def testSetKthDigit():
print("Testing setKthDigit()...", end="")
assert(setKthDigit(468, 0, 1) == 461)
assert(setKthDigit(468, 1, 1) == 418)
assert(setKthDigit(468, 2, 1) == 168)
assert(setKthDigit(468, 3, 1) == 1468)
print("Passed.")
def testDistance():
print("Testing distance()...", end="")
assert(distance(0, 0, 0, 0) == 0)
assert(distance(0, 0, 100, 0) == 100)
assert(almostEqual(distance(1.1, 2.2, 3.3, -4.4), 6.957010852370434))
print("Passed.")
def testMyRound():
print("Testing myRound()...", end="")
assert(myRound(0) == 0)
assert(myRound(1) == 1)
assert(myRound(-1) == -1)
assert(myRound(1.1) == 1)
assert(myRound(1.5) == 2)
assert(myRound(1.9) == 2)
assert(myRound(-1.1) == -1)
assert(myRound(-1.5) == -1)
assert(myRound(-1.9) == -2)
assert(myRound(0.1) == 0)
assert(myRound(0.5) == 1)
assert(myRound(0.9) == 1)
assert(myRound(-0.1) == 0)
assert(myRound(-0.5) == 0)
assert(myRound(-0.9) == -1)
print("Passed.")
def testFloor():
print("Testing floor()...", end="")
assert(floor(0) == math.floor(0))
assert(floor(1) == math.floor(1))
assert(floor(-1) == math.floor(-1))
assert(floor(1.1) == math.floor(1.1))
assert(floor(1.5) == math.floor(1.5))
assert(floor(1.9) == math.floor(1.9))
assert(floor(-1.1) == math.floor(-1.1))
assert(floor(-1.5) == math.floor(-1.5))
assert(floor(-1.9) == math.floor(-1.9))
assert(floor(0.1) == math.floor(0.1))
assert(floor(0.5) == math.floor(0.5))
assert(floor(0.9) == math.floor(0.9))
assert(floor(-0.1) == math.floor(-0.1))
assert(floor(-0.5) == math.floor(-0.5))
assert(floor(-0.9) == math.floor(-0.9))
print("Passed.")
def testNearestOdd():
print("Testing nearestOdd()...", end="")
assert(nearestOdd(0) == -1)
assert(nearestOdd(13) == 13)
assert(nearestOdd(12.001) == 13)
assert(nearestOdd(12) == 11)
assert(nearestOdd(11.999) == 11)
assert(nearestOdd(-13) == -13)
assert(nearestOdd(-12.001) == -13)
assert(nearestOdd(-12) == -13)
assert(nearestOdd(-11.999) == -11)
print("Passed.")
def testNearestBusStop():
print("Testing nearestBusStop()...", end="")
assert(nearestBusStop(0) == 0)
assert(nearestBusStop(4) == 0)
assert(nearestBusStop(5) == 8)
assert(nearestBusStop(12) == 8)
assert(nearestBusStop(13) == 16)
assert(nearestBusStop(20) == 16)
assert(nearestBusStop(21) == 24)
print("Passed.")
def testAll():
testOnesDigit()
testTensDigit()
testKthDigit()
testSetKthDigit()
testDistance()
testMyRound()
testFloor()
testNearestOdd()
testNearestBusStop()
testAll()
|
import define
import requests
def rsi(pricedata, index):
    # Compute RSI over a circular buffer of closes; `index` marks the oldest slot.
    up = 0
    down = 0
    for i in range(define.rsinumber - 1):
        diff = pricedata[(index + 2 + i) % define.rsinumber] - pricedata[(index + i + 1) % define.rsinumber]
        if diff > 0:
            up = up + diff
        else:
            down = down - diff
    if down == 0:
        return 100  # all moves were gains; RSI convention, and avoids division by zero
    return 100 - 100 / (1 + (up / down))
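# Usage sketch (hypothetical values; define.rsinumber is the circular-buffer
# length, e.g. 15 closes for a 14-period RSI):
#   closes = [101.2, 100.8, 102.5, ...]   # len(closes) == define.rsinumber
#   value = rsi(closes, index)            # index marks the oldest slot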
def update_data(symbol ,nextcall,data , timestamp , nextcallbig , databig , nextcallsmall , datasmall):
    # EMA smoothing factors 2/(N+1) for the classic MACD(12, 26, 9) setup
    x52 = 2 / 27  # 26-period EMA
    x24 = 2 / 13  # 12-period EMA
    x18 = 2 / 10  # 9-period signal EMA
    if (timestamp > nextcallsmall) and 0:  # `and 0` disables this branch (dead code)
i = 0
while i == 0:
try:
result =requests.get('https://fapi.binance.com/fapi/v1/klines',{'symbol' : symbol , 'interval' : define.intervalsmall , 'limit' : 2}).json()
i = 1
except Exception as e:
print('connection error')
print(e)
i = 0
datasmall[define.smallbigmadata][datasmall[define.smallbigmaindex]] = float(result[0][4])
datasmall[define.smallbigmaindex] = (datasmall[define.smallbigmaindex] +1) % define.smallbigmacount
datasmall[define.smallsmallmadata][datasmall[define.smallsmallmaindex]] = float(result[0][4])
datasmall[define.smallsmallmaindex] = (datasmall[define.smallsmallmaindex] +1) % define.smallsmallmacount
datasmall[define.smallsmallma][0] = datasmall[define.smallsmallma][1]
datasmall[define.smallsmallma][1] = datasmall[define.smallsmallma][2]
datasmall[define.smallsmallma][2] = datasmall[define.smallsmallma][3]
datasmall[define.smallsmallma][3] = sum(datasmall[define.smallsmallmadata]) / define.smallsmallmacount
datasmall[define.smallsmallmaramp] = (datasmall[define.smallsmallma][3]-datasmall[define.smallsmallma][2]) * 3 +(datasmall[define.smallsmallma][3] - datasmall[define.smallsmallma][1]) + (datasmall[define.smallsmallma][3] - datasmall[define.smallsmallma][0])/3
datasmall[define.smallbigma][0] = datasmall[define.smallbigma][1]
datasmall[define.smallbigma][1] = datasmall[define.smallbigma][2]
datasmall[define.smallbigma][2] = datasmall[define.smallbigma][3]
datasmall[define.smallbigma][3] = sum(datasmall[define.smallbigmadata]) / define.smallbigmacount
datasmall[define.smallbigmaramp] = (datasmall[define.smallbigma][3]-datasmall[define.smallbigma][2]) * 3 +(datasmall[define.smallbigma][3] - datasmall[define.smallbigma][1]) + (datasmall[define.smallbigma][3] - datasmall[define.smallbigma][0])/3
nextcallsmall = result[1][6] +1
print('small data updated')
if(timestamp > nextcall):
i = 0
while i == 0:
try:
result =requests.get('https://fapi.binance.com/fapi/v1/klines',{'symbol' : symbol , 'interval' : define.interval , 'limit' : 2}).json()
i = 1
except Exception as e:
print('connection error')
print(e)
i = 0
data[define.ema52] = data[define.ema52]*(1-x52) + float(result[0][4])*x52
data[define.ema24] = data[define.ema24]*(1-x24) + float(result[0][4])*x24
data[define.signal18] = data[define.signal18]*(1-x18) + (data[define.ema24]-data[define.ema52])*x18
data[define.macd][0] = data[define.macd][1]
data[define.macd][1] = data[define.macd][2]
data[define.macd][2] = data[define.macd][3]
data[define.macd][3] = data[define.ema24]-data[define.ema52]
data[define.lastramp] = data[define.ramp]
data[define.price] = float(result[0][4])
data[define.ramp] = (data[define.macd][3]-data[define.macd][2])*3 + (data[define.macd][3]-data[define.macd][1]) + (data[define.macd][3]-data[define.macd][0])/3
data[define.hostogramhistory][0] = data[define.hostogramhistory][1]
data[define.hostogramhistory][1] = data[define.hostogramhistory][2]
data[define.hostogramhistory][2] = data[define.hostogramhistory][3]
data[define.hostogramhistory][3] = data[define.ema24] - data[define.ema52] - data[define.signal18]
data[define.pricedata][data[define.priceindex]] = float(result[0][4])
data[define.priceindex] = (data[define.priceindex] + 1)%define.rsinumber
data[define.rsi] = rsi(data[define.pricedata] , data[define.priceindex])
        data[define.bigmadata][data[define.bigmaindex]] = float(result[0][4])  # closed candle, as above
data[define.bigmaindex] = (data[define.bigmaindex] +1) % define.bigmacount
        data[define.smallmadata][data[define.smallmaindex]] = float(result[0][4])
data[define.smallmaindex] = (data[define.smallmaindex] +1) % define.smallmacount
data[define.smallma] = sum( data[define.smallmadata]) / define.smallmacount
data[define.bigma] = sum( data[define.bigmadata]) / define.bigmacount
nextcall = result[1][6] +1
nextcallsmall = result[1][6] +1
print('data updated')
if(timestamp > nextcallbig):
i = 0
while i == 0:
try:
result =requests.get('https://fapi.binance.com/fapi/v1/klines',{'symbol' : symbol , 'interval' : define.intervalbig , 'limit' : 2}).json()
i = 1
except Exception as e:
print('connection error')
print(e)
i = 0
databig[define.bigema52] = databig[define.bigema52]*(1-x52) + float(result[0][4])*x52
databig[define.bigema24] = databig[define.bigema24]*(1-x24) + float(result[0][4])*x24
databig[define.bigsignal18] = databig[define.bigsignal18]*(1-x18) + (databig[define.bigema24] - databig[define.bigema52])*x18
databig[define.bigtwolasthistogram] =databig[define.biglasthistogram]
databig[define.biglasthistogram] = databig[define.bighistogram]
databig[define.bighistogram] = databig[define.bigema24] - databig[define.bigema52] - databig[define.bigsignal18]
databig[define.bigmacddata][0] =databig[define.bigmacddata][1]
databig[define.bigmacddata][1] =databig[define.bigmacddata][2]
databig[define.bigmacddata][2] =databig[define.bigmacddata][3]
databig[define.bigmacddata][3] = databig[define.bigema24] - databig[define.bigema52]
databig[define.bigmacdramp] = (databig[define.bigmacddata][3] - databig[define.bigmacddata][2]) * 3 + (databig[define.bigmacddata][3] - databig[define.bigmacddata][1]) + (databig[define.bigmacddata][3] - databig[define.bigmacddata][0]) / 3
databig[define.bigsignaldata][0] = databig[define.bigsignaldata][1]
databig[define.bigsignaldata][1] = databig[define.bigsignaldata][2]
databig[define.bigsignaldata][2] = databig[define.bigsignaldata][3]
databig[define.bigsignaldata][3] = databig[define.bigsignal18]
databig[define.bigsignalramp] = (databig[define.bigsignaldata][3] - databig[define.bigsignaldata][2]) * 3 + (databig[define.bigsignaldata][3] - databig[define.bigsignaldata][1]) + (databig[define.bigsignaldata][3] - databig[define.bigsignaldata][0]) / 3
nextcallbig = result[1][6] +1
print('big data updated')
return [data , nextcall , databig , nextcallbig , datasmall , nextcallsmall]
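# Usage sketch (the state containers and index constants come from define.py;
# the caller supplies the symbol and millisecond timestamps):
#   state = update_data('BTCUSDT', nextcall, data, timestamp,
#                       nextcallbig, databig, nextcallsmall, datasmall)
#   data, nextcall, databig, nextcallbig, datasmall, nextcallsmall = state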
|
#from numpy.linalg import matrix_rank as mrank
from nuclear_alignment import nuclear_alignment
import transmission
import Message
import numpy as np
import sys
## Parameters
num_nodes = 16
min_eps = .1
num_msgs = 2
#
eps_vec = np.random.uniform(low=min_eps,size=num_nodes)
print('Number of receivers (nodes):', num_nodes)
print()
print('Erasures are:', eps_vec)
# generate random messages. Each row i is desired by receiver i
message_matrix = np.random.randint(1, 256, [num_nodes, num_msgs])  # random_integers(1, 255) is deprecated
Messages = Message.generate_messages(message_matrix)
# store final received messages. goal is to "de-NaN" by the end
final_messages = np.empty((num_nodes, num_msgs))
## Round Robin to start
msg_vec = Messages[:,0] # first round of messages
sym_vec = np.empty(num_nodes, dtype=object)
for i in range(num_nodes):
sym_vec[i] = Message.Symbol([1], [msg_vec[i]])
print()
print('Round Robin symbols are:', sym_vec)
R = transmission.transmit_symbols(sym_vec, eps_vec)
# print()
# print('Received symbols are:', R)
# if a desired message is received, move it to final_messages
for i in range(num_nodes):
for sym in R[i]:
if sym.msg_inds[0][0] == i: # if message is intended for receiver i
final_messages[i,sym.msg_inds[0][1]] = sym.val
Antidotes = transmission.update_antidotes(R)
print()
print('Antidotes are:', Antidotes)  # -1 denotes node is out of Antidote set
Interferers, interf_map = transmission.compute_interferers(Antidotes)
print()
print('Remaining receivers are:', interf_map)
print('Interference indicator sets are')
print(Interferers)
#### for quick test, should be replaced with symbol/message data structures
K = len(interf_map)
W = np.matrix(np.zeros((K,1)))
A = np.matrix(np.zeros((K, K)))
I = (Interferers - 1)*-1
I[np.diag_indices_from(I)] = 0
for index,node in enumerate(interf_map):
W[index,0] = sym_vec[node].val
for jindex,i in enumerate(I[index,:]):
if i == 1:
A[index,jindex] = sym_vec[interf_map[jindex]].val
## nuclear norm approx
num_transmissions = 2*K # int(K/2) + 1
print()
print('num transmissions is', num_transmissions)
V, U = nuclear_alignment(Interferers, num_transmissions, .1, 1)
print()
print('U.T*V is:')
print(U.T * V)
print()
for k in range(K):
W_dec = ((U.T*V)[k,:]*W - (U.T*V)[k,:]*A[k,:].T)/(U.T*V)[k,k]
    print('Message %d truth: %d decoded: %.2f' % (k, W[k], W_dec))
|
import asyncio
import logging
import re
import threading
import time
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from multiprocessing import current_process
import pyppeteer
import requests
from pyppeteer import errors, launch
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("pyppeteer").setLevel(logging.ERROR)
logging.getLogger("websockets").setLevel(logging.ERROR)
log = logging.getLogger(__name__)
is_url_regex = re.compile(
r"^(?:http|ftp)s?://" # http:// or https://
r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain...
r"localhost|" # localhost...
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$",
re.IGNORECASE,
)
class RequestResult:
    # Simple result holder. Note these are class-level attributes; the mutable
    # `links` list in particular would be shared across instances.
    url = ""
    parent = ""
    duration = ""
    response_code = ""
    response_reason = ""
    response_size = None
    response_content_type = ""
    links = []
async def _obtain_resources(url: str, parent_url: str, user_agent: str):
links = set()
start = time.time()
headers = {"User-Agent": user_agent}
page = requests.get(url, verify=False, timeout=60, headers=headers)
response_code = page.status_code
response_reason = page.reason
response_size = len(page.content)
response_content_type = (
page.headers["content-type"] if "content-type" in page.headers else "unknown"
)
duration = time.time() - start
async def request_callback(request):
links.add(request.url)
await request.continue_()
browser = None
if "content-type" in page.headers and "text/html" in page.headers["content-type"]:
try:
browser = await pyppeteer.launch({"headless": True})
py_page = await browser.newPage()
py_page.on("request", request_callback)
await py_page.setUserAgent(user_agent)
await py_page.setRequestInterception(True)
await py_page.goto(url)
# Select all non-empty links.
a_href_elems = await py_page.querySelectorAllEval(
"a", "(nodes => nodes.map(n => n.href))"
)
for href in a_href_elems:
if re.match(is_url_regex, href) is not None:
links.add(href)
finally:
if browser:
await browser.close()
result = {
"url": url,
"parent_url": parent_url,
"duration": duration,
"response_code": response_code,
"response_reason": response_reason,
"response_size": response_size,
"response_content_type": response_content_type,
"links": links,
"process_name": current_process().name,
}
return result
def get_links(url, parent, user_agent) -> dict:
    # _obtain_resources returns a plain dict, so annotate accordingly.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    return loop.run_until_complete(
        _obtain_resources(url, parent, user_agent)
    )
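# Usage sketch (hypothetical arguments):
#   result = get_links("https://example.com", parent="", user_agent="Mozilla/5.0")
#   print(result["response_code"], len(result["links"]))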
|
from django.shortcuts import render, redirect
from django.contrib.auth import logout, login, authenticate
from django.views import View
from authentication.forms import SignUpForm, LoginForm
from subreddit.models import Subreddit
from django.contrib import messages
from subreddit.helper import random_subreddits, subreddit_search
from post.models import Post
# Querysets are lazy, so no truthiness guard is needed here (the old guard also
# left the name undefined when the table was empty).
search_subreddits = Subreddit.objects.all()
class IndexView(View):
def get(self, request):
        subreddit_filter = subreddit_search(request)
        subreddits = random_subreddits()
search_subreddits = Subreddit.objects.all()
is_home = True
posts = []
if request.user.is_authenticated:
user_subreddits = Subreddit.objects.filter(members=request.user)
for sub in user_subreddits:
posts.extend(
list(Post.objects.filter(
subreddit=sub).order_by(
'created_at').reverse()))
else:
post_list = list(Post.objects.all())
            post_list = sorted(post_list, key=lambda i: 0 if i.getPopularity() == 0 else -1 / i.getPopularity())
posts = post_list
context = {
'subreddits': subreddits,
'search_subreddits': search_subreddits,
'subreddit_filter': subreddit_filter,
'posts': posts,
'is_home': is_home
}
return render(
request, 'main.html', context)
class SignUpView(View):
def get(self, request):
form = SignUpForm()
subreddit_filter = subreddit_search(request)
context = {
'form': form,
'subreddit_filter': subreddit_filter,
'search_subreddits': search_subreddits
}
return render(
request, 'authentication/signupform.html', context
)
def post(self, request):
form = SignUpForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
user = authenticate(
request, username=username, password=password)
if user:
login(request, user)
return redirect('index')
context = {'form': form}
return render(
request, 'authentication/signupform.html', context)
class LoginView(View):
def get(self, request):
form = LoginForm()
title = 'Login'
subreddit_filter = subreddit_search(request)
context = {
'form': form,
'title': title,
'subreddit_filter': subreddit_filter,
'search_subreddits': search_subreddits
}
return render(
request, 'authentication/generic_form.html', context
)
def post(self, request):
form = LoginForm(request.POST)
title = 'Login'
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(
request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('index')
else:
messages.error(request, 'Username or Password is incorrect.')
context = {'form': form, 'title': title}
return render(
request, 'authentication/generic_form.html', context)
context = {'form': form}
return render(
request, 'authentication/index.html', context)
class LogoutView(View):
def get(self, request):
logout(request)
return redirect('/')
def error_500_view(request):
return render(request, '500.html')
def error_404_view(request, exception):
return render(request, '404.html')
|
from typing import List

from exceptions.resource_not_found import ResourceNotFound
from entities.erequest import Erequest
from daos.erequest_dao import ErequestDAO
from services.erequest_service import ErequestService
from entities.manager import Manager
from entities.employee import Employee
from daos.employee_dao import EmployeeDAO
from services.employee_service import EmployeeService
class ErequestServiceImpl(ErequestService):
def __init__(self, erequest_dao: ErequestDAO):
self.erequest_dao = erequest_dao
    def get_all_requests_by_eid(self, employee_id: int) -> List[Erequest]:
return self.erequest_dao.get_all_requests_by_eid(employee_id)
def create_request(self, erequest: Erequest) -> Erequest:
return self.erequest_dao.create_request(erequest)
    def get_all_requests(self) -> List[Erequest]:
return self.erequest_dao.get_all_requests()
def update_request(self, erequest_id: int, rstatus: str, message: str) -> Erequest:
erequest = self.erequest_dao.get_request_by_rid(erequest_id)
return self.erequest_dao.update_request(erequest, rstatus, message)
# for statistics
    def get_report_for_all(self) -> List[dict]:
return self.erequest_dao.get_report_for_all()
|
import numpy as np
score_title=np.dtype({'names':['name','chinese','math','english'],'formats':['S32','i', 'i', 'i']})
score=np.array([('zhangfei', 68,65,30),('guanyu',95,76,98),('liubei',98,86,88),('dianwei',90,88,77),
('xuchu',80,90,90)], dtype=score_title)
chineses=score[:]['chinese']
maths=score[:]['math']
englishes=score[:]['english']
total=score[:]['chinese']+score[:]['math']+score[:]['english']
print('Chinese average:', np.mean(chineses))
print('Math average:', np.mean(maths))
print('English average:', np.mean(englishes))
print('Chinese minimum:', np.amin(chineses))
print('Math minimum:', np.amin(maths))
print('English minimum:', np.amin(englishes))
print('Chinese maximum:', np.amax(chineses))
print('Math maximum:', np.amax(maths))
print('English maximum:', np.amax(englishes))
print('Chinese variance:', np.var(chineses))
print('Math variance:', np.var(maths))
print('English variance:', np.var(englishes))
print('Chinese std dev:', np.std(chineses))
print('Math std dev:', np.std(maths))
print('English std dev:', np.std(englishes))
print('Total scores, sorted:', np.sort(total))
|
import copy
import pytest
import torch
from common_utils import assert_equal
from torchvision.models.detection import _utils, backbone_utils
from torchvision.models.detection.transform import GeneralizedRCNNTransform
class TestModelsDetectionUtils:
def test_balanced_positive_negative_sampler(self):
sampler = _utils.BalancedPositiveNegativeSampler(4, 0.25)
# keep all 6 negatives first, then add 3 positives, last two are ignore
matched_idxs = [torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, -1, -1])]
pos, neg = sampler(matched_idxs)
# we know the number of elements that should be sampled for the positive (1)
# and the negative (3), and their location. Let's make sure that they are
# there
assert pos[0].sum() == 1
assert pos[0][6:9].sum() == 1
assert neg[0].sum() == 3
assert neg[0][0:6].sum() == 3
def test_box_linear_coder(self):
box_coder = _utils.BoxLinearCoder(normalize_by_size=True)
# Generate a random 10x4 boxes tensor, with coordinates < 50.
boxes = torch.rand(10, 4) * 50
boxes.clamp_(min=1.0) # tiny boxes cause numerical instability in box regression
boxes[:, 2:] += boxes[:, :2]
proposals = torch.tensor([0, 0, 101, 101] * 10).reshape(10, 4).float()
rel_codes = box_coder.encode(boxes, proposals)
pred_boxes = box_coder.decode(rel_codes, boxes)
        assert torch.allclose(proposals, pred_boxes)
@pytest.mark.parametrize("train_layers, exp_froz_params", [(0, 53), (1, 43), (2, 24), (3, 11), (4, 1), (5, 0)])
def test_resnet_fpn_backbone_frozen_layers(self, train_layers, exp_froz_params):
# we know how many initial layers and parameters of the network should
# be frozen for each trainable_backbone_layers parameter value
# i.e. all 53 params are frozen if trainable_backbone_layers=0
        # and first 24 params are frozen if trainable_backbone_layers=2
model = backbone_utils.resnet_fpn_backbone("resnet50", weights=None, trainable_layers=train_layers)
# boolean list that is true if the param at that index is frozen
is_frozen = [not parameter.requires_grad for _, parameter in model.named_parameters()]
# check that expected initial number of layers are frozen
assert all(is_frozen[:exp_froz_params])
def test_validate_resnet_inputs_detection(self):
# default number of backbone layers to train
ret = backbone_utils._validate_trainable_layers(
is_trained=True, trainable_backbone_layers=None, max_value=5, default_value=3
)
assert ret == 3
# can't go beyond 5
with pytest.raises(ValueError, match=r"Trainable backbone layers should be in the range"):
ret = backbone_utils._validate_trainable_layers(
is_trained=True, trainable_backbone_layers=6, max_value=5, default_value=3
)
# if not trained, should use all trainable layers and warn
with pytest.warns(UserWarning):
ret = backbone_utils._validate_trainable_layers(
is_trained=False, trainable_backbone_layers=0, max_value=5, default_value=3
)
assert ret == 5
def test_transform_copy_targets(self):
transform = GeneralizedRCNNTransform(300, 500, torch.zeros(3), torch.ones(3))
image = [torch.rand(3, 200, 300), torch.rand(3, 200, 200)]
targets = [{"boxes": torch.rand(3, 4)}, {"boxes": torch.rand(2, 4)}]
targets_copy = copy.deepcopy(targets)
out = transform(image, targets) # noqa: F841
assert_equal(targets[0]["boxes"], targets_copy[0]["boxes"])
assert_equal(targets[1]["boxes"], targets_copy[1]["boxes"])
def test_not_float_normalize(self):
transform = GeneralizedRCNNTransform(300, 500, torch.zeros(3), torch.ones(3))
image = [torch.randint(0, 255, (3, 200, 300), dtype=torch.uint8)]
targets = [{"boxes": torch.rand(3, 4)}]
with pytest.raises(TypeError):
out = transform(image, targets) # noqa: F841
if __name__ == "__main__":
pytest.main([__file__])
|
from sys import stdin
line = stdin.readline().strip()
|
import scrape
import share
def main():
scrape.download_posts('dankmemes')
share.messenger()
if __name__ == '__main__':
main()
|
"""Unit test for treadmill.appcfg.configure.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import shutil
import sys
import tempfile
import unittest
import mock
import treadmill
from treadmill.appcfg import configure as app_cfg
from treadmill.apptrace import events
class AppCfgConfigureTest(unittest.TestCase):
"""Tests for teadmill.appcfg.configure"""
def setUp(self):
# Access protected module _base_service
# pylint: disable=W0212
self.root = tempfile.mkdtemp()
self.tm_env = mock.Mock(
apps_dir=os.path.join(self.root, 'apps'),
cleanup_dir=os.path.join(self.root, 'cleanup'),
running_tombstone_dir=os.path.join(self.root, 'tombstones',
'running')
)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@unittest.skipUnless(sys.platform.startswith('linux'), 'Requires Linux')
@mock.patch('pwd.getpwnam', mock.Mock(auto_spec=True))
@mock.patch('shutil.copyfile', mock.Mock(auto_spec=True))
@mock.patch('treadmill.appcfg.manifest.load', auto_spec=True)
@mock.patch('treadmill.appevents.post', mock.Mock(auto_spec=True))
@mock.patch('treadmill.fs.write_safe', mock.mock_open())
@mock.patch('treadmill.subproc.get_aliases', mock.Mock(return_value={}))
@mock.patch('treadmill.subproc.resolve', mock.Mock(return_value='mock'))
@mock.patch('treadmill.supervisor.create_service', auto_spec=True)
@mock.patch('treadmill.utils.rootdir',
mock.Mock(return_value='/treadmill'))
def test_configure_linux(self, mock_create_svc, mock_load):
"""Tests that appcfg.configure creates necessary s6 layout."""
manifest = {
'proid': 'foo',
'environment': 'dev',
'shared_network': False,
'cpu': '100',
'memory': '100M',
'disk': '100G',
'services': [
{
'name': 'web_server',
'command': '/bin/true',
'restart': {
'limit': 5,
'interval': 60,
},
},
],
'environ': [
{
'name': 'Hello',
'value': 'World!',
},
],
'zookeeper': 'foo',
'cell': 'cell',
'system_services': [],
'endpoints': [
{
'name': 'http',
'port': '8000',
},
],
'name': 'proid.myapp#0',
'uniqueid': 'AAAAA',
}
mock_load.return_value = manifest
app_unique_name = 'proid.myapp-0-00000000AAAAA'
app_dir = os.path.join(self.root, 'apps', app_unique_name)
mock_create_svc.return_value.data_dir = os.path.join(app_dir, 'data')
app_cfg.configure(self.tm_env, '/some/event', 'linux')
mock_load.assert_called_with('/some/event')
mock_create_svc.assert_called_with(
self.tm_env.apps_dir,
name=app_unique_name,
app_run_script=mock.ANY,
downed=False,
monitor_policy={
'limit': 0,
'interval': 60,
'tombstone': {
'uds': False,
'path': self.tm_env.running_tombstone_dir,
'id': 'proid.myapp#0'
}
},
userid='root',
environ={},
environment='dev'
)
treadmill.fs.write_safe.assert_called_with(
os.path.join(app_dir, 'data', 'app.json'),
mock.ANY,
mode='w',
permission=0o644
)
shutil.copyfile.assert_called_with(
'/some/event',
os.path.join(app_dir, 'data', 'manifest.yml')
)
treadmill.appevents.post.assert_called_with(
mock.ANY,
events.ConfiguredTraceEvent(
instanceid='proid.myapp#0',
uniqueid='AAAAA',
payload=None
)
)
@unittest.skipUnless(sys.platform.startswith('linux'), 'Requires Linux')
@mock.patch('pwd.getpwnam', mock.Mock(auto_spec=True))
@mock.patch('shutil.copyfile', mock.Mock(auto_spec=True))
@mock.patch('shutil.rmtree', mock.Mock())
@mock.patch('treadmill.appcfg.manifest.load', auto_spec=True)
@mock.patch('treadmill.appevents.post', mock.Mock(auto_spec=True))
@mock.patch('treadmill.fs.write_safe', mock.mock_open())
@mock.patch('treadmill.subproc.get_aliases', mock.Mock(return_value={}))
@mock.patch('treadmill.subproc.resolve', mock.Mock(return_value='mock'))
@mock.patch('treadmill.supervisor.create_service', auto_spec=True)
@mock.patch('treadmill.utils.rootdir',
mock.Mock(return_value='/treadmill'))
def test_configure_linux_event_rm(self, mock_create_svc, mock_load):
"""Tests when event file is removed when copied."""
manifest = {
'proid': 'foo',
'environment': 'dev',
'shared_network': False,
'cpu': '100',
'memory': '100M',
'disk': '100G',
'services': [
{
'name': 'web_server',
'command': '/bin/true',
'restart': {
'limit': 5,
'interval': 60,
},
},
],
'system_services': [],
'endpoints': [
{
'name': 'http',
'port': '8000',
},
],
'environ': [
{
'name': 'Hello',
'value': 'World!',
},
],
'cell': 'cell',
'zookeeper': 'foo',
'name': 'proid.myapp#0',
'uniqueid': 'AAAAA',
}
mock_load.return_value = manifest
app_unique_name = 'proid.myapp-0-00000000AAAAA'
app_dir = os.path.join(self.root, 'apps', app_unique_name)
mock_create_svc.return_value.directory = app_dir
mock_create_svc.return_value.data_dir = os.path.join(app_dir, 'data')
shutil.copyfile.side_effect = IOError(2, 'No such file or directory')
app_cfg.configure(self.tm_env, '/some/event', 'linux')
mock_load.assert_called_with('/some/event')
mock_create_svc.assert_called_with(
self.tm_env.apps_dir,
name=app_unique_name,
app_run_script=mock.ANY,
downed=False,
monitor_policy={
'limit': 0,
'interval': 60,
'tombstone': {
'uds': False,
'path': self.tm_env.running_tombstone_dir,
'id': 'proid.myapp#0'
}
},
userid='root',
environ={},
environment='dev'
)
shutil.copyfile.assert_called_with(
'/some/event',
os.path.join(app_dir, 'data', 'manifest.yml')
)
treadmill.fs.write_safe.assert_not_called()
shutil.rmtree.assert_called_with(app_dir)
treadmill.appevents.post.assert_not_called()
if __name__ == '__main__':
unittest.main()
|
def is_positive(number):
    return number > 0
def sum_divisors(number):
divisors = 0
for divisor in range(1, number):
if number % divisor == 0:
divisors += divisor
return divisors
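# e.g. sum_divisors(28) == 1 + 2 + 4 + 7 + 14 == 28, so 28 is perfect.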
def perfect_number(number):
    if is_positive(number) and sum_divisors(number) == number:
return 'We have a perfect number!'
else:
return "It's not so perfect."
number = int(input())
print(perfect_number(number))
|
import torch.nn as nn
import torch
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
self.nn_layers = nn.Sequential(
# ========================================================== #
            # Fully connected layers.
            # You can stack as many layers as you want.
            # Note that the first layer's in_features must match the data's dim,
            # and the last layer's out_features must match the label's dim.
nn.Linear(in_features=223, out_features=512),
nn.LeakyReLU(),
nn.Dropout(0.2),
nn.Linear(in_features=512, out_features=512),
nn.LeakyReLU(),
nn.Dropout(0.2),
nn.Linear(in_features=512, out_features=1024),
nn.LeakyReLU(),
nn.Dropout(0.2),
nn.Linear(in_features=1024, out_features=20),
# ========================================================== #
)
def forward(self, x):
        # pass the data through the stacked layers; no need to rewrite this
x = self.nn_layers(x)
return x
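# Usage sketch (hypothetical batch): 223-dim inputs produce 20-dim outputs.
#   model = MyModel()
#   out = model(torch.randn(32, 223))  # out.shape == (32, 20)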
|
# -*- coding: utf-8 -*-
"""These settings overrides what's in settings/general.py
"""
from . import general
# Extend the middleware: mainly loads system config and account info
MIDDLEWARE_CLASSES = (
# Use GAE ndb
'google.appengine.ext.ndb.django_middleware.NdbDjangoMiddleware',
) + general.MIDDLEWARE_CLASSES + (
'custom.middleware.sysconf.AppOptionsMiddleware',
'custom.middleware.account.AccountMiddleware',
)
# Template context processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.core.context_processors.tz',
'custom.context_processors.options',
'custom.context_processors.author',
)
# To extend installed apps settings
INSTALLED_APPS = general.INSTALLED_APPS + ('base', 'admin')
# Recipients of traceback emails and other notifications.
ADMINS = (
('Zivee', 'xu@zivee.cn'),
)
MANAGERS = ADMINS
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Use siged cookie
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
# Use Memcached on Google App Engine
CACHES = {
'default': {
'BACKEND': 'custom.cache.GaeMemcache'
}
}
# SECURITY WARNING: don't run with debug turned on in production!
# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = TEMPLATE_DEBUG = True
# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = True
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
OAUTH2CONFIGS = {
'google':
{
"web":
{
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"client_secret": "nICk7kupYpusNgXsQraZm9py",
"token_uri": "https://accounts.google.com/o/oauth2/token",
"client_email": "812484405187@developer.gserviceaccount.com",
"redirect_uris": [
"http://www.zivee.tk/admin/oauth2/google/"
],
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/812484405187-flmqjeilq50rg055a3lhn1g0u47utu6p@developer.gserviceaccount.com",
"client_id": "812484405187.apps.googleusercontent.com",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"javascript_origins": [
"http://www.zivee.tk/"
]
},
"scope": ['https://www.googleapis.com/auth/plus.me',
'https://www.googleapis.com/auth/userinfo.profile']
}
}
# This second assignment overrides the production config above with
# localhost development credentials.
OAUTH2CONFIGS = {
'google':
{
"web":
{
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"client_secret": "9rhPKz9hOvWg0kAy9NjHggXQ",
"token_uri": "https://accounts.google.com/o/oauth2/token",
"client_email": "812484405187-flmqjeilq50rg055a3lhn1g0u47utu6p@developer.gserviceaccount.com",
"redirect_uris": [
"http://localhost:8080/admin/oauth2/google/"
],
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/812484405187-flmqjeilq50rg055a3lhn1g0u47utu6p@developer.gserviceaccount.com",
"client_id": "812484405187-flmqjeilq50rg055a3lhn1g0u47utu6p.apps.googleusercontent.com",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"javascript_origins": [
"http://localhost:8080"
]
},
"scope": ['https://www.googleapis.com/auth/plus.me',
'https://www.googleapis.com/auth/userinfo.profile']
}
}
|
# https://www.geeksforgeeks.org/convert-csv-to-json-using-python/
import csv
import json
def make_json(csvFilePath, jsonFilePath):
jsonArray = []
    # open a csv reader called DictReader
with open(csvFilePath, encoding='utf-8') as csvf:
csvReader = csv.DictReader(csvf)
        # convert each csv row into a python dict
for row in csvReader:
obj = {
"step": int(row['Step']),
"duration": int(row['Duration']),
"r": {
"freq": float(row['R freq']),
"note": row['R note']
},
"g": {
"freq": float(row['G freq']),
"note": row['G note']
},
"b": {
"freq": float(row['B freq']),
"note": row['B note']
}
}
            # add this python dict to the json array
jsonArray.append(obj)
# Open a json writer, and use the json.dumps()
# function to dump data
with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
jsonf.write(json.dumps(jsonArray, indent=4))
# Driver Code
csvFilePath = r'HomeFreqvalues02.csv'
jsonFilePath = r'home-freqs.json'
make_json(csvFilePath, jsonFilePath)
|
from django.conf.urls import url
from pruebas import views
urlpatterns = [
url(
r'^models/(?P<datatxt_id>[A-Za-z0-9\-]+)/test/$',
views.model_test,
name='test'
),
url(
r'^models/(?P<datatxt_id>[A-Za-z0-9\-]+)/results/$',
views.ClassifierModelList.as_view(),
name='test-results'
),
url(
r'^document-group/(?P<doc_pk>[0-9\-]+)/test/$',
views.BaseDocumentTestList.as_view(),
name='document-group-test-list'
),
url(
r'^document-group/(?P<dg_pk>[0-9\-]+)/test/(?P<pk>[0-9\-]+)/$',
views.BaseDocumentTestDetails.as_view(),
name='document-group-test-details'
),
url(
r'^document-group/(?P<dg_pk>[0-9\-]+)/'
r'run-test/(?P<datatxt_id>[A-Za-z0-9\-]+)/$',
views.model_document_group,
name='document-group-run-test'
),
url(
r'^document/(?P<doc_pk>[0-9\-]+)/test/'
r'(?P<test_pk>[0-9\-]+)/$',
views.DocumentAnnotationDetails.as_view(),
name='document-test-details'
),
# model_document_group
# url(
# r'^models/(?P<datatxt_id>[A-Za-z0-9\-]+)/results/(?P<test_id>[A-Za-z0-9\-]+)/$',
# views.ClassifierModelDetail.as_view(),
# name='test-result'
# ),
]
|
import re
import unittest
import validator.utils
class EntryTest(unittest.TestCase):
def __init__(self, test_name, entry, test_class):
unittest.TestCase.__init__(self, test_name)
self.entry = entry
if 'dn' in entry:
self.dn = entry['dn'][0]
else:
self.dn = None
if 'objectClass' in entry:
self.objects = entry['objectClass']
else:
self.objects = []
self.test_class = test_class
self.schema = __import__('%s.data' %(test_class,)).data.schema
self.types = __import__('%s.types' %(test_class,)).types
def test_object_class(self):
'''Verifying the object class'''
message = "ERROR: The entry %s does not contain any object class" % (self.dn)
self.assertTrue('objectClass' in self.entry , message)
if 'objectClass' in self.entry:
status = True
message = ""
for obj in self.entry['objectClass']:
if not self.types.is_ObjectClass(obj):
message = message + validator.utils.message_generator("ERROR","E021",self.dn,"NA",obj)
status = False
self.assertTrue(status, message)
def test_mandatory_attributes(self):
"""Verifying the existence of mandatory attributes."""
status = True
message = ""
for obj in self.objects:
if obj in self.schema:
for attribute in self.schema[obj]:
if attribute == 'GLUE2GroupID' and attribute not in self.entry and 'GLUE2GroupName' not in self.entry:
message = message + validator.utils.message_generator("WARNING","W034",self.dn,attribute,"NA")
status = False
elif self.test_class != 'egi-glue2' and self.schema[obj][attribute][2] and attribute not in self.entry:
message = message + validator.utils.message_generator("WARNING","W034",self.dn,attribute,"NA")
status = False
else:
if self.schema[obj][attribute][2] == 'Mandatory' and attribute not in self.entry:
message = message + validator.utils.message_generator("WARNING","W034",self.dn,attribute,"NA")
status = False
elif self.schema[obj][attribute][2] == 'Recommended' and attribute not in self.entry:
message = message + validator.utils.message_generator("INFO","I095",self.dn,attribute,"NA")
status = False
elif self.schema[obj][attribute][2] == 'Undesirable' and attribute in self.entry:
message = message + validator.utils.message_generator("WARNING","W034",self.dn,attribute,"NA")
status = False
self.assertTrue(status, message)
def test_single_valued(self):
"""Verifying single-valued attributes only have one value."""
status = True
message = ""
for obj in self.objects:
if obj in self.schema:
for attribute in self.schema[obj]:
if self.schema[obj][attribute][1] and attribute in self.entry and len(self.entry[attribute]) > 1:
message = message + \
validator.utils.message_generator("WARNING","W036",self.dn,attribute,self.entry[attribute])
status = False
self.assertTrue(status, message)
def test_data_types(self):
"""Validating data types."""
status = True
message = ""
for obj in self.objects:
if obj in self.schema:
for attribute in self.entry:
if attribute in self.schema[obj] and attribute != "GLUE2EntityOtherInfo":
data_type = self.schema[obj][attribute][0]
for value in self.entry[attribute]:
check = getattr(self.types, 'is_' + data_type)
if not check(value):
message = message + validator.utils.message_generator\
("WARNING","W037",self.dn,attribute,self.entry[attribute],\
"Expected type is %s" % data_type)
status = False
self.assertTrue(status, message)
def test_empty_attributes(self):
"""Verifying that attributes are not empty."""
status = True
message = ""
for obj in self.objects:
if obj in self.schema:
for attribute in self.entry:
if attribute in self.schema[obj]:
                        for value in self.entry[attribute]:
                            if value == "":
                                message = message + validator.utils.message_generator\
                                ("WARNING","W038",self.dn,attribute,"empty!")
                                status = False
self.assertTrue(status, message)
|
# tuple methods: 'count', 'index'
a = ('math', 'history', 'bahasa indonesia', 'math')
# count() returns how many times the given value appears in the tuple
print(a.count('math'))
# index() returns the position of a value in the tuple; the optional start and
# stop parameters work much like slicing. If the value is not found, an error
# is raised indicating the value does not exist.
print(a.index('bahasa indonesia'))
print(a.index('bahasa indonesia', 2, 5))
print(help(tuple.index))
# Tuples do not support item assignment, because tuples are immutable, unlike
# lists and dictionaries, which are mutable. To "change" the data in a tuple,
# reassign a variable of the same name to a new tuple with different items.
print(a)
a = ('math', 'history', 'bahasa indonesia', 'biology')
print(a)
|
from time import sleep
import config
import logging
import csv
from gevent import timeout
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver import Firefox, ActionChains
from selenium.webdriver.firefox.options import Options
import sys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
BinList = [
'450060',
'451014',
'451015',
]
# __________________________________________________
opts = Options()
opts.headless = True  # Options.set_headless() is deprecated
assert opts.headless
def wait_until_element_visible(position_id, position_value):
    WebDriverWait(browser, 60).until(
        EC.visibility_of_element_located((position_id, position_value)))
browser = Firefox(options=opts)
browser.maximize_window()
WebDriverWait(browser, 60).until(EC.visibility_of_all_elements_located)
browser.get(config.konnektiveAdminPannelURL)
browser.find_element_by_name('userName').send_keys(config.seleniumUserName)
browser.find_element_by_name('password').send_keys(config.seleniumPassword)
browser.find_element_by_tag_name('button').submit()
sleep(6)
Brows_URL = 'https://crm.konnektive.com/merchants/binmapping/'
browser.get(Brows_URL)
wait_until_element_visible(By.CSS_SELECTOR, '#-row-1 > td:nth-child(1)')
try:
RowsCount = len(browser.find_elements_by_tag_name('tr'))
for counter in range(RowsCount-1):
CyclePathStartRange = '#-row-' + str(counter + 1) + ' > td:nth-child(3)'
StartRangeValue = browser.find_element_by_css_selector(CyclePathStartRange).text
cardBinListCross = StartRangeValue in BinList
if cardBinListCross:
CyclePathEditButton = '#-row-' + str(counter + 1) + ' > td:nth-child(8) > span:nth-child(1)'
browser.find_element_by_css_selector(CyclePathEditButton).click()
            wait_until_element_visible(By.CSS_SELECTOR, '.checkbox > label:nth-child(1) > input:nth-child(1)')
browser.find_element_by_css_selector('.checkbox > label:nth-child(1) > input:nth-child(1)').click()
browser.find_element_by_css_selector('input.btn').click()
sleep(30)
browser.close()
print('Done')
except Exception:
    browser.close()
    print('Done with exception')
|
import pandas as pd
import requests
import json
import os
import xml.etree.ElementTree as ET
import time
import glob
import math
stage = ""
# stage = "_dev"
genji_dir = "/Users/nakamurasatoru/git/d_genji"
hostPrefix = "https://genji.dl.itc.u-tokyo.ac.jp"
# hostPrefix = "https://utda.github.io/genji"
dir = genji_dir + "/genji/static/data"
dir2 = genji_dir + "/genji/static/data/iiif"+stage+"/org"
def create_members_map(members_map, members):
for member in members:
member_id = member["@id"]
canvas_id = member_id.split("#")[0]
if canvas_id not in members_map:
members_map[canvas_id] = []
members_map[canvas_id].append(member)
return members_map
def create_anno(canvas, members_map, odir, index, info):
canvas_id = canvas["@id"]
if canvas_id not in members_map:
return None
odir = odir + "/list"
os.makedirs(odir, exist_ok=True)
opath = odir+"/p"+str(index)+".json"
annoListUri = opath.replace(opath.split("/data/")[0], hostPrefix)
members = members_map[canvas_id]
resources = []
for i in range(len(members)):
member = members[i]
xywh = member["@id"].split("#xywh=")[1]
areas = xywh.split(",")
w = int(float(areas[2]))
h = int(float(areas[3]))
d2 = int(h / 150)
# d2 = 30
x = int(float(areas[0])) + int(w / 2)
y = int(float(areas[1]))# + int(float(areas[3]))# + w #d2 * 2
w = w / 2
if w > 100:
w = 100
y -= w / 1.14
x = int(x)
y = int(y)
member_label = member["label"]
if "metadata" not in member:
continue
# print("create_anno", "label", member_label)
if member_label == "脱文・錯簡":
# descripiton = ""
# url = ""
metadata = member["metadata"]
m_map = {}
for obj in metadata:
label = obj["label"]
value = obj["value"]
m_map[label] = value
'''
if label == "url":
url = value
else:
descripiton = value
'''
chars = m_map["Type"]+"<p>" + m_map["Text"] + "</p><p><a href=\""+hostPrefix + "/ds"+"\" target=\"_blank\" rel=\"noopener noreferrer\">脱文錯簡リスト</p>"
fill = "#FF0000"
stroke = "#FF0000"
# w = d2
d = "M"+str(int(x))+" "+str(int(y))+" l-"+str(int(w/2))+" "+str(int(w / 1.14))+" l"+str(int(w))+" 0 z"
dw = w /20
d1 = "M"+str(x - dw)+" "+str(y + w * 3/10)+" l-0 "+str(w + 2/10)+" "+str(dw * 2)+" 0 l0 -"+str(w + 2/10)+" z"
d3 = "M"+str(x - dw)+" "+str(y + w * 6.5/10)+" l-0 "+str(dw)+" "+str(dw * 2)+" 0 l0 -"+str(dw)+" z"
opa = str(0.5)
'''
<path xmlns=\"http://www.w3.org/2000/svg\" d=\""+d1 + \
"\" id=\"pin_" + "abc" + "\" fill=\"" + \
fill+"\" stroke=\""+stroke+"\"/><path xmlns=\"http://www.w3.org/2000/svg\" d=\""+d2 + \
"\" id=\"pin_" + "abc2" + "\" fill=\"" + \
fill+"\" stroke=\""+stroke+"\"/>
'''
svg = "<svg xmlns='http://www.w3.org/2000/svg'><path xmlns=\"http://www.w3.org/2000/svg\" d=\""+d + \
"\" id=\"pin_" + "abc3" + "\" fill-opacity=\""+opa+"\" fill=\"" + \
fill+"\" stroke=\""+stroke+"\"/></svg>"
y -= d2 * 6
else:
page = int(member["metadata"][0]["value"])
# https://japanknowledge.com/lib/display/?lid=80110V00200017
if "新編日本古典文学全集" in member_label:
sagaId = info["jk_front"][0:-3] + str(page).zfill(3)
chars = "新編日本古典文学全集 p."+str(page)+" 開始位置<p><a href=\"https://japanknowledge.com/lib/display/?lid=" + \
str(sagaId)+"\" target=\"_blank\" rel=\"noopener noreferrer\">ジャパンナレッジ Lib</a>でみる</p><p><a href=\"https://japanknowledge.com/psnl/display/?lid=" + \
str(sagaId)+"\" target=\"_blank\" rel=\"noopener noreferrer\">ジャパンナレッジ Personal</a>でみる</p>"
fill = "#2E89D9"
stroke = "#2E89D9"
                # For the Shinpen edition, shift the marker upward
y -= d2 * 6
else:
ndlId = info["ndl"].split("/")[-2]
# front 20 page 5 koui front 1
ndlPage = info["ndl_front"] + math.floor((page - info["koui_front"] + 1) / 2)
chars = "校異源氏物語 p."+str(page)+" 開始位置<p><a href=\"http://dl.ndl.go.jp/info:ndljp/pid/"+ndlId+"/"+str(
ndlPage)+"\" target=\"_blank\" rel=\"noopener noreferrer\">国立国会図書館デジタルコレクション</a>でみる</p>" # 校異源氏物語を
'''
if page == 594 and "/東大本/" in odir:
chars = "<div><b>【脱文あり】</b></div>" + chars
'''
fill = "#F3AA00"
stroke = "#f38200"
if info["vol"] >= 2 and info["vol"] <= 6 and "東大本" in opath and info["vol"] != 3:
y += 200
d = "M" + str(int(x)) + "," + str(int(y)) + "c0,-" + str(d2 * 2) + " " + str(d2) + ",-" +str(d2 * 4) + " " +str(d2 * 3) + ",-" + str(d2 * 6) + "c0,-" + str(d2 * 2) + " -" + str(d2) + ",-" + str(d2 * 3) + " -" + str(d2 * 3) + ",-" + str(d2 * 3) + "c-" + str(d2 * 2) + ",0 -" + str(d2 * 3) + "," + str(d2) + " -" + str(d2 * 3) + "," + str(d2 * 3) + "c" + str(d2 * 2) + "," + str(d2 * 2) + " " + str(d2 * 3) + "," + str(d2 * 4) + " " + str(d2 * 3) + "," + str(d2 * 6) + "z"
opa = str(0.5)
svg = "<svg xmlns='http://www.w3.org/2000/svg'><path xmlns=\"http://www.w3.org/2000/svg\" d=\""+d + \
"\" id=\"pin_" + "abc" + "\" fill-opacity=\""+opa+"\" fill=\"" + \
fill+"\" stroke=\""+stroke+"\"/></svg>"
resources.append({
"@id": annoListUri + "#" + str(i+1),
"@type": "oa:Annotation",
"motivation": "sc:painting",
"on": [
{
"@type": "oa:SpecificResource",
"full": canvas_id,
"selector": {
"@type": "oa:Choice",
"default": {
"@type": "oa:FragmentSelector",
"value": "xywh=" + xywh
},
"item": {
"@type": "oa:SvgSelector",
"value": svg
}
},
"within": {
"@id": info["manifest"],
"@type": "sc:Manifest"
}
}
],
"resource": {
"@type": "dctypes:Text",
"chars": chars,
"format": "text/html"
}
})
annoList = {
"@context": "http://iiif.io/api/presentation/2/context.json",
"@id": annoListUri,
"@type": "sc:AnnotationList",
"resources": resources
}
    with open(opath, 'w') as fw:
        json.dump(annoList, fw, ensure_ascii=False, indent=4, separators=(',', ': '))
return annoListUri
### Returns the curation selection
def create_manifest(selection, info):
within = selection["within"]
label = within["label"] #########################
vol = info["vol"]
odir = dir2+"/"+label + "/" + str(vol).zfill(2)
os.makedirs(odir, exist_ok=True)
opath = odir+"/manifest.json"
##########
members = selection["members"]
members_map = {}
members_map = create_members_map(members_map, members)
# print("*", label)
    # for the Todai manuscript, also merge in the Shinpen members (if available)
if "東大本" in label:
try:
with open(genji_dir + "/genji_curation/docs/iiif/saga/"+str(vol).zfill(2)+".json") as f:
saga_curation = json.load(f)
members = saga_curation["selections"][0]["members"]
members_map = create_members_map(members_map, members)
        except Exception:
            pass  # the saga curation file may not exist for this volume
##########
manifest_uri = selection["within"]["@id"]
# print(manifest_uri)
path = "data/"+label + "/" + str(vol).zfill(2) + "/manifest.json"
if not os.path.exists(path):
time.sleep(3)
manifest_data = requests.get(manifest_uri).json()
dirname = os.path.dirname(path)
os.makedirs(dirname, exist_ok=True)
        with open(path, 'w') as fw:
            json.dump(manifest_data, fw, ensure_ascii=False, indent=4, separators=(',', ': '))
else:
with open(path) as f:
manifest_data = json.load(f)
canvases = manifest_data["sequences"][0]["canvases"]
canvases_rev = {}
for i in range(len(canvases)):
canvas = canvases[i]
        canvases_rev[canvas["@id"]] = i  # preserve the canvas order
otherContentUri = create_anno(canvas, members_map, odir, i+1, info)
if otherContentUri:
canvas["otherContent"] = [
{
"@id": otherContentUri,
"@type": "sc:AnnotationList"
}
]
else:
if "otherContent" in canvas:
del canvas["otherContent"]
##########
manifest_uri = opath.replace(opath.split("/data/")[0], hostPrefix)
manifest_data["@id"] = manifest_uri
manifest_data["label"] = label
    ##### Build the table of contents (structures)
structures = []
manifest_data["structures"] = structures
# print(members_map)
count = 1
structures_map = {}
for canvas_id in members_map:
members = members_map[canvas_id]
for member in members:
label = member["label"]
if "新編日本古典文学全" in member["label"] or "源氏物語大成" in member["label"]:
# label = member["label"]
label = label.replace("源氏物語大成", "校異源氏物語")
aaa = "bbb"
elif label == "脱文・錯簡":
# print(label)
# print(member)
aaa = "bbb"
elif "metadata" in member:
label = "校異源氏物語 p."+ str(member["metadata"][0]["value"])
else:
aaa = "bbb"
# print("***********************", label, member)
member["label"] = label #ラベルの修正
canvas_id = member["@id"].split("#xywh=")[0]
index = canvases_rev[canvas_id]
            # negate x so members on the same canvas sort right-to-left (descending x)
            x = -int(member["@id"].split("#xywh=")[1].split(",")[0].split(".")[0])
if index not in structures_map:
structures_map[index] = {}
if x not in structures_map[index]:
structures_map[index][x] = []
structures_map[index][x].append({
"@id": member["@id"] + "/r"+str(count),
"@type": "sc:Range",
"canvases": [
canvas_id
],
"label": label #member["label"]
})
count += 1
for index in sorted(structures_map):
obj = structures_map[index]
for x in sorted(obj):
arr = obj[x]
for e in arr:
structures.append(e)
    with open(opath, 'w') as fw:
        json.dump(manifest_data, fw, ensure_ascii=False,
                  indent=4, separators=(',', ': '))
##########
selection["within"]["@id"] = manifest_uri
members = []
for canvas_id in members_map:
for member in members_map[canvas_id]:
# print("aaa", member)
page = -1
err = False
label = member["label"]
if "metadata" not in member:
if " p." in label:
page = label.split(".")[1]
else:
# print(member)
if label == "脱文・錯簡":
err = True
print(label)
metadata = member["metadata"]
for obj in metadata:
if obj["label"] == "p":
page = int(obj["value"])
if page == -1 and not err:
continue
if not err:
                # attach the Koui (collated text) line ID
if "新編日本古典文学全集" not in member["label"]:
member["lineId"] = "https://w3id.org/kouigenjimonogatari/data/" + \
str(page).zfill(4)+"-01.json"
            # strip fields before output
            # (but keep metadata for the misplaced-leaves entries)
if "metadata" in member and not err:
# del member["metadata"]
member["metadata"] = [{
"label" : "Page",
"value" : label
}]
if "description" in member:
del member["description"]
members.append(member)
selection["members"] = members
selection["@id"] = hostPrefix + "/data/vol"+stage+"/"+str(vol).zfill(2)+"/curation.json#"+within["label"]
return selection
def create_ndl(info):
members = []
vol = info["vol"]
vol_str = str(vol).zfill(2)
tei = "https://kouigenjimonogatari.github.io/tei/"+vol_str+".xml"
response = requests.get(tei)
if response.status_code < 400:
        xmlData = response.text  # reuse the response already fetched above
root = ET.fromstring(xmlData)
ET.register_namespace('', "http://www.tei-c.org/ns/1.0")
prefix = ".//{http://www.tei-c.org/ns/1.0}"
surfaces = root.findall(prefix+"surface")
for surface in surfaces:
graphic = surface.find(prefix+"graphic")
canvas_id = graphic.get("n")
zones = surface.findall(prefix+"zone")
for zone in zones:
x = int(zone.get("ulx"))
y = int(zone.get("uly"))
w = int(zone.get("lrx")) - x
h = int(zone.get("lry")) - y
xywh = str(x) + "," + str(y) + "," + str(w) + "," + str(h)
member_id = canvas_id+"#xywh="+xywh
zone_id = zone.get("{http://www.w3.org/XML/1998/namespace}id")
lineId = "https://w3id.org/kouigenjimonogatari/data/" + \
zone_id.split("_")[1]+"-01.json"
members.append({
"@id": member_id,
"@type": "sc:Canvas",
"label": "校異源氏物語 p." + str(int(lineId.split("/")[-1].split("-")[0])), # lineId,
"lineId": lineId
})
selection = {
"@id": hostPrefix + "/data/vol"+stage+"/"+vol_str+"/curation.json#校異源氏物語",
"@type": "sc:Range",
"label": "Manual curation by IIIF Curation Viewer",
"members": members,
"within": {
"@id": info["manifest"],
"@type": "sc:Manifest",
"label": "校異源氏物語"
}
}
return selection
def create_curations(info):
vol = info["vol"]
files = glob.glob(genji_dir + "/genji_curation/docs/iiif/fb2/"+str(vol).zfill(2)+"/*.json")
orderedSelections = {}
notOrderedSelections = []
orderedLabels = ["東大本", "九大本(古活字版)", "九大本(無跋無刊記整版本)", "湖月抄・NIJL・鵜飼文庫本"]
selections = []
for file in files:
with open(file) as f:
df = json.load(f)
selection = df["selections"][0]
if "members" not in selection:
continue
members = selection["members"]
        # keep only selections whose annotation set is complete
        if len(members) != info["koui_count"]:
            print("Warning: some annotations are missing!", file)
label = selection["within"]["label"]
if "古活字版" in label:
label = "九大本(古活字版)"
elif "無跋" in label or label == "源氏物語":
label = "九大本(無跋無刊記整版本)"
elif "湖月抄・NIJL・鵜飼文庫本" in label:
label = "湖月抄(国文研所蔵)"
selection["within"]["label"] = label
if label in orderedLabels:
orderedSelections[label] = selection
else:
# print("****************", label)
notOrderedSelections.append(selection)
for label in orderedLabels:
if label in orderedSelections:
selection = orderedSelections[label]
            # generate the manifest
selectionResult = create_manifest(selection, info)
if len(selectionResult) > 0:
selections.append(selectionResult)
for selection in notOrderedSelections:
        # generate the manifest
selectionResult = create_manifest(selection, info)
if len(selectionResult) > 0:
selections.append(selectionResult)
return selections
def create_image_map(info, vol, dir):
selections = []
selections.append(create_ndl(info))
curations = create_curations(info)
for selection in curations:
selections.append(selection)
vol_str = str(vol).zfill(2)
curation = {
"@context": [
"http://iiif.io/api/presentation/2/context.json",
"http://codh.rois.ac.jp/iiif/curation/1/context.json"
],
"@id": hostPrefix + "/data/vol"+stage+"/"+vol_str+"/curation.json",
"@type": "cr:Curation",
"label": info["label"],
"selections": selections
}
odir = dir+"/vol"+stage+"/"+vol_str
os.makedirs(odir, exist_ok=True)
fw = open(odir+"/curation.json", 'w')
json.dump(curation, fw, ensure_ascii=False, indent=4, separators=(',', ': '))
def create_config(info, vol, dir):
vol_str = str(vol).zfill(2)
url_main = "https://w3id.org/kouigenjimonogatari/tei/"+vol_str+".xml"
    # prefer the local TEI file if the page alignment has been completed
if os.path.exists(genji_dir + "/genji/static/data/tei/koui/"+vol_str+".xml"):
url_main = hostPrefix + "/data/tei/koui/"+vol_str+".xml"
config = {
"returnUrl": hostPrefix,
"returnLabel": "デジタル源氏物語",
"urlMain": url_main,
"urlSub": info["tei"],
"imageMap": hostPrefix + "/data/vol"+stage+"/"+vol_str+"/curation.json",
"direction": "vertical"
}
odir = dir+"/vol"+stage+"/"+vol_str
os.makedirs(odir, exist_ok=True)
fw = open(odir+"/config.json", 'w')
json.dump(config, fw, ensure_ascii=False, indent=4, separators=(',', ': '))
if __name__ == '__main__':
path = genji_dir + "/genji/static/data/info.json"
with open(path) as f:
info = json.load(f)
# info = requests.get("https://raw.githubusercontent.com/nakamura196/genji_vue/master/docs/data/info.json").json()
info_map = {}
for selection in info["selections"]:
members = selection["members"]
manifest = selection["within"]["@id"]
for member in members:
metadata = member["metadata"]
map = {}
map["label"] = member["label"]
map["manifest"] = manifest
for obj in metadata:
map[obj["label"]] = obj["value"]
if obj["label"] == "vol":
vol = obj["value"]
info_map[vol] = map
for vol in range(1, 55):
        if vol != 10 and False:  # debug toggle: drop "and False" to process only vol 10
            continue
print("vol", vol)
info = info_map[vol]
# print(info)
create_image_map(info, vol, dir)
create_config(info, vol, dir)
# break
|
# 1. Get the name of the file the user wants to copy
old_file_name = input('Enter the name of the file to copy: ')
# 2. Open the file to be copied
old_file = open(old_file_name, 'r')
# 3. Create a new file
new_file_name = 'copy_' + old_file_name
new_file = open(new_file_name, 'w')
# 4. Read the data from the old file and write it into the new one
content = old_file.read()
new_file.write(content)
# 5. Close both files
old_file.close()
new_file.close()
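
# A more defensive variant (a sketch under the same prompt-driven
# workflow as above): context managers close both files even on error,
# and shutil.copyfileobj streams in chunks instead of reading the whole
# file into memory, so large and binary files both work.
import shutil

def copy_file_safely(src_name, dst_name):
    with open(src_name, 'rb') as src, open(dst_name, 'wb') as dst:
        shutil.copyfileobj(src, dst)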
|
class Vehicle:
    def __init__(self, ID, brand, model, year, color, vehicle_type, base_cost):
        self.ID = ID
        self.brand = brand
        self.model = model
        self.year = year
        self.color = color
        self.vehicle_type = vehicle_type
        self.base_cost = base_cost
        self.is_rented = False

    def create_vehicle(self):
        # Serialize the vehicle to a plain dict.
        return {"ID": self.ID, "brand": self.brand, "model": self.model,
                "year": self.year, "color": self.color, "type": self.vehicle_type,
                "cost": self.base_cost, "is_rented": self.is_rented}

class Sedan(Vehicle):
    def __init__(self, ID, brand, model, year, color, vehicle_type, base_cost):
        super().__init__(ID, brand, model, year, color, vehicle_type, base_cost)
        self.base_cost = base_cost + 50   # sedan surcharge
        self.vehicle_type = "sedan"

class SUV(Vehicle):
    def __init__(self, ID, brand, model, year, color, vehicle_type, base_cost):
        super().__init__(ID, brand, model, year, color, vehicle_type, base_cost)
        self.base_cost = base_cost + 100  # SUV surcharge
        self.vehicle_type = "SUV"

class Coupe(Vehicle):
    def __init__(self, ID, brand, model, year, color, vehicle_type, base_cost):
        super().__init__(ID, brand, model, year, color, vehicle_type, base_cost)
        self.base_cost = base_cost + 75   # coupe surcharge
        self.vehicle_type = "coupe"
|
#!/usr/bin/env python3
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""This module provides shared functionality to provide Dart metadata for
DOM APIs.
"""
import copy
import json
import logging
import monitored
import os
import re
from htmlrenamer import renamed_html_members, html_interface_renames
_logger = logging.getLogger('dartmetadata')
# Annotations to be placed on native members. The table is indexed by the IDL
# interface and member name, and by IDL return or field type name. Both are
# used to assemble the annotations:
#
# INTERFACE.MEMBER: annotations for member.
# +TYPE: add annotations only if there are member annotations.
# -TYPE: add annotations only if there are no member annotations.
# TYPE: add regardless of member annotations.
_dart2js_annotations = monitored.Dict(
'dartmetadata._dart2js_annotations',
{
'AnimationEffectTiming.duration': [
"@Returns('num|String|Null')",
],
'ArrayBufferView': [
"@Creates('TypedData')",
"@Returns('TypedData|Null')",
],
'CanvasRenderingContext2D.createImageData': [
"@Creates('ImageData|=Object')",
],
'CanvasRenderingContext2D.getImageData': [
"@Creates('ImageData|=Object')",
],
'CanvasRenderingContext2D.webkitGetImageDataHD': [
"@Creates('ImageData|=Object')",
],
'CanvasRenderingContext2D.fillStyle': [
"@Creates('String|CanvasGradient|CanvasPattern')",
"@Returns('String|CanvasGradient|CanvasPattern')",
],
'CanvasRenderingContext2D.strokeStyle': [
"@Creates('String|CanvasGradient|CanvasPattern')",
"@Returns('String|CanvasGradient|CanvasPattern')",
],
'CryptoKey.algorithm': [
"@Creates('Null')",
],
'CustomEvent._detail': [
"@Creates('Null')",
],
# Normally Window is never null, but starting from a <template> element in
# JavaScript, this will be null:
# template.content.ownerDocument.defaultView
'Document.window': [
"@Creates('Window|=Object|Null')",
"@Returns('Window|=Object|Null')",
],
'Document.getElementsByClassName': [
"@Creates('NodeList|HtmlCollection')",
"@Returns('NodeList|HtmlCollection')",
],
'Document.getElementsByName': [
"@Creates('NodeList|HtmlCollection')",
"@Returns('NodeList|HtmlCollection')",
],
'Document.getElementsByTagName': [
"@Creates('NodeList|HtmlCollection')",
"@Returns('NodeList|HtmlCollection')",
],
# querySelectorAll never returns `null`.
'Document.querySelectorAll': [
"@Creates('NodeList')",
"@Returns('NodeList')",
],
'DocumentFragment.querySelectorAll': [
"@Creates('NodeList')",
"@Returns('NodeList')",
],
'Element.querySelectorAll': [
"@Creates('NodeList')",
"@Returns('NodeList')",
],
'Element.getBoundingClientRect': [
"@Creates('_DomRect')",
"@Returns('_DomRect|Null')", # TODO(sra): Verify and remove Null.
],
'Element.getClientRects': [
"@Creates('DomRectList')",
"@Returns('DomRectList|Null')",
],
# Methods returning Window can return a local window, or a cross-frame
# window (=Object) that needs wrapping.
'Window': [
"@Creates('Window|=Object')",
"@Returns('Window|=Object')",
],
'Window.openDatabase': [
"@Creates('SqlDatabase')",
],
'Window.showModalDialog': [
"@Creates('Null')",
],
'Element.webkitGetRegionFlowRanges': [
"@Creates('JSExtendableArray')",
"@Returns('JSExtendableArray')",
],
'Element.getElementsByClassName': [
"@Creates('NodeList|HtmlCollection')",
"@Returns('NodeList|HtmlCollection')",
],
'Element.getElementsByName': [
"@Creates('NodeList|HtmlCollection')",
"@Returns('NodeList|HtmlCollection')",
],
'Element.getElementsByTagName': [
"@Creates('NodeList|HtmlCollection')",
"@Returns('NodeList|HtmlCollection')",
],
"ErrorEvent.error": [
"@Creates('Null')", # Only returns values created elsewhere.
],
# To be in callback with the browser-created Event, we had to have called
# addEventListener on the target, so we avoid
'Event.currentTarget': [
"@Creates('Null')",
"@Returns('EventTarget|=Object|Null')",
],
# Only nodes in the DOM bubble and have target !== currentTarget.
'Event.target': [
"@Creates('Node')",
"@Returns('EventTarget|=Object')",
],
# TODO(sra): Investigate how ExtendableMessageEvent.data is different from
# MessageEvent.data. It might be necessary to put in a method to translate
# the JavaScript wire type into a Dart type.
'ExtendableMessageEvent.data': [
"@annotation_Creates_SerializedScriptValue",
"@annotation_Returns_SerializedScriptValue",
],
# TODO(sra): We could determine the following by parsing the compound IDL
# type.
'ExtendableMessageEvent.source': [
"@Creates('Client|ServiceWorker|MessagePort')",
"@Returns('Client|ServiceWorker|MessagePort|Null')",
],
'File.lastModifiedDate': [
"@Creates('Null')", # JS date object.
],
'FocusEvent.relatedTarget': [
"@Creates('Null')",
],
'Gamepad.buttons': [
"@Creates('JSExtendableArray|GamepadButton')",
"@Returns('JSExtendableArray')",
],
# Creates a GeolocationPosition or a GeolocationPositionError for a
# callback. See issue #45562.
'Geolocation.getCurrentPosition': [
"@Creates('Geoposition')",
"@Creates('PositionError')",
],
'Geolocation.watchPosition': [
"@Creates('Geoposition')",
"@Creates('PositionError')",
],
'HTMLCanvasElement.getContext': [
"@Creates('CanvasRenderingContext2D|RenderingContext|RenderingContext2')",
"@Returns('CanvasRenderingContext2D|RenderingContext|RenderingContext2|Null')",
],
'HTMLInputElement.valueAsDate': [
"@Creates('Null')", # JS date object.
],
# Rather than have the result of an IDBRequest as a union over all possible
# results, we mark the result as instantiating any classes, and mark
# each operation with the classes that it could cause to be asynchronously
# instantiated.
'IDBRequest.result': ["@Creates('Null')"],
# The source is usually a participant in the operation that generated the
# IDBRequest.
'IDBRequest.source': ["@Creates('Null')"],
'IDBFactory.open': ["@Creates('Database')"],
'IDBFactory.webkitGetDatabaseNames': ["@Creates('DomStringList')"],
'IDBObjectStore.put': ["@_annotation_Creates_IDBKey"],
'IDBObjectStore.add': ["@_annotation_Creates_IDBKey"],
'IDBObjectStore.get': ["@annotation_Creates_SerializedScriptValue"],
'IDBObjectStore.openCursor': ["@Creates('Cursor')"],
'IDBIndex.get': ["@annotation_Creates_SerializedScriptValue"],
'IDBIndex.getKey': [
"@annotation_Creates_SerializedScriptValue",
# The source is the object store behind the index.
"@Creates('ObjectStore')",
],
'IDBIndex.openCursor': ["@Creates('Cursor')"],
'IDBIndex.openKeyCursor': ["@Creates('Cursor')"],
'IDBCursorWithValue.value': [
'@annotation_Creates_SerializedScriptValue',
'@annotation_Returns_SerializedScriptValue',
],
'IDBCursor.key': [
"@_annotation_Creates_IDBKey",
"@_annotation_Returns_IDBKey",
],
'IDBCursor.primaryKey': [
"@_annotation_Creates_IDBKey",
"@_annotation_Returns_IDBKey",
],
'IDBCursor.source': [
"@Creates('Null')",
"@Returns('ObjectStore|Index|Null')",
],
'IDBDatabase.version': [
"@Creates('int|String|Null')",
"@Returns('int|String|Null')",
],
'IDBIndex.keyPath': [
"@annotation_Creates_SerializedScriptValue",
],
'IDBKeyRange.lower': [
"@annotation_Creates_SerializedScriptValue",
],
'IDBKeyRange.upper': [
"@annotation_Creates_SerializedScriptValue",
],
'IDBObjectStore.keyPath': [
"@annotation_Creates_SerializedScriptValue",
],
'+IDBOpenDBRequest': [
"@Returns('Request')",
"@Creates('Request')",
],
'+IDBRequest': [
"@Returns('Request')",
"@Creates('Request')",
],
'IDBVersionChangeEvent.newVersion': [
"@Creates('int|String|Null')",
"@Returns('int|String|Null')",
],
'IDBVersionChangeEvent.oldVersion': [
"@Creates('int|String|Null')",
"@Returns('int|String|Null')",
],
'ImageData.data': [
"@Creates('NativeUint8ClampedList')",
"@Returns('NativeUint8ClampedList')",
],
'MediaStream.getAudioTracks': [
"@Creates('JSExtendableArray|MediaStreamTrack')",
"@Returns('JSExtendableArray')",
],
'MediaStream.getVideoTracks': [
"@Creates('JSExtendableArray|MediaStreamTrack')",
"@Returns('JSExtendableArray')",
],
'MessageEvent.data': [
"@annotation_Creates_SerializedScriptValue",
"@annotation_Returns_SerializedScriptValue",
],
'MessageEvent.ports': ["@Creates('JSExtendableArray')"],
'MessageEvent.source': [
"@Creates('Null')",
"@Returns('EventTarget|=Object')",
],
'Metadata.modificationTime': [
"@Creates('Null')", # JS date object.
],
'MouseEvent.relatedTarget': [
"@Creates('Node')",
"@Returns('EventTarget|=Object|Null')",
],
'Notification.data': [
"@annotation_Creates_SerializedScriptValue",
"@annotation_Returns_SerializedScriptValue",
],
'PopStateEvent.state': [
"@annotation_Creates_SerializedScriptValue",
"@annotation_Returns_SerializedScriptValue",
],
'RTCStatsReport.timestamp': [
"@Creates('Null')", # JS date object.
],
'SerializedScriptValue': [
"@annotation_Creates_SerializedScriptValue",
"@annotation_Returns_SerializedScriptValue",
],
'ServiceWorkerMessageEvent.data': [
"@annotation_Creates_SerializedScriptValue",
"@annotation_Returns_SerializedScriptValue",
],
'ServiceWorkerMessageEvent.source': [
"@Creates('Null')",
"@Returns('ServiceWorker|MessagePort')",
],
'ShadowRoot.getElementsByClassName': [
"@Creates('NodeList|HtmlCollection')",
"@Returns('NodeList|HtmlCollection')",
],
'ShadowRoot.getElementsByName': [
"@Creates('NodeList|HtmlCollection')",
"@Returns('NodeList|HtmlCollection')",
],
'ShadowRoot.getElementsByTagName': [
"@Creates('NodeList|HtmlCollection')",
"@Returns('NodeList|HtmlCollection')",
],
# Touch targets are Elements in a Document, or the Document.
'Touch.target': [
"@Creates('Element|Document')",
"@Returns('Element|Document')",
],
'TrackEvent.track': [
"@Creates('Null')",
],
'VTTCue.line': [
"@Creates('Null')",
"@Returns('num|String')",
],
'VTTCue.position': [
"@Creates('Null')",
"@Returns('num|String')",
],
'WebGLRenderingContext.getBufferParameter': [
"@Creates('int|Null')",
"@Returns('int|Null')",
],
'WebGLRenderingContext.getFramebufferAttachmentParameter': [
"@Creates('int|Renderbuffer|Texture|Null')",
"@Returns('int|Renderbuffer|Texture|Null')",
],
'WebGLRenderingContext.getProgramParameter': [
"@Creates('int|bool|Null')",
"@Returns('int|bool|Null')",
],
'WebGLRenderingContext.getRenderbufferParameter': [
"@Creates('int|Null')",
"@Returns('int|Null')",
],
'WebGLRenderingContext.getShaderParameter': [
"@Creates('int|bool|Null')",
"@Returns('int|bool|Null')",
],
'WebGLRenderingContext.getTexParameter': [
"@Creates('int|Null')",
"@Returns('int|Null')",
],
'WebGLRenderingContext.getUniform': [
"@Creates('Null|num|String|bool|JSExtendableArray|"
"NativeFloat32List|NativeInt32List|NativeUint32List')",
"@Returns('Null|num|String|bool|JSExtendableArray|"
"NativeFloat32List|NativeInt32List|NativeUint32List')",
],
'WebGLRenderingContext.getVertexAttrib': [
"@Creates('Null|num|bool|NativeFloat32List|Buffer')",
"@Returns('Null|num|bool|NativeFloat32List|Buffer')",
],
'WebGLRenderingContext.getParameter': [
# Taken from http://www.khronos.org/registry/webgl/specs/latest/
# Section 5.14.3 Setting and getting state
"@Creates('Null|num|String|bool|JSExtendableArray|"
"NativeFloat32List|NativeInt32List|NativeUint32List|"
"Framebuffer|Renderbuffer|Texture')",
"@Returns('Null|num|String|bool|JSExtendableArray|"
"NativeFloat32List|NativeInt32List|NativeUint32List|"
"Framebuffer|Renderbuffer|Texture')",
],
'WebGLRenderingContext.getContextAttributes': [
"@Creates('ContextAttributes|Null')",
],
'XMLHttpRequest.response': [
"@Creates('NativeByteBuffer|Blob|Document|=Object|JSExtendableArray"
"|String|num')",
],
},
dart2jsOnly=True)
_blink_experimental_annotations = [
"@SupportedBrowser(SupportedBrowser.CHROME)",
]
_indexed_db_annotations = [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.FIREFOX, '15')",
"@SupportedBrowser(SupportedBrowser.IE, '10')",
]
_file_system_annotations = [
"@SupportedBrowser(SupportedBrowser.CHROME)",
]
_all_but_ie9_annotations = [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.FIREFOX)",
"@SupportedBrowser(SupportedBrowser.IE, '10')",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
]
_history_annotations = _all_but_ie9_annotations
_no_ie_annotations = [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.FIREFOX)",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
]
_performance_annotations = [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.FIREFOX)",
"@SupportedBrowser(SupportedBrowser.IE)",
]
_rtc_annotations = [ # Note: Firefox nightly builds also support this.
"@SupportedBrowser(SupportedBrowser.CHROME)",
]
_shadow_dom_annotations = [
"@SupportedBrowser(SupportedBrowser.CHROME, '26')",
]
_speech_recognition_annotations = [
"@SupportedBrowser(SupportedBrowser.CHROME, '25')",
]
_svg_annotations = _all_but_ie9_annotations
_web_sql_annotations = [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
]
_webgl_annotations = [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.FIREFOX)",
]
_web_audio_annotations = _webgl_annotations
_webkit_experimental_annotations = [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
]
# Annotations to be placed on generated members.
# The table is indexed as:
# INTERFACE: annotations to be added to the interface declaration
# INTERFACE.MEMBER: annotation to be added to the member declaration
_annotations = monitored.Dict(
'dartmetadata._annotations',
{
'CSSHostRule':
_shadow_dom_annotations,
'WebKitCSSMatrix':
_webkit_experimental_annotations,
'Crypto':
_webkit_experimental_annotations,
'Database':
_web_sql_annotations,
'DatabaseSync':
_web_sql_annotations,
'ApplicationCache': [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.FIREFOX)",
"@SupportedBrowser(SupportedBrowser.IE, '10')",
"@SupportedBrowser(SupportedBrowser.OPERA)",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
],
'AudioBufferSourceNode':
_web_audio_annotations,
'AudioContext':
_web_audio_annotations,
'DOMFileSystem':
_file_system_annotations,
'DOMFileSystemSync':
_file_system_annotations,
'Window.indexedDB':
_indexed_db_annotations,
'Window.openDatabase':
_web_sql_annotations,
'Window.performance':
_performance_annotations,
'Window.webkitNotifications':
_webkit_experimental_annotations,
'Window.webkitRequestFileSystem':
_file_system_annotations,
'Window.webkitResolveLocalFileSystemURL':
_file_system_annotations,
'Element.createShadowRoot': [
"@SupportedBrowser(SupportedBrowser.CHROME, '25')",
],
'Element.ontransitionend':
_all_but_ie9_annotations,
# Placeholder to add experimental flag, implementation for this is
# pending in a separate CL.
'Element.webkitMatchesSelector': [],
'Event.clipboardData': [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.FIREFOX)",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
],
'FormData':
_all_but_ie9_annotations,
'HashChangeEvent': [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.FIREFOX)",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
],
'History.pushState':
_history_annotations,
'History.replaceState':
_history_annotations,
'HTMLContentElement':
_shadow_dom_annotations,
'HTMLDataListElement':
_all_but_ie9_annotations,
'HTMLDetailsElement':
_webkit_experimental_annotations,
'HTMLEmbedElement': [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.IE)",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
],
'HTMLKeygenElement':
_webkit_experimental_annotations,
'HTMLMeterElement':
_no_ie_annotations,
'HTMLObjectElement': [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.IE)",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
],
'HTMLOutputElement':
_no_ie_annotations,
'HTMLProgressElement':
_all_but_ie9_annotations,
'HTMLShadowElement':
_shadow_dom_annotations,
'HTMLTemplateElement':
_blink_experimental_annotations,
'HTMLTrackElement': [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.IE, '10')",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
],
'IDBFactory':
_indexed_db_annotations,
'IDBDatabase':
_indexed_db_annotations,
'MediaStream':
_rtc_annotations,
'MediaStreamEvent':
_rtc_annotations,
'MediaStreamTrack':
_rtc_annotations,
'MediaStreamTrackEvent':
_rtc_annotations,
'MediaSource': [
# TODO(alanknight): This works on Firefox 33 behind a flag and in Safari
# desktop, but not mobile. On theory that static false positives are worse
# than negatives, leave those out for now. Update once they're available.
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.IE, '11')",
],
'MutationObserver': [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.FIREFOX)",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
],
'Performance':
_performance_annotations,
'PopStateEvent':
_history_annotations,
'RTCIceCandidate':
_rtc_annotations,
'RTCPeerConnection':
_rtc_annotations,
'RTCSessionDescription':
_rtc_annotations,
'ShadowRoot':
_shadow_dom_annotations,
'SpeechRecognition':
_speech_recognition_annotations,
'SpeechRecognitionAlternative':
_speech_recognition_annotations,
'SpeechRecognitionError':
_speech_recognition_annotations,
'SpeechRecognitionEvent':
_speech_recognition_annotations,
'SpeechRecognitionResult':
_speech_recognition_annotations,
'SVGAltGlyphElement':
_no_ie_annotations,
'SVGAnimateElement':
_no_ie_annotations,
'SVGAnimateMotionElement':
_no_ie_annotations,
'SVGAnimateTransformElement':
_no_ie_annotations,
'SVGFEBlendElement':
_svg_annotations,
'SVGFEColorMatrixElement':
_svg_annotations,
'SVGFEComponentTransferElement':
_svg_annotations,
'SVGFEConvolveMatrixElement':
_svg_annotations,
'SVGFEDiffuseLightingElement':
_svg_annotations,
'SVGFEDisplacementMapElement':
_svg_annotations,
'SVGFEDistantLightElement':
_svg_annotations,
'SVGFEFloodElement':
_svg_annotations,
'SVGFEFuncAElement':
_svg_annotations,
'SVGFEFuncBElement':
_svg_annotations,
'SVGFEFuncGElement':
_svg_annotations,
'SVGFEFuncRElement':
_svg_annotations,
'SVGFEGaussianBlurElement':
_svg_annotations,
'SVGFEImageElement':
_svg_annotations,
'SVGFEMergeElement':
_svg_annotations,
'SVGFEMergeNodeElement':
_svg_annotations,
'SVGFEMorphologyElement':
_svg_annotations,
'SVGFEOffsetElement':
_svg_annotations,
'SVGFEPointLightElement':
_svg_annotations,
'SVGFESpecularLightingElement':
_svg_annotations,
'SVGFESpotLightElement':
_svg_annotations,
'SVGFETileElement':
_svg_annotations,
'SVGFETurbulenceElement':
_svg_annotations,
'SVGFilterElement':
_svg_annotations,
'SVGForeignObjectElement':
_no_ie_annotations,
'SVGSetElement':
_no_ie_annotations,
'SQLTransaction':
_web_sql_annotations,
'SQLTransactionSync':
_web_sql_annotations,
'WebGLRenderingContext':
_webgl_annotations,
'WebSocket':
_all_but_ie9_annotations,
'Worker':
_all_but_ie9_annotations,
'XMLHttpRequest.overrideMimeType':
_no_ie_annotations,
'XMLHttpRequest.response':
_all_but_ie9_annotations,
'XMLHttpRequestEventTarget.onloadend':
_all_but_ie9_annotations,
'XMLHttpRequestEventTarget.onprogress':
_all_but_ie9_annotations,
'XSLTProcessor': [
"@SupportedBrowser(SupportedBrowser.CHROME)",
"@SupportedBrowser(SupportedBrowser.FIREFOX)",
"@SupportedBrowser(SupportedBrowser.SAFARI)",
],
})
# TODO(blois): minimize noise and enable by default.
_monitor_type_metadata = False
class DartMetadata(object):
def __init__(self,
api_status_path,
doc_comments_path,
logging_level=logging.WARNING):
_logger.setLevel(logging_level)
        self._api_status_path = api_status_path
        with open(self._api_status_path, 'r') as status_file:
            self._types = json.load(status_file)
        with open(doc_comments_path, 'r') as comments_file:
            self._doc_comments = json.load(comments_file)
if _monitor_type_metadata:
monitored_interfaces = {}
for interface_id, interface_data in list(self._types.items()):
monitored_interface = interface_data.copy()
monitored_interface['members'] = monitored.Dict(
'dartmetadata.%s' % interface_id, interface_data['members'])
monitored_interfaces[interface_id] = monitored_interface
self._monitored_types = monitored.Dict(
'dartmetadata._monitored_types', monitored_interfaces)
else:
self._monitored_types = self._types
def GetFormattedMetadata(self,
library_name,
interface,
member_id=None,
indentation=''):
""" Gets all comments and annotations for an interface or member.
"""
return self.FormatMetadata(
self.GetMetadata(library_name, interface, member_id), indentation)
def GetMetadata(self,
library_name,
interface,
member_name=None,
source_member_name=None):
""" Gets all comments and annotations for an interface or member.
Args:
source_member_name: If the member is dependent on a different member
then this is used to apply the support annotations from the other
member.
"""
annotations = self._GetComments(library_name, interface, member_name)
annotations = annotations + self._GetCommonAnnotations(
interface, member_name, source_member_name)
return annotations
def GetDart2JSMetadata(
self,
idl_type,
library_name,
interface,
member_name,
):
""" Gets all annotations for Dart2JS members- including annotations for
both dart2js and dartium.
"""
annotations = self.GetMetadata(library_name, interface, member_name)
ann2 = self._GetDart2JSSpecificAnnotations(idl_type, interface.id,
member_name)
if ann2:
if annotations:
annotations.extend(ann2)
else:
annotations = ann2
return annotations
def IsSuppressed(self, interface, member_name):
annotations = self._GetSupportLevelAnnotations(interface.id,
member_name)
return any(
annotation.startswith('@removed') for annotation in annotations)
def _GetCommonAnnotations(self,
interface,
member_name=None,
source_member_name=None):
annotations = []
if member_name:
key = '%s.%s' % (interface.id, member_name)
dom_name = '%s.%s' % (interface.javascript_binding_name,
member_name)
# DomName annotation is needed for dblclick ACX plugin analyzer.
if member_name == 'dblclickEvent' or member_name == 'ondblclick':
annotations.append("@DomName('" + dom_name + "')")
else:
key = interface.id
if key in _annotations:
annotations.extend(_annotations[key])
if (not member_name and
interface.javascript_binding_name.startswith('WebKit') and
interface.id not in html_interface_renames):
annotations.extend(_webkit_experimental_annotations)
if (member_name and member_name.startswith('webkit') and
key not in renamed_html_members):
annotations.extend(_webkit_experimental_annotations)
if source_member_name:
member_name = source_member_name
support_annotations = self._GetSupportLevelAnnotations(
interface.id, member_name)
for annotation in support_annotations:
if annotation not in annotations:
annotations.append(annotation)
return annotations
def _GetComments(self, library_name, interface, member_name=None):
""" Gets all comments for the interface or member and returns a list. """
# Add documentation from JSON.
comments = []
library_name = 'dart.dom.%s' % library_name
if library_name in self._doc_comments:
library_info = self._doc_comments[library_name]
if interface.id in library_info:
interface_info = library_info[interface.id]
if member_name:
if 'members' in interface_info and member_name in interface_info[
'members']:
comments = interface_info['members'][member_name]
elif 'comment' in interface_info:
comments = interface_info['comment']
if comments:
comments = ['\n'.join(comments)]
return comments
def AnyConversionAnnotations(self, idl_type, interface_name, member_name):
if (_annotations.get('%s.%s' % (interface_name, member_name)) or
self._GetDart2JSSpecificAnnotations(idl_type, interface_name,
member_name)):
return True
else:
return False
def FormatMetadata(self, metadata, indentation):
if metadata:
newline = '\n%s' % indentation
result = newline.join(metadata) + newline
return result
return ''
def _GetDart2JSSpecificAnnotations(self, idl_type, interface_name,
member_name):
""" Finds dart2js-specific annotations. This does not include ones shared with
dartium.
"""
ann1 = _dart2js_annotations.get("%s.%s" % (interface_name, member_name))
if ann1:
ann2 = _dart2js_annotations.get('+' + idl_type)
if ann2:
return ann2 + ann1
ann2 = _dart2js_annotations.get(idl_type)
if ann2:
return ann2 + ann1
return ann1
ann2 = _dart2js_annotations.get('-' + idl_type)
if ann2:
return ann2
ann2 = _dart2js_annotations.get(idl_type)
return ann2
def _GetSupportInfo(self, interface_id, member_id=None):
""" Looks up the interface or member in the DOM status list and returns the
support level for it.
"""
if interface_id in self._monitored_types:
type_info = self._monitored_types[interface_id]
else:
type_info = {
'members': {},
'support_level': 'untriaged',
}
self._types[interface_id] = type_info
if not member_id:
return type_info
members = type_info['members']
if member_id in members:
member_info = members[member_id]
else:
if member_id == interface_id:
member_info = {}
else:
member_info = {'support_level': 'untriaged'}
members[member_id] = member_info
return member_info
def _GetSupportLevelAnnotations(self, interface_id, member_id=None):
""" Gets annotations for API support status.
"""
support_info = self._GetSupportInfo(interface_id, member_id)
dart_action = support_info.get('dart_action')
support_level = support_info.get('support_level')
comment = support_info.get('comment')
annotations = []
# TODO(blois): should add an annotation for the comment, but keeping out
# to keep the initial diff a bit more localized.
#if comment:
# annotations.append('// %s' % comment)
if dart_action:
if dart_action == 'unstable':
annotations.append('@Unstable()')
elif dart_action == 'suppress':
if comment:
annotations.append('// %s' % comment)
                annotation_name = 'removed' if member_id else 'deprecated'
                annotations.append('@%s // %s' % (annotation_name, support_level))
elif dart_action == 'stable':
pass
else:
                _logger.warning(
                    'Unknown dart_action - %s:%s' % (interface_id, member_id))
elif support_level == 'stable':
pass
elif support_level == 'deprecated':
if comment:
annotations.append('// %s' % comment)
annotations.append('@deprecated')
elif support_level is None:
pass
else:
            _logger.warning(
                'Unknown support_level - %s:%s' % (interface_id, member_id))
return annotations
def Flush(self):
json_file = open(self._api_status_path, 'w+')
json.dump(
self._types,
json_file,
indent=2,
separators=(',', ': '),
sort_keys=True)
json_file.close()
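
# Hedged, standalone illustration (not used by the generator) of the
# '+TYPE' / '-TYPE' lookup scheme documented above _dart2js_annotations:
# member annotations win; '+TYPE' entries are prepended only when member
# annotations exist; '-TYPE' entries apply only when none do. The table
# passed in is a plain dict stand-in, not the real monitored.Dict.
def _demo_annotation_lookup(table, interface, member, idl_type):
    member_ann = table.get('%s.%s' % (interface, member))
    if member_ann:
        extra = table.get('+' + idl_type) or table.get(idl_type)
        return (extra or []) + member_ann
    return table.get('-' + idl_type) or table.get(idl_type)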
|
# -*- encoding: utf-8 -*-
from hello import db, User

# Each line of danwei.txt is "college,username"; strip the trailing
# newline from the username and seed every account with a default password.
with open('danwei.txt', 'rt', encoding="utf-8") as f:
    for x in f:
        college, username = x.split(',')[0], x.split(',')[1][:-1]
        db.session.add(User(username=username, collage=college,
                            password='123456', usermode=4))
db.session.commit()
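
# Alternative sketch using the csv module (handles quoted fields and
# strips newlines automatically); not meant to run in addition to the
# loop above, just to show the equivalent parsing.
import csv

with open('danwei.txt', newline='', encoding='utf-8') as fh:
    for college, username in csv.reader(fh):
        db.session.add(User(username=username, collage=college,
                            password='123456', usermode=4))
db.session.commit()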
|
from distutils.core import setup
from distutils.extension import Extension
import glob
setup(
name = "RoundClient",
version = "1.0",
description = 'Round Audio Client',
author = 'Mike MacHenry',
author_email = 'dskippy@ccs.neu.edu',
url = 'http://roundware.sourceforge.net',
license = 'GPL',
requires = [
"gobject",
"pygtk",
"gtk",
"gtk.glade",
"os",
"os.path",
"time",
"hildon",
"pygst(>=0.10)",
"gst",
"urllib",
"urllib2",
"mimetools",
"mimetypes",
"stat",
"ConfigParser",
"StringIO",
"simplejson",
],
scripts = ['roundclient.sh'],
data_files = [
('share/roundclient', glob.glob('share/*.png')),
('share/roundclient', ['share/client.glade']),
('share/roundclient/images', glob.glob('share/images/*')),
('share/roundclient', ['share/round.db']),
('share/pixmaps', ['share/round_menu_logo.png']),
('lib/gstreamer-0.10', ['lib/libgstlevel.so']),
('share/themes/default/gtk-2.0', ['gtkrc']),
('share/applications/hildon', ['roundclient.desktop']),
('/etc/sudoers.d', ['share/roundware.sudoers']),
#('/media/mmc2', glob.glob('share/*mp3')),
],
packages = ["roundclient"],
)
|
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^$', views.index, name="index"),
    # Django matches URL paths without a leading slash, so the original
    # r'^/$' and r'^/db_debug$' patterns could never match; the paths
    # below are inferred from the view names.
    url(r'^most_popular$', views.most_popular, name="most_popular"),
    url(r'^add_secret$', views.add_secret, name="add_secret"),
    url(r'^delete_secret/(?P<id>\d+)$', views.delete_secret, name="delete_secret"),
    url(r'^add_like/(?P<s_id>\d+)/(?P<u_id>\d+)$', views.add_like, name="add_like"),
    url(r'^db_debug$', views.db_debug),
]
|
# Four digits 1, 2, 3, 4: how many distinct three-digit numbers with no
# repeated digits can they form, and what are they?
count = 0
for i in range(1, 5):
    for j in range(1, 5):
        for k in range(1, 5):
            if i != j and i != k and j != k:
                count += 1
                print(i, j, k)
print("Total:", count)
|
from flask import Flask
from .blueprints import blueprint
app = Flask(__name__)
app.register_blueprint(blueprint)
from .views import hanzi, utils, vocab, item
from threading import Thread
from pathlib import Path
import sys
class ChineseViewer:
def __init__(self, port=42045, debug=True):
self.port = port
self.debug = debug
self.server = None
env_python = Path(sys.argv[0]).name
if 'ipykernel' in env_python:
self.server = Thread(target=self._runserver)
            self.server.daemon = True  # daemon flag is a bool; assigning Thread here was a bug
self.server.start()
else:
self._runserver()
def _runserver(self):
app.run(
port=self.port,
debug=self.debug
)
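
# Illustrative entry point (a sketch; the port matches the default
# above): run this module directly to start the viewer in the blocking,
# non-notebook mode.
if __name__ == '__main__':
    ChineseViewer(port=42045, debug=True)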
|
import argparse
import sys
from wineQualityPred.paper import predictQuality
from wineQualityPred.paper import reproduceResults
def parse_arguments(args):
'''
Parse the arguments of the command line
'''
parser = argparse.ArgumentParser(description="Predict wine quality from its physicochemical properties.")
parser.add_argument( "-f",
"--filepath",
type=str,
help='Filepath of the data to process.', default=None
)
parser.add_argument( "-s",
"--scaler",
type=str,
help='The name of the scaler : "StandardScaler", "MinMaxScaler"', default="StandardScaler"
)
parser.add_argument("-nn", "--not_normalize", help="Do not normalize data", action="store_true")
parser.add_argument("-ns", "--not_shuffle", help="Do not shuffle data", action="store_true")
parser.add_argument(
"-nro", "--not_remove_outliers", help="Do not remove outliers", action="store_true"
)
return parser.parse_args(args)
if __name__ == "__main__":
args = parse_arguments(sys.argv[1:])
    try:
        filepath = args.filepath
        if filepath is None:
            filepath = 'wineQualityPred/data/winequality-red.csv'
        predictQuality(filepath, not args.not_shuffle, not args.not_normalize,
                       not args.not_remove_outliers, args.scaler)
    except Exception as e:
        print(e)
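
# Example invocations (the script filename is assumed):
#   python wine_cli.py                               # defaults: red-wine CSV, StandardScaler
#   python wine_cli.py -f my_data.csv -s MinMaxScaler
#   python wine_cli.py -nn -ns -nro                  # raw data: no normalizing, shuffling, or outlier removal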
|
from .admin import AdminUser
from .order import Order
from .product import Product
from .texts import Texts
from .user import User
|
#!/usr/bin/env python
# Created by Joe Ellis
# Columbia University DVMM lab
### Libraries ###
import os,sys, getopt
from gensim import corpora, models, similarities
from gensim.models import ldamodel
sys.path.append("/ptvn/src/ellis_dev/speaker_diarization/dev/utility")
import FileReader as reader
### Global Variables ###
global topic_xml_dir, saved_models_dir
topic_xml_dir = "/ptvn/SocialMedia/GNews.Topics/"
saved_models_dir = "/ptvn/work/saved_lda_models"
class LDAmodel():
def __init__(self, modelfile=None, dictfile=None, corpusfile=None):
# If we want to initialize from a pre-loaded dictionary file do it here
# Read in the dictionary file
        if dictfile is not None:
            self.dict = corpora.Dictionary.load(dictfile)
        # Read in the model file
        if modelfile is not None:
            self.lda_model = ldamodel.LdaModel.load(modelfile)
        # Read in the corpus file
        if corpusfile is not None:
            self.corpus = corpora.mmcorpus.MmCorpus(corpusfile)
return
def ReadinFiles(self,directory):
# This function reads in a directory of files and gets them into strings in a list for creation of a corpora
files = os.listdir(directory)
self.filepaths = [os.path.join(directory,file)for file in files]
# Now let's read each of them into a list of the strings
documents = []
for file in self.filepaths:
documents.append(open(file,"r").read().replace(",",""))
print "Finished Reading Docs"
self.documents = documents
return documents
def Tokenize(self,documents=None):
# This function tokenizes the documents that have been read into our list of string structure
        if documents is None:
            documents = self.documents
# remove common words and tokenize
stoplist = set('for a of the and to in because'.split())
texts = [[word for word in document.lower().split() if word not in stoplist]
for document in documents]
# remove words that appear only once
all_tokens = sum(texts, [])
tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
texts = [[word for word in text if word not in tokens_once]
for text in texts]
# Finished Tokenizing the files
print "Finished Tokenize"
self.texts = texts
return texts
def CreateDictionary(self,texts=None):
# Check to see if we are saving these as class variables
        if texts is None:
            texts = self.texts
# Creates a dictionary from our stuff
bow_dict = corpora.Dictionary(texts)
# Finished Creating the dictionary
print "Finished Dictionary Creation"
self.dict = bow_dict
return bow_dict
def SaveDictionaryandCorpus(self,dictfile,corpusfile,dict=None,corpus=None):
        if dict is None:
            dict = self.dict
        if corpus is None:
            corpus = self.corpus
# Saves the dictionary to a file for future reference
dict.save(dictfile)
# Save the corpora in Market Matrix format
corpora.mmcorpus.MmCorpus.serialize(corpusfile, corpus)
return
def CreateLDAModel(self,topic_num):
# This function creates an LDA model from our give training set
# Create a bow_corpus of our text elements with the dictionary
bow_corpus = [self.dict.doc2bow(text) for text in self.texts]
lda_model = ldamodel.LdaModel(bow_corpus, id2word=self.dict, num_topics=topic_num)
self.lda_model = lda_model
self.lda_texts = lda_model[self.texts]
#Debug statement
print "Finished LDA model creation"
return
def TrainLDA(self,documents,num_t):
# This function trains a model for LDA
# remove common words and tokenize
stoplist = set('for a of the and to in because'.split())
texts = [[word for word in document.lower().split() if word not in stoplist]
for document in documents]
# remove words that appear only once
all_tokens = sum(texts, [])
tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
texts = [[word for word in text if word not in tokens_once]
for text in texts]
dictionary = corpora.Dictionary(texts)
self.dict = dictionary
corpus = [dictionary.doc2bow(text) for text in texts]
self.corpus = corpus
# I can print out the documents and which is the most probable topics for each doc.
lda = ldamodel.LdaModel(corpus, id2word=dictionary, num_topics=num_t)
corpus_lda = lda[corpus]
self.lda_model = lda
return lda
def ReturnSimilarArticles(self,document,num_returned=5):
# This function returns the indices of the documents that are supposed
# to be the most similar to our query
# This gets the bag of word representation for our vocabulary for this topic
doc_bow = self.dict.doc2bow(document.lower().split())
print doc_bow
# Now let's find the most similar results
doc_lda = self.lda_model[doc_bow]
print doc_lda
# Now let's create the similarity structure for these values
index = similarities.MatrixSimilarity(self.corpus)
sims = index[doc_lda]
sims = sorted(enumerate(sims), key=lambda item: -item[1])
results = sims[0:num_returned]
return results
def SaveModel(self,saved_name,model=None):
        if model is None:
            model = self.lda_model
model.save(saved_name)
return
def run(argv):
try:
opts, args = getopt.getopt(argv,'hct:n:r:')
except getopt.GetoptError:
print "Usage Error: Please see help"
sys.exit(1)
# This section parses the input variables to the script
create_model = False
num_t = 30
return_closest_articles = False
for opt,arg in opts:
if opt in ('-h'):
print 'Help:'
print '-c: Tells the program to create model file'
print '-t: The topic number we want to process'
print '-n: The number of topics to generate from LDA'
print '-r: Tells us to return closes articles (default_number=5)'
elif opt in ('-c'):
create_model = True
elif opt in ('-t'):
topic_string = arg
topic = int(topic_string)
elif opt in ('-n'):
num_t = int(arg)
elif opt in ('-r'):
return_closest_articles = True
document = arg
# Create the files that we want to use
topic_xml_file = os.path.join(topic_xml_dir,topic_string + ".topic")
saved_model_file = os.path.join(saved_models_dir, topic_string + ".lda")
saved_dict_file = os.path.join(saved_models_dir, topic_string + ".dict")
saved_corpus_file = os.path.join(saved_models_dir, topic_string + ".mm")
if create_model:
content = reader.ReadArticlesforTopic(topic_xml_file)
# Read the articles and parsed them
print len(content)
# We don't want to use any duplicate links
used_titles = []
documents = []
for title,desc in content:
if title not in used_titles:
documents.append(desc)
used_titles.append(title)
print len(documents)
# Read the articles
lda_modeler = LDAmodel()
lda = lda_modeler.TrainLDA(documents,num_t)
lda_modeler.SaveModel(saved_model_file)
lda_modeler.SaveDictionaryandCorpus(saved_dict_file,saved_corpus_file)
#print lda.show_topics()
elif return_closest_articles:
lda_modeler = LDAmodel(saved_model_file,saved_dict_file, saved_corpus_file)
indices = lda_modeler.ReturnSimilarArticles(document,5)
# Let's look at the title and description for each article based on the query
content = reader.ReadArticlesforTopic(topic_xml_file)
# We don't want to use any duplicate links
used_titles = []
documents = []
for title,desc in content:
if title not in used_titles:
documents.append(desc)
used_titles.append(title)
#print indices
for index in indices:
print index
print documents[index[0]]
if __name__ == "__main__":
run(sys.argv[1:])
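
# Example invocations (the script filename and topic ID are placeholders):
#   python lda_model.py -c -t 1234 -n 30           # train and save a 30-topic model for topic 1234
#   python lda_model.py -t 1234 -r "query text"    # print the 5 articles closest to the query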
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants.backend.shell.goals import tailor
from pants.backend.shell.goals.tailor import PutativeShellTargetsRequest, classify_source_files
from pants.backend.shell.target_types import (
ShellSourcesGeneratorTarget,
Shunit2TestsGeneratorTarget,
)
from pants.core.goals.tailor import AllOwnedSources, PutativeTarget, PutativeTargets
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
def test_classify_source_files() -> None:
test_files = {"foo/bar/baz_test.sh", "foo/test_bar.sh", "foo/tests.sh", "tests.sh"}
sources_files = {"foo/bar/baz.sh", "foo/bar_baz.sh"}
assert {
Shunit2TestsGeneratorTarget: test_files,
ShellSourcesGeneratorTarget: sources_files,
} == classify_source_files(test_files | sources_files)
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*tailor.rules(),
QueryRule(PutativeTargets, [PutativeShellTargetsRequest, AllOwnedSources]),
],
target_types=[],
)
def test_find_putative_targets(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
f"src/sh/foo/{fp}": ""
for fp in (
"f.sh",
"bar/baz1.sh",
"bar/baz1_test.sh",
"bar/baz2.sh",
"bar/baz2_test.sh",
"bar/baz3.sh",
)
}
)
pts = rule_runner.request(
PutativeTargets,
[
PutativeShellTargetsRequest(("src/sh/foo", "src/sh/foo/bar")),
AllOwnedSources(["src/sh/foo/bar/baz1.sh", "src/sh/foo/bar/baz1_test.sh"]),
],
)
assert (
PutativeTargets(
[
PutativeTarget.for_target_type(
ShellSourcesGeneratorTarget,
path="src/sh/foo",
name=None,
triggering_sources=["f.sh"],
),
PutativeTarget.for_target_type(
ShellSourcesGeneratorTarget,
path="src/sh/foo/bar",
name=None,
triggering_sources=["baz2.sh", "baz3.sh"],
),
PutativeTarget.for_target_type(
Shunit2TestsGeneratorTarget,
path="src/sh/foo/bar",
name="tests",
triggering_sources=["baz2_test.sh"],
),
]
)
== pts
)
def test_find_putative_targets_subset(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
f"src/sh/foo/{fp}": ""
for fp in (
"bar/bar.sh",
"bar/bar_test.sh",
"baz/baz.sh",
"baz/baz_test.sh",
"qux/qux.sh",
)
}
)
pts = rule_runner.request(
PutativeTargets,
[
PutativeShellTargetsRequest(("src/sh/foo/bar", "src/sh/foo/qux")),
AllOwnedSources(["src/sh/foo/bar/bar.sh"]),
],
)
assert (
PutativeTargets(
[
PutativeTarget.for_target_type(
Shunit2TestsGeneratorTarget,
path="src/sh/foo/bar",
name="tests",
triggering_sources=["bar_test.sh"],
),
PutativeTarget.for_target_type(
ShellSourcesGeneratorTarget,
path="src/sh/foo/qux",
name=None,
triggering_sources=["qux.sh"],
),
]
)
== pts
)
|
# -*- coding: utf-8 -*-
from odoo import fields, models, api, _
import logging
_logger = logging.getLogger(__name__)
class register(models.Model):
_name = 'lat.siswa.register'
name = fields.Char(
string='Number',
required=True,
copy=False
)
lat_siswa = fields.Char(string="Name", required=True)
lat_regiterdate = fields.Date(
string='Register Date',
default=fields.Date.today
)
lat_birthdate = fields.Date(
string='Birth Date',
)
lat_sex = fields.Selection([
('male', "Male"),
('female', "Female"),
], string="Sex")
lat_create_receipt = fields.Boolean(string='Create Status')
lat_state = fields.Selection(
[('new', 'New'), ('paid', 'Paid')],
string='Status',
required=True,
readonly=True,
copy=False, default='new')
@api.model
def create(self, vals):
if vals.get('name', 'New') == 'New':
reg = self.env['ir.sequence'].next_by_code('register.number')
vals['name'] = reg or 'New'
return super(register, self).create(vals)
@api.multi
def lat_action_create_receipt(self):
reg = [('lat_register_id', '=', self.id)]
lat_register_id = self.env['lat.siswa.receipt'].search(reg, limit=1)
if not lat_register_id:
receipt = self.env['lat.siswa.receipt'].create(
{
'name': 'New',
'lat_register_id': self.id,
'lat_siswa': self.lat_siswa,
'lat_paiddate': fields.Date.today(),
}
)
return {
'name': _('Document Receipt'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'lat.siswa.receipt',
'type': 'ir.actions.act_window',
'target': 'current',
'res_id': receipt.id,
}
else:
return {
'name': _('Document Receipt'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'lat.siswa.receipt',
'type': 'ir.actions.act_window',
'target': 'current',
'res_id': lat_register_id.id,
}
@api.multi
def lat_action_generate_student(self):
reg = [('lat_register_id', '=', self.id)]
lat_register_id = self.env['lat.siswa.student'].search(reg, limit=1)
if not lat_register_id:
receipt = self.env['lat.siswa.student'].create(
{
'name': 'New',
'lat_register_id': self.id,
'lat_siswa': self.lat_siswa,
'lat_paiddate': fields.Date.today(),
'lat_birthdate': self.lat_birthdate,
'lat_sex': self.lat_sex,
}
)
return {
'name': _('Document Student'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'lat.siswa.student',
'type': 'ir.actions.act_window',
'target': 'current',
'res_id': receipt.id,
}
else:
return {
'name': _('Document Receipt'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'lat.siswa.student',
'type': 'ir.actions.act_window',
'target': 'current',
'res_id': lat_register_id.id,
}
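
# Note (an assumption about the surrounding module, not shown here): the
# 'register.number' sequence used in create() must be declared as an
# ir.sequence record in the module's data XML; otherwise next_by_code()
# returns False and every record keeps the fallback name 'New'.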
|
"""App related constants."""
import re
DNS_RECORD_TYPES = [
("spf", "SPF"),
("dkim", "DKIM"),
("dmarc", "DMARC"),
("autoconfig", "Autoconfig"),
("autodiscover", "Autodiscover"),
]
SPF_MECHANISMS = ["ip4", "ip6", "a", "mx", "ptr", "exists", "include"]
DMARC_URI_REGEX = re.compile(r"^mailto:(.+)(!\w+)?")
DMARC_TAGS = {
"adkim": {"values": ["r", "s"]},
"aspf": {"values": ["r", "s"]},
"fo": {"values": ["0", "1", "d", "s"]},
"p": {"values": ["none", "quarantine", "reject"]},
"pct": {"type": "int", "min_value": 0, "max_value": 100},
"rf": {"type": "list", "values": ["afrf"]},
"ri": {"type": "int"},
"rua": {"type": "list", "regex": DMARC_URI_REGEX},
"ruf": {"type": "list", "regex": DMARC_URI_REGEX},
"sp": {"values": ["none", "quarantine", "reject"]}
}
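
# Hedged sketch (not part of the app): validating a single DMARC tag
# value against the DMARC_TAGS spec above. List-typed tags ("rf", "rua",
# "ruf") are split on commas and each element is checked individually.
def validate_dmarc_tag(name, value):
    spec = DMARC_TAGS.get(name)
    if spec is None:
        return False
    if spec.get("type") == "list":
        parts = [part.strip() for part in value.split(",")]
    else:
        parts = [value]
    for part in parts:
        if spec.get("type") == "int":
            try:
                number = int(part)
            except ValueError:
                return False
            if "min_value" in spec and number < spec["min_value"]:
                return False
            if "max_value" in spec and number > spec["max_value"]:
                return False
        elif "regex" in spec:
            if spec["regex"].match(part) is None:
                return False
        elif "values" in spec:
            if part not in spec["values"]:
                return False
    return True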
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN Agent with logged replay buffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from batch_rl.baselines.replay_memory import logged_replay_buffer
from dopamine.agents.dqn import dqn_agent
import gin
@gin.configurable
class LoggedDQNAgent(dqn_agent.DQNAgent):
"""An implementation of the DQN agent with replay buffer logging to disk."""
def __init__(self, sess, num_actions, replay_log_dir, **kwargs):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
replay_log_dir: str, log Directory to save the replay buffer to disk
periodically.
**kwargs: Arbitrary keyword arguments.
"""
assert replay_log_dir is not None
# Set replay_log_dir before calling parent's initializer
self._replay_log_dir = replay_log_dir
super(LoggedDQNAgent, self).__init__(sess, num_actions, **kwargs)
def log_final_buffer(self):
self._replay.memory.log_final_buffer()
def _build_replay_buffer(self, use_staging):
"""Creates the replay buffer used by the agent.
Args:
use_staging: bool, if True, uses a staging area to prefetch data for
faster training.
Returns:
A WrapperReplayBuffer object.
"""
return logged_replay_buffer.WrappedLoggedReplayBuffer(
log_dir=self._replay_log_dir,
observation_shape=self.observation_shape,
stack_size=self.stack_size,
use_staging=use_staging,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype.as_numpy_dtype)
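# Hedged usage sketch (not part of the original file): the agent is normally
# constructed by Dopamine's run_experiment via gin bindings; building one by
# hand might look like this, assuming Dopamine's TF1 graph-mode runtime and
# its default Atari-sized observations.
def _example_build_agent(replay_log_dir='/tmp/replay_logs'):
    import tensorflow.compat.v1 as tf
    sess = tf.Session()
    agent = LoggedDQNAgent(sess, num_actions=4, replay_log_dir=replay_log_dir)
    sess.run(tf.global_variables_initializer())
    return agent  # call agent.log_final_buffer() at the end of training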
|
from google.appengine.ext import db
class newchat(db.Model):
u1=db.StringProperty()
u2=db.StringProperty()
class bhaat(db.Model):
name=db.StringProperty()
thistime=db.StringProperty()
detail=db.StringProperty()
link=db.ReferenceProperty(newchat)
class auths(db.Model):
user=db.UserProperty()
token=db.StringProperty()
details=db.StringProperty()
code=db.StringProperty()
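# Hedged usage sketch (not in the original file): creating a chat and a linked
# message with the classic App Engine db API; runs only inside the App Engine
# runtime (or a datastore testbed stub).
def _example_create_chat():
    chat = newchat(u1='alice', u2='bob')
    chat.put()
    note = bhaat(name='alice', thistime='12:00', detail='hello', link=chat)
    note.put()
    return note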
|
#!/usr/bin/env python3
import os
import argparse
import put.ui.file_functions
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('dir', nargs='?', default=os.getcwd())
args = parser.parse_args()
return args
def main():
args = parse_arguments()
put.ui.file_functions.FileFunctions(args.dir).main()
if __name__ == '__main__':
main()
|
items = ('45', '67', '56', '78')
a = 'highest score:'
print(a, *items, sep=',')
for i, j in enumerate(items):
    print(i + 1, j)
b = int(input('new high score:'))
print(b)
|
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import TimeDistributed
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
import numpy as np
from nltk.tokenize import word_tokenize
import os
file_path = 'code'
codes = []
comments = []
vocab = set()
comments_vector = {}
# vocabulary for code tokens: the Embedding layer needs integer indices,
# not raw token strings; index 0 is reserved for padding
code_vocab = {}
def token_id(tok):
    if tok not in code_vocab:
        code_vocab[tok] = len(code_vocab) + 1
    return code_vocab[tok]
for root, dirs, files in os.walk(file_path):
    for fl in files:
        code = open('code/' + fl).read()
        tokens = word_tokenize(code)[:1000]
        code_tensor = np.asarray([token_id(t) for t in tokens])
        print(len(code_tensor), "--->")
        if len(code_tensor) < 1000:
            # pad with the reserved 0 index up to the fixed length of 1000
            code_tensor = np.pad(code_tensor, (0, 1000 - len(code_tensor)), 'constant')
        print(len(code_tensor))
        codes.append(code_tensor)
        comment = open('comments/' + fl).read()
        for c in comment.split():
            vocab.add(c)
        comments.append(comment)
# print(comments)
print(np.asarray(codes).shape)
i = 0
for item in vocab:
    comments_vector[item] = i
    i = i + 1
comments_tensor = []
for item in comments:
    comment_tensor = []
    for word in item.split()[:1000]:
        comment = np.zeros(len(vocab))
        comment[comments_vector[word]] = 1
        comment_tensor.append(comment)
    # pad the target with all-zero rows so every sample has the same number
    # of time steps (1000) as the model output
    while len(comment_tensor) < 1000:
        comment_tensor.append(np.zeros(len(vocab)))
    comments_tensor.append(comment_tensor)
print(np.asarray(comments_tensor).shape)
model = Sequential()
model.add(Embedding(len(code_vocab) + 1, 100, input_length=1000))
model.add(LSTM(500, return_sequences=True))
model.add(LSTM(500, return_sequences=True))
model.add(TimeDistributed(Dense(len(vocab), activation='softmax')))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
print(model.summary())
model.fit(np.asarray(codes), np.asarray(comments_tensor), epochs=2, verbose=1)
|
from dataclasses import dataclass
from sqlite3 import Connection, Row
from typing import List, Optional
from uuid import uuid4
@dataclass
class User:
id: str
name: str
is_system: bool
discord_id: Optional[int]
telegram_id: Optional[int]
vk_id: Optional[int]
bill_from: int
token_credit: int
bill_multiplier: int
class UserRepository:
    def __init__(self, con: Connection) -> None:
        self._con = con
        # rows are read by column name in get_all, so a Row factory is required
        self._con.row_factory = Row
def get_all(self) -> List[User]:
records = self._con.cursor().execute('SELECT * FROM users').fetchall()
return [
User(
id=r['id'],
name=r['name'],
is_system=bool(r['is_system']),
discord_id=r['discord_id'],
telegram_id=r['telegram_id'],
vk_id=r['vk_id'],
bill_from=r['bill_from'],
token_credit=r['token_credit'],
bill_multiplier=r['bill_multiplier']
)
for r in records
]
    def create(self, user: User) -> User:
        id = uuid4().hex
        # is_system is always stored as 0: users created through the repository
        # can never be system users
        record = (id, user.name, 0, user.discord_id, user.telegram_id, user.vk_id, user.bill_from, user.token_credit, user.bill_multiplier)
with self._con as con:
con.cursor().execute('INSERT INTO users (id, name, is_system, discord_id, telegram_id, vk_id, bill_from, token_credit, bill_multiplier) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)', record)
return User(
id=id,
name=user.name,
is_system=False,
discord_id=user.discord_id,
telegram_id=user.telegram_id,
vk_id=user.vk_id,
bill_from=user.bill_from,
token_credit=user.token_credit,
bill_multiplier=user.bill_multiplier
)
def update(self, user: User):
record = (user.name, user.discord_id, user.telegram_id, user.vk_id, user.bill_from, user.token_credit, user.bill_multiplier, user.id)
with self._con as con:
con.cursor().execute('''
UPDATE users
SET
name = ?,
discord_id = ?,
telegram_id = ?,
vk_id = ?,
bill_from = ?,
token_credit = ?,
bill_multiplier = ?
WHERE id = ? AND is_system = 0
''', record)
def delete(self, user_id: str):
with self._con as con:
con.cursor().execute('DELETE FROM users WHERE id = ? AND is_system = 0', (user_id,))
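# Hedged usage sketch (not part of the original module): wiring the repository
# to an in-memory database; the CREATE TABLE schema is inferred from the
# column names above and is an assumption, not the project's real migration.
if __name__ == '__main__':
    import sqlite3
    con = sqlite3.connect(':memory:')
    con.execute('''CREATE TABLE users (
        id TEXT PRIMARY KEY, name TEXT, is_system INTEGER,
        discord_id INTEGER, telegram_id INTEGER, vk_id INTEGER,
        bill_from INTEGER, token_credit INTEGER, bill_multiplier INTEGER)''')
    repo = UserRepository(con)
    repo.create(User(id='', name='alice', is_system=False,
                     discord_id=None, telegram_id=None, vk_id=None,
                     bill_from=0, token_credit=0, bill_multiplier=1))
    print(repo.get_all())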
|
import random
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
_code_git_version="79ec8fe5fe8ec95d3ac6026eb1e502bbada7c7ba"
_code_repository="https://github.com/plops/cl-py-generator/tree/master/example/29_ondrejs_challenge/source/run_00_start.py"
_code_generation_time="18:16:02 of Saturday, 2021-04-03 (GMT+1)"
class PongPaddle(Widget):
    score = NumericProperty(0)
    def bounce_ball(self, ball):
        if self.collide_widget(ball):
            vx, vy = ball.v
            offset = (ball.center_y - self.center_y) / (0.5 * self.height)
            bounced = Vector(-vx, vy)
            vel = bounced * 1.1
            ball.v = vel.x, vel.y + offset
class PongBall(Widget):
    vx = NumericProperty(0)
    vy = NumericProperty(0)
    v = ReferenceListProperty(vx, vy)
    def move(self):
        self.pos = Vector(*self.v) + self.pos
class PongGame(Widget):
    ball = ObjectProperty(None)
    player1 = ObjectProperty(None)
    player2 = ObjectProperty(None)
    def serve_ball(self, vel=(4, 0)):
        self.ball.center = self.center
        self.ball.v = vel
    def update(self, dt):
        self.ball.move()
        self.player1.bounce_ball(self.ball)
        self.player2.bounce_ball(self.ball)
        # bounce off the top and bottom edges
        if self.ball.y < self.y or self.top < self.ball.top:
            self.ball.vy = -self.ball.vy
        # score when the ball leaves the field on either side, then re-serve
        if self.ball.x < self.x:
            self.player2.score += 1
            self.serve_ball(vel=(4, 0))
        if self.width < self.ball.x:
            self.player1.score += 1
            self.serve_ball(vel=(-4, 0))
    def on_touch_move(self, touch):
        if touch.x < self.width / 3:
            self.player1.center_y = touch.y
        if self.width - self.width / 3 < touch.x:
            self.player2.center_y = touch.y
class PongApp(App):
    game = None
    def build(self):
        self.game = PongGame()
        self.game.serve_ball()
        Clock.schedule_interval(self.game.update, 1.0 / 60)
        return self.game
if __name__ == "__main__":
    app = PongApp()
    app.run()
|
"""
*********************************************************************
This file is part of:
The Acorn Project
https://wwww.twistedfields.com/research
*********************************************************************
Copyright (c) 2019-2021 Taylor Alexander, Twisted Fields LLC
Copyright (c) 2021 The Acorn Project contributors (cf. AUTHORS.md).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*********************************************************************
"""
import redis
import time
import pickle
from scipy.interpolate import CubicSpline
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import splprep, splev
import spline_lib
_SMOOTH_MULTIPLIER = 0.00000000001
r = redis.Redis(
host='localhost',
port=6379)
# r.set('foo', 'bar')
# def smooth_track(gps_coords, smooth_factor, num_points):
# """ Calculated a spline based on a gps track.
# Args:
# gps_coords: A list of dict objects with 'lat' and 'lon' keys.
# smooth_factor: Any float, but recommend 1-10.
# num_points: The number of points
# Returns: Four lists. Smoothed lat and lon coords and original lat and lon
# coords.
# """
# np_points = np.empty((len(gps_coords), 2))
# orig_lats = []
# orig_lons = []
# for idx in range(len(gps_coords)):
# line = gps_coords[idx]
# np_points[idx] = ((line['lat'], line['lon']))
# orig_lats.append(line['lat'])
# orig_lons.append(line['lon'])
#
# tck, u = splprep(np_points.T, u=None, s=smooth_factor * _SMOOTH_MULTIPLIER, per=0, t=10)
# u_new = np.linspace(u.min(), u.max(), num_points)
# lat_smooth, lon_smooth = splev(u_new, tck, der=0)
# return lat_smooth, lon_smooth, orig_lats, orig_lons
for key in r.scan_iter():
print(key)
if 'gpspath' in str(key) and 'long_strawberry_parking2' in str(key):
print(key)
# # newkey = str(key).replace('-key\'',':key')
# # newkey = newkey.replace('b\'','')
# # print(newkey)
# # # #print(bytes(newkey, encoding='ascii'))
# # # # #newkey = "twistedfields:gpspath:{}-key".format(str(key))
# r.delete(key)
gps_coords = pickle.loads(r.get(key))
spline = spline_lib.GpsSpline(gps_coords, smooth_factor=1, num_points=500)
#lat_smooth, lon_smooth, orig_x, orig_y = smooth_track(gps_coords, smooth_factor=10, num_points=200)
#print(list(zip(lat_smooth, lon_smooth)))
#plt.plot(dat[:,0], dat[:,1],'ro')
lat_smooth = []
lon_smooth = []
for p in spline.points:
lat_smooth.append(p.lat)
lon_smooth.append(p.lon)
orig_x = []
orig_y = []
for p in gps_coords:
orig_x.append(p['lat'])
orig_y.append(p['lon'])
print(p['lat'], p['lon'])
        # earlier candidate points kept for reference; only the last assignment is used
        #point_of_interest = {'lat': 37.35409860533507, 'lon': -122.33325479993744}
        #point_of_interest = {'lat': 37.35398195436689, 'lon': -122.33308312401907}
        #point_of_interest = {'lat': 37.3540842425, 'lon': -122.3333173125}
        point_of_interest = {'lat': 37.35402, 'lon': -122.3334}
#37.3540842425, -122.3333173125
start = time.time()
for _ in range(1000):
closeu = spline.closestUOnSpline(point_of_interest)
time1 = time.time()-start
print("closeu {}, time {}".format(closeu, time1))
start = time.time()
for _ in range(1000):
closeu2 = spline.closestUOnSplinePoints(point_of_interest)
time2 = time.time()-start
print("closeu2 {}, time {}".format(closeu2, time2))
coord = spline.coordAtU(closeu)
coord2 = spline.coordAtU(closeu2)
mag = spline.slopeRadiansAtU(closeu)
import math
mag = math.degrees(mag)
#mag = mag[0]/mag[1] * 90
print("closeu {}, coord {}, mag {}".format(closeu, coord, mag))
plt.plot(orig_x, orig_y, 'ro')
plt.plot(lat_smooth, lon_smooth, 'bo')
plt.plot(point_of_interest['lat'],point_of_interest['lon'], 'go', markersize=20)
plt.plot(coord.lat, coord.lon, 'mo', markersize=20)
plt.plot(coord2.lat, coord2.lon, 'yo', markersize=20)
plt.title(str(key))
plt.show()
# print(value)
# point_data = []
# lats = []
# lons = []
# utm_x = []
# utm_y = []
# # try:
#
#
# for line in value:
# lats.append(line['lat'])
# lons.append(line['lon'])
# point_data.append((line['lat'], line['lon']))
# utm_coord = utm.from_latlon(line['lat'], line['lon'])
# utm_x.append(utm_coord[0])
# utm_x.append(utm_coord[1])
# x, y = np.array(lats), np.array(lons)
# #simple_coords = rdp(point_data, epsilon=1e-4)
# #print("{} points reduced to {}!".format(coords.shape[0], simple_coords.shape[0]))
# #plt.plot(simple_coords[:, 0], simple_coords[:, 1], 'ro')
# #plt.show()
#
# smooth_factor = 1
#
#
#
# dat = np.array([(x,y) for x,y in zip(lats, lons)])
# #dat = np.array([(x,y) for x,y in zip(coords.lon[::18], coords.lat[::18])])
# tck, u = splprep(dat.T, u=None, s=smooth_factor * _SMOOTH_MULTIPLIER, per=0, t=10)
# u_new = np.linspace(u.min(), u.max(), 200)
# x_new, y_new = splev(u_new, tck, der=0)
# #print(x_new)
# print(point_data)
# plt.plot(x, y, 'ro', ms=5)
# cs = CubicSpline(x, y)
# xs = 2 * np.pi * np.linspace(0, 1, 100)
# ax.plot(xs, cs(xs), label="S")
# plt.show()
# spl = UnivariateSpline(x, y)
# xs = np.linspace(-3, 3, 1000)
# plt.plot(xs, spl(xs), 'g', lw=3)
# except:
# print('exception unpickling key {}'.format(key))
#r.delete(key)
# while True:
# value = r.get('foo')
# print(value)
# time.sleep(0.1)
|
# -*- coding: utf-8 -*-
import numpy as np
import re
import labsql
from sklearn.cluster import KMeans
import collections
reBODY = r'<body.*?>([\s\S]*?)<\/body>'
reCOMM = r'<!--.*?-->'
reTRIM = r'<{0}.*?>([\s\S]*?)<\/{0}>'
reTAG = r'<[\s\S]*?>|[ \t\r\f\v]'
reIMG = re.compile(r'<img[\s\S]*?src=[\'|"]([\s\S]*?)[\'|"][\s\S]*?>')
# To extract images that appear in the main text area, keep the content of <img> tags when stripping tags in the first step:
def deal_images(body):
return reIMG.sub(r'{\1}', body)
# Strip all tags, including style and JS script content, but keep the original newline characters \n:
def deal_tags(body):
body = re.sub(reCOMM, "", body)
body = re.sub(reTRIM.format("script"), "", re.sub(reTRIM.format("style"), "", body))
# self.body = re.sub(r"[\n]+","\n", re.sub(reTAG, "", self.body))
body = re.sub(reTAG, "", body)
body = re.sub(r'[\u3000\u2003\xa0]', "", body)
return body
def special_fun(sentence_list):
    read_list = []
    corpus_dict = {}
    corpus_range = []
    # read each line
    for id, sentence in enumerate(sentence_list):
        # use a regex to check whether the sentence contains Chinese
        ident_word = re.search("[\u4e00-\u9fa5]", sentence)
        # skip the line if it is empty or contains no Chinese
        if sentence == '' or ident_word is None: continue
        # when the line contains Chinese, capture the following 5 lines
        # start of the five-line window
        head = id
        # end of the five-line window
        bottom = id + 5
        total_length = 0
        # candidate sentences
        candidate_sentence = []
        # skip if the outer loop's id is already in read_list
        if id in read_list: continue
        # add the ids from head to bottom to read_list
        for read_id in range(head, bottom): read_list.append(read_id)
        # read the sentences from head to bottom
        for capture in sentence_list[head:bottom]:
            # keep only the Chinese characters and the punctuation marks ,。、
            words = re.sub('[^\u4e00-\u9fa5,。、]', '', capture)
            # add the sentence length to total_length
            total_length += len(words)
            # append the sentence to candidate_sentence
            candidate_sentence.append(words)
        # if total_length exceeds the threshold, add head and candidate_sentence to corpus_dict
        if total_length >= 110:
            # record in corpus_range the head of the first accepted candidate_sentence
            if not corpus_range: corpus_range.append(head)
            corpus_dict[head] = candidate_sentence
            # corpus_range.append(bottom)
    # print(corpus_range)
keys_list = list(corpus_dict.keys())
output = {}
if len(keys_list) <= 3:
output = corpus_dict
elif keys_list[1] - keys_list[0] >= 100:
output = {keys_list[0]: corpus_dict[keys_list[0]]}
else:
X = np.array(keys_list).reshape(len(keys_list), 1)
kmeans = KMeans(n_clusters=3, random_state=0).fit(X)
label_list = list(kmeans.labels_)
most = collections.Counter(label_list).most_common()
        # occurrence count of the most frequent cluster
        top = most[0][1]
        # the most frequent cluster label
        cluster = most[0][0]
        # if its count differs from the other two clusters, map the cluster's
        # positions in the label list back to the corresponding keys
        if top != most[1][1] and top != most[2][1]:
for i, item in enumerate(corpus_dict):
if label_list[i] == cluster:
output[keys_list[i]] = corpus_dict[keys_list[i]]
# self.main_content_range[ID] = corpus_range
content = output.values()
out_list = []
for out_data in content:
out_list += list(filter(None, out_data))
out1 = ', '.join(out_list)
if not out1: return 'continue'
return out1
class Extractor:
def __init__(self, block_size=3, image=False):
# where id = 9 or id = 88 or id = 125 or id = 175 or id = 332 or id = 635 or id = 724
self.conn = labsql.LabSQL('172.168.1.36', 'sohu', 'sa', 'scucc')
self.data = self.conn.fetch(
"select * from dis where id = 9 or id = 88 or id = 125 or id = 175 or id = 332 or id = 635 or id = 724") # where id between '110' and '120'
self.blockSize = block_size
self.saveImage = image
    # Split the page content into lines; define line block i as the total text length of lines [i, i+blockSize], giving a distribution of block length over line number:
def deal_blocks(self, body):
        # split the article into lines
sentence_list = body.split("\n")
# print(sentence_list)
self.textLens = [len(text) for text in sentence_list]
self.cblocks = [0] * (len(sentence_list) - self.blockSize - 1)
lines = len(sentence_list)
for i in range(self.blockSize):
self.cblocks = list(map(lambda x, y: x + y, self.textLens[i: lines - 1 - self.blockSize + i], self.cblocks))
print(lines)
if not self.cblocks: return special_fun(sentence_list)
total_length = len(sentence_list)
head_limit_edge = round(total_length * 0.3)
tail_limit_edge = round(total_length * 0.7)
        # candidates: the three longest line blocks
        top2_maxTextLen = list(reversed(sorted(self.textLens)))[:3]
        # drop extreme values, then pick the block with the most characters
candidate_top = [candidate for candidate in top2_maxTextLen if candidate <= 1000]
candidate_index = [self.textLens.index(index) for index in candidate_top]
# print(candidate_index)
"""
method 1
"""
if len(candidate_index) > 1:
for check in candidate_index:
if head_limit_edge <= check <= tail_limit_edge:
maxTextLen = check
if abs(check - candidate_index[-1]) <= 10:
maxTextLen = candidate_index[-1]
else:
if not sentence_list[candidate_index[0]:candidate_index[-1]]: return special_fun(sentence_list)
return sentence_list[candidate_index[0]:candidate_index[-1]], [min(candidate_index),
max(candidate_index)]
self.start = self.end = maxTextLen
while self.start > 0 and self.cblocks[self.start] > min(self.textLens):
self.start -= 1
while self.end < lines - self.blockSize and self.cblocks[self.end] > min(self.textLens):
self.end += 1
if not "".join(sentence_list[self.start:self.end]): return special_fun(sentence_list)
return "".join(sentence_list[self.start:self.end]), [self.start, self.end]
else:
if not sentence_list[0]: return special_fun(sentence_list)
return sentence_list[0], [0, 0]
    # The main text lies in the longest line block; extend both ends until the block length drops to 0:
def get_context(self):
for c, sql_fetch_data in enumerate(self.data):
ID, corpus, url = sql_fetch_data
body = re.findall(reBODY, corpus)
if not body: continue
body = body[0]
if self.saveImage:
body = deal_images(body)
body = deal_tags(body)
# print(body)
# output, range_list = self.deal_blocks(body)
output = self.deal_blocks(body)
if output == 'continue': continue
print(output)
# print(range_list)
if __name__ == '__main__':
ext = Extractor()
ext.get_context()
# print(out)
|
import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flask import session
import auth.constants as constants
from app import create_app
from database.models import setup_db, db, Month, User, UserHistory, Secret
# Travel Cockpit endpoints test class
class TravelCockpitTestCase(unittest.TestCase):
def setUp(self):
# Auth0 JWTs: User, Manager, Director
self.test_user = os.environ['JWT_USER']
self.test_manager = os.environ['JWT_MANAGER']
self.test_director = os.environ['JWT_DIRECTOR']
# Define test variable and initialize app
self.app = create_app()
self.client = self.app.test_client
self.database_name = "travel_cockpit_test"
self.database_path = "postgres://{}:{}@{}/{}".format(
'postgres', 'secret', 'localhost:5432', self.database_name)
setup_db(self.app, self.database_path)
# Binds the app to the current context
with self.app.app_context():
self.db = SQLAlchemy()
self.db.init_app(self.app)
# create all tables
self.db.create_all()
def tearDown(self):
pass
"""Endpoint testing (without login/logout)"""
def test_start_page(self):
res = self.client().get('/')
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('https://www.reise-klima.de/urlaub/', str(data))
def test_404_start_page(self):
res = self.client().get('/_')
self.assertEqual(res.status_code, 404)
def test_vision(self):
res = self.client().get('/vision')
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('Vision', str(data))
def test_404_vision(self):
res = self.client().get('/vision_')
self.assertEqual(res.status_code, 404)
def test_contact(self):
res = self.client().get('/contact')
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('Contact', str(data))
def test_404_contact(self):
res = self.client().get('/contact_')
self.assertEqual(res.status_code, 404)
"""Requires AUTH0, w/o RBAC -> every user"""
def test_get_home(self):
res = self.client().get(
'/home',
headers={'Authorization': 'Bearer '+self.test_user}
)
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('https://www.reise-klima.de/urlaub/', str(data))
def test_401_get_home(self):
res = self.client().get('/home', headers={'Authorization': 'Bearer '})
data = res.data
self.assertEqual(res.status_code, 401)
self.assertNotIn('https://www.reise-klima.de/urlaub/', str(data))
def test_post_home(self):
res = self.client().post(
'/home',
headers={'Authorization': 'Bearer '+self.test_user},
data={"destination": "Spain"}
)
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('spain', str(data))
# Invalid header
    def test_401_post_home(self):
res = self.client().post('/home', headers={'Authorization': 'Bearer '},
data={"destination": "Spain"})
data = res.data
self.assertEqual(res.status_code, 401)
self.assertNotIn('spain', str(data))
def test_get_history(self):
res = self.client().get(
'/history',
headers={'Authorization': 'Bearer '+self.test_user}
)
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('Spain', str(data))
# Invalid header
def test_401_get_history(self):
res = self.client().get(
'/history',
headers={'Authorization': 'Bearer '}
)
data = res.data
self.assertEqual(res.status_code, 401)
self.assertNotIn('Spain', str(data))
    # Manager
    def test_get_history_all_manager(self):
res = self.client().get(
'/history-all',
headers={'Authorization': 'Bearer '+self.test_manager}
)
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('Germany', str(data))
    # Director
    def test_get_history_all_director(self):
res = self.client().get(
'/history-all',
headers={'Authorization': 'Bearer '+self.test_director}
)
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('Germany', str(data))
# Unauthorized
def test_403_get_history_all(self):
res = self.client().get(
'/history-all',
headers={'Authorization': 'Bearer '+self.test_user}
)
data = res.data
self.assertEqual(res.status_code, 403)
self.assertNotIn('Secret', str(data))
"""CRUD API endpoint testing (Secret Model)"""
# CREATE
# Before POST, test get form html page
def test_create_blog(self):
res = self.client().get(
'/blog/create',
headers={'Authorization': 'Bearer '+self.test_manager}
)
self.assertEqual(res.status_code, 200)
def test_404_create_blog(self):
res = self.client().get(
'/blog/create/1',
headers={'Authorization': 'Bearer '+self.test_manager}
)
self.assertEqual(res.status_code, 404)
    # Unauthorized
    def test_403_get_create_blog(self):
res = self.client().get(
'/blog/create',
headers={'Authorization': 'Bearer '+self.test_user}
)
self.assertEqual(res.status_code, 403)
def test_create_blog_manager(self):
res = self.client().post(
'/blog/create',
headers={'Authorization': 'Bearer '+self.test_manager},
data={
"title": "Valpolicella",
"why1": "Amarone",
"why2": "Superb food",
"why3": "Lake Garda",
"text": "Wine, food, lake & mountains!",
"link": "Verona"
})
self.assertEqual(res.status_code, 302)
    # Unauthorized
    def test_403_post_create_blog(self):
res = self.client().post(
'/blog/create',
headers={'Authorization': 'Bearer '+self.test_user},
data={
"title": "Valpolicella",
"why1": "Amarone",
"why2": "Superb food",
"why3": "Lake Garda",
"text": "Wine, food, lake & mountains!",
"link": "Verona"
})
self.assertEqual(res.status_code, 403)
def test_404_create_blog_manager(self):
res = self.client().post(
'/blog/create/1',
headers={'Authorization': 'Bearer '+self.test_manager},
data={
"title": "Valpolicella",
"why1": "Amarone",
"why2": "Superb food",
"why3": "Lake Garda",
"text": "Wine, food, lake & mountains!",
"link": "Verona"
})
self.assertEqual(res.status_code, 404)
def test_create_blog_director(self):
res = self.client().post(
'/blog/create',
headers={'Authorization': 'Bearer '+self.test_director},
data={
"title": "Parma",
"why1": "Parmegiano",
"text": "Cheeeeeeeese",
"link": "Parma"
})
self.assertEqual(res.status_code, 302)
# READ
# User
def test_get_blog_user(self):
res = self.client().get(
'/blog/user',
headers={'Authorization': 'Bearer '+self.test_user}
)
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('Valpolicella', str(data))
def test_401_get_blog_user(self):
res = self.client().get(
'/blog/user',
headers={'Authorization': 'Bearer '}
)
data = res.data
self.assertEqual(res.status_code, 401)
self.assertNotIn('Valpolicella', str(data))
def test_404_get_blog_user(self):
res = self.client().get(
'/blog/user/1',
headers={'Authorization': 'Bearer '+self.test_user}
)
data = res.data
self.assertEqual(res.status_code, 404)
self.assertNotIn('Valpolicella', str(data))
def test_get_blog_manager(self):
res = self.client().get(
'/blog',
headers={'Authorization': 'Bearer '+self.test_manager}
)
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('Valpolicella', str(data))
def test_get_blog_director(self):
res = self.client().get(
'/blog',
headers={'Authorization': 'Bearer '+self.test_director}
)
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('Valpolicella', str(data))
# Unauthorized
def test_403_get_blog(self):
res = self.client().get(
'/blog',
headers={'Authorization': 'Bearer '+self.test_user}
)
data = res.data
self.assertEqual(res.status_code, 403)
self.assertNotIn('Valpolicella', str(data))
def test_404_get_blog_manager(self):
res = self.client().get(
'/blog/1',
headers={'Authorization': 'Bearer '+self.test_manager}
)
data = res.data
self.assertEqual(res.status_code, 404)
self.assertNotIn('Valpolicella', str(data))
# UPDATE
def test_get_edit_blog_director(self):
res = self.client().get(
'/blog/69/edit',
headers={'Authorization': 'Bearer '+self.test_director}
)
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('Parma', str(data))
def test_404_get_edit_blog_director(self):
res = self.client().get(
'/blog/68/edit',
headers={'Authorization': 'Bearer '+self.test_director}
)
data = res.data
self.assertEqual(res.status_code, 404)
self.assertNotIn('Parma', str(data))
def test_get_edit_blog_manager(self):
res = self.client().get(
'/blog/70/edit-own',
headers={'Authorization': 'Bearer '+self.test_manager}
)
data = res.data
self.assertEqual(res.status_code, 200)
self.assertIn('Valpolicella', str(data))
def test_404_get_edit_blog_manager(self):
res = self.client().get(
'/blog/68/edit-own',
headers={'Authorization': 'Bearer '+self.test_manager}
)
data = res.data
self.assertEqual(res.status_code, 404)
self.assertNotIn('Valpolicella', str(data))
def test_403_get_edit_blog_manager(self):
res = self.client().get(
'/blog/69/edit-own',
headers={'Authorization': 'Bearer '+self.test_manager}
)
data = res.data
self.assertEqual(res.status_code, 403)
self.assertNotIn('Valpolicella', str(data))
def test_patch_edit_blog_director(self):
res = self.client().patch(
'/blog/69/edit/submission',
headers={'Authorization': 'Bearer '+self.test_director},
json={
"title": "Parma",
"why1": "Parmegiano",
"why2": "Emilia Romagnia",
"text": "Home of the best cheese!",
"link": "Parma"
})
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
def test_405_patch_edit_blog_director(self):
res = self.client().patch(
'/blog/68/edit/submission',
headers={'Authorization': 'Bearer '+self.test_director},
json={
"title": "Parma",
"why1": "Parmegiano",
"why2": "Emilia Romagnia",
"text": "Home of the best cheese!",
"link": "Parma"
})
data = json.loads(res.data)
self.assertEqual(res.status_code, 405)
self.assertEqual(data['success'], False)
def test_patch_edit_blog_manager(self):
res = self.client().patch(
'/blog/70/edit-own/submission',
headers={'Authorization': 'Bearer '+self.test_manager},
json={
"title": "Valpolicella",
"why1": "Amarone wine",
"why2": "Superb low priced food",
"why3": "Lake Garda",
"text": "Wine, food, lake & mountains :)",
"link": "Verona"
})
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
def test_405_patch_edit_blog_manager(self):
res = self.client().patch(
'/blog/68/edit-own/submission',
headers={'Authorization': 'Bearer '+self.test_manager},
json={
"title": "Valpolicella",
"why1": "Amarone wine",
"why2": "Superb low priced food",
"why3": "Lake Garda",
"text": "Wine, food, lake & mountains :)",
"link": "Verona"
})
data = json.loads(res.data)
self.assertEqual(res.status_code, 405)
self.assertEqual(data['success'], False)
# Test if Manager can patch Director's post -> Should not be possible
def test_not_own_patch_edit_blog_manager(self):
res = self.client().patch(
'/blog/69/edit-own/submission',
headers={'Authorization': 'Bearer '+self.test_manager},
json={
"title": "Valpolicella",
"why1": "Amarone wine",
"why2": "Superb low priced food",
"why3": "Lake Garda",
"text": "Wine, food, lake & mountains :)",
"link": "Verona"
})
data = json.loads(res.data)
self.assertEqual(res.status_code, 405)
self.assertEqual(data['success'], False)
# DELETE
def test_delete_blog_director(self):
res = self.client().delete(
'/blog/85/delete',
headers={'Authorization': 'Bearer '+self.test_director}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
def test_422_delete_blog_director(self):
res = self.client().delete(
'/blog/68/delete',
headers={'Authorization': 'Bearer '+self.test_director}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 422)
self.assertEqual(data['success'], False)
# Unauthorized
def test_403_delete_blog_manager(self):
res = self.client().delete(
'/blog/69/delete',
headers={'Authorization': 'Bearer '+self.test_manager}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 403)
self.assertEqual(data['success'], False)
def test_delete_blog_manager(self):
res = self.client().delete(
'/blog/86/delete-own',
headers={'Authorization': 'Bearer '+self.test_manager}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
    def test_403_delete_missing_blog_manager(self):
res = self.client().delete(
'/blog/68/delete',
headers={'Authorization': 'Bearer '+self.test_manager}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 403)
self.assertEqual(data['success'], False)
    # Test if Manager can delete Director's blog -> should not be possible
def test_not_own_delete_blog_manager(self):
res = self.client().delete(
'/blog/69/delete',
headers={'Authorization': 'Bearer '+self.test_manager}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 403)
self.assertEqual(data['success'], False)
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main()
|
import djcelery
import datetime
djcelery.setup_loader()
CELERY_TIMEZONE = 'Asia/Shanghai'
BROKER_URL = 'redis://localhost:6379'  # Celery 4 uses this in place of CELERY_BROKER_URL
CELERY_BROKER_URL = 'redis://localhost:6379/1'
#CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'django-db'
CELERY_CACHE_BACKEND = 'django-cache'
# sub-applications whose tasks run asynchronously
CELERY_IMPORTS = (
'TestModel.tasks',
)
# message format settings for celery payloads
CELERY_ACCEPT_CONTENT = ['application/json', ]
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
# django setting.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
}
}
# separate QUEUES for scheduled (beat) and asynchronous tasks
CELERY_QUEUES = {
'beat_tasks': {
'exchange': 'beat_tasks',
'exchange_type': 'direct',
'binding_key': 'beat_tasks'
},
'work_queue': {
'exchange': 'work_queue',
'exchange_type': 'direct',
'binding_key': 'work_queue'
}
}
# default queue
CELERY_DEFAULT_QUEUE = 'work_queue'
# if a queue referenced by the program does not exist in the broker, create it immediately
CELERY_CREATE_MISSING_QUEUES = True
CELERYD_PREFETCH_MULTIPLIER = 1
# can prevent deadlocks in some situations
CELERYD_FORCE_EXECV = True
# number of concurrent worker processes
CELERYD_CONCURRENCY = 4
# acknowledge tasks only after they finish, so failed tasks can be retried
CELERY_ACKS_LATE = True
# recycle each worker after at most 100 tasks to guard against memory leaks
CELERYD_MAX_TASKS_PER_CHILD = 100
# maximum runtime of a single task; kill it if exceeded
CELERYD_TASK_TIME_LIMIT = 12 * 60
# scheduled (beat) tasks
CELERYBEAT_SCHEDULE = {
    'task1': {
        'task': 'course-task',
        'schedule': datetime.timedelta(seconds=5),  # run every 5 seconds
        'options': {
            'queue': 'beat_tasks'  # this scheduled task runs on the beat_tasks queue
}
}
}
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
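# Hedged sketch (not part of this settings module): what TestModel/tasks.py
# might look like for the 'course-task' name registered in CELERYBEAT_SCHEDULE
# above; the task body is an assumption for illustration only.
#
# from celery import shared_task
#
# @shared_task(name='course-task')
# def course_task():
#     print('course task running')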
|
__author__ = 'luca'
from PIL import Image
from images.image_converter import ImageConverter
class Frame(object):
def __init__(self, path):
self._path = path
self._image = None
self._grayscaled_image = None
def path(self):
return self._path
def image(self):
if not self._image:
self._image = Image.open(self._path)
return self._image
def grayscaled_image(self):
if not self._grayscaled_image:
self._grayscaled_image = ImageConverter.luminance_image(self.image())
return self._grayscaled_image
def width(self):
return self.image().size[0]
def height(self):
return self.image().size[1]
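# Hedged usage sketch (not in the original file): Frame is lazy, so the image
# is opened and grayscaled only on first access; the path is hypothetical.
def _example_frame():
    frame = Frame('frames/frame_0001.png')
    print(frame.width(), frame.height())
    return frame.grayscaled_image()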
|
from spack import *
import distutils.dir_util as du
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class Pythia6(Package):
"""PYTHIA is a program for the generation of high-energy physics events,
i.e. for the description of collisions at high energies between elementary
particles such as e+, e-, p and pbar in various combinations."""
homepage = "https://pythia6.hepforge.org/"
url = "http://service-spi.web.cern.ch/service-spi/external/MCGenerators/distribution/pythia6/pythia6-426-src.tgz"
version('426', '4dd75f551b7660c35f817c063abd74ca91b70259c0987905a06ebb2d21bcdf26')
def install(self, spec, prefix):
with working_dir(self.version.string):
configure('--with-hepevt=4000')
make()
make('install')
du.copy_tree('lib',prefix.lib)
du.copy_tree('include',prefix.include)
    def url_for_version(self, version):
        url = 'http://service-spi.web.cern.ch/service-spi/external/MCGenerators/distribution/pythia6/pythia6-%s-src.tgz' % version
        return url
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 20 21:22:51 2018
@author: brandinho
"""
import math
import numpy as np
### Combinations calculator: n choose r ###
def nCr(n, r):
    return math.factorial(n) // (math.factorial(r) * math.factorial(n - r))
### Yield unique elements from a sorted list (skips adjacent duplicates) ###
def uniq(lst):
last = object()
for item in lst:
if item == last:
continue
yield item
last = item
### Sort and remove duplicates ###
def sort_and_deduplicate(l):
return list(uniq(sorted(l, reverse=True)))
### Count the number of outs for a straight ###
def countOuts(numbers):
if 14 in numbers:
numbers = [1] + numbers
possibilityList = []
for i in range(len(numbers)):
possibilityList.append(np.arange(max(numbers[i] - 4, 1), min(numbers[i] + 4, 14) + 1))
straightPossibilityList = []
for i in range(len(possibilityList)):
for j in range(len(possibilityList[i]) - 4):
straightPossibilityList.append(list(possibilityList[i][j:j+5]))
uniquePossibilityList = sort_and_deduplicate(straightPossibilityList)
frequencyCounter = []
validStraightDraws = []
missingCards = []
for i in range(len(uniquePossibilityList)):
frequencyCounter.append(straightPossibilityList.count(uniquePossibilityList[i]))
if frequencyCounter[i] >= 3:
validStraightDraws.append(uniquePossibilityList[i])
tempMissingCards = list(set(uniquePossibilityList[i]) - set(numbers))
if len(tempMissingCards) < 3:
missingCards.append(tempMissingCards)
outs = sort_and_deduplicate(missingCards)
newOuts = list(outs)
for i in range(len(outs)):
if len(outs[i]) == 1:
for j in range(len(outs)):
if (len(outs[j]) == 2 and outs[i][0] in outs[j] and outs[j] in newOuts or
outs[j] == [] and outs[j] in newOuts or
len(outs[j]) == 1 and outs[j][0] < min(numbers) and outs[j] in newOuts and [] in outs):
newOuts.remove(outs[j])
return newOuts
def calcProbs(hand, rankType, cardsOnTable, handStatus):
### We extract variables from the status dictionary to be used in our conditional statements ###
FullHouse = handStatus["FullHouse"]
Triple = handStatus["Triple"]
TwoPair = handStatus["TwoPair"]
Pair = handStatus["Pair"]
NonPair = handStatus["NonPair"]
StraightRunnerRunner = handStatus["StraightRunnerRunner"]
StraightSingleRunner = handStatus["StraightSingleRunner"]
numSuited = handStatus["NumSuited"]
straightGap = handStatus["straightGap"]
straightLowerBound = handStatus["straightLowerBound"]
straightUpperBound = handStatus["straightUpperBound"]
### We calculate the probabilities using combinatorics ###
if cardsOnTable == "PreFlop":
totalCombinations = nCr(50, 5)
if rankType == "Pair":
# We don't have to worry about if we have a pair in our hand because if we had a pair then we wouldn't be calculating this
probMatchingHand = 6 * nCr(11, 4) * 4**4
probMatchingTable = 11 * nCr(4, 2) * nCr(10, 3) * 4**3
marginalProbability1 = probMatchingHand / totalCombinations
probability = marginalProbability1 + (1 - marginalProbability1) * (probMatchingTable / totalCombinations)
elif rankType == "Two Pair":
if NonPair == True:
probMatchingHand = 3**2 * nCr(10, 3) * 4**3
probMatchingTable = nCr(11, 2) * nCr(4, 2)**2 * 9 * 4
probMatching_HalfHand_HalfTable = 6 * 11 * nCr(4, 2) * nCr(10, 2) * 4**2
marginalProbability1 = probMatchingHand / totalCombinations
marginalProbability2 = marginalProbability1 + (1 - marginalProbability1) * (probMatchingTable / totalCombinations)
probability = marginalProbability2 + (1 - marginalProbability2) * (probMatching_HalfHand_HalfTable / totalCombinations)
elif Pair == True:
probability = (12 * nCr(4, 2) * nCr(11, 3) * 4**3) / totalCombinations
elif rankType == "Three of a Kind":
if NonPair == True:
probMatchingHand = 2 * nCr(3, 2) * nCr(11, 3) * 4**3
probMatchingTable = 11 * nCr(4, 3) * nCr(10, 2) * 4**2
marginalProbability1 = probMatchingHand / totalCombinations
probability = marginalProbability1 + (1 - marginalProbability1) * (probMatchingTable / totalCombinations)
elif Pair == True:
probability = (2 * nCr(12, 4) * 4**4) / totalCombinations
elif rankType == "Straight": # I purposely left out the probability of flopping a straight that has nothing to do with your hand
if straightGap == 0:
if straightLowerBound > 3 or straightUpperBound < 12:
firstMultiplier = 4
secondMultiplier = 0
elif straightLowerBound == 3 or straightUpperBound == 12:
firstMultiplier = 3
secondMultiplier = 0
elif straightLowerBound == 2 or straightUpperBound == 13:
firstMultiplier = 2
secondMultiplier = 0
elif straightLowerBound == 1 or straightUpperBound == 14:
firstMultiplier = 1
secondMultiplier = 1
probBothCardsStraight = (firstMultiplier * 3 * 4**3 * nCr(8, 2) * 4**2) / totalCombinations
probability = probBothCardsStraight + ((secondMultiplier * 4 * 4**4 * 7 * 4) / totalCombinations) * (1 - probBothCardsStraight)
elif straightGap == 1:
if straightLowerBound > 4 or straightUpperBound < 11:
firstMultiplier = 3
secondMultiplier = 2
elif straightLowerBound == 4 or straightUpperBound == 11 or straightLowerBound == 3 or straightUpperBound == 12:
firstMultiplier = 3
secondMultiplier = 1
elif straightLowerBound == 2 or straightUpperBound == 13:
firstMultiplier = 2
secondMultiplier = 1
elif straightLowerBound == 1 or straightUpperBound == 14:
firstMultiplier = 1
secondMultiplier = 2
probBothCardsStraight = (firstMultiplier * 3 * 4**3 * nCr(8, 2) * 4**2) / totalCombinations
probability = probBothCardsStraight + ((secondMultiplier * 4 * 4**4 * 7 * 4) / totalCombinations) * (1 - probBothCardsStraight)
elif straightGap == 2:
if straightLowerBound > 4 or straightUpperBound < 11:
firstMultiplier = 2
secondMultiplier = 4
elif straightLowerBound == 4 or straightUpperBound == 11:
firstMultiplier = 2
secondMultiplier = 3
elif straightLowerBound == 2 or straightUpperBound == 13 or straightLowerBound == 3 or straightUpperBound == 12:
firstMultiplier = 2
secondMultiplier = 2
elif straightLowerBound == 1 or straightUpperBound == 14:
firstMultiplier = 1
secondMultiplier = 3
probBothCardsStraight = (firstMultiplier * 3 * 4**3 * nCr(8, 2) * 4**2) / totalCombinations
probability = probBothCardsStraight + ((secondMultiplier * 4 * 4**4 * 7 * 4) / totalCombinations) * (1 - probBothCardsStraight)
elif straightGap == 3:
if straightLowerBound > 4 or straightUpperBound < 11:
firstMultiplier = 1
secondMultiplier = 6
elif straightLowerBound == 4 or straightUpperBound == 11:
firstMultiplier = 1
secondMultiplier = 5
elif straightLowerBound == 3 or straightUpperBound == 12 or straightLowerBound == 1 or straightUpperBound == 14:
firstMultiplier = 1
secondMultiplier = 4
elif straightLowerBound == 2 or straightUpperBound == 13:
firstMultiplier = 1
secondMultiplier = 3
probBothCardsStraight = (firstMultiplier * 3 * 4**3 * nCr(8, 2) * 4**2) / totalCombinations
probability = probBothCardsStraight + ((secondMultiplier * 4 * 4**4 * 7 * 4) / totalCombinations) * (1 - probBothCardsStraight)
elif straightGap > 3:
multipliers = []
if straightLowerBound == 1 or straightLowerBound == 2:
multipliers.append(2)
elif straightLowerBound == 3:
multipliers.append(3)
elif straightLowerBound == 4:
multipliers.append(4)
elif (straightLowerBound == 5 or straightLowerBound == 6 or straightLowerBound == 7 or
straightLowerBound == 8 or straightLowerBound == 9):
multipliers.append(5)
if straightUpperBound == 14 or straightUpperBound == 13:
multipliers.append(2)
elif straightUpperBound == 12:
multipliers.append(3)
elif straightUpperBound == 11:
multipliers.append(4)
elif (straightUpperBound == 6 or straightUpperBound == 7 or straightUpperBound == 8 or
straightUpperBound == 9 or straightUpperBound == 10):
multipliers.append(5)
firstMultiplier = multipliers[0]
secondMultiplier = multipliers[1]
probability = ((firstMultiplier + secondMultiplier) * 4 * 4**4 * 7 * 4) / totalCombinations
elif straightGap == -1: # This means that they have a pocket pair
if straightLowerBound > 4 or straightUpperBound < 11:
firstMultiplier = 5
elif straightLowerBound == 4 or straightUpperBound == 11:
firstMultiplier = 4
elif straightLowerBound == 3 or straightUpperBound == 12:
firstMultiplier = 3
elif straightLowerBound == 2 or straightUpperBound == 13 or straightLowerBound == 1 or straightUpperBound == 14:
firstMultiplier = 2
probability = (firstMultiplier * 3 * 4**3 * nCr(8, 2) * 4**2) / totalCombinations
elif rankType == "Flush":
if numSuited == 1:
probFlushWithHand = 2 * nCr(12, 4) * 46
probFlushWithoutHand = 2 * nCr(13, 5)
marginalProbability1 = probFlushWithHand / totalCombinations
probability = marginalProbability1 + (1 - marginalProbability1) * (probFlushWithoutHand / totalCombinations)
elif numSuited == 2:
probFlushWithHand = nCr(11, 3) * nCr(47, 2)
probFlushWithoutHand = 3 * nCr(13, 5)
marginalProbability1 = probFlushWithHand / totalCombinations
probability = marginalProbability1 + (1 - marginalProbability1) * (probFlushWithoutHand / totalCombinations)
elif rankType == "Full House":
if NonPair == True:
probMatchingHand = 3**2 * nCr(10, 2) * 4**2 # The beginning has 3**2 because both 3C2 and 3C1 = 3
probMatchingTable = 11 * nCr(4, 3) * 10 * nCr(4, 2)
probMatching_HandPair_TableTriple = 6 * 11 * nCr(4, 3) * 10 * 4
probMatching_HandTriple_TablePair = 2 * nCr(3, 2) * 11 * nCr(4, 2) * 10 * 4
marginalProbability1 = probMatchingHand / totalCombinations
marginalProbability2 = marginalProbability1 + (1 - marginalProbability1) * (probMatchingTable / totalCombinations)
marginalProbability3 = marginalProbability1 + (1 - marginalProbability1) * (probMatching_HandPair_TableTriple / totalCombinations)
probability = marginalProbability3 + (1 - marginalProbability3) * (probMatching_HandTriple_TablePair / totalCombinations)
elif Pair == True:
probMatching_HandPair_TableTriple = 12 * nCr(4, 3) * nCr(11, 2) * 4**2
probMatching_HandTriple_TablePair = 2 * 12 * nCr(4, 2) * nCr(10, 2) * 4**2
marginalProbability1 = probMatching_HandPair_TableTriple / totalCombinations
probability = marginalProbability1 + (1 - marginalProbability1) * (probMatching_HandTriple_TablePair / totalCombinations)
elif rankType == "Four of a Kind":
if NonPair == True:
probMatchingHand = 2 * nCr(11, 2) * 4**2
probMatchingTable = 11 * 10 * 4
marginalProbability1 = probMatchingHand / totalCombinations
probability = marginalProbability1 + (1 - marginalProbability1) * (probMatchingTable / totalCombinations)
elif Pair == True:
probability = (nCr(12, 3) * 4**3) / totalCombinations
elif rankType == "Straight Flush":
if numSuited == 1 or numSuited == 2 and straightGap > 3:
multipliers = []
if straightLowerBound == 1 or straightLowerBound == 2:
multipliers.append(2)
elif straightLowerBound == 3:
multipliers.append(3)
elif straightLowerBound == 4:
multipliers.append(4)
elif (straightLowerBound == 5 or straightLowerBound == 6 or straightLowerBound == 7 or
straightLowerBound == 8 or straightLowerBound == 9):
multipliers.append(5)
if straightUpperBound == 14 or straightUpperBound == 13:
multipliers.append(2)
elif straightUpperBound == 12:
multipliers.append(3)
elif straightUpperBound == 11:
multipliers.append(4)
elif (straightUpperBound == 6 or straightUpperBound == 7 or straightUpperBound == 8 or
straightUpperBound == 9 or straightUpperBound == 10):
multipliers.append(5)
if len(multipliers) == 1:
multipliers.append(multipliers[0])
firstMultiplier = multipliers[0]
secondMultiplier = multipliers[1]
probability = ((firstMultiplier + secondMultiplier) * 4 * 46) / totalCombinations
elif numSuited == 2:
if straightGap == 0:
if straightLowerBound > 3 or straightUpperBound < 12:
firstMultiplier = 4
secondMultiplier = 0
elif straightLowerBound == 3 or straightUpperBound == 12:
firstMultiplier = 3
secondMultiplier = 0
elif straightLowerBound == 2 or straightUpperBound == 13:
firstMultiplier = 2
secondMultiplier = 0
elif straightLowerBound == 1 or straightUpperBound == 14:
firstMultiplier = 1
secondMultiplier = 1
probBothCardsStraight = (firstMultiplier * 3 * 47 * 46) / totalCombinations
probability = probBothCardsStraight + ((secondMultiplier * 4 * 46) / totalCombinations) * (1 - probBothCardsStraight)
elif straightGap == 1:
if straightLowerBound > 4 or straightUpperBound < 11:
firstMultiplier = 3
secondMultiplier = 2
elif straightLowerBound == 4 or straightUpperBound == 11 or straightLowerBound == 3 or straightUpperBound == 12:
firstMultiplier = 3
secondMultiplier = 1
elif straightLowerBound == 2 or straightUpperBound == 13:
firstMultiplier = 2
secondMultiplier = 1
elif straightLowerBound == 1 or straightUpperBound == 14:
firstMultiplier = 1
secondMultiplier = 2
probBothCardsStraight = (firstMultiplier * 3 * 47 * 46) / totalCombinations
probability = probBothCardsStraight + ((secondMultiplier * 4 * 46) / totalCombinations) * (1 - probBothCardsStraight)
elif straightGap == 2:
if straightLowerBound > 4 or straightUpperBound < 11:
firstMultiplier = 2
secondMultiplier = 4
elif straightLowerBound == 4 or straightUpperBound == 11:
firstMultiplier = 2
secondMultiplier = 3
elif straightLowerBound == 2 or straightUpperBound == 13 or straightLowerBound == 3 or straightUpperBound == 12:
firstMultiplier = 2
secondMultiplier = 2
elif straightLowerBound == 1 or straightUpperBound == 14:
firstMultiplier = 1
secondMultiplier = 3
probBothCardsStraight = (firstMultiplier * 3 * 47 * 46) / totalCombinations
probability = probBothCardsStraight + ((secondMultiplier * 4 * 46) / totalCombinations) * (1 - probBothCardsStraight)
elif straightGap == 3:
if straightLowerBound > 4 or straightUpperBound < 11:
firstMultiplier = 1
secondMultiplier = 6
elif straightLowerBound == 4 or straightUpperBound == 11:
firstMultiplier = 1
secondMultiplier = 5
elif straightLowerBound == 3 or straightUpperBound == 12 or straightLowerBound == 1 or straightUpperBound == 14:
firstMultiplier = 1
secondMultiplier = 4
elif straightLowerBound == 2 or straightUpperBound == 13:
firstMultiplier = 1
secondMultiplier = 3
probBothCardsStraight = (firstMultiplier * 3 * 47 * 46) / totalCombinations
probability = probBothCardsStraight + ((secondMultiplier * 4 * 46) / totalCombinations) * (1 - probBothCardsStraight)
elif cardsOnTable == "Flop":
totalCombinations = nCr(47, 2)
if rankType == "Pair":
probMatchingHand = 6 * 8 * 4
probMatchingTable = 9 * 8 * 4
probMatchingRunners = 8 * nCr(4, 2)
marginalProbability1 = probMatchingHand / totalCombinations
marginalProbability2 = marginalProbability1 + (1 - marginalProbability1) * (probMatchingTable / totalCombinations)
probability = marginalProbability2 + (1 - marginalProbability2) * (probMatchingRunners / totalCombinations)
elif rankType == "Two Pair":
if NonPair == True:
probMatchingHand = 3**2
probMatchingTable = 3 * 3**2
probMatching_HalfHand_HalfTable = (2 * 3) * (3 * 3)
marginalProbability1 = probMatchingHand / totalCombinations
marginalProbability2 = marginalProbability1 + (1 - marginalProbability1) * (probMatchingTable / totalCombinations)
probability = marginalProbability2 + (1 - marginalProbability2) * (probMatching_HalfHand_HalfTable / totalCombinations)
elif Pair == True:
probability = (3 * 3 * 9 * 4) / totalCombinations
elif rankType == "Three of a Kind":
if NonPair == True:
# We multiply by 5 because there are 5 different cards (2 in your hand and 3 on the flop) that need runner, runner
probability = (5 * nCr(3, 2)) / totalCombinations
elif Pair == True:
probability = (2 * 9 * 4) / totalCombinations
elif TwoPair == True:
probability = 0 # Because you would get full house
elif rankType == "Straight":
probRunnerRunner = (StraightRunnerRunner * 4**2) / totalCombinations
if NonPair == True:
probSingleRunner = (StraightSingleRunner * 4 * 8 * 4) / totalCombinations
elif Pair == True:
probSingleRunner = (StraightSingleRunner * 4 * 9 * 4) / totalCombinations
elif TwoPair == True or Triple == True:
probSingleRunner = 0
probability = probRunnerRunner + (1 - probRunnerRunner) * probSingleRunner
elif rankType == "Flush":
if numSuited == 2:
probability = 0
elif numSuited == 3:
probability = nCr(10, 2) / totalCombinations
elif numSuited == 4:
probability = (9 * 46) / totalCombinations
elif rankType == "Full House":
if NonPair == True:
probability = 0
elif Pair == True:
probMatchSetThenPair = 2 * 3 * 3
probRunnerRunnerSet = 3 * 3
marginalProbability1 = probMatchSetThenPair / totalCombinations
probability = marginalProbability1 + (1 - marginalProbability1) * (probRunnerRunnerSet / totalCombinations)
elif TwoPair == True:
probMatchHand = 2 * 2 * 10 * 4
probRunnerRunnerSingle = 3
marginalProbability1 = probMatchHand / totalCombinations
probability = marginalProbability1 + (1 - marginalProbability1) * (probRunnerRunnerSingle / totalCombinations)
elif Triple == True:
probPairTheBoard = 2 * 3 * 10 * 4
probRunnerRunner = 10 * nCr(4, 2)
marginalProbability1 = probPairTheBoard / totalCombinations
probability = marginalProbability1 + (1 - marginalProbability1) * (probRunnerRunner / totalCombinations)
elif rankType == "Four of a Kind":
if NonPair == True:
probability = 0
elif Pair == True:
probability = 1 / totalCombinations
elif TwoPair == True:
probability = 2 / totalCombinations
elif Triple == True:
probability = 10 * 4 / totalCombinations
elif FullHouse == True:
probPairRunnerRunner = 2
probMatchTriple = 11 * 4
marginalProbability1 = probPairRunnerRunner / totalCombinations
probability = marginalProbability1 + (1 - marginalProbability1) * (probMatchTriple / totalCombinations)
elif rankType == "Straight Flush":
probability = 0 #FIX THIS WITH AN ACTUAL PROBABILITY
elif cardsOnTable == "Turn":
totalCombinations = 46
if rankType == "Pair":
probability = (6 * 3) / totalCombinations
elif rankType == "Two Pair":
if NonPair == True:
probability = 0
elif Pair == True:
probability = (4 * 3) / totalCombinations
elif rankType == "Three of a Kind":
if NonPair == True:
probability = 0
elif Pair == True:
probability = 2 / totalCombinations
elif TwoPair == True:
probability = 0 # Because if they get a triple, that would mean a full house
elif rankType == "Straight":
probability = (StraightSingleRunner * 4) / totalCombinations
elif rankType == "Flush":
if numSuited == 2:
probability = 0
elif numSuited == 3:
probability = 0
elif numSuited == 4:
probability = 9 / totalCombinations
elif rankType == "Full House":
if NonPair == True:
probability = 0
elif Pair == True:
probability = 0
elif TwoPair == True: # Leaving this for right now, but later I need to check if we have three pair as well
probability = 4 / totalCombinations
elif Triple == True:
probability = (3 * 3) / totalCombinations
elif rankType == "Four of a Kind":
if NonPair == True:
probability = 0
elif Pair == True:
probability = 0
elif TwoPair == True:
probability = 0
            elif Triple == True or FullHouse == True:
probability = 1 / totalCombinations
elif rankType == "Straight Flush":
if numSuited < 4 or StraightSingleRunner == 0:
probability = 0
else:
probability = 0 # FIX THIS WITH AN ACTUAL PROBABILITY
return probability
def findHandStatus(hand, table):
evaluationHand = list(table)
evaluationHand.extend(hand)
### Initializing the states to False - will update if they turn out to be True ###
NonPair = False
Pair = False
TwoPair = False
Triple = False
FullHouse = False
NumSuitSplit = []
for i in range(len(evaluationHand)): NumSuitSplit.append(evaluationHand[i].split("_"))
numbers_string = np.array(NumSuitSplit)[:,0]
suits = np.array(NumSuitSplit)[:,1]
numbers = list(map(int, numbers_string))
numbers.sort()
suits.sort()
pair_sequence = 1
pair_sequences = []
pair_values = []
tempSuited = 1
NumSuited = 1
for i in range(1, len(numbers)):
diff = numbers[i] - numbers[(i-1)]
### Check for pairs ###
if diff == 0:
pair_sequence += 1
pair = numbers[i]
if i == len(numbers) - 1:
pair_values.append(pair)
pair_sequences.append(pair_sequence)
else:
if pair_sequence > 1:
pair_values.append(pair)
pair_sequences.append(pair_sequence)
pair_sequence = 1
### Check for number of suited cards ###
if suits[i] == suits[(i-1)]:
tempSuited += 1
if tempSuited > NumSuited:
NumSuited = tempSuited
elif suits[i] != suits[(i-1)]:
tempSuited = 1
### Check for various states ###
if len(pair_sequences) > 1 and np.sum(np.array(pair_sequences) == 3) > 0:
FullHouse = True
elif len(pair_sequences) == 1 and pair_sequences[0] == 3:
Triple = True
elif len(pair_sequences) > 1:
TwoPair = True
elif len(pair_sequences) == 1 and pair_sequences[0] == 2:
Pair = True
elif len(pair_sequences) == 0:
NonPair = True
### Initialize and begin to fill the dictionary ###
statusDict = {}
statusDict["NonPair"] = NonPair
statusDict["Pair"] = Pair
statusDict["TwoPair"] = TwoPair
statusDict["Triple"] = Triple
statusDict["FullHouse"] = FullHouse
outs = countOuts(numbers)
StraightRunnerRunner = 0
StraightSingleRunner = 0
for out in outs:
if len(out) == 1:
StraightSingleRunner += 1
elif len(out) == 2:
StraightRunnerRunner += 1
statusDict["StraightRunnerRunner"] = StraightRunnerRunner
statusDict["StraightSingleRunner"] = StraightSingleRunner
statusDict["NumSuited"] = NumSuited
NumSuitSplitHand = []
for i in range(len(hand)): NumSuitSplitHand.append(hand[i].split("_"))
hand_numbers_string = np.array(NumSuitSplitHand)[:,0]
hand_numbers = list(map(int, hand_numbers_string))
statusDict["straightGap"] = abs(hand_numbers[0] - hand_numbers[1]) - 1
statusDict["straightUpperBound"] = max(hand_numbers)
statusDict["straightLowerBound"] = min(hand_numbers)
return statusDict
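# Hedged usage sketch (not part of the original file): card strings follow the
# "number_suit" convention implied by findHandStatus (e.g. "14_s" for the ace
# of spades); the rank names match the calcProbs branches above.
if __name__ == "__main__":
    hand = ["14_s", "13_s"]
    flop = ["2_h", "7_s", "9_s"]
    status = findHandStatus(hand, flop)
    print("P(flush by the river):", calcProbs(hand, "Flush", "Flop", status))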
|
#!/usr/bin/python
from xsocket import *
from xia_address import *
set_conf("xsockconf_python.ini","stock_test_client.py")
print_conf()
sock = Xsocket()
if sock < 0:
    print "error opening socket"
    exit(-1)
# Make the sDAG (the one the server listens on)
dag = "RE %s %s %s" % (AD1, HID1, SID_STOCK)
Xconnect(sock, dag)
msg = "hi"
#Xsendto(sock, msg, len(msg), 0, dag, len(dag)+1)
Xsend(sock, msg, len(msg), 0)
stock_feed = Xrecv(sock, 15000, 0)
#stock_feed = Xrecvfrom(sock, 15000, 0)
print stock_feed
print len(stock_feed)
|