text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
'''
views.py: part of singularity package
'''
from singularity.package import list_package, load_package, package
from singularity.utils import zip_up, read_file, write_file
from singularity.cli import Singularity
import SimpleHTTPServer
import SocketServer
import webbrowser
import tempfile
import zipfile
import shutil
import json
import os
import re
###################################################################################################
# PACKAGE TREE ####################################################################################
###################################################################################################
def tree(image_path,S=None):
    '''tree will render an html tree (graph) of an image or package
    :param image_path: full path to the image, or package
    :param S: the Singularity object, only needed if image needs to be packaged.
    :returns: the tree structure from make_package_tree, or None on failure
    '''
    # Make a temporary directory for stuffs
    tmpdir = tempfile.mkdtemp()
    try:
        # If the user has provided an image, try to package it
        if re.search(".img$",image_path):
            # BUG FIX: identity comparison for None (was `S == None`)
            if S is None:
                print("\n\nYOU MUST ENTER YOUR PASSWORD [ENTER] TO CONTINUE.")
                S = Singularity()
            image_path = package(image_path,output_folder=tmpdir,S=S)
        # If it's a package, look for folders.txt and files.txt
        if re.search(".zip$",image_path):
            guts = list_package(image_path)
            if "folders.txt" in guts and "files.txt" in guts:
                retrieved = load_package(image_path,get=["folders.txt","files.txt"])
                return make_package_tree(folders=retrieved["folders.txt"],
                                         files=retrieved['files.txt'])
            print("Cannot find folders.txt and files.txt in package, cannot create visualization.")
        return None
    finally:
        # BUG FIX: the temporary directory previously leaked whenever the
        # function returned a tree (rmtree was only reached on the error path)
        shutil.rmtree(tmpdir)
def make_package_tree(folders,files,path_delim="/",parse_files=True):
    '''make_package_tree will convert a list of folders and files into a json structure that represents a graph.
    :param folders: a list of folders in the image
    :param files: a list of files in the folder
    :param parse_files: return 'files' lookup in result, to associate ID of node with files (default True)
    :param path_delim: the path delimiter, default is '/'
    :returns: dict with "graph" (nested node dicts under a virtual "base" node),
              "lookup" (full path -> node id), "depth" (max depth + 1) and,
              when parse_files is True, "files" (node id -> list of filenames;
              id 0 is the base folder)
    '''
    nodes = {}   # node id -> node dict
    lookup = {}  # full path -> node id
    count = 1    # next node id; 0 is reserved for the virtual base node
    max_depth = 0
    for folder in folders:
        if folder != ".":
            folder = re.sub("^[.]/","",folder)
            path_components = folder.split(path_delim)
            for p in range(len(path_components)):
                path_component = path_components[p]
                fullpath = path_delim.join(path_components[0:p+1])
                # Have we created the node yet?
                if fullpath not in lookup:
                    lookup[fullpath] = count
                    node = {"id":count,"name":path_component,"path":fullpath,"level":p,"children":[]}
                    count += 1
                    # Did we find a deeper level?
                    if p > max_depth:
                        max_depth = p
                    # Does the node have a parent?
                    if p == 0:  # base node, no parent
                        parent_id = 0
                    else:       # look up the parent id
                        parent_path = path_delim.join(path_components[0:p])
                        parent_id = lookup[parent_path]
                    node["parent"] = parent_id
                    nodes[node['id']] = node
    # Now make the graph: attach children to their parents, deepest level
    # first. Level 0 is excluded -- those nodes become children of the
    # virtual base node below.
    # (Uses .items() instead of the Python2-only .iteritems(), and an
    # explicit descending range instead of list.reverse()/pop().)
    seen = []
    for level in range(max_depth, 0, -1):
        children = {x: y for x, y in nodes.items() if y['level'] == level}
        seen = seen + [y['id'] for x, y in children.items()]
        nodes = {x: y for x, y in nodes.items() if y['id'] not in seen}
        for node_id, child_node in children.items():
            # BUG FIX: the original had a `node_id == 0` branch assigning into
            # `graph` before it was defined; ids start at 1 so the branch was
            # dead code and has been removed.
            nodes[child_node['parent']]["children"].append(child_node)
    # Now add the parents to graph, with name as main lookup
    graph = [parent_info for parent, parent_info in nodes.items()]
    graph = {"name": "base", "children": graph}
    result = {"graph": graph, "lookup": lookup, "depth": max_depth + 1}
    # Parse files to include in tree
    if parse_files == True:
        file_lookup = {}
        for filey in files:
            filey = re.sub("^[.]/","",filey)
            filepath, filename = os.path.split(filey)
            if filepath in lookup:
                folder_id = lookup[filepath]
                if folder_id in file_lookup:
                    file_lookup[folder_id].append(filename)
                else:
                    file_lookup[folder_id] = [filename]
            elif filepath == '':  # base folder
                if 0 in file_lookup:
                    file_lookup[0].append(filename)
                else:
                    file_lookup[0] = [filename]
        result['files'] = file_lookup
    return result
###################################################################################################
# WEBSERVER FUNCTIONS #############################################################################
###################################################################################################
# These are currently not in use, but might be useful (later) for non-flask serving.
def webserver(base_folder,port=None,description=None):
    '''webserver will generate a temporary webserver in some base_folder
    :param base_folder: the folder base to use
        (NOTE(review): currently unused in the body -- the server serves the
        process working directory; confirm intent with callers)
    :param port: port to serve on; a random port in 8000-9998 when None
    :param description: description of the visualization, for the user
    '''
    import random
    if description is None:
        description = "visualization"
    httpd = None
    try:
        if port is None:
            # BUG FIX: `choice` was never imported (numpy-style call);
            # use random.choice over the same range instead
            port = random.choice(range(8000, 9999))
        Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
        httpd = SocketServer.TCPServer(("", port), Handler)
        # BUG FIX: description/port were swapped in the format arguments
        print("View shub %s at localhost:%s" %(description,port))
        webbrowser.open("http://localhost:%s" %(port))
        httpd.serve_forever()
    except:
        print("Stopping web server...")
        # Guard: the bind may have failed before httpd was assigned
        if httpd is not None:
            httpd.server_close()
|
# Bootstrap helper: installs runtime dependencies at import time.
# NOTE(review): driving pip programmatically via pip.main is not a supported
# pip API -- consider a requirements.txt instead.
import pip
def install(package):
    # Install *package* into the current environment via pip.
    pip.main(['install', package])
install('mutagen')
#install('gTTS')
#from gtts import gTTS
from mutagen.mp3 import MP3
|
import os
from os import listdir
import io
import json
from distutils.version import StrictVersion
from PIL import Image, ImageTk
import urllib2
"""This only works for Python Version 2.7 for now.
This checks the version and if its newer then downloads the file and displays changes since the previous version"""
def is_internet_on():
    '''Return True when an HTTP request to a known host (a Google IP)
    succeeds within 10 seconds, False otherwise.'''
    try:
        response = urllib2.urlopen('http://74.125.228.100',timeout=10)
        # BUG FIX: close the response instead of leaking the connection
        response.close()
        return True
    except urllib2.URLError:
        return False
############################# Download JSON FIles #####################################
# Names of the JSON files already present locally (consulted by download_json).
stuff = listdir('./JSON Files')
# mtgjson.com endpoints: version marker, full card database, and changelog.
versionURL = 'http://mtgjson.com/json/version.json'
setURL = 'http://mtgjson.com/json/AllSets-x.json'
changeURL = 'http://mtgjson.com/json/changelog.json'
def download_stuff(name, location):
try:
r = urllib2.urlopen(location)
content = r.read()
e = open(name, 'wb')
e.write(content)
e.close
print "Download of %s Completed!" %(name)
except urllib2.URLError:
print "Could not download %s. Please check your internet connection." %(name)
def save_json_data(json_file):
    '''Load and return the parsed JSON contents of *json_file*.'''
    # Context manager closes the handle even if json.load raises
    with open(json_file) as json_data:
        return json.load(json_data)
def show_changes(version):
    '''Return a flat list of every changelog entry newer than *version*.'''
    changeList = []
    for entry in save_json_data('JSON Files/changelog.json'):
        if StrictVersion(entry["version"]) > StrictVersion(version):
            changeList.extend(entry["changes"])
    return changeList
def download_json():
if 'version.json' not in stuff:
print "You did not have a version file. Downloading now..."
download_stuff('JSON Files/version.json', versionURL)
if 'AllSets-x.json' not in stuff:
print "You did not have the MTG JSON file. Downloading now..."
download_stuff('JSON Files/AllSets-x.json', setURL)
if 'changelog.json' not in stuff:
print "You did not have the changelog file. Downloading now..."
download_stuff('JSON Files/changelog.json', changeURL)
#Find Current Version
currentVersion = open('JSON Files/version.json').read()[1: -1]
#Check version from online
s = urllib2.urlopen(versionURL)
content = s.read()
s.close()
newVersion = content[1: -1]
#Compares the version you have with the one online
if StrictVersion(newVersion) > StrictVersion(currentVersion):
#Update Version
download_stuff('JSON Files/version.json', versionURL)
#Update Set
download_stuff('JSON Files/AllSets-x.json', setURL)
#Update ChangeLog
download_stuff('JSON Files/changelog.json', changeURL)
print "Congratulations! You have downloaded the Latest Version"
print "Changes since last version: "
for items in show_changes(currentVersion):
print items
else:
print "You already have the Newest Version"
######################### Download Image ##############################
def get_image(edition, name, wt, ht):
try:
url = 'http://mtgimage.com/setname/' + edition + '/' + name + '.jpg'
print url
image_bytes = urllib2.urlopen(url).read()
data_stream = io.BytesIO(image_bytes)
pil_image = Image.open(data_stream)
if pil_image.format != 'JPEG':
pil_image.save('temp_image.jpg', "JPEG")
pil_image = Image.open('temp_image.jpg')
w,h = pil_image.size
pil_image = pil_image.resize((wt, ht), Image.ANTIALIAS)
tk_image = ImageTk.PhotoImage(pil_image)
except:
tk_image = None
return tk_image
|
# Need something to reprocess data
import glob, os
# Look in final.old for cases
os.chdir("final.old")
cases = glob.glob("*")
os.chdir("..")
for case in cases:
print "Processing Case %s" % (case,)
# Symlink data in from thumper
os.system("ln -s /thumper/mred/postprocess/data/%s data/%s" % (case,case))
# Run master.py
os.system("./master.py %s" % (case,))
# Remove sym link
os.unlink("data/%s" % (case,))
sys.exit()
|
import fitbit
from fitbit import gather_keys_oauth2 as Oauth2
import pandas as pd
import numpy as np
import datetime
from datetime import timedelta
import sys
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import json
import requests
import time
import Keys
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import Emailer
import os
def authorizer(userKey, userSecret):
    """Run the Fitbit OAuth2 browser flow and return an authorised client."""
    server = Oauth2.OAuth2Server(userKey, userSecret)
    server.browser_authorize()
    token = server.fitbit.client.session.token
    access_token = str(token['access_token'])
    refresh_token = str(token['refresh_token'])
    return fitbit.Fitbit(client_id=userKey, client_secret=userSecret,
                         access_token=access_token,
                         refresh_token=refresh_token, system='en_GB')
def hrDataCollector(auth):
    """Pull today's intraday heart-rate series from the Fitbit API.
    :param auth: authorised fitbit client (needs intraday_time_series)
    :returns: DataFrame indexed by Time, resampled to 5-minute means,
              or None when the device has not synced (no data yet)
    """
    heartRate = auth.intraday_time_series(resource = "activities/heart", base_date = 'today', detail_level = '1min', start_time = None, end_time = None)
    df = pd.DataFrame(heartRate["activities-heart-intraday"]["dataset"])
    df = df.rename(columns = {"time": "Time", "value": "Heart rate [BPM]"})
    try:
        df["Time"] = pd.to_datetime(df["Time"])
    except KeyError:
        # An empty dataset has no "Time" column: the tracker has not synced
        print("Sync your fitbit first!")
        return None
    except:
        print("Unexpected error: ", sys.exc_info()[0])
        # BUG FIX: removed the unreachable sys.exit() that followed `raise`
        raise
    df = df.set_index("Time")
    # "5min" replaces the deprecated "5T" alias (identical behaviour)
    df = df.resample("5min").mean()
    return df
def noiseDataCollector():
    """Fetch Dublin city noise readings (location 8) as a DataFrame with
    parsed 'times' and string 'aleq' columns."""
    response = requests.get("http://dublincitynoise.sonitussystems.com/applications/api/dublinnoisedata.php?location=8")
    noise = pd.DataFrame.from_dict(response.json())
    noise["times"] = pd.to_datetime(noise["times"])
    return noise
def pushToCloud(dtype, data, db, user = None):
    """Upload rows of *data* to Firestore.
    :param dtype: "heart rate" (per-user collection) or anything else
        (treated as open noise data)
    :param data: DataFrame; heart-rate frames are indexed by Time
    :param db: firestore client
    :param user: document id under Users (heart-rate uploads only)
    """
    records = data.to_dict(orient = 'records')
    if dtype == "heart rate":
        # The Time index is not part of the records; export it separately
        time_records = data.index.to_frame().to_dict(orient = 'records')
        hr_collection = (db.collection(u'Users').document(user)
                         .collection(u'HeartRateData'))
        for record, t_record in zip(records, time_records):
            hr_collection.add({
                u'Time': t_record["Time"],
                u'Heart rate [BPM]': record["Heart rate [BPM]"],
            })
    else:
        noise_collection = db.collection(u'Open_Noise_Data')
        for record in records:
            noise_collection.add({
                u'Time': record["times"],
                u'aleq': record["aleq"],
            })
def actuation(auth):
    """Check the mean of the latest reading across the 5 HR instances;
    email the authorities when it exceeds 70 BPM, then show the graph."""
    latest = []
    for idx in range(1, 6):
        frame = instanceDataPreProcessing(
            pd.read_csv("Generated_Data/HR/heart_rate_{}.csv".format(idx)))
        latest.append(frame.iloc[-1]["Heart rate [BPM]"])
    if np.array(latest).mean() > 70:
        print("Sending an email to notify authorities about the potential disaster.")
        Emailer.sendEmail()
        print("Check the graph to see why the trigger was set off.")
    else:
        print("No mandatory action required. Check the graph out if you want.")
    # Both branches end by displaying the visualization
    visualization(auth)
def visualization(auth):
    """Plot the 5-instance average heart-rate series (bars) against live
    city noise levels (red line on a secondary y-axis).
    :param auth: authorised fitbit client (unused in this body; kept so the
        call sites in actuation stay uniform -- TODO confirm)
    """
    #Pulling HR Data
    hrList = []
    for i in range(1,6):
        hr = pd.read_csv("Generated_Data/HR/heart_rate_{}.csv".format(i))
        hr = instanceDataPreProcessing(hr)
        hrList.append(hr)
    hr_concat = pd.concat((hrList[0], hrList[1], hrList[2], hrList[3], hrList[4]))
    # Average the five instances at identical timestamps
    temp = hr_concat.groupby(hr_concat.index)
    hr_means = temp.mean()
    #Pulling noise data
    noise = noiseDataCollector()
    noise = noise.astype({'aleq': 'float64'})
    ax = hr_means.plot(kind = "bar", width = 0.1)
    # Noise shares the x-range of the bar plot but gets its own y-axis
    noise['aleq'].plot(color = 'red', secondary_y = True, xlim = ax.get_xlim())
    plt.xlabel('Time')
    ax.set_ylabel('Heart Rate')
    plt.ylabel('A-weighted Equivalent Level (Noise values)')
    plt.show()
def instanceDataPreProcessing(hr):
    """Parse the 'Time' column (in place) and return *hr* indexed by it."""
    hr["Time"] = pd.to_datetime(hr["Time"])
    return hr.set_index("Time")
def otherInstances(db):
    """Push the emulated CSV data for sensor instances 2-5 to Firestore."""
    print("Attempting to push emulated data from other sensor instances.")
    for idx in range(2, 6):
        frame = instanceDataPreProcessing(
            pd.read_csv("Generated_Data/HR/heart_rate_{}.csv".format(idx)))
        pushToCloud("heart rate", frame, db, "Instance-{}".format(idx))
        print("Pushed instance-{} of emulated heart rate sensor data".format(idx))
    print("Done pushing emulated heart rate sensor data to firestore.")
def mainFunc(auth, db):
    """Main polling loop: every 10s push heart-rate data (live when the
    tracker has synced, emulated CSVs otherwise) and live noise data to
    Firestore, triggering actuation when noise exceeds 50 aleq.
    Runs until interrupted (Ctrl+C)."""
    while True:
        hr = hrDataCollector(auth)
        # BUG FIX: hrDataCollector returns None when the device has not
        # synced; calling .empty on None raised AttributeError.
        if hr is None or hr.empty:
            print("Attempting to push emulated data to firestore.")
            hr = pd.read_csv("Generated_Data/HR/heart_rate_1.csv")
            hr = instanceDataPreProcessing(hr)
            pushToCloud("heart rate", hr, db, "Instance-1")
            print("Pushed emulated sensor-1 data.")
            otherInstances(db)
        else:
            print("Attempting to pull live HR Data and pushing it to firestore.")
            user = Keys.getFitbitClientID()
            pushToCloud("heart rate", hr, db, user)
            # Persist the live data so later loops/instances can replay it
            pwd = os.getcwd()
            os.chdir(pwd+'/Generated_Data/HR/')
            hr.to_csv('heart_rate_1.csv')
            os.chdir("../")
            os.chdir("../")
            print("Live heart rate data from sensor-1 pushed to firestore.")
            otherInstances(db)
        print("Pulling live noise data now.")
        noise = noiseDataCollector()
        print("Attempting to push live noise data to firestore.")
        pushToCloud("noise", noise, db)
        print("Noise data pushed to firestore.")
        # Actuate only when the most recent noise reading is loud (> 50 aleq)
        if float(noise.iloc[-1]["aleq"]) > 50:
            actuation(auth)
        print("Press 'Ctrl+C' if you want to exit the program now.")
        time.sleep(10)
def setup():
    """Authorise the Fitbit client, initialise the Firebase app and start
    the main polling loop (blocks forever)."""
    userKey = Keys.getFitbitClientID()
    userSecret = Keys.getFitbitClientSecret()
    auth = authorizer(userKey, userSecret)
    # Firebase service-account key file must sit next to this script
    cred = credentials.Certificate("Firebase_SA_Key.json")
    firebase_admin.initialize_app(cred)
    db = firestore.client()
    #Setup is done. Now calling mainFunc to start the application.
    mainFunc(auth, db)
if __name__ == "__main__":
    setup()
#!/usr/bin/python
"""
Simple HTTP server to be used as an internal, non-authorized notification email gateway.
Using multipart with curl:
$ curl -X POST http://localhost:1396/ -F subject="SUBJECT" -F body="BODY"
Using postdata with wget:
$ wget http://localhost:1396/ --post-data "SUBJECT;BODY"
"""
import cgi
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import os
import smtplib
import socket
import sys
import traceback
import urllib
def debug(msg):
    """Write *msg* to stderr, keeping stdout clean for the HTTP layer."""
    sys.stderr.write("{}\n".format(msg))
def send_email(sender_email, sender_pass, recipients, subject, body, smtp_server="smtp.gmail.com", port=587):
    """Send a plain-text email via SMTP with STARTTLS.
    :param sender_email: account used to authenticate and as the From header
    :param sender_pass: password for *sender_email*
    :param recipients: list of destination addresses
    :param subject: message subject line
    :param body: plain-text message body
    :param smtp_server: SMTP host (default: Gmail)
    :param port: SMTP port used for STARTTLS (default 587)
    """
    server = smtplib.SMTP(smtp_server, port)
    server.ehlo()
    server.starttls()
    server.login(sender_email, sender_pass)
    # Build the raw message by hand: headers, blank line, then the body
    contents = [
        "To: %s" % ", ".join(recipients),
        "From: %s" % sender_email,
        "Subject: %s" % subject,
        "", body,
    ]
    server.sendmail(sender_email, recipients, "\r\n".join(contents))
    debug("Email with subject '{}' sent to {}".format(subject, recipients))
    server.quit()
def parse_form_params(request):
    """Extract the subject/body parameters from a POST request.
    Two encodings are supported:
    - multipart/form-data (e.g. curl -F): parsed with cgi.parse_multipart
    - anything else: raw post data interpreted as "SUBJECT;BODY"
    :returns: dict with "subject" and "body" string values
    """
    ctype, pdict = cgi.parse_header(request.headers["content-type"])
    if ctype != "multipart/form-data":
        length = int(request.headers['content-length'])
        field_data = request.rfile.read(length)
        # Split on the first ';' only: the body may itself contain ';'
        subject, body = field_data.decode("utf-8").split(";", 1)
        return dict(subject=subject, body=body)
    else:
        # parse_multipart expects the boundary as bytes
        pdict_bytes = dict(pdict, boundary=bytes(pdict["boundary"], "utf-8"))
        params_bytes = cgi.parse_multipart(request.rfile, pdict_bytes)
        return {k: v[0].decode("utf-8") for (k, v) in params_bytes.items()}
def send_email_from_request(request, email, password, recipients):
    """Parse subject/body out of *request* and send the email with the
    configured credentials."""
    params = parse_form_params(request)
    send_email(sender_email=email, sender_pass=password, recipients=recipients,
               subject=params["subject"], body=params["body"])
def json_response(response, obj, status=200):
    """Serialise *obj* as pretty-printed JSON onto *response*.
    :param response: a BaseHTTPRequestHandler-like object
    :param obj: JSON-serialisable payload
    :param status: HTTP status code to send (default 200)
    """
    # BUG FIX: the status parameter was ignored -- 200 was always sent,
    # so error replies from do_POST also reported success
    response.send_response(status)
    response.send_header("Content-type", "application/json")
    response.end_headers()
    body = bytes(json.dumps(obj, indent=4, sort_keys=True) + "\n", "utf-8")
    response.wfile.write(body)
def getHandler(email, password, recipients):
    """Build a request-handler class bound to the given SMTP credentials.
    POST requests trigger an email; the reply is {"status": "ok"} on
    success or {"status": "error", "message": ...} on failure.
    """
    class EmailHandler(BaseHTTPRequestHandler):
        def do_POST(self):
            try:
                send_email_from_request(self, email, password, recipients)
                json_response(self, dict(status="ok"))
            except Exception as exc:
                # Log the full traceback server-side; report the message only
                traceback.print_exc()
                json_response(self, dict(status="error", message=str(exc)), status=400)
    return EmailHandler
def run_server(recipients, bindaddr="", port=8000):
    """Read SMTP credentials from the .auth file next to this script and
    serve the email gateway forever.
    The first line of the auth file is '<email> <password>'.
    :param recipients: addresses every notification is sent to
    :param bindaddr: interface to bind (default: all)
    :param port: TCP port to listen on
    """
    dirname = os.path.dirname(os.path.realpath(__file__))
    auth_path = os.path.join(dirname, "i2pc-backup-email-server.auth")
    # BUG FIX: close the credentials file instead of leaking the handle
    with open(auth_path) as auth_file:
        email, password = auth_file.readline().split(None, 1)
    httpd = HTTPServer((bindaddr, port), getHandler(email, password, recipients))
    httpd.serve_forever()
if __name__ == "__main__":
recipients = sys.argv[1:]
if not recipients:
raise ValueError("Usage: i2pc-backup-email-server RECIPIENT [...RECIPIENT]")
run_server(port=1396, recipients=recipients)
|
globalVar = "Global Scope"
print("Global Variable called from outside class before class call:",globalVar)
class FirstClass():
classVar = 10
print("Global Variable called from inside class:",globalVar)
#Here self is the instance of the class
def class_meth(self):
print("Type is", type(self))
print("Hello. Method inside Class")
print("Global Variable called from method inside class:",globalVar)
print("Inside method calling classVar:",FirstClass.classVar)
print("Global Variable called from outside class after class call:",globalVar)
ob = FirstClass()
ob.class_meth()
print("Class Variable:", FirstClass.classVar) |
from autodisc.representations.static.statisticsrepresentation import StatisticRepresentation
from autodisc.representations.static.pytorchnnrepresentation.pytorchnnrepresentation import PytorchNNRepresentation
import autodisc.representations.static.pytorchnnrepresentation
|
def parse(data):
    """Interpret the Deadfish program *data* and return its output list.
    Commands: i increment, d decrement, s square, o output the current
    value. The value starts at 0; unknown characters are ignored.
    """
    output = []
    value = 0
    for command in data:
        if command == 'i':
            value += 1
        elif command == 'd':
            value -= 1
        elif command == 's':
            value *= value
        elif command == 'o':
            output.append(value)
    return output
'''
Write a simple parser that will parse and run Deadfish.
Deadfish has 4 commands, each 1 character long:
i increments the value (initially 0)
d decrements the value
s squares the value
o outputs the value into the return array
Invalid characters should be ignored.
parse("iiisdoso") ==> [8, 64]
'''
|
"""Core components of the policy daemon."""
from asgiref.sync import sync_to_async
import asyncio
import concurrent.futures
from email.message import EmailMessage
import logging
import aiosmtplib
from dateutil.relativedelta import relativedelta
from redis import asyncio as aioredis
from django.conf import settings
from django.db import connections
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils import translation
from django.utils.translation import gettext as _, gettext_lazy
from modoboa.admin import constants as admin_constants
from modoboa.admin import models as admin_models
from modoboa.core import models as core_models
from modoboa.lib.email_utils import split_mailbox
from . import constants
logger = logging.getLogger("modoboa.policyd")
SUCCESS_ACTION = b"dunno"
FAILURE_ACTION = b"defer_if_permit Daily limit reached, retry later"
def close_db_connections(func, *args, **kwargs):
    """
    Make sure to close all connections to DB.
    To use in threads.
    Decorator: runs *func*, then closes every Django DB connection so
    threads do not leak connections back to the pool.
    """
    import functools

    # BUG FIX: preserve the wrapped function's metadata (__name__, __doc__);
    # without wraps, all decorated callables looked identical in logs/debuggers
    @functools.wraps(func)
    def _close_db_connections(*args, **kwargs):
        ret = None
        try:
            ret = func(*args, **kwargs)
        finally:
            for conn in connections.all():
                conn.close()
        return ret
    return _close_db_connections
async def wait_for(dt):
    """sleep until the specified datetime."""
    one_day = 86400
    # asyncio.sleep doesn't like long sleeps, so nap at most one day at a
    # time until less than a day remains, then sleep the exact remainder.
    while True:
        remaining = (dt - timezone.now()).total_seconds()
        if remaining < one_day:
            break
        await asyncio.sleep(one_day)
    await asyncio.sleep(remaining)
async def run_at(dt, coro, *args):
    """Run coroutine at given datetime.
    :param dt: datetime to wait for
    :param coro: coroutine function to invoke
    :param args: positional arguments forwarded to *coro*
    :returns: whatever *coro* returns
    """
    await wait_for(dt)
    return await coro(*args)
@close_db_connections
def get_local_config():
    """Return local configuration (the first LocalConfig row)."""
    return core_models.LocalConfig.objects.first()
@close_db_connections
def get_notification_recipients():
    """Return superadmins with a mailbox (queryset of User)."""
    return (
        core_models.User.objects
        .filter(is_superuser=True, mailbox__isnull=False)
    )
@close_db_connections
def create_alarm(ltype, name):
    """Open a 'sending limit reached' alarm for a domain or a mailbox.
    :param ltype: "domain" for a domain alarm, anything else for a mailbox
    :param name: domain name or full mailbox address
    """
    title = _("Daily sending limit reached")
    internal_name = "sending_limit"
    if ltype == "domain":
        target = admin_models.Domain.objects.get(name=name)
        target.alarms.create(title=title, internal_name=internal_name)
    else:
        localpart, domain_name = split_mailbox(name)
        mailbox = admin_models.Mailbox.objects.get(
            address=localpart, domain__name=domain_name)
        mailbox.alarms.create(
            domain=mailbox.domain, title=title, internal_name=internal_name)
async def notify_limit_reached(ltype, name):
    """Send a notification to super admins about item.
    :param ltype: "account" or "domain"
    :param name: mailbox address or domain name that exhausted its limit
    """
    ltype_translations = {
        "account": gettext_lazy("account"),
        "domain": gettext_lazy("domain")
    }
    # We're going to execute sync code so we need an executor
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
    loop = asyncio.get_event_loop()
    # Fetch config and recipients, and record the alarm, concurrently
    futures = [
        loop.run_in_executor(executor, get_local_config),
        loop.run_in_executor(executor, get_notification_recipients),
        loop.run_in_executor(executor, create_alarm, ltype, name),
    ]
    lc, recipients, junk = await asyncio.gather(*futures)
    sender = lc.parameters.get_value("sender_address", app="core")
    for recipient in recipients:
        # Render each notification in the recipient's own language
        with translation.override(recipient.language):
            content = render_to_string(
                "policyd/notifications/limit_reached.html", {
                    "ltype": ltype_translations[ltype], "name": name
                })
            subject = _("[modoboa] Sending limit reached")
            msg = EmailMessage()
            msg["From"] = sender
            msg["To"] = recipient.email
            msg["Subject"] = subject
            msg.set_content(content)
            await aiosmtplib.send(msg)
async def decrement_limit(rclient, ltype, name):
    """Decrease the counter stored for *name* by one; when it reaches
    zero (or below), schedule a super-admin notification."""
    remaining = await rclient.hincrby(constants.REDIS_HASHNAME, name, -1)
    if remaining <= 0:
        logger.info("Limit reached for {} {}".format(ltype, name))
        asyncio.ensure_future(notify_limit_reached(ltype, name))
async def apply_policies(attributes):
    """Apply defined policies to received request.
    Checks the per-domain and per-account counters stored in redis for the
    authenticated sender and returns SUCCESS_ACTION or FAILURE_ACTION.
    :param attributes: dict of postfix policy attributes
    """
    sasl_username = attributes.get("sasl_username")
    if not sasl_username:
        # Unauthenticated traffic is not rate limited here
        return SUCCESS_ACTION
    rclient = aioredis.from_url(settings.REDIS_URL, encoding="utf-8", decode_responses=True)
    # BUG FIX: close the redis connection on every exit path -- the early
    # FAILURE_ACTION returns previously leaked it
    try:
        decr_domain = False
        decr_user = False
        localpart, domain = split_mailbox(sasl_username)
        if await rclient.hexists(constants.REDIS_HASHNAME, domain):
            counter = await rclient.hget(constants.REDIS_HASHNAME, domain)
            logger.info("Domain {} current counter: {}".format(domain, counter))
            if int(counter) <= 0:
                return FAILURE_ACTION
            decr_domain = True
        if await rclient.hexists(constants.REDIS_HASHNAME, sasl_username):
            counter = await rclient.hget(constants.REDIS_HASHNAME, sasl_username)
            logger.info("Account {} current counter: {}".format(
                sasl_username, counter))
            if int(counter) <= 0:
                return FAILURE_ACTION
            decr_user = True
        # Only decrement once both checks have passed
        if decr_domain:
            await decrement_limit(rclient, "domain", domain)
        if decr_user:
            await decrement_limit(rclient, "account", sasl_username)
        logger.debug("Let it pass")
        return SUCCESS_ACTION
    finally:
        await rclient.close()
async def handle_connection(reader, writer):
    """Coroutine to handle a new connection to the server.
    Reads one postfix policy request (blank-line terminated), applies the
    policies at RCPT state, and writes back the resulting action.
    """
    action = SUCCESS_ACTION
    try:
        logger.debug("Reading data")
        data = await reader.readuntil(b"\n\n")
    except asyncio.IncompleteReadError:
        # Truncated request: fall through and answer with the default action
        pass
    else:
        attributes = {}
        for line in data.decode().split("\n"):
            if not line:
                continue
            try:
                # BUG FIX: split on the first '=' only so attribute values
                # containing '=' are no longer silently dropped
                name, value = line.split("=", 1)
            except ValueError:
                continue
            attributes[name] = value
        state = attributes.get("protocol_state")
        if state == "RCPT":
            logger.debug("Applying policies")
            action = await apply_policies(attributes)
            logger.debug("Done")
    logger.debug("Sending action %s", action)
    writer.write(b"action=" + action + b"\n\n")
    await writer.drain()
async def new_connection(reader, writer):
    """Entry point for each client connection: delegate to
    handle_connection with a 5s timeout, then always close the writer."""
    try:
        await asyncio.wait_for(handle_connection(reader, writer), timeout=5)
    except asyncio.TimeoutError as err:
        logger.warning("Timeout received while handling connection: %s", err)
    finally:
        writer.close()
        if hasattr(writer, "wait_closed"):
            # Python 3.7+ only
            await writer.wait_closed()
        logger.info("exit")
def get_next_execution_dt():
    """Return next execution date and time (the upcoming midnight)."""
    tomorrow = timezone.now() + relativedelta(days=1)
    return tomorrow.replace(hour=0, minute=0, second=0)
@sync_to_async
@close_db_connections
def get_domains_to_reset():
    """
    Return a list of domain to reset.
    We also close all associated alarms.
    """
    qset = admin_models.Domain.objects.filter(message_limit__isnull=False)
    # BUG FIX: alarms are created with internal_name="sending_limit" (see
    # create_alarm above); the old filter on "limit_reached" matched nothing,
    # so limit alarms were never closed.
    admin_models.Alarm.objects.filter(
        internal_name="sending_limit", domain__in=qset,
        status=admin_constants.ALARM_OPENED
    ).update(
        status=admin_constants.ALARM_CLOSED, closed=timezone.now()
    )
    return list(qset)
@sync_to_async
@close_db_connections
def get_mailboxes_to_reset():
    """
    Return a list of mailboxes to reset.
    We also close all associated alarms.
    """
    qset = (
        admin_models.Mailbox.objects.filter(message_limit__isnull=False)
        .select_related("domain")
    )
    # BUG FIX: alarms are created with internal_name="sending_limit" (see
    # create_alarm above); the old filter on "limit_reached" matched nothing,
    # so limit alarms were never closed.
    admin_models.Alarm.objects.filter(
        internal_name="sending_limit", mailbox__in=qset,
        status=admin_constants.ALARM_OPENED
    ).update(
        status=admin_constants.ALARM_CLOSED, closed=timezone.now()
    )
    return list(qset)
async def reset_counters():
    """Reset all counters.
    Restores every limited domain/mailbox counter in redis to its
    configured message limit, then reschedules itself for the next run.
    """
    rclient = aioredis.from_url(settings.REDIS_URL, encoding="utf-8", decode_responses=True)
    logger.info("Resetting all counters")
    for domain in await get_domains_to_reset():
        await rclient.hset(
            constants.REDIS_HASHNAME, domain.name, domain.message_limit)
    for mb in await get_mailboxes_to_reset():
        await rclient.hset(
            constants.REDIS_HASHNAME, mb.full_address, mb.message_limit)
    await rclient.close()
    # reschedule
    asyncio.ensure_future(run_at(get_next_execution_dt(), reset_counters))
def start_reset_counters_coro():
    """Start coroutine: schedule the first reset_counters run at the next
    midnight; reset_counters then reschedules itself."""
    # CONSISTENCY FIX: reuse get_next_execution_dt() instead of duplicating
    # the same date arithmetic inline
    asyncio.ensure_future(run_at(get_next_execution_dt(), reset_counters))
|
# -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
class Contact(models.Model):
    # Sender's name.
    name = models.CharField(u'名前',max_length=50)
    # Inquiry body text.
    content = models.TextField(u'内容', max_length=1000)
    # Timestamp of the inquiry; defaults to creation time.
    created_at = models.DateTimeField(u'問い合わせ日時', default=datetime.now)
    def __unicode__(self):
        """
        String representation of the model:
        strip newlines from the content and return the first 20 characters.
        """
        return ''.join(unicode(self.content).splitlines())[:20]
    class Meta:
        # Sort order: newest first
        ordering = ('-created_at',)
        # Singular display name
        verbose_name = u'問い合わせ'
        # Plural display name
        verbose_name_plural=u'問い合わせ'
|
import smtplib, ssl
from email.message import EmailMessage

# Build the notification message; the placeholder strings must be filled in
# with real values before use.
msg = EmailMessage()
msg.set_content("Your_Message")
msg["Subject"] = "Graphics Card"
msg["From"] = "Your_Email"
msg["To"] = ""
context=ssl.create_default_context()
# BUG FIX: Gmail's SMTP host is smtp.gmail.com and its STARTTLS port is 587;
# the original "smtp.google.com" on port 28 cannot connect.
with smtplib.SMTP("smtp.gmail.com", port=587) as smtp:
    smtp.starttls(context=context)
    smtp.login(msg["From"], "Password_Goes_Here")
    smtp.send_message(msg)
# BUG FIX: removed `server.quit()` -- `server` was never defined (NameError)
# and the `with` block already closes the connection.
|
''' Projeto: Repositório de senhas.
-> Este é um programa de gerenciamento de senhas não seguro.
Porém oferece uma demonstração básica de como esses programas funcionam.
-> Livro: Automatize tarefas maçantes com Python - AL Sweigart (pag 180).
-> Aluno: João Pedro M. Riuto'''
#! python 3
import sys, pyperclip
# Account -> password store. Plain text on purpose: this is an insecure demo.
PASSWORDS = {'email': '3nqPEaMZNF7tvnv',
             'blog': 'G1R12yH2DuX7nii',
             'lugagge': '12345'}
# Check that the user supplied the account-name parameter.
if len(sys.argv) < 2:
    print('Usage: python pw.py [account] - copy account password.')
    sys.exit()
account = sys.argv[1]  # The first command-line argument is the account name.
if account in PASSWORDS:
    pyperclip.copy(PASSWORDS[account])
    print('Password for ' + account + ' copied to clipboard. Enjoy!')
else:
    print('There is no account named ' + account)
import os
from bsm.util import safe_rmdir
from bsm.logger import get_logger
_logger = get_logger()
def run(param):
    """Remove the directories selected by the package's "clean" config.
    :param param: dict providing 'config_package' (the clean list and the
        source/build paths) and 'package_path' (misc_dir, log_dir)
    :returns: True (always)
    """
    clean_dirs = param['config_package'].get('clean', [])
    # BUG FIX: removed a stray no-op expression statement
    # (['build', 'download', 'source', 'log']) -- presumably the intended
    # default for the .get() above; behaviour left unchanged. TODO confirm.
    for d in clean_dirs:
        if d in ['source', 'build']:
            dir_path = param['config_package'].get('path', {}).get(d)
        elif d == 'download':
            dir_path = os.path.join(param['package_path']['misc_dir'], 'download')
        elif d == 'log':
            dir_path = param['package_path']['log_dir']
        else:
            # Unknown clean entries are ignored
            continue
        if dir_path:
            safe_rmdir(dir_path)
            _logger.debug('Clean "{0}": {1}'.format(d, dir_path))
        else:
            _logger.warn('Clean directory not found for: {0}'.format(d))
    return True
|
# -*- coding: utf-8 -*-
import unittest
from unittest import TestCase
import boto.s3.connection
from boto.s3.key import Key
import urllib, urllib2
import StringIO
# Connection settings for the S3-compatible endpoint under test.
# NOTE(review): hard-coded test-fixture credentials -- do not reuse elsewhere.
s3_cred = { 'host': 'precise64',
    'port': 8000,
    #'port': 80,
    'access_key':'4WLAD43EZZ64EPK1CIRO',
    'secret_key':'uGA3yy/NJqITgERIVmr9AgUZRBqUjPADvfQoxpKL',
    'bucket': 'test1',
}
# Part size used for multipart uploads (5 MiB).
U_M_LIMIT = 5 * 1024 * 1024
class Tester():
def __init__(self, host, port, akey, skey, bucket, fkey, content, content_type, multipart_file_size):
self.fkey = fkey
self.host = host
self.bucket = bucket
self.content = content
self.content_type = content_type
self.multipart_file_size = multipart_file_size
self.conn = boto.s3.connection.S3Connection(host=host, port=port, is_secure=False, aws_access_key_id=akey,
aws_secret_access_key=skey, calling_format=boto.s3.connection.OrdinaryCallingFormat())
def create_bucket(self):
self.conn.create_bucket(self.bucket)
def delete(self):
bucket_obj = self.conn.get_bucket(self.bucket)
k = Key(bucket_obj)
k.key = self.fkey
bucket_obj.delete_key(k)
def upload(self):
bucket = self.conn.get_bucket(self.bucket)
k = Key(bucket)
k.key = self.fkey
k.set_contents_from_string(self.content, headers={'Content-Type': str(self.content_type)})
def upload_with_headers(self):
bucket = self.conn.get_bucket(self.bucket)
k = Key(bucket)
k.key = self.fkey
headers = {'Content-Type': str(self.content_type),
'x-amz-meta-origin': 'valtest',
'x-amz-meta-origin-a': 'valtest-a'}
k.set_contents_from_string(self.content, headers=headers )
headers = {'Content-Type': str(self.content_type),
'x-amz-meta-origin-a': 'valtest-a'}
k.set_contents_from_string(self.content, headers=headers )
def set_acl(self, policy):
bucket = self.conn.get_bucket(self.bucket)
k = Key(bucket)
k.key = self.fkey
k.set_acl(policy)
def test_upload(self):
self.delete()
self.upload()
self.set_acl('public-read')
bucket = self.conn.get_bucket(self.bucket)
k2 = Key(bucket)
k2.key = self.fkey
if k2.get_contents_as_string()!=self.content:
return False
return True
def test_upload_with_headers(self):
self.delete()
self.upload_with_headers()
self.set_acl('public-read')
bucket = self.conn.get_bucket(self.bucket)
k2 = Key(bucket)
k2.key = self.fkey
if k2.get_contents_as_string()!=self.content:
return False
return True
def test_upload_private_acl(self):
self.delete()
self.upload()
self.set_acl('private')
try:
urllib.urlretrieve('http://'+self.host+'/'+self.fkey)
except urllib2.HTTPError, code:
return False
return True
def test_get_metadata(self):
self.delete()
self.upload()
bucket_obj = self.conn.get_bucket(self.bucket)
k = bucket_obj.get_key(self.fkey)
if 'dict' in str(type(k.metadata)):
return True
return False
def test_delete(self):
self.upload()
self.delete()
return True
def test_public_read_acl(self):
self.delete()
self.upload()
self.set_acl('public-read')
bucket_obj = self.conn.get_bucket(self.bucket)
acl_info = bucket_obj.get_acl(key_name=self.fkey)
S3_PUBLIC_POLICY_URI = 'http://acs.amazonaws.com/groups/global/AllUsers'
for aicg in acl_info.acl.grants:
if aicg.uri == S3_PUBLIC_POLICY_URI:
if aicg.permission == "READ":
return True
return False
def multipart_upload(self):
    # Upload a synthetic buffer of self.multipart_file_size 'a' bytes via the
    # S3 multipart API, in parts of at most U_M_LIMIT bytes (module constant).
    fh = StringIO.StringIO('a' * self.multipart_file_size)
    bucket = self.conn.get_bucket(self.bucket)
    key = Key(bucket)
    key.key = self.fkey
    mp = bucket.initiate_multipart_upload(key)
    try:
        fh.seek(0, 0)
        pos = 0
        part_num = 0
        # Walk the buffer in U_M_LIMIT-sized chunks; the final part may be smaller.
        while pos < self.multipart_file_size - 1:
            if pos + U_M_LIMIT > self.multipart_file_size:
                part_size = self.multipart_file_size - pos
            else:
                part_size = U_M_LIMIT
            part_num += 1
            mp.upload_part_from_file(fh, part_num, size=part_size)
            pos += part_size
        mp.complete_upload()
    except:
        # Abort the multipart upload so no orphaned parts remain, then re-raise.
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit.
        mp.cancel_upload()
        raise
    return True
def test_multipart_upload(self):
    # Exercise the multipart path end-to-end, then clean up the object.
    self.multipart_upload()
    self.delete()
    return True
class BotoTest(TestCase):
    """Integration tests driving Tester against the S3 endpoint in s3_cred."""

    def setUp(self):
        # Fresh Tester per test; the file size forces the multipart path
        # (U_M_LIMIT + 100 bytes > one part).
        self.boto_tester = Tester(s3_cred['host'], s3_cred['port'], s3_cred['access_key'],
                                  s3_cred['secret_key'], s3_cred['bucket'], 'filename.txt',
                                  'filecontentttttt', 'text/html', U_M_LIMIT + 100)

    def test_upload(self):
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12).
        self.assertEqual(self.boto_tester.test_upload(), True)

    def test_upload_with_headers(self):
        self.assertEqual(self.boto_tester.test_upload_with_headers(), True)

    def test_delete(self):
        self.assertEqual(self.boto_tester.test_delete(), True)

    def test_public_read_acl(self):
        self.assertEqual(self.boto_tester.test_public_read_acl(), True)

    def test_upload_private_acl(self):
        self.assertEqual(self.boto_tester.test_upload_private_acl(), True)

    def test_upload_multipart(self):
        self.assertEqual(self.boto_tester.test_multipart_upload(), True)
#---------------------------------------
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
def getModifiedArray(length, updates):
    """Apply range-increment updates to a zero array of size *length*.

    Each update is [start, end, inc]: add inc to every index in the inclusive
    range [start, end]. Uses the difference-array technique, so the cost is
    O(len(updates) + length) instead of O(len(updates) * length).

    :param length: size of the result array
    :param updates: iterable of [start_index, end_index, increment]
    :returns: list of length *length* with all updates applied
    """
    diff = [0] * (length + 1)  # one extra slot so end + 1 never overflows
    for start, end, inc in updates:
        diff[start] += inc
        diff[end + 1] -= inc
    # Prefix-sum the difference array to materialise the final values.
    # range() replaces Python-2-only xrange (this file prints py3-style).
    for i in range(1, length):
        diff[i] += diff[i - 1]
    return diff[:-1]
# Quick smoke test: expected output is [-2, 0, 3, 5, 3].
print(getModifiedArray(5, [[1, 3, 2],[2, 4, 3],[0, 2, -2]]))
#[
# [1,3,2],
# [2,4,3],
# [0,2,-2],
#]
|
from __future__ import absolute_import
from collections import defaultdict
import grequests
from celery import shared_task
from django.conf import settings
from documentos.helpers import split_document
from documentos.models import Frame
from gerente.datatxt_helpers import Datatxt
from pruebas.helpers import compute_confusion_matrix
from pruebas.models import BaseTestResult, DocumentAnnotation, FrameAnnotation, \
DocumentTestResult
def compute_class_mapping():
    """Invert settings.MODEL_MAPPINGS into {super_node_name: [topics]}.

    NOTE: Python 2 only (dict.iteritems).
    """
    mappings = defaultdict(list)
    for mapping in settings.MODEL_MAPPINGS:
        for topic, sn in mapping.iteritems():
            if topic not in mappings[sn]:
                mappings[sn].append(topic)
    # NOTE(review): this stores a *string* under the misspelled key while all
    # other values are lists of topics; callers doing set(mappings.get(k))
    # would get a set of characters for this key -- confirm the alias is
    # intentional before changing it.
    mappings['developemental state'] = 'developmental state'
    return mappings
def score_result(right_class, founded_class):
    """Score a single-label prediction against the expected class.

    Exactly one of tp/fn/fp is set to 1:
      - tp when the prediction matches,
      - fn when the classifier returned nothing (''),
      - fp otherwise.
    Precision/recall fall back to 1 when their denominator is zero.

    :param right_class: expected class name
    :param founded_class: predicted class name ('' means no prediction)
    :returns: dict with tp/tn/fp/fn counts plus precision, recall, accuracy
              and fscore for this single sample
    """
    # The unused `classes_count = 8` local from the original was removed.
    tp = tn = fp = fn = 0
    if right_class == founded_class:
        tp = 1
    elif founded_class == '':
        fn = 1
    else:
        fp = 1
    try:
        precision = tp/float(tp + fp)
    except ZeroDivisionError:
        precision = 1
    try:
        recall = tp/float(tp + fn)
    except ZeroDivisionError:
        recall = 1
    # Denominator is always 1 here: exactly one counter was set above.
    accuracy = (tp + tn)/float(tp + tn + fp + fn)
    fscore = (2 * tp)/float(2 * tp + fp + fn)
    return {
        'tp': tp,
        'tn': tn,
        'fp': fp,
        'fn': fn,
        'precision': precision,
        'recall': recall,
        'accuracy': accuracy,
        'fscore': fscore
    }
def score_result_complex(gs, res, mappings):
    """Score a multi-label prediction *res* against gold standard *gs*.

    Each predicted key is translated through *mappings* into a list of
    topics; a prediction counts as a true positive when that list intersects
    the gold keys. tn assumes a fixed universe of 8 classes.
    """
    expected = gs.keys()
    predicted = [mappings.get(key) for key in res.keys()]
    hits = [
        True
        for topics in predicted
        if len(set(topics).intersection(set(expected)))
    ]
    tp = len(hits)
    tn = 8 - len(predicted)
    fp = len(predicted) - tp
    fn = len(expected) - tp
    try:
        precision = tp/float(tp + fp)
    except ZeroDivisionError:
        precision = 1
    try:
        recall = tp/float(tp + fn)
    except ZeroDivisionError:
        recall = 1
    accuracy = (tp + tn)/float(tp + tn + fp + fn)
    fscore = (2 * tp)/float(2 * tp + fp + fn)
    return {
        'tp': tp, 'tn': tn, 'fp': fp, 'fn': fn,
        'precision': precision, 'recall': recall,
        'accuracy': accuracy, 'fscore': fscore
    }
def analyze_frame(res, threshold=0.3):
    """Pick the best classification category from a datatxt response.

    :param res: HTTP response whose .json() payload contains a 'categories'
        list of {'name': ..., 'score': ...} dicts
    :param threshold: minimum score for the best category to be accepted
    :returns: (best category name, or '' if none passes, and the full JSON
        payload)
    """
    payload = res.json()  # parse once; the original parsed it twice
    res_topics = payload.get('categories', {})
    result = ''
    if len(res_topics):
        best_obj = sorted(
            res_topics, key=lambda x: x.get('score', 0), reverse=True)[0]
        if best_obj.get('score', 0) >= threshold:
            result = best_obj.get('name')
        else:
            # print() call -- the original Python-2 print statement is a
            # syntax error on Python 3.
            print(best_obj.get('score', 0))
    return result, payload
def analyze_doc(doc, model_id, dt, threshold=0.25):
    """Classify a document chunk-by-chunk and accumulate per-category scores.

    The document is split with split_document(), all parts are classified
    concurrently via grequests, and for each part the single best category
    at or above *threshold* contributes its score to the tally.

    :param doc: document object with an original_text attribute
    :param model_id: datatxt model id
    :param dt: Datatxt client; classify(..., True) must return a grequests
        request object
    :param threshold: minimum score for a part's best category to count
    :returns: (dict category -> summed score, list of per-part raw results)
    """
    all_results = defaultdict(int)
    raw_results = []
    reqs = []
    reqs_data = []
    for idx, part in enumerate(split_document(doc.original_text)):
        reqs.append(dt.classify(model_id, part, True))
        reqs_data.append(part)
    for idx, res in enumerate(grequests.map(reqs)):
        res_json = res.json()
        res_topics = res_json.get('categories', {})
        # Keep the raw response per part for later inspection/debugging.
        raw_results.append({
            'text': reqs_data[idx],
            'response': res_topics,
            'threshold': threshold
        }
        )
        if len(res_topics):
            best_obj = sorted(
                res_topics, key=lambda x: x.get('score', 0), reverse=True)[0]
            if best_obj.get('score', 0) >= threshold:
                all_results[best_obj.get('name')] += best_obj.get('score')
    return all_results, raw_results
def compute_micro(scores):
    """Micro-average precision/recall/F1 over per-sample score dicts.

    Counts are pooled across all samples first, then the ratios are taken.

    Bug fix: the original assigned tp/(tp+fp) to *recall* and tp/(tp+fn) to
    *precision* -- the two formulas were swapped. Precision penalises false
    positives; recall penalises false negatives.

    :param scores: iterable of dicts with 'tp', 'fp' and 'fn' counts
    :returns: dict with micro 'precision', 'recall' and 'fscore'
    :raises ZeroDivisionError: when a denominator is zero (as the original did)
    """
    tp = fp = fn = 0
    for score in scores:
        tp += score.get('tp')
        fp += score.get('fp')
        fn += score.get('fn')
    precision = tp / float(tp + fp)
    recall = tp / float(tp + fn)
    # Harmonic mean of precision and recall.
    fscore = 2 / (1 / recall + 1 / precision)
    ret_val = {
        'recall': recall,
        'precision': precision,
        'fscore': fscore
    }
    return ret_val
def compute_macro(scores):
    """Macro-average precision/recall over per-sample score dicts; the F1
    reported is the harmonic mean of the two macro averages."""
    recalls = [score.get('recall') for score in scores]
    precisions = [score.get('precision') for score in scores]
    recall = sum(recalls) / float(len(recalls))
    precision = sum(precisions) / float(len(precisions))
    fscore = 2 / (1 / recall + 1 / precision)
    return {
        'recall': recall,
        'precision': precision,
        'fscore': fscore
    }
@shared_task
def test_document_set(model, document_group, threshold=0.32):
    # Celery task (Python 2): create a temporary datatxt classifier from the
    # model, classify every document in the group, store per-document
    # annotations, and record the aggregate result on a DocumentTestResult.
    # Returns 0 on success, 1 on failure, False when model creation fails.
    print 'Testing {}'.format(document_group)
    docs = document_group.basedocument_set.all()
    #create a new classifier on datatxt
    dt = Datatxt()
    req = dt.create_model(model.json_model)
    if not req.ok:
        print 'Datatxt call earth, we have a problem'
        return False
    res = req.json()
    datatxt_id = res.get('id')
    test_result = DocumentTestResult.objects.create(
        json_model=model.json_model,
        model_version=model,
        document_group=document_group
    )
    global_results = {}
    all_done = True
    ret_code = 0
    try:
        all_count = docs.count()
        count = 1
        for doc in docs:
            print "{}/{}".format(count, all_count)
            count += 1
            res, raw_results = analyze_doc(doc, datatxt_id, dt, threshold)
            # Remember which documents matched each category.
            for key, value in res.iteritems():
                if key not in global_results:
                    global_results[key] = []
                global_results[key].append(doc.pk)
            # create a document Annotation
            DocumentAnnotation.objects.create(
                test_results=res,
                document=doc,
                test_running=test_result,
                raw_result=raw_results,
            )
    except Exception, e:
        # Roll back everything created so far on any failure.
        print "huston we have a problem"
        print e
        [doc_a.delete() for doc_a in test_result.documentannotation_set.all()]
        test_result.delete()
        all_done = False
        ret_code = 1
    finally:
        #delete model
        # Always remove the temporary datatxt model and release the group.
        dt.delete_model(datatxt_id)
        document_group.testing_task_id = None
        document_group.save()
    if all_done:
        test_result.scoring_result = global_results
        test_result.save()
    return ret_code
def chunks(l, n):
    """Yield successive n-sized chunks from l.

    Uses range() so the helper works on both Python 2 and Python 3
    (the original xrange is Python 2 only; iteration behaviour is identical).

    :param l: a sliceable sequence
    :param n: chunk size (must be > 0)
    """
    for i in range(0, len(l), n):
        yield l[i:i+n]
@shared_task
def test_model(datatxt_id, model, threshold=0):
    # Celery task (Python 2): evaluate datatxt model *datatxt_id* on every
    # frame that shares a node with the model's generation frames (excluding
    # the generation frames themselves), store per-frame annotations, then
    # micro/macro aggregates and a confusion matrix on a BaseTestResult.
    # Returns 0 on success, 1 on failure.
    # get frame to test
    generator_frames = model.generation_frames.all()\
        .values_list('pk', flat=True)
    frame_nodes_pk_list = set(model.generation_frames.all()
                              .values_list('node__pk', flat=True))
    frame_to_analyze = Frame.objects.filter(node__pk__in=frame_nodes_pk_list)\
        .exclude(pk__in=generator_frames)
    current_gs = model.goal_standard
    dt = Datatxt()
    test_result = BaseTestResult()
    test_result.json_model = model.json_model
    test_result.model_version = model
    test_result.save()
    ret_code = 0
    # tests all frames
    try:
        all_scores = []
        all_count = frame_to_analyze.count()
        count = 1
        # Classify in batches of 40 concurrent requests.
        frames_chuncked = chunks(frame_to_analyze, 40)
        for frames in frames_chuncked:
            reqs = []
            annotations = []
            for idx, frame_ in enumerate(frames):
                reqs.append(dt.classify(datatxt_id, frame_.text, True))
            # grequests.map preserves request order, so idx maps back to frames.
            for idx, res_obj in enumerate(grequests.map(reqs)):
                print "{}/{}".format(count, all_count)
                count += 1
                frame = frames[idx]
                current_class = frame.node.super_node\
                    .get(goal_standard=current_gs).name
                found_class, raw_res = analyze_frame(
                    res_obj, threshold)
                print 'frame: {} compute score: {} - {}'.format(
                    frame.pk, current_class, found_class)
                score = score_result(current_class, found_class)
                all_scores.append(score)
                # score this annotation
                annotations.append(FrameAnnotation(
                    test_results=score,
                    raw_scoring=found_class,
                    raw_result=raw_res,
                    frame=frame,
                    test_running=test_result
                ))
            FrameAnnotation.objects.bulk_create(annotations)
        #compute mico/macro precision
        micro = compute_micro(all_scores)
        test_result.micro_f1 = micro.get('fscore')
        test_result.micro_precision = micro.get('precision')
        test_result.micro_recall = micro.get('recall')
        macro = compute_macro(all_scores)
        test_result.macro_f1 = macro.get('fscore')
        test_result.macro_precision = macro.get('precision')
        test_result.macro_recall = macro.get('recall')
        test_result.save()
        confusion_matrix = compute_confusion_matrix(test_result, current_gs)
        test_result.confusion_matrix = confusion_matrix
        test_result.save()
    except Exception, e:
        # Roll back annotations and the result row on any failure.
        print 'Huston we have a problem!'
        print e
        [frame_a.delete() for frame_a in test_result.frameannotation_set.all()]
        test_result.delete()
        ret_code = 1
    finally:
        # Always drop the temporary datatxt model and release the task slot.
        dt.delete_model(datatxt_id)
        model.testing_task_id = None
        model.save()
    return ret_code
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 8 14:51:06 2019
@author: kanchana
"""
import numpy as np

# Gauss-Jordan elimination on a random 9x10 matrix, reducing A in place to
# reduced row echelon form and printing before/after.
A = np.random.rand(9,10)
print(A)
m = A.shape[0]  # number of rows
n = A.shape[1]  # number of columns
i = 0  # current pivot row
j = 0  # current pivot column
jb = []  # pivot-column indices found so far
while i <= m-1 and j <= n-1:
    #print( A[i:m,j].size)
    if A[i:m,j].size == 0 :
        break
    else:
        # Pivot selection: largest entry in column j at/below row i.
        # NOTE(review): p is computed but never used, and the selection uses
        # the raw value rather than abs(); there is no zero-pivot guard --
        # fine for random data, confirm before using on general matrices.
        p = np.max(A[i:m,j])
        k = np.argmax(A[i:m,j])
        #print(i,j)
        xx = list(range(m))
        jb = np.append(jb,j)
        k=k+i  # argmax was relative to row i; make the index absolute
        # Swap the pivot row into position, then normalise it.
        A[[i,k],j:n] = A[[k,i],j:n]
        A[i,j:n] = np.divide(A[i,j:n],A[i,j])
        # Eliminate column j from every other row (k is reused as loop var).
        xx.remove(i)
        for k in xx:
            A[k,j:n] = A[k,j:n] - np.multiply(A[k,j], A[i,j:n])
        i = i+1
        j = j+1
print("Row Reduced Echelon form: ")
print(A)
|
import db_query
def get_action(hand, stack, last_opponent_action, position, db):
    """Decide a preflop action for *hand* at the given stack depth.

    Pushes when the stack is at or below the hand's push threshold from the
    database, checks when defending the big blind against a limp, and folds
    otherwise.

    :returns: [action, stack] where action is 'push', 'check' or 'fold'
    """
    push_threshold = db_query.get_valid_stack_value_to_push(hand, db)
    if int(stack) <= push_threshold:
        action = 'push'
    elif last_opponent_action == 'limp' and position == 'big_blind':
        action = 'check'
    else:
        action = 'fold'
    return [action, stack]
|
import re
testdata = [
    ["byr", 1920], ["byr", 1921], ["byr", 2001], ["byr", 2001],
    ["iyr", 2010], ["iyr", 2011], ["iyr", 2019], ["iyr", 2020],
    ["eyr", 2020], ["eyr", 2021], ["eyr", 2029], ["eyr", 2030],
    ["hgt", "150cm"], ["hgt", "151cm"], ["hgt", "193cm"], ["hgt", "59in"], ["hgt", "60in"], ["hgt", "76in"],
    ["hcl", "#012345"], ["hcl", "#abcdef"], ["hcl", "#0a1b2c"],
    ["ecl", "amb"], ["ecl", "blu"], ["ecl", "brn"], ["ecl", "gry"], ["ecl", "grn"], ["ecl", "hzl"], ["ecl", "oth"],
    ["pid", "000000001"], ["pid", "123456789"]
]

# Field rules (Advent of Code 2020 day 4, part 2):
#   byr 1920-2002; iyr 2010-2020; eyr 2020-2030;
#   hgt 150-193cm or 59-76in; hcl '#' + six lowercase hex digits;
#   ecl one of amb blu brn gry grn hzl oth; pid exactly nine digits;
#   cid ignored.
HCL_RE = re.compile(r"^#[0-9a-f]{6}$")
ECL_VALUES = {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}


def validate(field, value):
    """Return True when *value* is valid for passport *field*.

    Shared by both loops below; the original duplicated this logic inline,
    and its pid branch (`isinstance(value, int) == False`) rejected every
    valid nine-digit string pid. The first loop also silently accepted
    heights with an unknown unit; the second loop's explicit fallthrough is
    now applied to both.
    """
    if field == "byr":
        return 1920 <= int(value) <= 2002
    if field == "iyr":
        return 2010 <= int(value) <= 2020
    if field == "eyr":
        return 2020 <= int(value) <= 2030
    if field == "hgt":
        number, unit = value[:-2], value[-2:]
        if unit == "cm":
            return 150 <= int(number) <= 193
        if unit == "in":
            return 59 <= int(number) <= 76
        return False  # unknown unit
    if field == "hcl":
        return bool(HCL_RE.match(value))
    if field == "ecl":
        return value in ECL_VALUES
    if field == "pid":
        # Nine-digit *string*, leading zeroes allowed.
        return len(value) == 9 and value.isdigit()
    return False  # unknown field


# Every entry in testdata is expected to validate.
for test in testdata:
    if validate(test[0], test[1]):
        print("PASS")
    else:
        print("FAIL" + str(test))
print("----------------")
faildata = [
    ["byr", 1919], ["byr", 2003],
    ["iyr", 2009], ["iyr", 2021],
    ["eyr", 2019], ["eyr", 2031],
    ["hgt", "149cm"], ["hgt", "194cm"], ["hgt", "58in"], ["hgt", "77in"], ["hgt", "77i"],
    ["hcl", "#0123456"], ["hcl", "#abcdefg"], ["hcl", "#01234"], ["hcl", "#abcde"], ["hcl", "#0a1b2c3"], ["hcl", "#0a1b2"],
    ["hcl", "0a1b2c3"], ["hcl", "123456"], ["hcl", "abcdef"], ["hcl", "a1b2c3#"], ["hcl", "0a#b2c3"], ["hcl", "#ABCDEF"],
    ["ecl", "bla"], ["ecl", ""],
    ["pid", "1"], ["pid", "1234567890"], ["pid", "abcdefghi"]
]
# Every entry in faildata is expected to be rejected.
for test in faildata:
    if validate(test[0], test[1]):
        print("FAIL" + str(test))
    else:
        print("PASS")
# -*- coding: utf-8 -*-
import operator
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from jarvis_scraper.items import JarvisScraperItem
from jarvis_scraper.nlp.lib import get_distance
class JarvisScraperSpider(scrapy.Spider):
    """Crawl musee-armee.fr and emit, for each parsed page, the 5 links whose
    URL scores highest with get_distance (cosine distance helper).

    NOTE(review): `rules` is a CrawlSpider concept; plain scrapy.Spider
    ignores it, so no automatic link-following via these rules happens --
    confirm whether this should subclass CrawlSpider instead.
    """
    name = 'jarvis_scraper'
    start_urls = ['http://www.musee-armee.fr']
    rules = [
        Rule(
            LinkExtractor(
                canonicalize=True,
                unique=True
            ),
            follow=True,
            callback="parse_items"
        )
    ]

    def start_requests(self):
        # Issue the seed requests explicitly; dont_filter allows revisits.
        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse, dont_filter=True)

    # Method for parsing items
    def parse(self, response):
        items = []
        # The list of items that are found on the particular page
        url_distance = {}
        # Only extract canonicalized and unique links (with respect to the
        # current page)
        links = LinkExtractor(
            canonicalize=True, unique=True).extract_links(response)
        # Now go through all the found links
        for link in links:
            url_to = link.url
            distance = get_distance(url_to)
            url_distance[url_to] = distance
        # Get url with best content matches based on Cosine distance
        best_url_matches = dict(
            sorted(url_distance.items(), key=operator.itemgetter(1),
                   reverse=True)[:5])
        for url in best_url_matches:
            item = JarvisScraperItem()
            item['url_to'] = url
            items.append(item)
        return items
|
import itertools
# Read the puzzle input: one line of digits -> list of single-digit ints.
# NOTE(review): `input` shadows the builtin input(); assumes the line has no
# trailing newline (int('\n') would raise) -- confirm against input.txt.
with open('input.txt') as my_file:
    input = my_file.readline()
input_list = [int(s) for s in input]
def pattern_maker(mask_length, repeat_count):
    """Build the FFT mask for one output position.

    The base pattern [0, 1, 0, -1] is stretched so each element repeats
    *repeat_count* times, tiled to cover the signal, and the leading element
    is dropped (the puzzle's "skip the very first value" rule).
    """
    stretched = [value
                 for value in [0, 1, 0, -1]
                 for _ in range(repeat_count)]
    tiled = itertools.cycle(stretched)
    return list(itertools.islice(tiled, 1, mask_length + 1))
def output_list(input_list, index):
    """Compute one output digit of an FFT phase: dot the signal with the
    mask for *index* and keep the last digit of the absolute value."""
    mask = pattern_maker(len(input_list), index + 1)
    total = sum(digit * weight for digit, weight in zip(input_list, mask))
    return abs(total) % 10
def one_phase(input_list):
    """Run one full FFT phase: recompute every position of the signal."""
    return [output_list(input_list, position)
            for position in range(len(input_list))]
def n_phases(input_list, n):
    """Apply *n* FFT phases, feeding each phase's output into the next."""
    signal = input_list
    for _ in range(n):
        signal = one_phase(signal)
    return signal
output = (n_phases(input_list,100))[0:8]
print(''.join([str(x) for x in output])) |
# Public API of this package: submodules re-exported via `from pkg import *`.
__all__ = ["client", "collectables", "metrics", "cnfparse"]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
import sys
sys.path.append(os.curdir)  # make the sibling pelicanconf importable
# Production overrides for the Pelican site (used when publishing).
SITEURL = ''
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
OUTPUT_PATH = '/output/'
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
TRAVIS_CI_BADGE = "https://travis-ci.org/Hernrup/hernrup_se_colors.svg?branch=master" |
'''
Created on Feb 2, 2016
@author: henry
'''
# Accept number from user and determine if it is odd or even.
# Bug fix: on Python 3 input() returns a str, so `num % 2` raised TypeError;
# convert to int first (int(input(...)) is also valid on Python 2).
num = int(input("Enter a number: "))
mod = num % 2
if mod > 0:
    print("This is an odd number.")
else:
    print("This is an even number.")
"""
Members resource implementation.
"""
from typing import Optional, Union
from pyyoutube.resources.base_resource import Resource
from pyyoutube.models import MemberListResponse
from pyyoutube.utils.params_checker import enf_parts, enf_comma_separated
class MembersResource(Resource):
    """Resource wrapping the YouTube Data API `members` endpoint.

    A member resource represents a channel member (formerly known as a
    "sponsor") for a YouTube channel.

    References: https://developers.google.com/youtube/v3/docs/members
    """

    def list(
        self,
        parts: Optional[Union[str, list, tuple, set]] = None,
        mode: Optional[str] = None,
        max_results: Optional[int] = None,
        page_token: Optional[str] = None,
        has_access_to_level: Optional[str] = None,
        filter_by_member_channel_id: Optional[Union[str, list, tuple, set]] = None,
        return_json: bool = False,
        **kwargs: Optional[dict],
    ) -> Union[dict, MemberListResponse]:
        """List the members of a channel.

        Args:
            parts:
                Comma-separated list (or collection) of member resource
                properties. Accepted values: snippet.
            mode:
                Which members to include. `all_current` lists current
                members from newest to oldest; `updates` lists only members
                that joined or upgraded since the previous API call.
            max_results:
                Maximum number of items in the result set (0 to 1000
                inclusive; API default is 5).
            page_token:
                Identifies the specific result page to return.
            has_access_to_level:
                Level ID; only members at or above this level are returned.
            filter_by_member_channel_id:
                Channel ID(s) whose membership status should be checked
                (at most 100 channels per call).
            return_json:
                When True, return the raw dict instead of a
                MemberListResponse model.
            **kwargs:
                Additional system parameters, see
                https://cloud.google.com/apis/docs/system-parameters.

        Returns:
            Members data (MemberListResponse, or dict when return_json).
        """
        params = {
            "part": enf_parts(resource="members", value=parts),
            "mode": mode,
            "maxResults": max_results,
            "pageToken": page_token,
            "hasAccessToLevel": has_access_to_level,
            "filterByMemberChannelId": enf_comma_separated(
                field="filter_by_member_channel_id",
                value=filter_by_member_channel_id,
            ),
        }
        params.update(kwargs)  # explicit system parameters win over defaults
        raw = self._client.parse_response(
            response=self._client.request(path="members", params=params)
        )
        return raw if return_json else MemberListResponse.from_dict(raw)
|
############################################################################
# LOGISTIC REGRESSION #
# Note: NJUST Machine Learning Assignment. #
# Optimization: Grediant Descent (GD), Stochastic Grediant Descent(SGD). #
# Author: Edmund Sowah #
############################################################################
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
class logistic_regression():
    """Binary logistic regression trained by (stochastic) gradient descent.

    The design matrix is mean-centred, range-scaled, and extended with a
    bias column; theta is initialised from a standard normal.
    """

    def __init__(self, data, label):
        self.label = label.reshape(-1, 1)
        self.num_data = len(data)
        bias = np.ones(len(data),)
        data = self.preprocess(data)
        # Append the bias column so theta's last entry is the intercept.
        self.data = np.column_stack((data, bias))
        self.theta = np.random.randn(self.data.shape[1], 1)
        self.lr = 0.8
        self.color_list = ['r', 'g', 'b', 'y']

    def preprocess(self, data):
        """Centre each feature and scale it by its (unchanged) range."""
        centered = data - np.mean(data, axis=0, keepdims=True)
        span = (np.max(centered, axis=0, keepdims=True) -
                np.min(centered, axis=0, keepdims=True))
        return centered / span

    def sigmoid(self, score):
        """Logistic function 1 / (1 + e^-score)."""
        return 1. / (1. + np.exp(-score))

    def hypothesis(self, data):
        """Predicted probabilities sigmoid(X . theta)."""
        return self.sigmoid(data.dot(self.theta))

    def loss(self):
        """Mean negative log-likelihood of the training labels."""
        p = self.hypothesis(self.data)
        nll = -np.sum(self.label.T.dot(np.log(p)) +
                      (1 - self.label).T.dot(np.log(1 - p)))
        return nll / self.num_data

    def update_parameter(self, stochastic=0):
        """One gradient step.

        stochastic == 0 uses the full batch; otherwise *stochastic* random
        samples are drawn (learning rate 6 for a single sample, 3 for a
        mini-batch -- values inherited from the original tuning).
        """
        if stochastic != 0:
            picked = np.random.randint(0, self.num_data, stochastic)
            self.lr = 6 if stochastic == 1 else 3
            x = self.data[picked]
            y = self.label[picked]
        else:
            x = self.data
            y = self.label
        grad = x.T.dot(self.hypothesis(x) - y) / self.num_data
        self.theta -= self.lr * grad
        print('theta:\n', self.theta)
if __name__ == '__main__':
    # '0' -> full-batch gradient descent; any other number -> SGD with that
    # many random samples per step.
    opt = input('Input Optimization strategy number: ')
    # loading the data
    # NOTE(review): backslash paths are Windows-specific; os.path.join would
    # be portable -- confirm the intended platform.
    exam_data = np.loadtxt('Exam\exam_x.dat')
    exam_label = np.loadtxt('Exam\exam_y.dat', dtype=int)
    data = exam_data
    label = exam_label
    log_reg = logistic_regression(data, label)
    print('Initiated theta Value is:\n', log_reg.theta)
    loss_list = []
    step_list = []
    acc_list = []
    plt.ion()  # interactive mode: the four panels refresh while training
    fig, ax = plt.subplots(1, 4, figsize=(16, 5))
    for steps in range(300):
        step_list.append(steps)
        pred = log_reg.hypothesis(log_reg.data).flatten()
        classification = pred > 0.5
        classification = np.array(classification, dtype=int)
        loss = log_reg.loss()
        print('Current loss value is:\n', loss)
        loss_list.append(loss)
        # Panel 1: ground-truth labels.
        plt.subplot(1, 4, 1)
        plt.title('Ground Truth')
        for i in range(2):
            data_x = np.array(data.T[0][label == i])
            data_y = np.array(data.T[1][label == i])
            plt.scatter(data_x, data_y, c=log_reg.color_list[i])
        # Panel 2: current classification.
        plt.subplot(1, 4, 2)
        plt.title('Classification Plot')
        for i in range(2):
            data_x = np.array(data.T[0][classification == i])
            data_y = np.array(data.T[1][classification == i])
            if len(data_x) == 0:
                continue
            plt.scatter(data_x, data_y, c=log_reg.color_list[i])
        ax[1].cla()
        # Panel 3: loss curve.
        plt.subplot(1, 4, 3)
        plt.title('Loss')
        ax[2].cla()
        plt.plot(step_list, loss_list, c='b', ls='-', marker='o')
        # Panel 4: accuracy curve.
        plt.subplot(1, 4, 4)
        acc = sum(label == classification) / log_reg.num_data
        acc_list.append(acc)
        plt.plot(step_list, acc_list, c='g', ls='-', marker='*')
        plt.title('Accuracy')
        plt.pause(0.1)
        if opt == '0':
            log_reg.update_parameter()
        else:
            log_reg.update_parameter(stochastic=int(opt))
|
# Generated by Django 2.1.7 on 2019-03-10 15:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Player.count_correct_answers and
    # Player.money_won optional in forms (blank=True) with a default of 0.
    # Applied migrations should not be edited by hand.

    dependencies = [
        ('milliard', '0010_player'),
    ]

    operations = [
        migrations.AlterField(
            model_name='player',
            name='count_correct_answers',
            field=models.IntegerField(blank=True, default=0),
        ),
        migrations.AlterField(
            model_name='player',
            name='money_won',
            field=models.IntegerField(blank=True, default=0),
        ),
    ]
|
from django.apps import AppConfig
class InvestAdminAppConfig(AppConfig):
    """Django app configuration for apps.invest_admin_app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'apps.invest_admin_app'
|
from django import forms
from .models import NoticeDev
from django_summernote.fields import SummernoteTextFormField, SummernoteTextField
from django_summernote.widgets import SummernoteWidget
# 공지사항 및 개발로그 게시판 글쓰기 폼
# Write form for the notice / dev-log boards.
class BoardWriteForm(forms.ModelForm):
    # Post title input.
    noticedev_title = forms.CharField(
        max_length=128,
        label='글제목',
        widget=forms.TextInput(
            attrs={
                'class': 'uk-input',
                'placeholder': '게시글 제목'
            }),
        required=True,
        error_messages={'required': '제목을 입력해주세요.'}
    )
    # Left value is stored in the model, right value is rendered in HTML.
    # NOTE(review): both choices store '공지사항'; the one labelled '개발로그'
    # probably should store '개발로그' -- confirm against existing saved data
    # before changing.
    options = (
        ('공지사항', '공지사항'),
        ('공지사항', '개발로그')
    )
    noticedev_board_name = forms.ChoiceField(
        label='게시판 선택',
        widget=forms.Select(
            attrs={
                'class': 'uk-select',
            }),
        required=True,
        error_messages={'required': '게시판을 선택해주세요.'},
        choices=options,
    )
    noticedev_contents = SummernoteTextFormField()

    # Only the noticedev_contents field of the NoticeDev model is rendered
    # with the Summernote widget.
    class Meta:
        model = NoticeDev
        fields = ['noticedev_title', 'noticedev_board_name', 'noticedev_contents']
        widgets = {
            'noticedev_contents': SummernoteWidget()
        }

    def clean(self):
        # NOTE(review): the elif chain reports at most one missing field per
        # submission; independent `if`s would surface all errors at once.
        cleaned_data = super().clean()
        noticedev_title = cleaned_data.get('noticedev_title')
        noticedev_board_name = cleaned_data.get('noticedev_board_name')
        noticedev_contents = cleaned_data.get('noticedev_contents')
        if not noticedev_title:
            self.add_error('noticedev_title', '글제목을 입력해 주세요.')
        elif not noticedev_board_name:
            self.add_error('noticedev_board_name', '게시판을 선택해 주세요.')
        elif not noticedev_contents:
            self.add_error('noticedev_contents', '내용을 입력해 주세요.')
        else:
            self.noticedev_title = noticedev_title
            self.noticedev_board_name = noticedev_board_name
            self.noticedev_contents = noticedev_contents
import socket
import tkinter
import threading
import random
import tkinter.messagebox
Ip = '127.0.0.1'
Port = 50007
ServerAddr = (Ip, Port)
Port = random.randint(50008, 60000)
client_addr = ('127.0.0.1', Port)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(client_addr)
Channel = '0'
UserName = '0' # 表示还未进入聊天室,没有username
UserId = '0' # 表示还未进入聊天室,没有username
# 登录窗口
root1 = tkinter.Tk()
root1.title('选择UDP服务器')
root1['height'] = 110
root1['width'] = 280
IP1 = tkinter.StringVar()
IP1.set('127.0.0.1:50007') # 默认显示的ip和端口
User = tkinter.StringVar()
User.set('')
# 服务器标签
labelIP = tkinter.Label(root1, text='服务器地址')
labelIP.place(x=20, y=10, width=80, height=20)
entryIP = tkinter.Entry(root1, width=80, textvariable=IP1)
entryIP.place(x=110, y=10, width=110, height=20)
# 用户名标签
labelUser = tkinter.Label(root1, text='用户名')
labelUser.place(x=20, y=40, width=80, height=20)
entryUser = tkinter.Entry(root1, width=80, textvariable=User)
entryUser.place(x=110, y=40, width=110, height=20)
# 登录按钮
# 用户名不能为空
def login(event=0):
global Ip, Port, WindowUserName
Ip, Port = entryIP.get().split(':') # 获取IP和端口号
Port = int(Port) # 端口号需要为int类型
WindowUserName = entryUser.get()
if WindowUserName == "":
tkinter.messagebox.showinfo(title="Reminder", message="Please input username")
else:
root1.destroy() # 关闭窗口
root1.bind('<Return>', login) # 回车绑定登录功能
but = tkinter.Button(root1, text='登录', command=login)
but.place(x=90, y=70, width=70, height=30)
root1.mainloop()
# 聊天窗口
# 创建图形界面
root = tkinter.Tk()
root.title('Welcome to our chat rooms! ' + WindowUserName) # 窗口命名为用户名
root['height'] = 380
root['width'] = 600
UserName = WindowUserName
# 创建多行文本框,聊天内容
listbox = tkinter.Listbox(root)
listbox.place(x=130, y=0, width=300, height=300)
# 创建输入文本框和关联变量
msg = tkinter.StringVar()
msg.set('')
entry = tkinter.Entry(root, width=120, textvariable=msg)
entry.place(x=130, y=310, width=230, height=50)
# channel list
channel_list = tkinter.Listbox(root)
channel_list.place(x=0, y=20, width=130, height=280)
# 请求channel的按钮
def channels_butt():
message = UserName + ' ' + UserId + ':09:' + Channel
s.sendto(message.encode(), ServerAddr)
channel_button = tkinter.Button(root, text="Channels:", command=channels_butt)
channel_button.place(x=25, y=0, width=80, height=20)
# 清除channel列表
def clear_channels():
channel_list.delete(0, tkinter.END)
# channel列表
# 如果在其他房间,要先离开
def join(event):
global UserName, UserId, Channel
me = channel_list.get(channel_list.curselection())
if Channel == '0':
message = UserName + ' ' + UserId + ':10:' + Channel + ':' + me+' '+UserName
s.sendto(message.encode(), ServerAddr)
else:
tkinter.messagebox.showerror(title="error", message="You have to leave this room before you join another room !")
# double click 加入 channel
channel_list.bind('<Double-Button>', join)
# online users
online_user_list = tkinter.Listbox(root)
online_user_list.place(x=430, y=00, width=170, height=300)
# 清除user list
def init_user_list():
online_user_list.delete(0, tkinter.END)
online_user_list.insert(tkinter.END, "----------Online Users--------")
online_user_list.itemconfig(tkinter.END, fg='blue')
init_user_list()
# 清除user list
def clear_user_list():
online_user_list.delete(0, tkinter.END)
online_user_list.insert(tkinter.END, "----------Online Users--------")
online_user_list.itemconfig(tkinter.END, fg='blue')
# 更新user list
def update_user_list(parameter):
clear_user_list()
para = parameter.split(' ')
for item in para:
online_user_list.insert(tkinter.END, item)
# msg
def client_msg(event):
dstId = online_user_list.get(online_user_list.curselection())
# 本窗口
m = entry.get()
if m == "":
tkinter.messagebox.showinfo(title="Reminder", message="Please input message")
else:
listbox.insert(tkinter.END, "(private) To " + dstId + ': ' + m)
listbox.itemconfig(tkinter.END, fg='blue')
# 发送
m = UserName + ' ' + UserId + ':12:' + Channel + ':' + dstId + ' ' + m
s.sendto(m.encode(), ServerAddr)
msg.set('') # 发送后清空文本框
# 发送私人消息
# 消息不能为空
online_user_list.bind('<Double-Button>', client_msg)
def init():
global Channel, UserId
Channel = '0'
UserId = '0'
# 清空用户列表
clear_user_list()
# 清空文本框
listbox.delete(0, tkinter.END)
# 标题
root.title('Welcome to our chat rooms! ' + UserName)
# 请求新的channel
channels_butt()
def leave_butt():
if Channel == '0':
tkinter.messagebox.showerror(title="ERROR", message="You are not in any channel!")
else:
message = UserName + ' ' + UserId + ':13:' + Channel
s.sendto(message.encode(), ServerAddr)
leave_butt = tkinter.Button(root, text='Leave', command=leave_butt)
leave_butt.place(x=25, y=310, width=80, height=20)
# 滚动条
scrollbar = tkinter.Scrollbar(listbox, command=listbox.yview)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
listbox.config(yscrollcommand=scrollbar.set)
# 收到服务器回复的允许离开
def client_leave():
tkinter.messagebox.showwarning(title="Attention", message="You have left channel!")
init()
def client_recv(user, recv_data):
user_info = user.split(' ', 1)
user_id = user_info[1]
# user_id == 0 表示服务器发送的
m = 'From ' + user_id + ' :' + recv_data
listbox.insert(tkinter.END, m)
listbox.insert(tkinter.END, " ")
# 收到私人信息,变蓝
def private_msg(user, recv_data):
user_info = user.split(' ', 1)
user_id = user_info[1]
m = 'From ' + user_id + ' :' + recv_data
listbox.insert(tkinter.END, m)
listbox.itemconfig(tkinter.END, fg='blue')
# 得到服务器回复准许加入
def client_join(user):
global UserName, UserId, Channel
user_info = user.split(' ', 2)
UserId = user_info[1]
Channel = user_info[2]
# 标题
root.title(Channel + " : " + UserId)
# 被服务器踢出channel
def kicked_out():
tkinter.messagebox.showwarning(title="Attention", message="You have been kicked out!")
init()
# Server response to a channel-list request.
def client_channel(parameter):
    """Refresh the channel listbox from a space-separated channel string."""
    if not parameter:
        tkinter.messagebox.showinfo(title="Channels", message="There is no channel now!")
        clear_channels()
        return
    clear_channels()
    for channel_name in parameter.split(' '):
        channel_list.insert(tkinter.END, channel_name)

def sb_leave(u):
    """Announce that user *u* has left the channel."""
    tkinter.messagebox.showinfo(title="Attention", message=u + " has left!")

def channel_closed():
    """The current channel was closed by the server; reset the client."""
    tkinter.messagebox.showwarning(title="Attention", message="Channel has been closed")
    init()
def send(event=0):
    """Send the textbox contents to the current channel (command 08).

    Wire format: 'user_name user_id:08:channel:message'.
    Bound both to the send button and to the Return key.
    """
    global Channel, UserName, UserId
    text = entry.get()
    # guard: refuse empty messages
    if not text:
        tkinter.messagebox.showerror(title="ERROR", message="textbox cannot be null!")
        return
    # guard: must be in a channel first
    if Channel == '0':
        tkinter.messagebox.showinfo(title="Reminder", message="Please join in channel first")
        return
    packet = UserName + ' ' + UserId + ':08:' + Channel + ':' + text
    s.sendto(packet.encode(), ServerAddr)
    # clear the textbox after sending
    msg.set('')
def rec():
    """Background receive loop: parse server datagrams and dispatch by command.

    Wire format: 'user_info:command:channel[:parameters]'; parts[3] exists
    only for commands that carry parameters, so each handler decides which
    pieces it reads.
    """
    dispatch = {
        '15': lambda p: client_recv(p[0], p[3]),     # channel message
        '16': lambda p: client_leave(),              # leave acknowledged
        '17': lambda p: client_join(p[3]),           # join granted
        '18': lambda p: private_msg(p[0], p[3]),     # private message
        '19': lambda p: client_channel(p[3]),        # channel list
        '20': lambda p: update_user_list(p[3]),      # user list update
        '21': lambda p: kicked_out(),                # kicked from channel
        '22': lambda p: sb_leave(p[3]),              # somebody left
        '23': lambda p: channel_closed(),            # channel closed
    }
    while True:
        data, r_addr = s.recvfrom(512)
        parts = data.decode().split(':', 3)
        handler = dispatch.get(parts[1])
        if handler is not None:
            handler(parts)
        else:
            tkinter.messagebox.showerror(title="ERROR", message="Unknown message from server")
# Background thread that receives and dispatches server messages
t = threading.Thread(target=rec)
t.start()
# Create the send button ('发送' = "Send")
send_button = tkinter.Button(root, text='发送', command=send)
send_button.place(x=370, y=310, width=60, height=50)
# Bind the Return key to send as well
root.bind('<Return>', send)
root.mainloop()
s.close()
|
# memo: camera center is located 32.0mm left of pickup center, y=220mm
import json
import cv2
import numpy as np
import math
from socket import *
import time
# physical travel limits of the machine axes
Xlimit = [0, 240]
Ylimit = [0, 259]
#Zlimit = [0, 160] #[mm]
Zlimit = [0, 800]
#--------------------------------------------------------
global udpSock, UDP_SERIAL_Addr
# local client
Client_IP = "127.0.0.1"
Client_Port = 10031
Client_Addr = (Client_IP, Client_Port)
# UDP-Serial server
UDP_SERIAL_IP = "127.0.0.1"
UDP_SERIAL_Port = 10030
UDP_SERIAL_Addr = (UDP_SERIAL_IP, UDP_SERIAL_Port)
udpSock = socket(AF_INET, SOCK_DGRAM)
udpSock.bind(Client_Addr)
# 1 s receive timeout so send_cmd's reply-wait loop can poll
udpSock.settimeout(1)
# camera capture handle, opened by load_config()
cap = ""
# number of frames captured so far (used to name raw frame dumps)
Ncap = 0
# per-tray perspective matrices, filled by load_config()
A2image = {}
A2real = {}
def load_config(filename = 'config.txt') :
#def load_config(filename) :
    """Load machine/camera/tray configuration JSON, open the camera, and
    precompute per-tray perspective matrices (A2image / A2real)."""
    global config, pos_r, pos_c, pos_r_o, pos_c_o, pos_r_e, pos_c_e, cap
    print("Loading "+filename)
    config = json.load(open(filename, 'r'))
    camID = config["Camera"]["ID"]
    # CAP_DSHOW: DirectShow backend (Windows)
    cap = cv2.VideoCapture(camID, cv2.CAP_DSHOW)
    print("capture size: ({0:d}, {1:d})".format(config["Camera"]["Pixel"][0], config["Camera"]["Pixel"][1]))
    #cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'));
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, config["Camera"]["Pixel"][0])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, config["Camera"]["Pixel"][1])
    # cache camera->image and image->real transforms for each tray
    for trayID in config["Tray"]:
        A2image[trayID] = calc_transform_to_image(trayID).tolist()
        A2real[trayID] = calc_transform_to_real(trayID).tolist()
        #print(trayID, A2image[trayID])
        #print(trayID, A2real[trayID])
    return config
def save_config(data, filename = 'config.txt'):
    """Serialize *data* as JSON into *filename*."""
    print("Saving " + filename)
    with open(filename, 'w') as fh:
        json.dump(data, fh)
def send_cmd(cmd):
    """Send a G-code/M-code command over UDP and block until any reply arrives.

    The reply-wait loop polls using the socket's 1 s timeout. Only the
    timeout is swallowed, so real errors (e.g. a closed socket) surface
    instead of spinning forever — the old bare `except:` hid everything,
    including KeyboardInterrupt.
    """
    global udpSock, UDP_SERIAL_Addr
    udpSock.sendto(cmd.encode('utf-8'), UDP_SERIAL_Addr)
    UDP_BUFSIZE = 1024
    while True:
        try:
            data, addr = udpSock.recvfrom(UDP_BUFSIZE)
        except timeout:
            # no reply yet -- keep waiting
            continue
        else:
            break
def capture(fCapture, trayID, fWarp = False):
    """Grab a camera frame (or reload raw.png) and optionally rectify it.

    :param fCapture: True to grab from the live camera, False to reuse raw.png
    :param trayID: tray whose warp matrix (A2image) is used when fWarp is True
    :param fWarp: warp the frame into the rectified image plane
    """
    global cap,Ncap
    if fCapture == True:
        # read three frames so stale buffered frames are flushed
        ret,img_src = cap.read()
        ret,img_src = cap.read()
        ret,img_src = cap.read()
        cv2.imwrite('raw{0:d}.png'.format(Ncap), img_src)
        Ncap = Ncap + 1
    else:
        img_src = cv2.imread('raw.png', 1)
    if (fWarp == True):
        # perform warp perspective
        #img_src = cv2.warpPerspective(img_src, np.array(config["Tray"][trayID]["MatrixToImage"]), config["Camera"]["Pixel"])
        img_src = cv2.warpPerspective(img_src, np.array(A2image[trayID]), config["Camera"]["Pixel"])
    return(img_src)
def move_XY(x, y):
    """Move to (x, y) if inside the work envelope; report the bad axis otherwise.

    The old version always printed both messages and quoted Ylimit's bounds
    in the X message; now only the out-of-range axis is reported, each with
    its own limits.
    """
    if (Xlimit[0] <= x <= Xlimit[1]) and (Ylimit[0] <= y <= Ylimit[1]):
        send_cmd('G0X{:.2f}Y{:.2f}'.format(x, y))
    else:
        if not (Xlimit[0] <= x <= Xlimit[1]):
            print("X={0:.2f} is out of range ({1:.1f}, {2:.1f})".format(x, Xlimit[0], Xlimit[1]))
        if not (Ylimit[0] <= y <= Ylimit[1]):
            print("Y={0:.2f} is out of range ({1:.1f}, {2:.1f})".format(y, Ylimit[0], Ylimit[1]))
def move_X(pos):
    """Move the X axis to *pos* if within Xlimit; warn otherwise.

    Fixed: the out-of-range message previously quoted Ylimit's bounds.
    """
    if Xlimit[0] <= pos <= Xlimit[1]:
        send_cmd('G0X{:.2f}'.format(pos))
    else:
        print("X={0:.2f} is out of range ({1:.1f}, {2:.1f})".format(pos, Xlimit[0], Xlimit[1]))

def move_Y(pos):
    """Move the Y axis to *pos* if within Ylimit; warn otherwise."""
    if Ylimit[0] <= pos <= Ylimit[1]:
        send_cmd('G0Y{:.2f}'.format(pos))
    else:
        print("Y={0:.2f} is out of range ({1:.1f}, {2:.1f})".format(pos, Ylimit[0], Ylimit[1]))
def move_Z(pos):
    """Move the Z axis to *pos* if within Zlimit (out-of-range is ignored)."""
    # pos = pos * 5 # Z's [step/mm] = XY's [step/mm] * 5
    if Zlimit[0] <= pos <= Zlimit[1]:
        send_cmd('G0Z{:.2f}'.format(pos))

def intake_control(status):
    """Switch the vacuum intake on (M3) or off (M5)."""
    send_cmd('M3' if status == True else 'M5')

def exhaust_control(status):
    """Switch the exhaust on (M7) or off (M9)."""
    send_cmd('M7' if status == True else 'M9')

def rotate_head(angle):
    """Rotate the pickup head; the A axis counts 100 units per 360 degrees."""
    send_cmd('G0A' + '{:.2f}'.format(angle * 100 / 360))
# Angles are normalized into [theta_s, theta_s + theta_range] = [-180, 180].
theta_s = -180
theta_range = 360

def normalize_angle(theta):
    """Wrap *theta* [deg] into the window [theta_s, theta_s + theta_range]."""
    upper = theta_s + theta_range
    while theta < theta_s:
        theta += theta_range
    while theta > upper:
        theta -= theta_range
    return theta

# calculate the minimized rotation
def angle_to_horizontal(theta):
    """Return the smallest rotation [deg] that brings *theta* to horizontal.

    A part at theta or theta + 180 is equally 'horizontal', so the rotation
    never needs to exceed a quarter turn in either direction.
    """
    theta = normalize_angle(theta)
    if theta_s / 2 < theta < (theta_s + theta_range) / 2:
        rotation = -theta
    else:
        rotation = theta_range / 2 - theta
    return normalize_angle(rotation)
def pick(x, y, angle):
    """Pick up a component at real position (x, y), oriented at *angle* [deg].

    Sequence: raise to travel height, move over the part, descend, start
    vacuum, lift, then pre-rotate the head so the part ends up horizontal.
    """
    print("pick ({0:.2f}, {1:.2f} / {2:.2f})".format(x, y, angle))
    move_Z(config["Physical"]["Height"]["Motion"])
    #move_Z(config["Camera"]["Height"])
    move_XY(x, y)
    move_Z(config["Physical"]["Height"]["Pick"])
    intake_control(True)
    # give the vacuum a moment to grip the part
    time.sleep(0.2)
    move_Z(config["Physical"]["Height"]["Motion"])
    rotate_head(angle_to_horizontal(angle))
    ## ToDo: wait for head rotate finished

def place(x, y, angle, thickness):
    """Place the held component at (x, y), rotated to *angle* [deg].

    *thickness* is in mm; Z moves in 5 steps/mm (see move_Z), hence the * 5.
    The vacuum is released and a short exhaust puff detaches the part.
    """
    print("place ({0:.2f}, {1:.2f} / {2:.2f})".format(x, y, angle))
    move_XY(x, y)
    #rotate_head(angle)
    rotate_head(angle_to_horizontal(angle))
    ## ToDo: wait for head rotate finished
    #print("place angle=",angle)
    move_Z(config["Physical"]["Height"]["Place"] + thickness * 5)
    intake_control(False)
    exhaust_control(True)
    #time.sleep(0.2)
    time.sleep(0.5)
    exhaust_control(False)
    move_Z(config["Physical"]["Height"]["Motion"])
    #move_Z(config["Camera"]["Height"])
def digitize(img, HSV_Range):
    """Threshold a BGR image by an HSV range given as nested dicts.

    H is specified in 0-360 (OpenCV uses 0-180), S and V in 0-100
    (OpenCV uses 0-255); both bounds are rescaled before cv2.inRange.
    Returns the binary mask.
    """
    lo = HSV_Range["Lower"]
    hi = HSV_Range["Upper"]
    lower = np.array([lo["H"] / 2, lo["S"] / 100.0 * 255, lo["V"] / 100.0 * 255])
    upper = np.array([hi["H"] / 2, hi["S"] / 100.0 * 255, hi["V"] / 100.0 * 255])
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, lower, upper)
def calc_transform(pos_from, pos_to, fMoveOrigin = True):
    """Perspective transform mapping the 4 points *pos_from* onto *pos_to*.

    When *fMoveOrigin* is True, both quadrilaterals are first shifted so
    their centroids sit at the origin; otherwise points are used as-is.
    Returns the 3x3 matrix from cv2.getPerspectiveTransform.
    """
    if fMoveOrigin == True:
        from_o = [sum(p[0] for p in pos_from[:4]) / 4,
                  sum(p[1] for p in pos_from[:4]) / 4]
        to_o = [sum(p[0] for p in pos_to[:4]) / 4,
                sum(p[1] for p in pos_to[:4]) / 4]
    else:
        from_o = [0, 0]
        to_o = [0, 0]
    # 4 corners, re-centered on the temporary origin
    src = np.array([[p[0] - from_o[0], p[1] - from_o[1]] for p in pos_from[:4]],
                   dtype=np.float32)
    dst = np.array([[p[0] - to_o[0], p[1] - to_o[1]] for p in pos_to[:4]],
                   dtype=np.float32)
    return cv2.getPerspectiveTransform(src, dst)
def calc_transform_to_real(trayID):
    """Matrix mapping rectified image pixels to real coordinates for *trayID*."""
    width, height = config["Camera"]["Pixel"]
    image_corners = [[0, 0],            # UpperLeft
                     [width, 0],        # UpperRight
                     [width, height],   # LowerRight
                     [0, height]]       # LowerLeft
    real = config["Tray"][trayID]['Corner']['Real']
    real_corners = [real['UpperLeft'], real['UpperRight'],
                    real['LowerRight'], real['LowerLeft']]
    return calc_transform(image_corners, real_corners, True)

def calc_transform_to_image(trayID):
    """Matrix mapping raw camera pixels to the rectified image for *trayID*.

    The camera is mounted rotated 180 degrees, so the tray's camera-space
    corners are supplied in rotated order (LowerRight first).
    """
    width, height = config["Camera"]["Pixel"]
    cam = config["Tray"][trayID]['Corner']['Camera']
    camera_corners = [cam['LowerRight'], cam['LowerLeft'],
                      cam['UpperLeft'], cam['UpperRight']]
    image_corners = [[0, 0],
                     [width, 0],
                     [width, height],
                     [0, height]]
    return calc_transform(camera_corners, image_corners, False)
def pos_transform_to(Aconv, pos, pos_from_o, pos_to_o):
    """Apply the 3x3 transform *Aconv* to *pos* using temporary origins.

    *pos* is shifted by -pos_from_o before the transform and by +pos_to_o
    after it. NOTE(review): the homogeneous w component is discarded rather
    than divided out, so this is exact only for affine-like transforms.
    """
    shifted = [pos[0] - pos_from_o[0], pos[1] - pos_from_o[1], 1]
    out_x, out_y, w = np.dot(Aconv, shifted)
    # move the temporary origin back to the target origin
    return [out_x + pos_to_o[0], out_y + pos_to_o[1]]
def pos_transform_to_real(trayID, pos):
    """Map a rectified-image position to real coordinates for *trayID*."""
    width, height = config["Camera"]["Pixel"]
    real = config["Tray"][trayID]['Corner']['Real']
    corners = [real['UpperLeft'], real['UpperRight'],
               real['LowerRight'], real['LowerLeft']]
    # the tray center in real coordinates is the average of its corners
    center = [sum(c[0] for c in corners) / 4,
              sum(c[1] for c in corners) / 4]
    return pos_transform_to(A2real[trayID], pos, [width/2, height/2], center)

def pos_transform_to_image(trayID, pos):
    """Map a raw camera position into the rectified image for *trayID*."""
    width, height = config["Camera"]["Pixel"]
    return pos_transform_to(A2image[trayID], pos, [0, 0], [0, 0])
def move_camera(tray):
    """Raise to camera height and center the camera over *tray*."""
    move_Z(config["Camera"]["Height"])
    cam_pos = config["Tray"][tray]['Camera']
    move_XY(cam_pos[0], cam_pos[1])

def light_control(status):
    """Switch the illumination on (M12) or off (M13)."""
    send_cmd('M12' if status == True else 'M13')

def pump_control(status):
    """Switch the vacuum pump on (M10) or off (M11)."""
    send_cmd('M10' if status == True else 'M11')
def find_component(trayID, margin=0):
    """Capture a frame and return components detected on *trayID*.

    Returns a list of [x, y, angle_deg, has_front_mark] in real coordinates.
    *margin* [mm] shrinks the accepted tray area on all sides.
    """
    # move_camera(trayID)
    # The old call was capture(True, config['Camera']['ID'], trayID), which
    # passed the camera ID as capture()'s trayID and the tray ID as fWarp.
    # Since fWarp was never exactly True, no warp happened in capture();
    # create_component_list() performs the warp, so fWarp stays False here.
    img = capture(True, trayID)
    cmp, img_ext = create_component_list(img, trayID, margin)
    #cv2.imwrite('ext{0:d}.png'.format(Ncap), img_ext)
    return(cmp)
def create_component_list(img, trayID, tray_margin = 0):
    """Detect components in a raw camera frame of *trayID*.

    Pipeline: warp into the rectified image plane, mask out the tray
    background color, label connected blobs whose area falls in the
    configured component range, fit a min-area rectangle to each contour
    to get position and angle, look for a nearby black blob (the 'front'
    mark), convert to real coordinates, and keep only components inside
    the tray bounds shrunk by *tray_margin*.

    Returns (components, annotated_image) where each component is
    [x_real, y_real, angle_deg, has_front_mark].
    """
    #img_rect = cv2.warpPerspective(img, np.array(config["Tray"][trayID]["MatrixToImage"]), config["Camera"]["Pixel"])
    img_rect = cv2.warpPerspective(img, np.array(A2image[trayID]), config["Camera"]["Pixel"])
    # blob-area thresholds for components
    areaL, areaU = config["Tray"][trayID]["Area"]["Component"]["Lower"], config["Tray"][trayID]["Area"]["Component"]["Upper"]
    # area threshold for black area
    areaLB, areaUB = config["Tray"][trayID]["Area"]["Black"]["Lower"], config["Tray"][trayID]["Area"]["Black"]["Upper"]
    height, width, channel = img.shape
    # everything that is NOT the tray background color
    img_mask = 255 - digitize(img_rect, config["HSV_Range"]["Back"])
    #cv2.imwrite('mask{0:d}.png'.format(Ncap), img_mask)
    img_ext = cv2.bitwise_and(img_rect, img_rect, mask=img_mask)
    #cv2.imwrite('ext_{0:d}.png'.format(Ncap), img_ext)
    img_maskB = digitize(img_rect, config["HSV_Range"]["Black"])
    img_black = cv2.bitwise_and(img_mask, img_maskB)
    #--------------------------------------
    # labeling of digitized image
    #--------------------------------------
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(img_mask)
    img_label = np.zeros((height, width), dtype = 'uint8')
    for i in range(1, nlabels):
        # stats[i][4] is the blob's pixel area
        if stats[i][4] > 200:
            cv2.putText(img_ext, '{:d}'.format(stats[i][4]), (int(centroids[i][0]), int(centroids[i][1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        if areaL < stats[i][4] < areaU: # object's size
            img_label[labels == i] = 255
    # detecting black area (front of R)
    nlabelsB, labelsB, statsB, centroidsB = cv2.connectedComponentsWithStats(img_maskB)
    img_labelB = np.zeros((height, width), dtype = 'uint8')
    for i in range(1, nlabelsB):
        if statsB[i][4] > 100:
            cv2.putText(img_ext, '{:d}'.format(statsB[i][4]), (int(centroidsB[i][0]), int(centroidsB[i][1])+50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv2.LINE_AA)
        if areaLB < statsB[i][4] < areaUB: # object's size
            img_labelB[labelsB == i] = 255
    contours, hierarchy = cv2.findContours(img_label, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    #--------------------------------------
    # store component positions
    #--------------------------------------
    tray_p1 = config["Tray"][trayID]["Corner"]["Real"]["UpperLeft"]
    tray_p2 = config["Tray"][trayID]["Corner"]["Real"]["UpperRight"]
    tray_p3 = config["Tray"][trayID]["Corner"]["Real"]["LowerRight"]
    tray_p4 = config["Tray"][trayID]["Corner"]["Real"]["LowerLeft"]
    # averaged tray extents in real coordinates
    tray_xs, tray_xe = (tray_p1[0] + tray_p4[0]) / 2, (tray_p2[0] + tray_p3[0]) / 2
    tray_ys, tray_ye = (tray_p3[1] + tray_p4[1]) / 2, (tray_p1[1] + tray_p2[1]) / 2
    cmp = []
    for i, cnt in enumerate(contours):
        rect = cv2.minAreaRect(cnt)
        px = rect[0][0]
        py = rect[0][1]
        ang = rect[2] / 180 * math.pi # deg -> rad
        box_int = np.int0(cv2.boxPoints(rect))
        box = cv2.boxPoints(rect)
        # midpoints of opposite box edges; the longer midline defines
        # the component's major axis
        ba1 = (box[0] + box[1])/2
        ba2 = (box[2] + box[3])/2
        bb1 = (box[0] + box[3])/2
        bb2 = (box[1] + box[2])/2
        la = np.linalg.norm(ba1 - ba2)
        lb = np.linalg.norm(bb1 - bb2)
        if la < lb:
            ang = ang + math.pi/2
        angD = ang / math.pi * 180 # angle in degree
        angR = angD / 360 * 100 # angle in percent
        # find front side (black area)
        x1 = px
        y1 = py
        fFront = False;
        for j in range(1, nlabelsB):
            if areaLB < statsB[j][4] < areaUB: # object's size
                x2 = centroidsB[j][0]
                y2 = centroidsB[j][1]
                d = np.array([x2 - x1, y2 - y1])
                dis = np.linalg.norm(d)
                # a black blob within 5 px of the component center marks its front
                if dis < 5:
                    fFront = True
        # annotate: box outline, major/minor axis lines, front circle
        cv2.drawContours(img_ext, [box_int], 0, (0,0,255), 1)
        rl = 10
        rs = 5
        p1x = np.int0(px + rl * math.cos(ang))
        p1y = np.int0(py + rl * math.sin(ang))
        p2x = np.int0(px - rl * math.cos(ang))
        p2y = np.int0(py - rl * math.sin(ang))
        p3x = np.int0(px + rs * math.cos(ang+math.pi/2))
        p3y = np.int0(py + rs * math.sin(ang+math.pi/2))
        p4x = np.int0(px - rs * math.cos(ang+math.pi/2))
        p4y = np.int0(py - rs * math.sin(ang+math.pi/2))
        cv2.line(img_ext, (p1x, p1y), (p2x, p2y), (0, 255, 0))
        cv2.line(img_ext, (p3x, p3y), (p4x, p4y), (0, 0, 255))
        if fFront == True:
            cv2.circle(img_ext, (int(px), int(py)), 20, (255, 255, 255))
        prx, pry = pos_transform_to_real(trayID, [px, py])
        # store in component list for this tray
        #print(prx, pry, tray_xs, tray_xe, tray_ys, tray_ye, tray_margin)
        if tray_xs + tray_margin <= prx <= tray_xe - tray_margin and tray_ys + tray_margin <= pry <= tray_ye - tray_margin:
            cmp.append([prx, pry, angD, fFront])
    return(cmp, img_ext)
def move_dispense(d):
    """Advance the solder dispenser axis by *d* units (negative retracts)."""
    send_cmd('G0B{:.2f}'.format(d))

# Dispense tuning: extrude / back-suck amount per pad area [mm2],
# plus an extra Z offset used while extruding.
Aextrude = 2 # coefficient of extrude per area[mm2]
Aback = 0.8
Zoffset_extrude = 1 # 1/5=0.2mm

def dispense(x, y, area, Zoffset, thickness):
    """Dispense solder paste on a pad of *area* mm2 at (x, y).

    *thickness* is the board thickness [mm]; Z moves in 5 steps/mm.
    """
    extrude = area * Aextrude
    back = area * Aback
    print("solder paste at ({0:.3f}, {1:.3f}), area={2:.3f} / extrude={3:.3f}".format(x, y, area, extrude))
    move_XY(x, y)
    move_Z(Zoffset + thickness * 5 + Zoffset_extrude)
    move_dispense(extrude)
    # pull back to reduce stringing
    move_dispense(-back)
    move_Z(Zoffset + thickness * 5 + 10 + Zoffset_extrude)
def load_board_config(filename = 'board.txt') :
    """Load the board description JSON from *filename* and return it.

    Uses a context manager so the file handle is always closed (the old
    `json.load(open(...))` leaked the handle on non-CPython runtimes).
    """
    print("Loading board config from "+filename)
    with open(filename, 'r') as fh:
        return json.load(fh)
|
import urllib.request
from urllib.request import Request, urlopen
import csv
from bs4 import BeautifulSoup
from teamURLs import teamurls, teamNames
def writeStats(url, name):
    """Scrape a team's roster-stats table from *url* into <name>Data.csv.

    Each row of the scraped table becomes one single-column CSV row.
    """
    # Browser-like User-Agent: the default urllib agent is often blocked
    user_agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46'
    request = Request(url, headers={'User-Agent': user_agent})
    page = urlopen(request)
    soup = BeautifulSoup(page, 'html.parser')
    # Only the first 'scrollable' <div> holds the all-players stats table;
    # the other scrollable divs are miscellaneous.
    listOfScrollableDivs = soup.findAll('div', {'class': 'scrollable'})
    rosterStats = listOfScrollableDivs[0]
    # Flatten the div's text and split into one entry per line
    rosterStatsList = rosterStats.text.strip().split('\n')
    # The first player's number is always at index 29; everything from the
    # "Totals" row onward is dropped.
    # NOTE(review): the slice stops at end-1, which also drops the row just
    # before "Totals" -- presumably intentional; verify against a live page.
    end = rosterStatsList.index("Totals")
    rosterStatsList = rosterStatsList[29:end-1]
    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows.
    with open((name + 'Data.csv'), 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        for entry in rosterStatsList:
            csv_writer.writerow([entry])
# Scrape all teams; the countdown in the message shows how many remain.
# NOTE(review): range(0,12) assumes teamurls/teamNames hold exactly 12
# entries -- consider range(len(teamurls)); verify against the teamURLs module.
for i in range(0,12):
    writeStats(teamurls[i], teamNames[i])
    print("writing "+teamNames[i]+"...("+str(12-i)+")")
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Checklist and ChecklistInsumo tables."""

    initial = True

    # Needs the insumos app's initial migration (target of the Insumo FK).
    dependencies = [
        ('insumos', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Checklist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lugar_compra', models.CharField(max_length=120)),
                ('user', models.CharField(max_length=120)),
            ],
        ),
        migrations.CreateModel(
            name='ChecklistInsumo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.CharField(max_length=120)),
                ('cantidad', models.IntegerField()),
                ('comprado', models.BooleanField()),
                ('checklist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='checklist.Checklist')),
                ('insumo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insumos.Insumo')),
            ],
        ),
    ]
|
#!/usr/bin/python
import os
import sys
def clean(fname):
    """Strip glossaryText:/conceptRef: lines and de-colon hoverText lines.

    The original file is preserved as <fname>.bak; the cleaned text
    replaces <fname> in place.
    """
    print('cleaning %s' % fname)
    with open(fname, 'rt') as f:
        lines = f.readlines()
    newlines = []
    for line in lines:
        if line.startswith('glossaryText:') or line.startswith('conceptRef:'):
            continue  # drop these lines entirely
        if line.startswith('hoverText:'):
            # keep the 'hoverText:' prefix, soften any further colons
            line = line[:10] + line[10:].replace(':', ' -- ')
        newlines.append(line)
    bak = fname + '.bak'
    if os.path.isfile(bak):
        os.remove(bak)
    os.rename(fname, bak)
    # readlines() keeps each line's trailing newline, so plain concatenation
    # preserves the original layout; the old '\n'.join(...) doubled every
    # newline in the output.
    with open(fname, 'wt') as f:
        f.write(''.join(newlines))
if __name__ == '__main__':
    # Clean every file named on the command line, in place
    args = sys.argv[1:]
    for arg in args:
        clean(arg)
|
# -- Project information -----------------------------------------------------
project = 'LUNA'
copyright = '2020 Great Scott Gadgets'
author = 'Katherine J. Temkin'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# autodoc pulls API docs from docstrings; napoleon parses Google/NumPy style
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon'
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
# extra stylesheet layered on top of the theme
html_css_files = ['status.css']
# -- Options for automatic documentation -------------------------------------
# Skip documenting Tests.
def autodoc_skip_member_handler(app, what, name, obj, skip, options):
    """Return True when autodoc should skip *name*.

    Skipped: test classes (ending in "Test"), private members (leading
    underscore), and the nMigen-style "elaborate" method.
    """
    if name.endswith("Test"):
        return True
    if name.startswith('_'):
        return True
    return name == "elaborate"
def setup(app):
    """Sphinx extension entry point: register the member-skip handler."""
    app.connect('autodoc-skip-member', autodoc_skip_member_handler)
|
class Solution(object):
    def reverse(self, n):
        """Reverse the decimal digits of integer *n*.

        :type n: int
        :rtype: int -- 0 if the reversed value does not fit a signed 32-bit int
        """
        # remember the sign, work on the magnitude
        negative = n < 0
        n = abs(n)
        rev = 0
        # peel digits off the right and push them onto rev
        while n != 0:
            rev = rev * 10 + n % 10
            # floor division: plain '/' yields a float on Python 3, which
            # made this loop never terminate normally
            n //= 10
        if negative:
            rev = -rev
        # LeetCode constraint: the result must fit in a signed 32-bit int
        if (-2**31) <= rev <= (2**31)-1:
            return rev
        return 0
|
from __future__ import print_function
import re
from inspect import isfunction
from collections import defaultdict
from . import uff_pb2 as uff_pb
from .data import FieldType, create_data
from .exceptions import UffException
def _resolve_ref(field, referenced_data):
    """Follow ref-type fields through *referenced_data* to a concrete field.

    Raises UffException when a reference points at an unknown key.
    """
    if field.WhichOneof("data_oneof") != FieldType.ref:
        return field
    if field.ref not in referenced_data:
        raise UffException("Unknown reference: %s" % field.ref)
    return _resolve_ref(referenced_data[field.ref], referenced_data)
class _Constraint(object):
def __init__(self, func, priority):
self._deleted = False
self.priority = priority
self.func = func
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
class _DeletedConstraint(_Constraint):
    """Sentinel constraint marking a deleted entry; it is never executed."""

    def __init__(self):
        super(_DeletedConstraint, self).__init__(None, 0)
        self._deleted = True
class _SharedDescriptorOpMemory(object):
def __init__(self):
self.fields = set()
self.extra_fields = set()
def mark_field(self, field_name, _is_extra_field=False):
fields = self.fields if not _is_extra_field else self.extra_fields
fields.add(field_name)
class DescriptorOp(object):
    """Validation descriptor for a single UFF operation.

    Holds a dict of named _Constraint callables that are run against a node
    (in priority order) by _check_node(). Builder methods (field, constraint,
    ...) return self so definitions can be chained.
    """

    def __init__(self):
        self._deleted = False
        self._constraints = {}
        self._priority = 0  # auto-incremented default priority for new constraints

    def extend_descriptor_op(self, descriptor_op):
        """Merge *descriptor_op*'s constraints into this one.

        Constraints already present keep their priority; only their function
        and deleted flag are overwritten.
        """
        self._deleted = descriptor_op._deleted
        for name, constraint in descriptor_op._constraints.items():
            if name in self._constraints:
                # the priorities must stay the same for constraints that are linked
                self._constraints[name].func = constraint.func
                self._constraints[name]._deleted = constraint._deleted
            else:
                self._constraints[name] = constraint

    def delete_constraint(self, name):
        """Replace the named constraint with a deleted sentinel."""
        self._constraints[name] = _DeletedConstraint()
        return self

    def _delete_field(self, field_name, _is_extra_field):
        # field/extra-field constraints share a name prefix convention
        constraint_prefix = "field_" if not _is_extra_field else "extra_field_"
        return self.delete_constraint(constraint_prefix + field_name)

    def delete_field(self, field_name):
        return self._delete_field(field_name, _is_extra_field=False)

    def delete_extra_field(self, field_name):
        return self._delete_field(field_name, _is_extra_field=True)

    def constraint(self, name, func, error, priority=None):
        """Register *func* as a constraint; raise *error* when it returns falsy."""
        def _constraint(node, fields, extra_fields, shared_mem):
            res = func(node, fields, extra_fields, shared_mem)
            if not res:
                raise error
        if priority is None:
            self._priority += 1
            priority = self._priority
        self._constraints[name] = _Constraint(_constraint, priority)
        return self

    def _field(self, field_type, field_name, default_value=None, _is_extra_field=False):
        """Register a typed (extra_)field, optionally with a default value.

        *default_value* may be a callable taking (node, fields, extra_fields,
        shared_mem), or a raw value passed through create_data().
        """
        constraint_prefix = "field_" if not _is_extra_field else "extra_field_"
        def _check_field(n, f, e, s):
            if _is_extra_field:  # swap fields and extra_fields
                e, f = f, e
            # BUGFIX: the extra-field flag was not forwarded here (unlike
            # _field_enum), so extra fields were recorded in shared_mem.fields
            # and _check_node then rejected them as unknown extra_fields.
            s.mark_field(field_name, _is_extra_field)
            if field_name not in f:
                if isfunction(default_value):
                    f[field_name] = default_value(n, f, e, s)
                else:
                    f[field_name] = create_data(default_value, field_type)
            return f[field_name].WhichOneof("data_oneof") == field_type
        self.constraint(constraint_prefix + field_name, _check_field,
                        UffException("%s had bad type or is not present" % field_name))
        self._constraints[constraint_prefix + field_name].type = field_type
        return self

    def field(self, field_type, field_name, default_value=None):
        return self._field(field_type, field_name, default_value, _is_extra_field=False)

    def extra_field(self, field_type, field_name, default_value=None):
        return self._field(field_type, field_name, default_value, _is_extra_field=True)

    def _field_enum(self, field_name, enum, optional=False, _is_extra_field=False):
        """Register a string (extra_)field whose value must be in *enum*."""
        constraint_prefix = "field_" if not _is_extra_field else "extra_field_"
        def _check_field(n, f, e, s):
            if _is_extra_field:
                e, f = f, e
            s.mark_field(field_name, _is_extra_field)
            if field_name not in f:
                return optional
            return f[field_name].WhichOneof("data_oneof") == FieldType.s and f[field_name].s in enum
        self.constraint(constraint_prefix + field_name, _check_field,
                        UffException("%s had bad type or is not present" % field_name))
        self._constraints[constraint_prefix + field_name].type = FieldType.s
        return self

    def field_enum(self, field_name, enum, optional=False):
        return self._field_enum(field_name, enum, optional, _is_extra_field=False)

    def extra_field_enum(self, field_name, enum, optional=False):
        return self._field_enum(field_name, enum, optional, _is_extra_field=True)

    def _ref_field(self, field_name, _is_extra_field):
        """Require that, when present, the named field is a reference."""
        constraint_prefix = "ref_field_" if not _is_extra_field else "ref_extra_field_"
        def _check_field(n, f, e, s):
            # check the node's raw (unresolved) fields, not the resolved copies
            f = n.fields if not _is_extra_field else n.extra_fields
            if field_name not in f:
                return True  # an optional field must not be checked in that constraint
            return f[field_name].WhichOneof("data_oneof") == FieldType.ref
        self.constraint(constraint_prefix + field_name, _check_field,
                        UffException("%s is not a referenced data" % field_name))
        return self

    def ref_field(self, field_name):
        return self._ref_field(field_name, _is_extra_field=False)

    def ref_extra_field(self, field_name):
        return self._ref_field(field_name, _is_extra_field=True)

    def fieldOrders(self, size=-1):
        """Require inputs_orders/outputs_orders dim-order fields.

        *size* < 0 means the expected inputs_orders length equals the node's
        input count; outputs_orders defaults to the first input order.
        """
        def _inputs_orders_size(n, f, e, s):
            exp_size = len(n.inputs) if size < 0 else size
            return exp_size == len(f["inputs_orders"].dim_orders_list.val) and exp_size > 0
        def _default_outputs_orders(n, f, e, s):
            return create_data(
                [f["inputs_orders"].dim_orders_list.val[0]],
                FieldType.dim_orders_list)
        return (self.field(FieldType.dim_orders_list, "inputs_orders")
                .constraint("inputs_orders_size", _inputs_orders_size,
                            UffException("Invalid number of inputs_orders"))
                .field(FieldType.dim_orders_list, "outputs_orders", _default_outputs_orders)
                .ref_field("inputs_orders").ref_field("outputs_orders"))

    def inputs_size(self, size):
        """Require the node to have exactly *size* inputs (checked first)."""
        return self.constraint("inputs_size",
                               lambda n, f, e, s: len(n.inputs) == size,
                               UffException("Invalid number of inputs, expected: %d" % size),
                               priority=0)

    def has_inputs(self):
        """Require the node to have at least one input (checked first)."""
        return self.constraint("has_inputs", lambda n, f, e, _: len(n.inputs) > 0,
                               UffException("No inputs found"), priority=0)

    def get_field_type(self, field_name):
        """Return the declared FieldType of a registered field."""
        try:
            return self._constraints["field_" + field_name].type
        # KeyError: no such constraint; AttributeError: a hand-registered
        # constraint without a .type attribute. Anything else should propagate.
        except (KeyError, AttributeError):
            raise UffException("The field {} doesn't exist".format(field_name))

    def _check_node(self, node, fields, extra_fields):
        """Run all live constraints against *node*; raise UffException on failure.

        After the constraints pass, any field not marked in shared memory is
        rejected as unknown.
        """
        shared_mem = _SharedDescriptorOpMemory()
        for constraint in sorted(self._constraints.values(), key=lambda c: c.priority):
            if constraint._deleted:
                continue
            err = constraint(node, fields, extra_fields, shared_mem)
            if err:
                raise UffException(err)
        for field_name in fields.keys():
            if field_name not in shared_mem.fields:
                raise UffException("field %s unknown" % field_name)
        for field_name in extra_fields.keys():
            if field_name not in shared_mem.extra_fields:
                raise UffException("extra_field %s unknown" % field_name)
        return True
class _DeletedDescriptorOp(DescriptorOp):
    """Sentinel DescriptorOp used as the default for unknown operations."""

    def __init__(self):
        super(_DeletedDescriptorOp, self).__init__()
        self._deleted = True
class Descriptor(object):
    """A named, versioned set of DescriptorOp validators.

    Supports dict-style access to per-operation descriptors, extension by
    other descriptors, and regex-registered operation names that are
    accepted without field validation.
    """

    def __init__(self, name, version, optional, desc_ops):
        # unknown ops resolve to a deleted sentinel instead of raising
        self._desc_ops = defaultdict(_DeletedDescriptorOp, desc_ops)
        self.name = name
        self.version = version
        self.optional = optional
        self.descriptors_extended = []
        self._regexes_operators = []

    def __delitem__(self, op):
        # BUGFIX: this previously took an extra desc_op argument and *added*
        # it, so `del descriptor[op]` always raised TypeError. Delegate to
        # delete_descriptor_op to match __setitem__/__getitem__ semantics.
        self.delete_descriptor_op(op)

    def __setitem__(self, op, desc):
        # BUGFIX: previously called self.add_operator(...), which is not
        # defined on this class; add_descriptor_op is the intended method.
        self.add_descriptor_op(op, desc)

    def __getitem__(self, op):
        value = self._desc_ops[op]
        if value._deleted:
            raise KeyError(op)
        return value

    def __contains__(self, op):
        if op in self._desc_ops:
            return not self._desc_ops[op]._deleted
        return False

    def to_uff(self, debug=False):
        """Serialize identity info; the core (unnamed) descriptor cannot be."""
        if self.name is None:
            raise UffException("The core descriptor cannot be serialized")
        return uff_pb.Descriptor(id=self.name, version=self.version, optional=self.optional)

    def extend_descriptor(self, descriptor):
        """Merge another descriptor's ops, lineage and operation regexes."""
        for op, desc_op in descriptor._desc_ops.items():
            self._desc_ops[op].extend_descriptor_op(desc_op)
        self.descriptors_extended.append(descriptor)
        self.descriptors_extended.extend(descriptor.descriptors_extended)
        self._regexes_operators.extend(descriptor._regexes_operators)
        return self

    def delete_descriptor_op(self, op):
        """Mark the descriptor for *op* as deleted."""
        self._desc_ops[op]._deleted = True
        return self

    def add_descriptor_op(self, op, desc_op):
        """Register (or replace) the descriptor for *op*."""
        self._desc_ops[op] = desc_op
        return self

    def add_regex_operator(self, regex):
        """Accept any operation name matching *regex* without field checks."""
        self._regexes_operators.append(regex)
        return self

    def check_node(self, node, referenced_data):
        """Validate *node*: resolve its field references, then run op checks."""
        if node.operation not in self:
            # regex-registered operations pass without field validation
            if not any(re.match(regex_op, node.operation) for regex_op in self._regexes_operators):
                raise UffException("Unknown operation %s" % node.operation)
            return True
        fields = {k: _resolve_ref(v, referenced_data) for k, v in node.fields.items()}
        extra_fields = {k: _resolve_ref(v, referenced_data) for k, v in node.extra_fields.items()}
        return self._desc_ops[node.operation]._check_node(node, fields, extra_fields)
|
from absl import app, flags, logging
from absl.flags import FLAGS
import numpy as np
from yolov3.models import YoloV3
from yolov3.utils import load_darknet_weights
import tensorflow as tf
# Command-line flags for the darknet -> TensorFlow checkpoint conversion.
# NOTE(review): the default weights path spells 'yolov3.wiegthts' -- looks
# like a typo for 'yolov3.weights'; confirm against the actual data file
# before changing it.
flags.DEFINE_string('weights', './data/yolov3.wiegthts', 'path to weights file')
flags.DEFINE_string('output', './checkpoints/yolov3.tf', 'path to output')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
def main(_argv):
    """Build YoloV3, load darknet weights, sanity-check, save TF weights."""
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        # avoid grabbing all GPU memory up front
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    yolo = YoloV3(classes=FLAGS.num_classes)
    yolo.summary()
    logging.info("model created")
    load_darknet_weights(yolo, FLAGS.weights, False) # False for absence of yolo-TinY
    logging.info("weights loaded")
    # random forward pass just to verify the graph executes end to end
    img = np.random.random((1,320,320,3)).astype(np.float32)
    output = yolo(img)
    logging.info("sanity check passed")
    yolo.save_weights(FLAGS.output)
    logging.info("weights saved")
if __name__ == '__main__':
    try:
        app.run(main)
    except SystemExit:
        # absl raises SystemExit on normal completion; swallow it
        pass
from flask import Flask, render_template
from flask_sockets import Sockets
import random
import json
import algo
import info
# serve static files from the package root (no /static URL prefix)
app = Flask(__name__, static_url_path='')
sockets = Sockets(app)
## Load Data ##
# market data (symbols etc.) used by the index page and the socket handler
main_data = info.main()
###############
@app.route('/')
def index():
    """Serve the main page, pre-populated with the loaded market data."""
    return render_template('index.html', **main_data)
@app.route('/settings')
def settings():
    """Serve the settings page."""
    return render_template('settings.html')
def send(ws, data):
    """JSON-encode *data* and push it down the websocket."""
    payload = json.dumps(data)
    ws.send(payload)
@sockets.route('/prediction')
def prediction_socket(ws):
    """WebSocket endpoint: receive a ticker symbol, stream back a prediction.

    Sends a 'loading' status first, then either a 'complete' payload with
    the up/down prediction and the raw model input, or 'error' for an
    unknown symbol.
    """
    while not ws.closed:
        raw = ws.receive()
        if raw is None:
            # receive() returns None once the client disconnects mid-wait;
            # calling .upper() on it crashed the handler before
            break
        stock = raw.upper()
        if stock in main_data["symbols"]:
            print('Loading ' + stock + '...')
            send(ws, {
                'status': 'loading',
                'stock': stock
            })
            pred, data, time_taken = algo.algo_predict(stock)
            pred_up = round(float(pred[0]), 3)
            pred_down = round(float(pred[1]), 3)
            # last element of the model input is the concatenated headlines
            raw_input = data[-1]
            num_headlines = raw_input.count('**NEXT**')
            send(ws, {
                'status': 'complete',
                'stock': stock,
                'prediction': [pred_up, pred_down],
                'rawinput': raw_input,
                'numheadlines': num_headlines,
                'time': time_taken
            })
        else:
            send(ws, {'status': 'error'})
if __name__ == "__main__":
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
server = pywsgi.WSGIServer(('', 5000), app, handler_class=WebSocketHandler)
print("http://localhost:5000/")
server.serve_forever()
|
"""AppConfig for dmarc."""
from django.apps import AppConfig
from modoboa.dmarc.forms import load_settings
class DmarcConfig(AppConfig):
    """App configuration."""

    name = "modoboa.dmarc"
    verbose_name = "Modoboa DMARC tools"

    def ready(self):
        # Register app settings and import signal handlers once the
        # Django app registry is fully loaded
        load_settings()
        from . import handlers
|
class Node:
    """A singly-linked-list cell holding *value* and a pointer to the next cell."""

    def __init__(self, value):
        self.value = value
        self.link = None
class List:
    """Singly linked list with tail insert, display, and in-place reversal."""

    def __init__(self):
        self.header = None  # first node, or None when the list is empty

    def insert(self, value):
        """Append *value* at the tail of the list."""
        new = Node(value)
        if self.header is None:
            self.header = new
            return
        ptr = self.header
        while ptr.link is not None:
            ptr = ptr.link
        ptr.link = new

    def display(self):
        """Print the values front-to-back, space-separated."""
        ptr = self.header
        while ptr:
            print(ptr.value, end=" ")
            ptr = ptr.link

    def Reverse(self, ptr):
        """Recursively reverse the chain starting at *ptr*; return the new head.

        The original version had a syntax error (`return ptr[]`), never
        relinked the head, and left the old head pointing into the reversed
        chain, so it could not reverse lists of more than two nodes.
        """
        if ptr is None or ptr.link is None:
            return ptr
        new_head = self.Reverse(ptr.link)
        ptr.link.link = ptr
        ptr.link = None  # the old head becomes the tail
        return new_head

    def reverse(self):
        """Reverse the list in place."""
        self.header = self.Reverse(self.header)
|
import cv2
import numpy as np
from shapely.geometry import Polygon
from functions.homography_validation import all_inliers_in_the_polygon, \
out_area_ratio
from functions.plot_manager import save_two_polygons, save_window, save_inliers
from functions.window_functions import window_filter, \
get_continually_discarded_constant, get_min_match_count_constant, \
get_max_iters_constant
from objects.constants import Constants
from objects.homography import Homography
def find_homography_double_check(test_image, template, good_matches, window,
                                 test_keypoints, template_keypoints, plots,
                                 id_hotpoint, id_pos, id_homography, ratios_list, id_hom_global, big_window=False):
    """Estimate a template homography twice for robustness.

    A first homography is fitted from the matches inside *window*; if one is
    found, a second fit is attempted restricted to the polygon detected by
    the first pass, and its result (when available) replaces the first.

    Returns a 4-tuple ``(homography_or_None, good_matches, ratios_list,
    plots)``. On success the inlier matches are removed from *good_matches*
    (mutated in place) and the new side-ratio is recorded in *ratios_list*.
    """
    H_found = None
    inliers_found = None
    object_polygon_found = None
    # First pass: fit using matches that fall inside the given window.
    H1, inliers_matches1, object_polygon1, plots = __find_homography(test_image, template, window, good_matches,
                                                                     test_keypoints, template_keypoints, ratios_list,
                                                                     id_homography, big_window, id_hom_global, plots)
    if H1 is None:
        # No homography found at all: leave good_matches untouched.
        return None, good_matches, ratios_list, plots
    else:
        H_found = H1
        inliers_found = inliers_matches1
        object_polygon_found = object_polygon1
        # Second pass: refine by re-fitting only inside the detected polygon.
        H2, inliers_matches2, object_polygon2, plots = __find_homography(test_image, template, object_polygon1,
                                                                         good_matches,
                                                                         test_keypoints, template_keypoints, ratios_list,
                                                                         id_homography, big_window, id_hom_global, plots)
        if H2 is not None:
            # Keep the refined result; when the second pass fails we simply
            # fall back to the first-pass homography kept above.
            H_found = H2
            inliers_found = inliers_matches2
            object_polygon_found = object_polygon2
        # ------------------------------------------
        # PLOTS
        # ------------------------------------------
        if Constants.SAVE:
            save_two_polygons(test_image, id_hotpoint, id_pos, id_homography, object_polygon1, object_polygon2,
                              template.name)
            save_window(test_image, id_hotpoint, id_pos, id_homography, window, inliers_found, test_keypoints,
                        template.name)
            save_inliers(test_image, id_hotpoint, id_pos, id_homography, object_polygon_found, inliers_found,
                         test_keypoints, template.name)
        # ------------------------------------------
        # CLEAN MATCHES
        # ------------------------------------------
        # Consume the inliers so subsequent detections cannot reuse them.
        for match in inliers_found:
            good_matches.remove(match)
        # Update ratio statistics with the accepted homography.
        homography_found = Homography(H_found, object_polygon_found, id_hotpoint, id_pos, id_homography, id_hom_global,
                                      template, len(inliers_found))
        ratios_list.add_new_ratio(homography_found, template)
        return homography_found, good_matches, ratios_list, plots
def __find_homography(test_image, template, window, good_matches, test_keypoints, template_keypoints, ratios_list, idd,
                      big_window, id_hom_global, plots):
    """RANSAC-fit a homography from the matches inside *window*, retrying
    until a geometrically plausible one is accepted or a stop condition hits.

    Stop conditions: too many consecutive rejections, too few matches or
    source points left, or RANSAC yielding no model / too few inliers.

    Returns ``(H, inliers_matches, object_polygon, plots)`` on success, or
    ``(None, None, None, plots)`` when no acceptable homography exists.
    """
    # Keep only the matches whose test-image keypoint lies inside the window.
    inliers_good_matches, dst_pts, src_pts = window_filter(good_matches, test_keypoints, template_keypoints, window)
    height, width, _ = test_image.shape  # e.g. 1000, 750, 3
    test_image_polygon = Polygon([(0, 0), (width, 0), (width, height), (0, height)])
    area_test_image = test_image_polygon.area
    area_window = window.area
    continuously_discarded_count = 0
    end = False
    while not end:
        # Thresholds scale with how large the window is relative to the image.
        max_continuously_discarded = get_continually_discarded_constant(area_window, area_test_image)
        if continuously_discarded_count < max_continuously_discarded:
            min_match_count = get_min_match_count_constant(area_window, area_test_image)
            if len(good_matches) >= min_match_count:
                if len(src_pts) >= Constants.MIN_MATCH_CURRENT:
                    max_iters = get_max_iters_constant(continuously_discarded_count, max_continuously_discarded,
                                                       len(good_matches), min_match_count)
                    H, inliers_mask = cv2.findHomography(src_pts,
                                                         dst_pts,
                                                         cv2.RANSAC,
                                                         Constants.RANSAC_REPROJECTION_ERROR,
                                                         maxIters=max_iters)
                    # If no available homography exists, the algorithm ends
                    if H is not None:
                        matches_mask = inliers_mask.ravel().tolist()
                        inliers_matches = [match for i, match in enumerate(inliers_good_matches) if matches_mask[i]]
                        # Project the template corners into the test image to
                        # obtain the detected object polygon.
                        height, width = template.image.shape[0:2]
                        src_vrtx = np.float32([[0, 0],
                                               [0, height - 1],
                                               [width - 1, height - 1],
                                               [width - 1, 0]]).reshape(-1, 1, 2)
                        # Extract test image rectangle vertices
                        test_height, test_width = test_image.shape[0:2]
                        test_vrtx = np.float32([[0, 0],
                                                [0, test_height - 1],
                                                [test_width - 1, test_height - 1],
                                                [test_width - 1, 0]])
                        test_image_polygon = Polygon([(test_vrtx[0][0], test_vrtx[0][1]),
                                                      (test_vrtx[1][0], test_vrtx[1][1]),
                                                      (test_vrtx[2][0], test_vrtx[2][1]),
                                                      (test_vrtx[3][0], test_vrtx[3][1])])
                        dst_vrtx = cv2.perspectiveTransform(src_vrtx, H)
                        object_polygon = Polygon([(dst_vrtx[0][0][0], dst_vrtx[0][0][1]),
                                                  (dst_vrtx[1][0][0], dst_vrtx[1][0][1]),
                                                  (dst_vrtx[2][0][0], dst_vrtx[2][0][1]),
                                                  (dst_vrtx[3][0][0], dst_vrtx[3][0][1])])
                        # Geometric sanity checks: H must be full-rank, the
                        # polygon valid, all inliers inside it, and not too
                        # much of it outside the image.
                        if (np.linalg.matrix_rank(H) == H.shape[0] and
                                object_polygon.is_valid and
                                all_inliers_in_the_polygon(test_keypoints, inliers_matches, object_polygon) and
                                out_area_ratio(object_polygon, test_image_polygon, Constants.OUT_OF_IMAGE_THRESHOLD)):
                            if np.count_nonzero(matches_mask) >= Constants.MIN_MATCH_CURRENT:
                                is_likely, plot = ratios_list.is_homography_likely(object_polygon, template,
                                                                                   test_image, idd,
                                                                                   big_window, id_hom_global)
                                if plot is not None:
                                    plots.append(plot)
                                if is_likely:
                                    return H, inliers_matches, object_polygon, plots
                                else:
                                    # print('Homography discarded because of the ratio of sides')
                                    continuously_discarded_count += 1
                                    id_hom_global[0] += 1
                            else:
                                # print("Not enough matches are found in the last homography: {}/{}".format(
                                #     np.count_nonzero(matches_mask), MIN_MATCH_CURRENT))
                                end = True
                        else:
                            # print("Degenerate homography found")
                            continuously_discarded_count += 1
                    else:
                        # print("Not possible to find another homography")
                        end = True
                else:
                    # print("Too few points to fit the homography ({} when minimum is {})".format(len(src_pts),
                    #                                                                             MIN_MATCH_CURRENT))
                    end = True
            else:
                # print("Not enough matches remain: {}/{}".format(len(good_matches),
                #                                                 get_min_match_count_constant(area_window,
                #                                                                              area_test_image)))
                end = True
        else:
            # print("Discarded " + str(continuously_discarded_count) +
            #       " homography in a row. Not able to find other homographies")
            end = True
    return None, None, None, plots
|
from mod_base import*
class Raw(Command):
    """Send raw data to the irc server."""

    def run(self, win, user, data, caller=None):
        """Forward *data* verbatim to the IRC server.

        Returns False when no data was supplied, True after sending.
        """
        if data is None:  # idiom fix: identity check instead of "== None"
            return False
        self.bot.SendRaw(data)
        return True
module = {
"class": Raw,
"type": MOD_COMMAND,
"level": 5,
"zone":IRC_ZONE_BOTH
} |
import sys
import os

# Redirect stdin to a local sample-input file so the solution runs from the
# editor exactly as it would when reading the judge's stdin.
f = open("C:/Users/user/Documents/atCoderProblem/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
d, n = map(int, input().split())
# Print the n-th smallest positive integer divisible by 100 exactly d times:
# for n <= 99 that is n * 100**d, while for n == 100 the candidate 100*100**d
# would be divisible one extra time, so 101 * 100**d is printed instead
# (assumes n <= 100 as per the problem constraints -- TODO confirm).
if n <= 99:
    print(100 ** d * n)
else:
    print(100 ** d * 101)
|
""" config.py
Microsimulation config for mulit-LAD MPI simulation
"""
import numpy as np
import glob
import neworder

# define some global variables describing where the starting population and
# the parameters of the dynamics come from
initial_populations = glob.glob("examples/people_multi/data/ssm_*_MSOA11_ppp_2011.csv")
asfr = "examples/shared/NewETHPOP_fertility.csv"  # age-specific fertility rates
asmr = "examples/shared/NewETHPOP_mortality.csv"  # age-specific mortality rates
# internal in-migration
asir = "examples/shared/NewETHPOP_inmig.csv"
# internal out-migration
asor = "examples/shared/NewETHPOP_outmig.csv"
# immigration
ascr = "examples/shared/NewETHPOP_immig.csv"
# emigration
asxr = "examples/shared/NewETHPOP_emig.csv"
def partition(arr, count):
    """Deal *arr* round-robin into *count* sublists (element i goes to bucket i % count)."""
    buckets = []
    for offset in range(count):
        buckets.append(arr[offset::count])
    return buckets
# MPI: split the initial population files over the processes, one slice per rank
initial_populations = partition(initial_populations, neworder.mpi.size())

# running/debug options
neworder.log_level = 1

# initialisation: each rank constructs a Population from its own share of files
neworder.initialisations = {
    "people": { "module": "population", "class_": "Population", "args": (initial_populations[neworder.mpi.rank()], asfr, asmr, asir, asor, ascr, asxr) }
}

# define the evolution from 2011.25 to 2050.25
# NOTE(review): [39] is presumably the checkpoint/step specification -- confirm
# against the neworder Timeline documentation.
neworder.timeline = neworder.Timeline(2011.25, 2050.25, [39])

# timestep must be defined in neworder; each transition is an expression
# evaluated against the initialised "people" object every step
neworder.dataframe.transitions = {
    "fertility": "people.births(timestep)",
    "mortality": "people.deaths(timestep)",
    "migration": "people.migrations(timestep)",
    "age": "people.age(timestep)"
}

# checks to perform after each timestep. Assumed to return a boolean
neworder.do_checks = True
# assumed to be methods of class_ returning True if checks pass
neworder.checks = {
    "check": "people.check()"
}

# Generate output at each checkpoint
neworder.checkpoints = {
    #"check_data" : "people.check()",
    "write_table" : "people.write_table()"
}
|
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
import os, math, signal, unittest, shutil, time, sys, random
import pexpect
MENU_DELAY = 0.4  # seconds to wait for an in-game menu to appear after a click
def sign(x):
    """Return -1, 0 or 1 according to the sign of *x*."""
    if x == 0:
        return 0
    return -1 if x < 0 else 1
## TODO: Move to somewhere useful for the engine itself?
## TODO: Move to somewhere useful for the engine itself?
class Vector3:
    """Mutable 3-component vector with chainable in-place arithmetic."""

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    @staticmethod
    def parse(text):
        """Parse an '<x, y, z>'-style string into a Vector3."""
        cleaned = text.replace('>', '').replace('<', '')
        components = [float(piece) for piece in cleaned.split(',')]
        return Vector3(*components)

    def add(self, other):
        """In-place component-wise addition; returns self for chaining."""
        self.x += other.x
        self.y += other.y
        self.z += other.z
        return self

    def sub(self, other):
        """In-place component-wise subtraction; returns self for chaining."""
        self.x -= other.x
        self.y -= other.y
        self.z -= other.z
        return self

    def mul(self, other):
        """In-place scalar multiplication; returns self for chaining."""
        self.x *= other
        self.y *= other
        self.z *= other
        return self

    def magnitude(self):
        """Euclidean length of the vector."""
        return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)

    def __str__(self):
        return '[%f,%f,%f]' % (self.x, self.y, self.z)

    __repr__ = __str__  # same rendering for str() and repr()

    def copy(self):
        """Return an independent Vector3 with the same components."""
        return Vector3(self.x, self.y, self.z)
barrier_counter = 0  # global counter used to mint unique sentinel strings
class TestMaster(unittest.TestCase):
# Utilities
def assertExpect(self, proc, text, timeout=4):
self.assertEquals(proc.expect(text, timeout), 0)
##! Make a new unique 'barrier' that can be read from a process, without it coming from any other source
def make_barrier(self):
global barrier_counter
barrier_counter += 1
# print " Input barrier:", barrier_counter
return 'okapi%dipako' % barrier_counter
def make_unique(self):
global barrier_counter
barrier_counter += 1
return 'xyz%d_zyx' % barrier_counter
def inject_mouse_click(self, proc, x, y, button=1):
# print "Inject mouse click", x, y, button
barrier = self.make_barrier()
proc.sendline('CModule.inject_mouse_position(%f, %f, True); CModule.flush_input_events(); print "%s"' % (x, y, barrier))
self.assertExpect(proc, barrier)
barrier = self.make_barrier()
proc.sendline('CModule.inject_mouse_click(%d, 1); CModule.flush_input_events(); print "%s";' % (button, barrier))
self.assertExpect(proc, barrier)
barrier = self.make_barrier()
proc.sendline('CModule.inject_mouse_click(%d, 0); CModule.flush_input_events(); print "%s";' % (button, barrier))
self.assertExpect(proc, barrier)
# print " complete"
def inject_key_press(self, proc, syms, _unicodes=None):
# print "Inject key press", sym, _unicode
if type(syms) not in [list, tuple]:
syms = [syms]
for i in range(len(syms)):
sym = syms[i]
if _unicodes is None:
_unicode = sym
else:
_unicode = unicodes[i]
barrier = self.make_barrier()
proc.sendline('CModule.inject_key_press(%d, %d, 1, False)' % (sym, _unicode))
# proc.sendline('CModule.flush_input_events(); print "%sk";' % barrier) # No unicode on way up
# self.assertExpect(proc, barrier)
#
# barrier = self.make_barrier()
proc.sendline('CModule.inject_key_press(%d, 0, 0, False)' % sym)
proc.sendline('CModule.flush_input_events(); print "%sk";' % barrier) # No unicode on way up
self.assertExpect(proc, barrier)
# print " complete"
# Setup/teardown
def setUp(self):
self.procs = []
self.local_dir = '/dev/shm/intensityengine-temp-local'
shutil.rmtree(self.local_dir, ignore_errors=True)
os.makedirs(self.local_dir)
self.master_dir = os.path.join(self.local_dir, 'master')
os.makedirs(self.master_dir)
shutil.copyfile(os.path.join('local', 'master_server', 'settings.json'), os.path.join(self.master_dir, 'settings.json'))
shutil.copytree(os.path.join('local', 'master_server', 'templates'), os.path.join(self.master_dir, 'templates'))
self.server_dir = os.path.join(self.local_dir, 'server')
os.makedirs(self.server_dir)
shutil.copyfile(os.path.join('local', 'server', 'settings.json'), os.path.join(self.server_dir, 'settings.json'))
self.client_dir = os.path.join(self.local_dir, 'client')
os.makedirs(self.client_dir)
shutil.copyfile(os.path.join('local', 'client', 'settings_console.json'), os.path.join(self.client_dir, 'settings.json')) # console
def tearDown(self):
for proc in self.procs:
try:
os.kill(-proc.pid, signal.SIGKILL)
except:
try:
os.kill(proc.pid, signal.SIGKILL)
except:
print "Warning: Killing failed for process", proc.pid
pass
# shutil.rmtree(self.local_dir)
def add_proc(self, proc):
proc.delaybeforesend = 0.1 # Might need the default of 0.1 for the GUI - if you have problems, try that XXX
self.procs.append(proc)
return proc
def run_command(self, procs, command, barrier=None):
if barrier is None:
barrier = self.make_barrier()
command += ' ; print "%s"' % barrier
if type(procs) not in [list, tuple]:
procs = [procs]
ret = []
for proc in procs:
unique = self.make_unique()
proc.sendline('''
def doit_%s(): %s
main_actionqueue.add_action(doit_%s)
''' % (unique, command, unique))
self.assertEquals(proc.expect(barrier, 4.0), 0)
ret += [proc.readline().replace('\n', '').replace('\r', '')]
if len(ret) == 1:
return ret[0]
else:
return ret
## Flushes away the procedure's output
def ignore_output(self, proc, text, timeout=1.0):
try:
proc.expect(text, timeout)
except pexpect.TIMEOUT:
pass
except pexpect.EOF:
pass
def eval_script(self, proc, script):
return self.run_command(proc, "CModule.run_script('log(WARNING, %s);', 'test'); print ''" % script, "WARNING]] - ")
def run_script(self, proc, script):
return self.run_command(proc, "CModule.run_script('%s;', 'test'); print 'alldone'" % script, 'alldone')
def start_master(self):
master = self.add_proc( pexpect.spawn('python intensity_master.py %s' % self.master_dir) )
self.assertExpect(master, 'Would you like to create one now')
master.sendline('no')
self.assertEquals(master.expect("Create default user ('test')?", 4), 0)
self.assertEquals(master.expect("Y/n]", 4), 0)
master.sendline('')
self.assertEquals(master.expect('Creating default user...', 4), 0)
self.assertEquals(master.expect('Development server is running at http://127.0.0.1:8080/', 4), 0)
return master
def start_server(self):
server = self.add_proc( pexpect.spawn('sh intensity_server.sh %s' % self.server_dir) )
self.assertEquals(server.expect('recalculating geometry', 4), 0)
self.assertEquals(server.expect('MAP LOADING]] - Success', 4), 0)
# Check for downloaded assets
self.assertTrue(os.path.exists(os.path.join(self.server_dir, 'packages', 'base', 'storming.tar.gz')))
self.assertTrue(os.path.exists(os.path.join(self.server_dir, 'packages', 'base', 'storming', 'map.js')))
self.assertTrue(os.path.exists(os.path.join(self.server_dir, 'packages', 'base', 'storming', 'map.ogz')))
# Read some data, to be sure the map fully loaded, including entities
output = self.eval_script(server, 'getEntity(50).position')
self.assertTrue( Vector3.parse(output).sub(Vector3(347.85, 536.40, 392.10)).magnitude() < 0.03 )
return server
def start_client(self):
client = self.add_proc( pexpect.spawn('sh intensity_client.sh %s' % self.client_dir) )# Use for debugging: , logfile=sys.stdout) )
self.assertExpect(client, 'Starting threaded interactive console in parallel')
# self.assertExpect(client, 'init: mainloop')
time.sleep(0.5) # Might need to increase this
# Log in
self.inject_mouse_click(client, 0.479, 0.434, 1) # Select log in
time.sleep(MENU_DELAY) # Let menu appear
self.inject_mouse_click(client, 0.508, 0.474, 1) # Focus on username
self.inject_key_press(client, [116, 101, 115, 116, 13]) # test
self.inject_mouse_click(client, 0.439, 0.528, 1) # Focus on username
self.inject_key_press(client, [115, 101, 99, 114, 101, 116, 13]) # secret
self.inject_mouse_click(client, 0.210, 0.656, 1) # Do log in
self.assertExpect(client, 'Logged in successfully')
self.inject_mouse_click(client, 0.456, 0.454, 1) # Local connect
# Map load
self.assertExpect(client, 'MAP LOADING]] - Success', 8)
self.ignore_output(client, 'physics for this round.', 3)
self.ignore_output(client, '("start_red")', 3)
# Check for downloaded assets
self.assertTrue(os.path.exists(os.path.join(self.client_dir, 'packages', 'base', 'storming.tar.gz')))
self.assertTrue(os.path.exists(os.path.join(self.client_dir, 'packages', 'base', 'storming', 'map.js')))
self.assertTrue(os.path.exists(os.path.join(self.client_dir, 'packages', 'base', 'storming', 'map.ogz')))
# # Read some data, to be sure the map fully loaded, including entities
output = self.eval_script(client, 'getEntity(50).position')
self.assertTrue( Vector3.parse(output).sub(Vector3(347.85, 536.40, 392.10)).magnitude() < 0.03 )
return client
def start_client_serverrunner(self, mapname):
client = self.add_proc( pexpect.spawn('sh intensity_client.sh %s -config:Components:list:intensity.components.server_runner' % self.client_dir) )# Use for debugging: , logfile=sys.stdout) )
self.assertExpect(client, 'Starting threaded interactive console in parallel')
time.sleep(0.5) # Might need to increase this
self.inject_mouse_click(client, 0.485, 0.510, 1) # Select plugins
time.sleep(MENU_DELAY) # Let menu appear
self.inject_mouse_click(client, 0.621, 0.508, 1) # Focus on map name
self.inject_key_press(client, mapname) # test
self.inject_mouse_click(client, 0.113,0.550, 1) # Start
self.assertExpect(client, 'MAP LOADING]] - Success', 10)
self.ignore_output(client, '("start_red")', 3)
return client
def start_components(self):
return self.start_master(), self.start_server(), self.start_client()
def make_new_value(self, value):
new_value = ''.join([str((int(v) + random.randrange(10)) % 10) for v in value])
new_value = str(int(new_value)) # remove 0's from the beginning
self.assertNotEquals(new_value, value)
return new_value
# Tests '''
def testClient2Server(self):
master, server, client = self.start_components()
original_value = self.eval_script(server, 'getEntity(51).attr2')
self.assertEquals(self.eval_script(client, 'getEntity(51).attr2'), original_value)
new_value = self.make_new_value(original_value)
self.run_script(client, 'getEntity(51).attr2 = %s' % new_value)
time.sleep(0.25) # Let propagate
self.assertEquals(self.eval_script([client, server], 'getEntity(51).attr2'), [new_value, new_value])
def testServer2Client(self):
master, server, client = self.start_components()
original_value = self.eval_script(server, 'getEntity(51).attr2')
self.assertEquals(self.eval_script(client, 'getEntity(51).attr2'), original_value)
new_value = self.make_new_value(original_value)
self.run_script(server, 'getEntity(51).attr2 = %s' % new_value)
time.sleep(0.25) # Let propagate
self.assertEquals(self.eval_script([client, server], 'getEntity(51).attr2'), [new_value, new_value])
def testRestartMap(self):
master, server, client = self.start_components()
original_value = self.eval_script(server, 'getEntity(51).attr2')
self.assertEquals(self.eval_script(client, 'getEntity(51).attr2'), original_value)
new_value = self.make_new_value(original_value)
self.run_script(server, 'getEntity(51).attr2 = %s' % new_value)
time.sleep(0.25) # Let propagate
self.assertEquals(self.eval_script([client, server], 'getEntity(51).attr2'), [new_value, new_value])
self.inject_key_press(client, 27) # escape for menu
self.inject_mouse_click(client, 0.417, 0.500) # restart map
time.sleep(MENU_DELAY) # Let menu appear
self.inject_mouse_click(client, 0.180, 0.605) # we are sure
for proc in [server, client]:
self.assertExpect(proc, 'MAP LOADING]] - Success', 8)
self.run_command([client, server], 'time.sleep(1.0)') # flush messages
self.assertEquals(self.eval_script([client, server], 'getEntity(51).attr2'), [original_value, original_value]) # Old value
def testUploadMap(self):
master, server, client = self.start_components()
original_value = self.eval_script(server, 'getEntity(51).attr2')
self.assertEquals(self.eval_script(client, 'getEntity(51).attr2'), original_value)
new_value = self.make_new_value(original_value)
self.run_script(server, 'getEntity(51).attr2 = %s' % new_value)
time.sleep(0.25) # Let propagate
self.assertEquals(self.eval_script([client, server], 'getEntity(51).attr2'), [new_value, new_value])
self.inject_key_press(client, 27) # escape for menu
self.inject_mouse_click(client, 0.414, 0.550) # upload map
time.sleep(MENU_DELAY) # Let menu appear
self.inject_mouse_click(client, 0.150, 0.592) # we are sure
self.assertExpect(client, 'wrote map file', 4)
for proc in [server, client]:
self.assertExpect(proc, 'MAP LOADING]] - Success', 8)
self.run_command([client, server], 'time.sleep(1.0)') # flush messages
self.assertEquals(self.eval_script([client, server], 'getEntity(51).attr2'), [new_value, new_value]) # New value
# Prevent regressions with state variable values flushing. That is, on the server,
# wrapped C variables should be correctly initialized when assiged to, even before
# the C entity is created (we queue and then flush them when the C entity is ready).
def testSVFlushingRegressions(self):
master, server = self.start_master(), self.start_server()
self.run_script(server, 'deadlyArea = newEntity("DeadlyArea");')
self.assertEquals(self.eval_script(server, 'deadlyArea.attr2'), '-1')
self.assertEquals(self.eval_script(server, 'deadlyArea.collisionRadiusWidth'), '10')
self.assertEquals(self.eval_script(server, 'deadlyArea.collisionRadiusHeight'), '10')
# Ensure smooth movement of other clients
def doTestSmoothMovement(self, fps, max_spread):
master, server, client = self.start_components()
if fps is not None:
self.run_command(client, 'CModule.run_cubescript("maxfps %d")' % fps)
self.run_script(server, 'bot = newNPC("Character");')
bot_id = self.eval_script(server, 'bot.uniqueId')
self.eval_script(server, 'bot.position.x = 512-15')
self.eval_script(server, 'bot.position.y = 512-15')
# Wait for bot to fall to floor
time.sleep(1.0)
pos = Vector3(0, 0, 0)
while Vector3.parse(self.eval_script(server, 'bot.position')).sub(pos).magnitude() > 1:
pos = Vector3.parse(self.eval_script(server, 'bot.position'))
time.sleep(0.25)
time.sleep(0.1)
# Check client got it
self.assertTrue(Vector3.parse(self.eval_script(client, 'getEntity(%s).position' % bot_id)).sub(pos) < 1)
# Move bot and see that client smoothly tracks it
newpos = pos.copy()
move = Vector3(-10, 10, 0)
newpos.add(move)
direction = newpos.copy().sub(pos)
self.run_script(server, 'bot.position = ' + str(newpos))
smoothmove = 0.075 # Sync with sauer XXX
delta = 0.001
# print pos, newpos
start = time.time()
client.delaybeforesend = 0 # We need very responsive pexpect procs here!
history = []
while time.time() - start <= smoothmove*10:
client_pos = Vector3.parse(self.eval_script(client, 'getEntity(%s).position' % bot_id))
# print time.time() - start, client_pos
history.append(client_pos)
time.sleep(delta)
# Validate the history
# Start and finish
# print pos, newpos, history
self.assertTrue(history[0].copy().sub(pos).magnitude() < 1)
self.assertTrue(history[-1].copy().sub(newpos).magnitude() < 1)
# Steps are all in the right direction
for i in range(len(history)-1):
self.assertNotEquals( sign(history[i+1].x - history[i].x), -sign(direction.x) ) # Can be 0,
self.assertNotEquals( sign(history[i+1].y - history[i].y), -sign(direction.y) ) # just not opposite
# Steps are small
for func in [lambda vec: vec.x, lambda vec: vec.y]:
jumps = map(lambda i: abs(func(history[i])-func(history[i+1])), range(len(history)-1))
spread = abs(func(history[0]) - func(history[-1]))
# print jumps, spread, max(jumps)
self.assertTrue(max(jumps) <= spread*max_spread)
def testSmoothMovement30(self):
self.doTestSmoothMovement(30, 0.666)
def testSmoothMovementDefault(self):
self.doTestSmoothMovement(None, 0.35)
# Ensure smooth movement of other clients
def testDeath(self):
master, server, client = self.start_components()
player_id = self.eval_script(client, 'getPlayerEntity().uniqueId')
self.assertEquals(self.eval_script(client, 'getEntity(%s).animation' % player_id), '130')
self.assertEquals(self.eval_script(client, 'getEntity(%s).health' % player_id), '100')
self.run_script(client, 'getEntity(%s).health = 0' % player_id) # Die
time.sleep(1.0)
self.assertEquals(self.eval_script([client, server], 'getEntity(%s).health' % player_id), ['0', '0'])
self.assertEquals(self.eval_script(client, 'getEntity(%s).animation' % player_id), '1')
time.sleep(6.0) # Wait for respawn, and check that completely restored
self.run_command(client, '\n') # Clean the output (comments on missing player start marker)
self.assertEquals(self.eval_script([client, server], 'getEntity(%s).health' % player_id), ['100', '100'])
self.assertEquals(self.eval_script(client, 'getEntity(%s).animation' % player_id), '130')
def testClientServerRunner(self):
# Run storming_test
client = self.start_client_serverrunner([115, 116, 111, 114, 109, 105, 110, 103, 95, 116, 101, 115, 116, 13])
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import sys
import unittest
import mock
from oslo_config import cfg
from st2common.constants.pack import SYSTEM_PACK_NAMES
from st2common.util.sandboxing import get_sandbox_path
from st2common.util.sandboxing import get_sandbox_python_path
from st2common.util.sandboxing import get_sandbox_python_path_for_python_action
from st2common.util.sandboxing import get_sandbox_python_binary_path
from st2common.util.sandboxing import clear_virtualenv_prefix
from st2common.util.sandboxing import get_virtualenv_prefix
from st2common.util.sandboxing import set_virtualenv_prefix
import st2tests.config as tests_config
__all__ = [
'SandboxingUtilsTestCase'
]
class SandboxingUtilsTestCase(unittest.TestCase):
    """Tests for the st2common.util.sandboxing PATH/PYTHONPATH helpers."""

    def setUp(self):
        super(SandboxingUtilsTestCase, self).setUp()
        # Restore PATH and other variables before each test case
        os.environ['PATH'] = self.old_path
        os.environ['PYTHONPATH'] = self.old_python_path
        set_virtualenv_prefix(self.old_virtualenv_prefix)

    @classmethod
    def setUpClass(cls):
        tests_config.parse_args()
        # Store original values so we can restore them in setUp
        cls.old_path = os.environ.get('PATH', '')
        cls.old_python_path = os.environ.get('PYTHONPATH', '')
        cls.old_virtualenv_prefix = get_virtualenv_prefix()

    @classmethod
    def tearDownClass(cls):
        # Put the process environment back exactly as we found it.
        os.environ['PATH'] = cls.old_path
        os.environ['PYTHONPATH'] = cls.old_python_path
        set_virtualenv_prefix(cls.old_virtualenv_prefix)

    def test_get_sandbox_python_binary_path(self):
        # Non-system content pack, should use pack specific virtualenv binary
        result = get_sandbox_python_binary_path(pack='mapack')
        expected = os.path.join(cfg.CONF.system.base_path, 'virtualenvs/mapack/bin/python')
        self.assertEqual(result, expected)
        # System content pack, should use current process (system) python binary
        result = get_sandbox_python_binary_path(pack=SYSTEM_PACK_NAMES[0])
        self.assertEqual(result, sys.executable)

    def test_get_sandbox_path(self):
        # Mock the current PATH value
        os.environ['PATH'] = '/home/path1:/home/path2:/home/path3:'
        virtualenv_path = '/home/venv/test'
        # The virtualenv bin dir is prepended and the trailing ':' is stripped.
        result = get_sandbox_path(virtualenv_path=virtualenv_path)
        self.assertEqual(result, '/home/venv/test/bin/:/home/path1:/home/path2:/home/path3')

    @mock.patch('st2common.util.sandboxing.get_python_lib')
    def test_get_sandbox_python_path(self, mock_get_python_lib):
        # No inheritance
        python_path = get_sandbox_python_path(inherit_from_parent=False,
                                              inherit_parent_virtualenv=False)
        self.assertEqual(python_path, ':')
        # Inherit python path from current process
        # Mock the current process python path
        os.environ['PYTHONPATH'] = ':/data/test1:/data/test2'
        python_path = get_sandbox_python_path(inherit_from_parent=True,
                                              inherit_parent_virtualenv=False)
        self.assertEqual(python_path, ':/data/test1:/data/test2')
        # Inherit from current process and from virtualenv (not running inside virtualenv)
        clear_virtualenv_prefix()
        python_path = get_sandbox_python_path(inherit_from_parent=True,
                                              inherit_parent_virtualenv=False)
        self.assertEqual(python_path, ':/data/test1:/data/test2')
        # Inherit from current process and from virtualenv (running inside virtualenv)
        # NOTE(review): sys.real_prefix is never cleaned up afterwards and
        # leaks into later tests -- confirm this is intended.
        sys.real_prefix = '/usr'
        mock_get_python_lib.return_value = sys.prefix + '/virtualenvtest'
        python_path = get_sandbox_python_path(inherit_from_parent=True,
                                              inherit_parent_virtualenv=True)
        self.assertEqual(python_path, ':/data/test1:/data/test2:%s/virtualenvtest' %
                         (sys.prefix))

    @mock.patch('os.path.isdir', mock.Mock(return_value=True))
    @mock.patch('os.listdir', mock.Mock(return_value=['python2.7']))
    @mock.patch('st2common.util.sandboxing.get_python_lib')
    def test_get_sandbox_python_path_for_python_action_python2_used_for_venv(self,
                                                                             mock_get_python_lib):
        # No inheritance
        python_path = get_sandbox_python_path_for_python_action(pack='dummy_pack',
                                                                inherit_from_parent=False,
                                                                inherit_parent_virtualenv=False)
        self.assertEqual(python_path, ':')
        # NOTE(review): the remaining assertions call get_sandbox_python_path
        # rather than get_sandbox_python_path_for_python_action -- this looks
        # like a copy-paste from the test above and the isdir/listdir mocks go
        # unused; confirm which function was meant to be exercised here.
        # Inherit python path from current process
        # Mock the current process python path
        os.environ['PYTHONPATH'] = ':/data/test1:/data/test2'
        python_path = get_sandbox_python_path(inherit_from_parent=True,
                                              inherit_parent_virtualenv=False)
        self.assertEqual(python_path, ':/data/test1:/data/test2')
        # Inherit from current process and from virtualenv (not running inside virtualenv)
        clear_virtualenv_prefix()
        python_path = get_sandbox_python_path(inherit_from_parent=True,
                                              inherit_parent_virtualenv=False)
        self.assertEqual(python_path, ':/data/test1:/data/test2')
        # Inherit from current process and from virtualenv (running inside virtualenv)
        sys.real_prefix = '/usr'
        mock_get_python_lib.return_value = sys.prefix + '/virtualenvtest'
        python_path = get_sandbox_python_path(inherit_from_parent=True,
                                              inherit_parent_virtualenv=True)
        self.assertEqual(python_path, ':/data/test1:/data/test2:%s/virtualenvtest' %
                         (sys.prefix))
|
t1 = ()  # an empty tuple
t2 = (1, )  # a trailing comma is required when storing a single element
t3 = (1, 2, 3)
t4 = 1, 2, 3  # comma-separated values without parentheses also form a tuple
t4 = "송", "진", "우"
t5 = (1, 2, ("ab", "cd"), ["list1", "list2"])  # like a list, any data type can be stored
print(t1)
print(t2)
print(t3)
print(t4)
print(t5)

# Tuples are the data type to use for values that must not change
song = ("O형", "황인", "남성")
# song[0] = "AB형"  -- tuples do not support modification or deletion
# del song[0]

# Apart from modification and deletion, tuples behave like lists
a = (1, 2, 3, 4, 5)
print(a[0])  # indexing
print(a[:2])  # slicing

# Operations between tuples are possible
a = (1, 2, 3)
b = (4, 5, 6)
c = a + b
print(c)
print(a * 3)

# Exercise:
# Using tuple a, build list b from a's elements with 4 appended, and print it
# b => [1, 2, 3, 4]
# Data types so far:
# int, float, str, bool
# list, tuple
a = (1, 2, 3)
b = list(a)  # convert to a list so the element can be appended
b.append(4)
print(b)
|
# Generated by Django 2.2.13 on 2020-07-03 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: replaces the old ``front`` model with ``Fronts``."""

    dependencies = [
        ('shop', '0012_auto_20200702_1624'),
    ]

    operations = [
        migrations.CreateModel(
            name='Fronts',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('card_title', models.CharField(max_length=20)),
                ('card_text', models.CharField(max_length=100)),
                ('card_image', models.ImageField(upload_to='front')),
            ],
        ),
        migrations.DeleteModel(
            name='front',
        ),
    ]
|
__author__ = 'c.mayo'
|
def global_estimate(estimates):
    """Return (best, average, worst) totals for a collection of estimates.

    Each entry of *estimates* is a (best, worst) pair of time guesses.
    The global best/worst cases are the sums of the individual bests and
    worsts; the average case is the midpoint of the summed totals.
    """
    total_best = sum(e[0] for e in estimates)
    total_worst = sum(e[1] for e in estimates)
    midpoint = sum(sum(e) for e in estimates) / 2
    return (total_best, midpoint, total_worst)
'''
Lately, feature requests have been piling up and you need a way to make global
estimates of the time it would take to implement them all. If you estimate feature
A to take 4 to 6 hours to implement, and feature B to take 2 to 5 hours, then in
the best case it will only take you 6 (4 + 2) hours to implement both features,
and in the worst case it will take you 11 (6 + 5). In the average case,
it will take you 8.5 hours.
To help you streamline the estimation process, write a function that returns a
tuple (JS: array) of the global best case, average case and worst case given a
tuple of tuples (JS: array of arrays) representing best case and worst case guesses.
For example,
estimates = ((1, 2), (3, 4))
global_estimate(estimates) -> (4, 5, 6)
For example,
estimates = [[1, 2], [3, 4]]
globalEstimate(estimates) -> [4, 5, 6]
'''
|
import json
import tornado
from tornado import gen, web
from extensions import TumblrMixin
from handlers.base import BaseHandler
class AuthLoginHandler(BaseHandler, TumblrMixin):
    """Runs the Tumblr OAuth flow for a Google-authenticated user.

    The first visit redirects the browser to Tumblr's authorize page; the
    OAuth callback (detected via the ``oauth_token`` query argument) saves
    the user's blogs as temporary accounts and forwards to the selector.
    """

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self):
        try:
            # Fail fast if the application is not configured for Tumblr OAuth.
            self.require_setting("tumblr_consumer_secret", "Tumblr OAuth")
            user = self.get_gl_user()
            if not user:
                self.render('misc/auth.html', error='User must be logged in with Google')
                return
            gid = user['id']
            redirect_uri = self.get_redirect_url()
            if self.get_argument('oauth_token', None):
                # Callback leg: exchange the OAuth token for user data.
                tr_user = yield self.get_authenticated_user()
                self.logger.debug('AuthLoginHandler, _on_auth: tr_user = [{0}]'.format(tr_user))
                if not tr_user:
                    self.render('misc/auth.html', error='Tumblr authentication failed.')
                    return
                # purge all temp accounts, we now have fresh user data
                self.data.purge_temp_accounts(gid)
                for blog in tr_user['blogs']:
                    blog['avatar'] = 'https://api.tumblr.com/v2/blog/{0}.tumblr.com/avatar'.format(blog['name'])
                    blog['access_token'] = tr_user['access_token']
                    # 'master' marks the blog whose name matches the account name
                    blog['master'] = blog['name'] == tr_user['name']
                    # save account info in .t store
                    self.data.add_temp_account(gid, 'tumblr', blog['name'], json.dumps(blog))
                # redirect to selector
                self.selector_redirect('tumblr')
                return
            else:
                # First leg: send the user to Tumblr's authorize page.
                yield self.authorize_redirect(callback_uri=redirect_uri)
        # always close the popup on errors
        except Exception as e:
            self.logger.error('ERROR: Failed to authenticate with Tumblr, {0}'.format(e))
            self.render('misc/auth.html', error='System error while authenticating with Tumblr.')
            return
class AuthLogoutHandler(BaseHandler, TumblrMixin):
    """Leaves the Tumblr auth flow by redirecting to the site root."""

    def get(self):
        # No server-side teardown is performed here; just redirect away.
        self.redirect('/')
|
#!/usr/bin/env python3
# Simulation of the children's game "Hoot Owl Hoot". The game involves simple
# decision making, which provides an opportunity to simulate and compare
# strategies.
import attr
import pprint
import random
import time
from enum import Enum
# Sentinel board index meaning "moved off the end of the board into the nest".
NEST = 888
# Original board colours for reference, grouped in fives:
# Y=yellow, G=green, O=orange, B=blue, P=purple, R=red.
ORIG_BOARD = "YGOBP RBPRY GBORP YGOBP RGYOB PRYGB ORPYG BORP"
# The same board encoded as Color enum values (Color(n) per space).
BOARD_SPACES = (
    [6, 2, 3, 1, 4, 5, 1, 4, 5, 6,
     2, 1, 3, 5, 4, 6, 2, 3, 1, 4,
     5, 2, 6, 3, 1, 4, 5, 6, 2, 1,
     3, 5, 4, 6, 2, 1, 3, 5, 4]
)
class Color(Enum):
    """Owl/space colours; the values match the codes used in BOARD_SPACES."""
    blue = 1
    green = 2
    orange = 3
    purple = 4
    red = 5
    yellow = 6
@attr.s(frozen=True, repr=False)
class Card(object):
    """A deck card: either a sun card or a coloured movement card."""

    sun = attr.ib()    # True for a sun card (color is then None)
    color = attr.ib()  # Color member for movement cards

    @classmethod
    def create_sun(cls):
        """Build a sun card."""
        return cls(sun=True, color=None)

    @classmethod
    def create_colored(cls, color):
        """Build a movement card of the given colour."""
        return cls(sun=False, color=color)

    def __repr__(self):
        # "S" for a sun card, otherwise the colour's initial.
        return "S" if self.sun else self.color.name[0].upper()
@attr.s
class Hand(object):
    """The set of cards a single player currently holds."""

    cards = attr.ib(init=False, factory=list)

    def add(self, card):
        """Take *card* into the hand."""
        self.cards.append(card)

    def find_sun(self):
        """Return the first sun card held, or None if there is none."""
        for candidate in self.cards:
            if candidate.sun:
                return candidate
        return None

    def remove(self, card):
        """Discard *card* from the hand (must be present)."""
        self.cards.remove(card)
@attr.s
class Deck(object):
    """A shuffled draw pile: 14 sun cards plus six cards of each colour."""

    _cards = attr.ib(init=False)

    @_cards.default
    def _init_cards(self):
        # Assemble the full card set, then shuffle it in place.
        cards = [Card.create_sun() for _ in range(14)]
        for color in Color:
            cards += [Card.create_colored(color) for _ in range(6)]
        random.shuffle(cards)
        return cards

    def draw(self):
        """Pop and return the top card, or None when the deck is empty."""
        return self._cards.pop() if self._cards else None
@attr.s
class Space(object):
    """One board space: its index, its colour, and which owl sits on it."""
    index = attr.ib()
    color = attr.ib()
    # Owl number occupying this space, or None when the space is empty.
    owl = attr.ib(init=False, default=None)
@attr.s
class Game(object):
    """Board state for one game of Hoot Owl Hoot.

    Tracks the owls still on the board, the suns drawn, the number of
    actions taken, and (optionally) a textual trace of each state change.
    """

    owls = attr.ib()                      # owls still on the board
    _capture_trace = attr.ib(default=False)
    _spaces = attr.ib(init=False)
    starting_owls = attr.ib(init=False)   # owl count at game start
    suns = attr.ib(init=False, default=0)
    actions = attr.ib(init=False, default=0)
    _trace_lines = attr.ib(init=False, factory=list)

    @_spaces.default
    def _init_spaces(self):
        # Build the board from BOARD_SPACES and seat the owls on the first
        # spaces (owl 1 on index 5, owl 2 on index 4, ...).
        result = []
        for i, bs in enumerate(BOARD_SPACES):
            result.append(Space(i, Color(bs)))
        for i in range(self.owls):
            owl_num = i + 1
            result[5-i].owl = owl_num
        return result

    @starting_owls.default
    def _init_starting_owls(self):
        return self.owls

    def is_win(self):
        """All owls reached the nest."""
        return self.owls == 0

    def is_loss(self):
        """The 13th sun card ends the game in a loss."""
        return self.suns == 13

    def add_sun(self):
        """Record a drawn sun card as one action."""
        self.actions += 1
        self.suns += 1
        self._trace_current_state()

    @property
    def occupied(self):
        """Indices of spaces currently holding an owl."""
        return [i for i, v in enumerate(self._spaces) if v.owl is not None]

    def _can_move_to(self, idx, color):
        # A space is a legal landing spot if it matches the card colour
        # and is not already occupied.
        color_match = self._spaces[idx].color == color
        is_open = self._spaces[idx].owl is None
        return color_match and is_open

    def _trace_current_state(self):
        # Append one line showing owl positions (grouped in fives),
        # owls nested so far, and suns drawn.
        if self._capture_trace:
            trace_line = ""
            for i in range(len(self._spaces)):
                if i > 0 and i % 5 == 0:
                    trace_line += "|"
                owl = self._spaces[i].owl
                trace_line += " " if owl is None else str(owl)
            in_nest = self.starting_owls - self.owls
            trace_line += f" || N:{in_nest} S:{self.suns}"
            self._trace_lines.append(trace_line)

    def compute_end(self, start, color):
        """Return the landing index for a move from *start* on *color*.

        Scans forward to the next open space of that colour; running off
        the end of the board means the owl flies into the NEST.
        """
        end = start + 1
        while end < len(self._spaces) and not self._can_move_to(end, color):
            end += 1
        return NEST if end == len(self._spaces) else end

    def move_owl(self, start, end):
        """Move the owl at *start* to *end* (or into the nest) as one action."""
        assert start >= 0, "Start too small"
        assert start < len(self._spaces), "Start too big"
        assert end < len(self._spaces) or end == NEST, "End too big"
        assert end > start, "End before start"
        assert self._spaces[start].owl is not None, "Start not occupied"
        assert end == NEST or self._spaces[end].owl is None, "End occupied"
        if end == NEST:
            self.owls -= 1
        else:
            self._spaces[end].owl = self._spaces[start].owl
        self._spaces[start].owl = None
        self.actions += 1
        self._trace_current_state()

    def color_at(self, idx):
        """Colour of the space at *idx*."""
        return self._spaces[idx].color

    def get_trace(self):
        """Return the accumulated trace (header, states, final stats).

        Empty string when tracing was not enabled.
        """
        if not self._capture_trace:
            return ""
        lines = []
        header = ""
        for i in range(len(self._spaces)):
            if i > 0 and i % 5 == 0:
                header += "|"
            header += self._spaces[i].color.name[0].upper()
        stats = f"Won:{self.is_win()}; Loss:{self.is_loss()}; " \
                f"Act:{self.actions}; Sun:{self.suns}; Owls:{self.owls}"
        lines.append(header)
        lines.extend(self._trace_lines)
        lines.append(stats)
        return "\n".join(lines)
@attr.s(frozen=True)
class Result(object):
    """Immutable summary of one finished game."""
    owls = attr.ib()      # number of owls the game started with
    players = attr.ib()   # number of players
    actions = attr.ib()   # total actions taken (moves + suns)
    strategy = attr.ib()  # __name__ of the select() strategy used
    suns = attr.ib()      # suns drawn by game end
    won = attr.ib()       # True when all owls reached the nest
    elapsed = attr.ib()   # wall-clock seconds spent simulating
    trace = attr.ib()     # textual trace ("" unless tracing was enabled)
def play(owls, players, draw, select, trace=False):
    """Simulate one full game and return a Result summary.

    draw() supplies the next card (None when the deck is exhausted);
    select(game, hands, hand_idx, randint) chooses (owl index, card) for
    a turn without a sun card.
    """
    started = time.perf_counter()
    game = Game(owls, capture_trace=trace)

    # Deal three cards to each player, in draw order.
    hands = []
    for _ in range(players):
        dealt = Hand()
        for _ in range(3):
            dealt.add(draw())
        hands.append(dealt)

    turn = 0
    while not (game.is_win() or game.is_loss()):
        current = hands[turn]
        sun_card = current.find_sun()
        if sun_card is None:
            # Strategy picks which owl to move with which card.
            owl_idx, card = select(game, hands, turn, random.randint)
            current.remove(card)
            game.move_owl(owl_idx, game.compute_end(owl_idx, card.color))
        else:
            # A held sun card must be played.
            game.add_sun()
            current.remove(sun_card)
        replacement = draw()
        if replacement is not None:
            current.add(replacement)
        turn = (turn + 1) % len(hands)

    return Result(owls, players, game.actions, select.__name__, game.suns,
                  game.is_win(), time.perf_counter() - started,
                  game.get_trace())
|
##############################################################################
#
# NAME: srmmetrics.py
#
# FACILITY: SAM (Service Availability Monitoring)
#
# COPYRIGHT:
# Copyright (c) 2009, Members of the EGEE Collaboration.
# http://www.eu-egee.org/partners/
# Licensed under the Apache License, Version 2.0.
# http://www.apache.org/licenses/LICENSE-2.0
# This software is provided "as is", without warranties
# or conditions of any kind, either express or implied.
#
# DESCRIPTION:
#
# Nagios SRM metrics.
#
# AUTHORS: Konstantin Skaburskas, CERN
#
# CREATED: 21-Nov-2008
#
# NOTES:
#
# MODIFIED:
# 2009-12-07 : Konstantin Skaburskas
# - using 'gridmon' and 'metrics' packages after merging
# 'gridmonsam' with 'gridmon'
# - metrics implementation class was moved into the module
##############################################################################
"""
Nagios SRM metrics.
Nagios SRM metrics.
Konstantin Skaburskas <konstantin.skaburskas@cern.ch>, CERN
SAM (Service Availability Monitoring)
"""
import os
import sys
import getopt
import time #@UnresolvedImport
import commands
import errno
try:
from gridmon import probe
from gridmon import utils as samutils
from gridmon import gridutils
import lcg_util
import gfal
except ImportError,e:
summary = "UNKNOWN: Error loading modules : %s" % (e)
sys.stdout.write(summary+'\n')
sys.stdout.write(summary+'\nsys.path: %s\n'% str(sys.path))
sys.exit(3)
# Reasonable defaults for timeouts (all in seconds)
LCG_GFAL_BDII_TIMEOUT = 10            # BDII (LDAP) query time limit
LCG_UTIL_TIMEOUT_BDII = LCG_GFAL_BDII_TIMEOUT
LCG_UTIL_TIMEOUT_CONNECT = 10         # TCP connect
LCG_UTIL_TIMEOUT_SENDRECEIVE = 120    # send/receive on an open connection
LCG_UTIL_TIMEOUT_SRM = 180            # SRM operation
class SRMMetrics(probe.MetricGatherer) :
    """A Metric Gatherer specific for SRM.

    Implements Nagios probes that exercise an SRM endpoint end-to-end:
    discover endpoints and storage areas in BDII (GetSURLs), list the
    storage area (LsDir), copy a file in (Put), list it (Ls), obtain
    transport URLs (GetTURLs), copy it back and diff it (Get), and
    delete it (Del).
    """

    # Service version(s)
    svcVers = ['1', '2'] # NOT USED YET
    svcVer = '2'
    # The probe's author name space
    ns = 'org.sam'
    # Timeouts
    _timeouts = {
        'srm_connect' : LCG_UTIL_TIMEOUT_SENDRECEIVE,
        'ldap_timelimit' : LCG_GFAL_BDII_TIMEOUT,
        'LCG_GFAL_BDII_TIMEOUT' : LCG_GFAL_BDII_TIMEOUT,
        'lcg_util' : {
            'CLI': {
                'connect-timeout' : LCG_UTIL_TIMEOUT_CONNECT,
                'sendreceive-timeout': LCG_UTIL_TIMEOUT_SENDRECEIVE,
                'bdii-timeout' : LCG_UTIL_TIMEOUT_BDII,
                'srm-timeout' : LCG_UTIL_TIMEOUT_SRM },
            'API': {
                'connect-timeout' : LCG_UTIL_TIMEOUT_CONNECT}
            }
        }
    # Default top-level BDII to query.
    _ldap_url = "ldap://sam-bdii.cern.ch:2170"
    probeinfo = { 'probeName' : ns+'.SRM-Probe',
                  'probeVersion' : '1.0',
                  'serviceVersion' : '1.*, 2.*'}
    # Metrics' info
    _metrics = {
        'GetSURLs' : {'metricDescription': "Get full SRM endpoints and storage areas from BDII.",
                      'cmdLineOptions' : ['ldap-uri=',
                                          'ldap-timeout='],
                      'cmdLineOptionsReq' : [],
                      'metricChildren' : ['LsDir','Put','Ls','GetTURLs','Get','Del']
                      },
        'LsDir' : {'metricDescription': "List content of VO's top level space area(s) in SRM.",
                   'cmdLineOptions' : ['se-timeout='],
                   'cmdLineOptionsReq' : [],
                   'metricChildren' : [],
                   'critical' : 'Y',
                   'statusMsgs' : {'OK' :'OK: Storage Path directory was listed successfully.',
                                   'WARNING' :'WARNING: Problems listing Storage Path directory.' ,
                                   'CRITICAL':'CRITICAL: Problems listing Storage Path directory.' ,
                                   'UNKNOWN' :'UNKNOWN: Problems listing Storage Path directory.'}
                   },
        'Put' : {'metricDescription': "Copy a local file to the SRM into default space area(s).",
                 'cmdLineOptions' : ['se-timeout='],
                 'cmdLineOptionsReq' : [],
                 'metricChildren' : ['Ls','GetTURLs','Get','Del']
                 },
        'Ls' : {'metricDescription': "List (previously copied) file(s) on the SRM.",
                'cmdLineOptions' : ['se-timeout='],
                'cmdLineOptionsReq' : [],
                'metricChildren' : [],
                'critical' : 'Y',
                'statusMsgs' : {'OK' :'OK: File(s) was listed successfully.',
                                'WARNING' :'WARNING: Problems listing file(s).' ,
                                'CRITICAL':'CRITICAL: Problems listing file(s).' ,
                                'UNKNOWN' :'UNKNOWN: Problems listing file(s).'}
                },
        'GetTURLs' : {'metricDescription': "Get Transport URLs for the file copied to storage.",
                      'cmdLineOptions' : ['se-timeout=',
                                          'ldap-uri=',
                                          'ldap-timeout='],
                      'cmdLineOptionsReq' : [],
                      'metricChildren' : [],
                      'critical' : 'Y'
                      },
        'Get' : {'metricDescription': "Copy given remote file(s) from SRM to a local file.",
                 'cmdLineOptions' : ['se-timeout='],
                 'cmdLineOptionsReq' : [],
                 'metricChildren' : [],
                 'critical' : 'Y'
                 },
        'Del' : {'metricDescription': "Delete given file(s) from SRM.",
                 'cmdLineOptions' : ['se-timeout='],
                 'cmdLineOptionsReq' : [],
                 'metricChildren' : [],
                 'critical' : 'Y'
                 },
        'All' : {'metricDescription': "Run all metrics.",
                 'cmdLineOptions' : ['srmv='],
                 'cmdLineOptionsReq' : [],
                 'metricsOrder' : ['GetSURLs','LsDir','Put','Ls','GetTURLs','Get','Del']
                 },
        }

    def __init__(self, tuples, srmtype):
        """Initialise the gatherer, parse CLI options, set up the workdir.

        tuples  - command line option tuples forwarded to the superclass
        srmtype - service type name handed to MetricGatherer
        """
        probe.MetricGatherer.__init__(self, tuples, srmtype)
        self.usage=""" Metrics specific options:
--srmv <1|2> (Default: %s)
%s
--ldap-uri <URI> Format [ldap://]hostname[:port[/]]
 (Default: %s)
--ldap-timeout <sec> (Default: %i)
%s
--se-timeout <sec> (Default: %i)
!!! NOT IMPLEMENTED YET !!!
--sapath <SAPath,...> Storage Area Path to be tested on SRM. Comma separated
 list of Storage Paths to be tested.
"""%(self.svcVer,
     self.ns+'.SRM-{GetSURLs,GetTURLs}',
     self._ldap_url,
     self._timeouts['ldap_timelimit'],
     self.ns+'.SRM-{LsDir,Put,Ls,GetTURLs,Get,Del}',
     self._timeouts['srm_connect'])
        # TODO: move to super class
        # Need to be parametrized from CLI at runtime
        self.childTimeout = 120 # timeout
        # initiate metrics description
        self.set_metrics(self._metrics)
        # parse command line parameters
        self.parse_cmd_args(tuples)
        # working directory for metrics
        self.make_workdir()
        # LDAP
        self._ldap_base = "o=grid"
        self._ldap_fileEndptSAPath = self.workdir_metric+"/EndpointAndPath"
        # files and patterns
        self._fileTest = self.workdir_metric+'/testFile.txt'
        self._fileTestIn = self.workdir_metric+'/testFileIn.txt'
        self._fileFilesOnSRM = self.workdir_metric+'/FilesOnSRM.txt'
        self._fileSRMPattern = 'testfile-put-%s-%s.txt' # time, uuid
        # lcg_util and GFAL versions
        self.lcg_util_gfal_ver = gridutils.get_lcg_util_gfal_ver()
        # lock file
        self._fileLock = self.workdir_metric+'/lock'
        self._fileLock_timelimit = 5*60
        'timelimit on working directory lock'

    def parse_args(self, opts):
        """Parse metric-specific command line options.

        NOTE(review): the ('--srmv') style parentheses below are plain
        strings, not tuples, so ``o in (...)`` is a substring test rather
        than tuple membership; it works for these exact option names but
        is fragile — confirm before extending.
        """
        for o,v in opts:
            if o in ('--srmv'):
                if v in self.svcVers:
                    self.svcVer = str(v)
                else:
                    errstr = '--srmv must be one of '+\
                        ', '.join([x for x in self.svcVers])+'. '+v+' given.'
                    raise getopt.GetoptError(errstr)
            elif o in ('--ldap-uri'):
                [host, port] = samutils.parse_uri(v)
                if port == None or port == '':
                    port = '2170'
                self._ldap_url = 'ldap://'+host+':'+port
                os.environ['LCG_GFAL_INFOSYS'] = host+':'+port
            elif o in ('--ldap-timeout'):
                self._timeouts['ldap_timelimit'] = int(v)
            elif o in ('--se-timeout'):
                self._timeouts['srm_connect'] = int(v)

    def __workdir_islocked(self):
        """Check if working directory is locked within allowed timelimit.

        A stale lock (older than _fileLock_timelimit) is removed.
        """
        if not os.path.exists(self._fileLock):
            return False
        else:
            delta = time.time() - os.stat(self._fileLock).st_ctime
            if delta >= self._fileLock_timelimit:
                os.unlink(self._fileLock)
                return False
            else:
                return True

    def __workdir_lock(self):
        """Lock working directory.

        Raises IOError when a valid lock is already held.
        """
        if self.__workdir_islocked():
            raise IOError('Working directory is locked: %s' %
                          self.workdir_metric)
        file(self._fileLock, 'w')

    def __workdir_unlock(self):
        """Unlock working directory (best effort; errors are ignored)."""
        try: os.unlink(self._fileLock)
        except Exception: pass

    def __query_bdii(self, ldap_filter, ldap_attrlist, ldap_url=''):
        'Local wrapper for gridutils.query_bdii()'
        ldap_url = ldap_url or self._ldap_url
        try:
            tl = self._timeouts['ldap_timelimit']
        except KeyError:
            tl = None
        self.printd('Query BDII.')
        self.printd('''Parameters:
 ldap_url: %s
 ldap_timelimit: %i
 ldap_filter: %s
 ldap_attrlist: %s'''% (ldap_url, tl, ldap_filter, ldap_attrlist))
        self.print_time()
        self.printd('Querying BDII %s' % ldap_url)
        rc, qres = gridutils.query_bdii(ldap_filter, ldap_attrlist,
                                        ldap_url=ldap_url,
                                        ldap_timelimit=tl)
        self.print_time()
        return rc, qres

    def metricGetSURLs(self):
        """Get full SRM endpoint(s) and storage areas from BDII.

        Writes the discovered "<endpoint>?SFN=<path>" URLs to
        _ldap_fileEndptSAPath for the child metrics to consume, and takes
        the working-directory lock released later by metricDel.
        """
        try:
            self.__workdir_lock()
        except Exception, e:
            self.printd('Failed to lock. %s' % str(e))
            return 'UNKNOWN', 'UNKNOWN: Failed to lock working directory.'
        ldap_filter = "(|(&(GlueChunkKey=GlueSEUniqueID=%s)(|(GlueSAAccessControlBaseRule=%s)(GlueSAAccessControlBaseRule=VO:%s)))(&(GlueChunkKey=GlueSEUniqueID=%s)(|(GlueVOInfoAccessControlBaseRule=%s)(GlueVOInfoAccessControlBaseRule=VO:%s))) (&(GlueServiceUniqueID=*://%s*)(GlueServiceVersion=%s.*)(GlueServiceType=srm*)))" % (
                            self.hostName,self.voName,self.voName,
                            self.hostName,self.voName,self.voName,
                            self.hostName,self.svcVer)
        ldap_attrlist = ['GlueServiceEndpoint', 'GlueSAPath', 'GlueVOInfoPath']
        rc, qres = self.__query_bdii(ldap_filter, ldap_attrlist,
                                     self._ldap_url)
        if not rc:
            if qres[0] == 0: # empty set
                sts = 'CRITICAL'
            else: # all other problems
                sts = 'UNKNOWN'
            self.printd(qres[2])
            return (sts, qres[1])
        # Collect unique values per attribute over all returned entries.
        res = {}
        for k in ldap_attrlist: res[k] = []
        for entry in qres:
            for attr in res.keys():
                try:
                    for val in entry[1][attr]:
                        if val not in res[attr]:
                            res[attr].append(val)
                except KeyError: pass
        # GlueServiceEndpoint is not published
        k = 'GlueServiceEndpoint'
        if not res[k]:
            return ('CRITICAL',
                    "%s is not published for %s in %s" % \
                    (k, self.hostName, self._ldap_url))
        elif len(res[k]) > 1:
            return ('CRITICAL',
                    "More than one SRMv"+self.svcVer+" "+\
                    k+" is published for "+self.hostName+": "+', '.join(res[k]))
        else:
            endpoint = res[k][0]
            self.printd('GlueServiceEndpoint: %s' % endpoint)
        # GlueVOInfoPath takes precedence
        # Ref: "Usage of Glue Schema v1.3 for WLCG Installed Capacity
        # information" v 1.9, Date: 03/02/2009
        if res['GlueVOInfoPath']:
            storpaths = res['GlueVOInfoPath']
            self.printd('GlueVOInfoPath: %s' % ', '.join(storpaths))
        elif res['GlueSAPath']:
            storpaths = res['GlueSAPath']
            self.printd('GlueSAPath: %s' % ', '.join(storpaths))
        else:
            # GlueSAPath or GlueVOInfoPath is not published
            return ('CRITICAL',
                    "GlueVOInfoPath or GlueSAPath not published for %s in %s" % \
                    (res['GlueServiceEndpoint'][0], self._ldap_url))
        eps = [ endpoint.replace('httpg','srm',1)+'?SFN='+sp+"\n" for sp in storpaths]
        self.printd('SRM endpoint(s) to test:')
        self.printd('\n'.join(eps).strip('\n'))
        self.printd('Saving endpoints to %s' % self._ldap_fileEndptSAPath, v=2)
        try:
            fp = open(self._ldap_fileEndptSAPath, "w")
            for ep in eps:
                fp.write(ep)
            fp.close()
        except IOError, e:
            try:
                os.unlink(self._ldap_fileEndptSAPath)
            except StandardError: pass
            return ('UNKNOWN', 'IOError: %s' % str(e))
        return ('OK', "Got SRM endpoint(s) and Storage Path(s) from BDII")

    def metricLsDir(self):
        "List content of VO's top level space area(s) in SRM using gfal_ls()."
        status = 'OK'
        summary = ''
        self.printd(self.lcg_util_gfal_ver)
        # Endpoints were written earlier by metricGetSURLs.
        srms = []
        try:
            for srm in open(self._ldap_fileEndptSAPath, 'r'):
                srms.append(srm.rstrip('\n'))
            if not srms:
                return ('UNKNOWN', 'No SRM endpoints found in %s' %
                        self._ldap_fileEndptSAPath)
        except IOError, e:
            self.printd('ERROR: %s' % str(e))
            return ('UNKNOWN', 'Error opening local file.')
        req = {'surls' : srms,
               'defaultsetype' : 'srmv'+self.svcVer,
               'setype' : 'srmv'+self.svcVer,
               'timeout' : self._timeouts['srm_connect'],
               'srmv2_lslevels' : 0,
               'no_bdii_check' : 1
               }
        self.printd('Using gfal_ls().')
        self.printd('Parameters:\n%s' % '\n'.join(
            [' %s: %s' % (x,str(y)) for x,y in req.items()]))
        errmsg = ''
        try:
            (rc, gfalobj, errmsg) = gfal.gfal_init(req)
        except MemoryError, e:
            try: gfal.gfal_internal_free(gfalobj)
            except StandardError: pass
            summary = 'error initialising GFAL: %s' % str(e)
            self.printd('ERROR: %s' % summary)
            return ('UNKNOWN', summary)
        else:
            if rc != 0:
                summary = 'problem initialising GFAL: %s' % errmsg
                self.printd('ERROR: %s' % summary)
                return ('UNKNOWN', summary)
        self.print_time()
        self.printd('Listing storage url(s).')
        try:
            (rc, gfalobj, errmsg) = gfal.gfal_ls(gfalobj)
        except StandardError:
            try: gfal.gfal_internal_free(gfalobj)
            except StandardError: pass
            return ('UNKNOWN', 'problem invoking gfal_ls(): %s' % errmsg)
        else:
            self.print_time()
            if rc != 0:
                try: gfal.gfal_internal_free(gfalobj)
                except StandardError: pass
                em = probe.ErrorsMatching(self.errorDBFile, self.errorTopics)
                er = em.match(errmsg)
                summary = 'problem listing Storage Path(s).'
                if er:
                    if status != 'CRITICAL':
                        status = er[0][2]
                    summary += ' [ErrDB:%s]' % str(er)
                else:
                    status = 'CRITICAL'
                self.printd('ERROR: %s' % errmsg)
                return (status, summary)
        try:
            (rc, gfalobj, gfalstatuses) = gfal.gfal_get_results(gfalobj)
        except StandardError:
            try: gfal.gfal_internal_free(gfalobj)
            except StandardError: pass
            raise
        else:
            # Accumulate a per-SURL ok/failure summary; any failure can
            # escalate the overall status (CRITICAL wins).
            summary = ''
            for st in gfalstatuses:
                summary += 'Storage Path[%s]' % st['surl']
                self.printd('Storage Path[%s]' % st['surl'], cr=False)
                if st['status'] != 0:
                    em = probe.ErrorsMatching(self.errorDBFile, self.errorTopics)
                    er = em.match(st['explanation'])
                    if er:
                        if status != 'CRITICAL':
                            status = er[0][2]
                        summary += '-%s [ErrDB:%s];' % (status.lower(), str(er))
                    else:
                        status = 'CRITICAL'
                        summary += '-%s;' % status.lower()
                    self.printd('-%s;\nERROR: %s\n' % (status.lower(), st['explanation']))
                else:
                    summary += '-ok;'
                    self.printd('-ok;')
        try: gfal.gfal_internal_free(gfalobj)
        except StandardError: pass
        return (status, summary)

    def metricPut(self):
        "Copy a local file to the SRM into default space area(s)."
        self.printd(self.lcg_util_gfal_ver)
        # generate source file
        try:
            src_file = self._fileTest
            fp = open(src_file, "w")
            for s in "1234567890": fp.write(s+'\n')
            fp.close()
            # multiple 'SAPath's are possible
            dest_files = []
            fn = self._fileSRMPattern % (str(int(time.time())),
                                         samutils.uuidstr())
            for srmendpt in open(self._ldap_fileEndptSAPath):
                dest_files.append(srmendpt.rstrip('\n')+'/'+fn)
            if not dest_files:
                return ('UNKNOWN', 'No SRM endpoints found in %s' %
                        self._ldap_fileEndptSAPath)
            # Remember the remote names for Ls/GetTURLs/Get/Del.
            fp = open(self._fileFilesOnSRM, "w")
            for dfile in dest_files:
                fp.write(dfile+'\n')
            fp.close()
        except IOError, e:
            self.printd('ERROR: %s' % str(e))
            return ('UNKNOWN', 'Error opening local file.')
        self.printd('Copy file using lcg_cp3().')
        # bug in lcg_util: https://gus.fzk.de/ws/ticket_info.php?ticket=39926
        # SRM types: string to integer mapping
        #  TYPE_NONE -> 0
        #  TYPE_SRM -> 1
        #  TYPE_SRMv2 -> 2
        #  TYPE_SE -> 3
        defaulttype = int(self.svcVer)
        srctype = 0
        dsttype = defaulttype
        nobdii = 1
        vo = self.voName
        nbstreams = 1
        conf_file = ''
        insecure = 0
        verbose = 0 # if self.verbosity > 0: verbose = 1 # when API is fixed
        timeout = self._timeouts['srm_connect']
        src_spacetokendesc = ''
        dest_spacetokendesc = ''
        self.printd('''Parameters:
 defaulttype: %i
 srctype: %i
 dsttype: %i
 nobdi: %i
 vo: %s
 nbstreams: %i
 conf_file: %s
 insecure: %i
 verbose: %i
 timeout: %i
 src_spacetokendesc: %s
 dest_spacetokendesc: %s''' % (defaulttype, srctype,
                               dsttype, nobdii, vo, nbstreams, conf_file or '-',
                               insecure, verbose, timeout,
                               src_spacetokendesc or '-', dest_spacetokendesc or '-'))
        errmsg = ''
        stMsg = 'File was%s copied to SRM.'
        for dest_file in dest_files:
            self.print_time()
            self.printd('Destination: %s' % dest_file)
            try:
                rc, errmsg = \
                    lcg_util.lcg_cp3(src_file, dest_file, defaulttype, srctype,
                                     dsttype, nobdii, vo, nbstreams, conf_file,
                                     insecure, verbose, timeout,
                                     src_spacetokendesc, dest_spacetokendesc)
            except AttributeError, e:
                status = 'UNKNOWN'
                summary = stMsg % ' NOT'
                self.printd('ERROR: %s %s' % (str(e), sys.exc_info()[0]))
            else:
                if rc != 0:
                    em = probe.ErrorsMatching(self.errorDBFile, self.errorTopics)
                    er = em.match(errmsg)
                    if er:
                        status = er[0][2]
                        summary = stMsg % (' NOT')+' [ErrDB:%s]' % str(er)
                    else:
                        status = 'CRITICAL'
                        summary = stMsg % ' NOT'
                    self.printd('ERROR: %s' % errmsg)
                else:
                    status = 'OK'
                    summary = stMsg % ''
            self.print_time()
        return (status, summary)

    def metricLs(self):
        "List (previously copied) file(s) on the SRM."
        self.printd(self.lcg_util_gfal_ver)
        status = 'OK'
        srms = []
        try:
            for sfile in open(self._fileFilesOnSRM, 'r'):
                srms.append(sfile.rstrip('\n'))
        except IOError, e:
            self.printd('ERROR: %s' % str(e))
            return ('UNKNOWN', 'Error opening local file.')
        req = {'surls' : srms,
               'defaultsetype' : 'srmv'+self.svcVer,
               'setype' : 'srmv'+self.svcVer,
               'no_bdii_check' : 1,
               'timeout' : self._timeouts['srm_connect'],
               'srmv2_lslevels' : 0
               }
        self.printd('Using gfal_ls().')
        self.printd('Parameters:\n%s' % '\n'.join(
            [' %s: %s' % (x,str(y)) for x,y in req.items()]))
        errmsg = ''
        try:
            (rc, gfalobj, errmsg) = gfal.gfal_init(req)
        except MemoryError, e:
            try: gfal.gfal_internal_free(gfalobj)
            except StandardError: pass
            summary = 'error initialising GFAL: %s' % str(e)
            self.printd('ERROR: %s' % summary)
            return ('UNKNOWN', summary)
        else:
            if rc != 0:
                summary = 'problem initialising GFAL: %s' % errmsg
                self.printd('ERROR: %s' % summary)
                return ('UNKNOWN', summary)
        self.print_time()
        self.printd('Listing file(s).')
        errmsg = ''
        try:
            (rc, gfalobj, errmsg) = gfal.gfal_ls(gfalobj)
        except StandardError:
            try: gfal.gfal_internal_free(gfalobj)
            except StandardError: pass
            return ('UNKNOWN', 'problem invoking gfal_ls(): %s' % errmsg)
        else:
            self.print_time()
            if rc != 0:
                try: gfal.gfal_internal_free(gfalobj)
                except StandardError: pass
                em = probe.ErrorsMatching(self.errorDBFile, self.errorTopics)
                er = em.match(errmsg)
                summary = 'problem listing file(s).'
                if er:
                    if status != 'CRITICAL':
                        status = er[0][2]
                    summary += ' [ErrDB:%s]' % str(er)
                else:
                    status = 'CRITICAL'
                self.printd('ERROR: %s' % errmsg)
                return (status, summary)
        try:
            (rc, gfalobj, gfalstatuses) = gfal.gfal_get_results(gfalobj)
        except StandardError:
            try: gfal.gfal_internal_free(gfalobj)
            except StandardError: pass
            raise
        else:
            # Per-SURL summary; any failure can escalate the overall status.
            summary = ''
            for st in gfalstatuses:
                summary += 'listing [%s]' % st['surl']
                self.printd('listing [%s]' % st['surl'], cr=False)
                if st['status'] != 0:
                    em = probe.ErrorsMatching(self.errorDBFile, self.errorTopics)
                    er = em.match(st['explanation'])
                    if er:
                        if status != 'CRITICAL':
                            status = er[0][2]
                        summary += '-%s [ErrDB:%s];' % (status.lower(), str(er))
                    else:
                        status = 'CRITICAL'
                        summary += '-%s;' % status.lower()
                    self.printd('-%s;\nERROR: %s\n' % (status.lower(), st['explanation']))
                else:
                    summary += '-ok;'
                    self.printd('-ok;')
        try: gfal.gfal_internal_free(gfalobj)
        except StandardError: pass
        return (status, summary)

    def metricGetTURLs(self):
        "Get Transport URLs for the file copied to storage."
        self.printd(self.lcg_util_gfal_ver)
        # discover transport protocols
        ldap_filter = "(&(objectclass=GlueSEAccessProtocol)"+\
                      "(GlueChunkKey=GlueSEUniqueID=%s))" % self.hostName
        ldap_attrlist = ['GlueSEAccessProtocolType']
        rc, qres = self.__query_bdii(ldap_filter, ldap_attrlist,
                                     self._ldap_url)
        if not rc:
            if qres[0] == 0: # empty set
                sts = 'WARNING'
            else: # all other problems
                sts = 'UNKNOWN'
            self.printd(qres[2])
            return (sts, qres[1])
        protos = []
        for e in qres:
            if e[1]['GlueSEAccessProtocolType'][0] not in protos:
                protos.append(e[1]['GlueSEAccessProtocolType'][0])
        if not protos:
            return ('WARNING', "No access protocol types for %s published in %s" % \
                    (self.hostName, self._ldap_url))
        self.printd('Discovered GlueSEAccessProtocolType: %s' % ', '.join(protos))
        src_files = []
        try:
            for sfile in open(self._fileFilesOnSRM, 'r'):
                src_files.append(sfile.rstrip('\n'))
        except IOError, e:
            self.printd('ERROR: %s' % str(e))
            return ('UNKNOWN', 'Error opening local file.')
        # lcg-gt $LCG_UTIL_TIMEOUT -b -D srmv2 -T srmv2 ${SURL} ${prot}
        defaulttype = 'srmv2'
        setype = 'srmv2'
        timeouts = ''
        for k,v in self._timeouts['lcg_util']['CLI'].items():
            timeouts += '--%s %i ' % (k, v)
        self.printd('Using lcg-gt CLI.')
        _cmd = 'lcg-gt %s -b -D %s -T %s %s %s' % \
               (timeouts, defaulttype, setype, '%s', '%s')
        self.printd('Command:\n%s' % _cmd % ('<SURL>', '<proto>') )
        ok = []; nok = []
        status = 'OK'
        # Try every discovered protocol against every stored SURL.
        for src_file in src_files:
            self.printd('=====\nSURL: %s\n-----' % src_file)
            for proto in protos:
                self.print_time()
                errmsg = ''
                try:
                    cmd = _cmd % (src_file, proto)
                    rc, errmsg = commands.getstatusoutput(cmd)
                    rc = os.WEXITSTATUS(rc)
                except Exception, e:
                    status = 'UNKNOWN'
                    self.printd('ERROR: %s\n%s' % (errmsg, str(e)))
                else:
                    if rc != 0:
                        if not proto in nok:
                            nok.append(proto)
                        self.printd('proto: %s - FAILED' % proto)
                        self.printd('error: %s' % errmsg)
                        em = probe.ErrorsMatching(self.errorDBFile, self.errorTopics)
                        er = em.match(errmsg)
                        if er:
                            status = er[0][2]
                        else:
                            status = 'CRITICAL'
                    else:
                        if not proto in ok:
                            ok.append(proto)
                        self.printd('proto: %s - OK' % proto)
                        if not samutils.to_retcode(status) > samutils.to_retcode('OK'):
                            status = 'OK'
                self.print_time()
            self.printd('-----')
        summary = 'protocols OK-[%s]' % ', '.join([x for x in ok])
        if nok:
            summary += ', FAILED-[%s]' % ', '.join([x for x in nok])
        return (status, summary)

    def metricGet(self):
        "Copy given remote file(s) from SRM to a local file."
        self.printd(self.lcg_util_gfal_ver)
        # multiple 'Storage Path's are possible
        src_files = []
        try:
            for sfile in open(self._fileFilesOnSRM, 'r'):
                src_files.append(sfile.rstrip('\n'))
        except IOError, e:
            self.printd('ERROR: %s' % str(e))
            return ('UNKNOWN', 'Error opening local file.')
        dest_file = 'file:'+self._fileTestIn
        self.printd('Get file using lcg_cp3().')
        # bug in lcg_util: https://gus.fzk.de/ws/ticket_info.php?ticket=39926
        # SRM types string to integer mapping
        #  TYPE_NONE -> 0
        #  TYPE_SRM -> 1
        #  TYPE_SRMv2 -> 2
        #  TYPE_SE -> 3
        defaulttype = int(self.svcVer)
        srctype = defaulttype
        dsttype = 0
        nobdii = 1
        vo = self.voName
        nbstreams = 1
        conf_file = ''
        insecure = 0
        verbose = 0 # if self.verbosity > 0: verbose = 1 # when API is fixed
        timeout = self._timeouts['srm_connect']
        src_spacetokendesc = ''
        dest_spacetokendesc = ''
        self.printd('''Parameters:
 defaulttype: %i
 srctype: %i
 dsttype: %i
 nobdi: %i
 vo: %s
 nbstreams: %i
 conf_file: %s
 insecure: %i
 verbose: %i
 timeout: %i
 src_spacetokendesc: %s
 dest_spacetokendesc: %s''' % (defaulttype, srctype,
                               dsttype, nobdii, vo, nbstreams, conf_file or '-',
                               insecure, verbose, timeout,
                               src_spacetokendesc or '-', dest_spacetokendesc or '-'))
        stMsg = 'File was%s copied from SRM.'
        for src_file in src_files:
            self.print_time()
            self.printd('Source: %s' % src_file)
            errmsg = ''
            try:
                rc, errmsg = \
                    lcg_util.lcg_cp3(src_file, dest_file, defaulttype, srctype,
                                     dsttype, nobdii, vo, nbstreams, conf_file,
                                     insecure, verbose, timeout,
                                     src_spacetokendesc, dest_spacetokendesc);
            except Exception, e:
                status = 'UNKNOWN'
                summary = stMsg % ' NOT'
                self.printd('ERROR: %s\n%s' % (errmsg, str(e)))
            else:
                if rc != 0:
                    em = probe.ErrorsMatching(self.errorDBFile, self.errorTopics)
                    er = em.match(errmsg)
                    if er:
                        status = er[0][2]
                        summary = stMsg % (' NOT')+'[ErrDB:%s]' % str(er)
                    else:
                        status = 'CRITICAL'
                        summary = stMsg % ' NOT'
                    self.printd('ERROR: %s' % errmsg)
                else:
                    # Verify the round trip by diffing against the original.
                    cmd = '`which diff` %s %s' % (self._fileTest, self._fileTestIn)
                    res = commands.getstatusoutput(cmd)
                    if res[0] == 0:
                        status = 'OK'
                        summary = stMsg % ('')+' Diff successful.'
                    elif res[0] == 256: # files differ
                        status = 'CRITICAL'
                        summary = stMsg % ('')+' Files differ!'
                        self.printd('diff ERROR: %s' % res[1])
                    else:
                        status = 'UNKNOWN'
                        summary = stMsg % ''+' Unknown problem when comparing files!'
                        self.printd('diff ERROR: %s' % res[1])
            self.print_time()
        return(status, summary)

    def metricDel(self):
        "Delete given file(s) from SRM."
        self.printd(self.lcg_util_gfal_ver)
        # TODO: - cleanup of the metric's working directory
        #         (this may go to metricAll() in the superclass)
        # multiple Storage Paths are possible
        src_files = []
        try:
            for sfile in open(self._fileFilesOnSRM, 'r'):
                src_files.append(sfile.rstrip('\n'))
            if not src_files:
                # NOTE(review): "depete" is a typo for "delete" in this
                # runtime message; left untouched here.
                return ('UNKNOWN', 'No files to depete from SRM found in %s' %
                        self._fileFilesOnSRM)
        except IOError, e:
            self.printd('ERROR: %s' % str(e))
            return ('UNKNOWN', 'Error opening local file.')
        # bug in lcg_util: https://gus.fzk.de/ws/ticket_info.php?ticket=39926
        # SRM types string to integer mapping
        #  TYPE_NONE -> 0
        #  TYPE_SRM -> 1
        #  TYPE_SRMv2 -> 2
        #  TYPE_SE -> 3
        defaulttype = int(self.svcVer)
        setype = defaulttype
        nobdii = 1
        nolfc = 1
        aflag = 0
        se = ''
        vo = self.voName
        conf_file = ''
        insecure = 0
        verbose = 0 # if self.verbosity > 0: verbose = 1 # when API is fixed
        timeout = self._timeouts['srm_connect']
        self.printd('Using lcg_del4().')
        self.printd('''Parameters:
 defaulttype: %i
 setype: %i
 nobdii: %i
 nolfc: %i
 aflag: %i
 se: %s
 vo: %s
 conf_file: %s
 insecure: %i
 verbose: %i
 timeout: %i''' % (defaulttype, setype, nobdii, nolfc, aflag,
                   se or '-', vo, conf_file or '-', insecure,
                   verbose, timeout))
        stMsg = 'File was%s deleted from SRM.'
        for src_file in src_files:
            errmsg = ''
            self.print_time()
            self.printd('Deleting: %s' % src_file)
            try:
                rc, errmsg = \
                    lcg_util.lcg_del4(src_file, defaulttype, setype, nobdii, nolfc, aflag,
                                      se, vo, conf_file, insecure, verbose, timeout);
            except Exception, e:
                status = 'UNKNOWN'
                summary = stMsg % ' NOT'
                self.printd('ERROR: %s\n%s' % (errmsg, str(e)))
            else:
                if rc != 0:
                    em = probe.ErrorsMatching(self.errorDBFile, self.errorTopics)
                    er = em.match(errmsg)
                    if er:
                        status = er[0][2]
                        summary = stMsg % (' NOT')+' [ErrDB:%s]' % str(er)
                    else:
                        status = 'CRITICAL'
                        summary = stMsg % ' NOT'
                    self.printd('ERROR: %s' % errmsg)
                else:
                    status = 'OK'
                    summary = stMsg % ''
            self.print_time()
        # Release the lock taken by metricGetSURLs.
        self.__workdir_unlock()
        return(status, summary)
|
from spack import *
import glob
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class Geant4G4ndl(Package):
    """Spack package for the Geant4 G4NDL neutron data library.

    Installation simply copies the unpacked source tree into
    ``<prefix>/share/data/<dirname>``.
    """
    url = "http://cmsrep.cern.ch/cmssw/repos/cms/SOURCES/slc7_amd64_gcc700/external/geant4-G4NDL/4.5/G4NDL.4.5.tar.gz"

    version('4.5', 'fd29c45fe2de432f1f67232707b654c0')

    def install(self, spec, prefix):
        # Create share/data and copy the whole extracted tree under it.
        data_root = join_path(prefix.share, 'data')
        mkdirp(data_root)
        target = join_path(data_root, os.path.basename(self.stage.source_path))
        install_tree(self.stage.source_path, target)

    def url_for_version(self, version):
        """Handle version string."""
        base = "http://cmsrep.cern.ch/cmssw/repos/cms/SOURCES/slc7_amd64_gcc700/external/geant4-G4NDL/%s/G4NDL.%s.tar.gz"
        return base % (version, version)
|
#!/usr/bin/env python
import rospy
from week2.srv import velocity, velocityResponse
def handle_rad_req(req):
    """Service callback: angular velocity = linear speed (1 m/s) / radius.

    Uses float division (1.0) so the result is correct even if the request
    radius arrives as an integer (plain 1/r truncates under Python 2).
    """
    return velocityResponse(1.0 / req.radius)
def return_rad():
    """Start the node, register the service and block serving requests."""
    rospy.init_node('ang_vel_service_node')
    service = rospy.Service('compute_ang_vel', velocity, handle_rad_req)
    rospy.loginfo('Available for computing Angular Velocity')
    rospy.spin()
# Run the service node when executed as a script.
if __name__ == "__main__":
    return_rad()
import re
from typing import List
import requests
from nio import MatrixRoom
import aiosqlite
from dors import command_hook, HookMessage, Jenny, startup_hook
@startup_hook()
async def __setup_db(bot: Jenny):
    """On startup, create the balance table if it does not exist yet."""
    async with aiosqlite.connect("./balance.db") as db:
        schema = ("CREATE TABLE IF NOT EXISTS balance (id INTEGER PRIMARY KEY AUTOINCREMENT, "
                  "username VARCHAR(255), balance REAL)")
        await db.execute(schema)
        await db.commit()
# Utility functions for external use
async def transfer(user_from: str, user_to: str, amount: float) -> bool:
    """Transfers currency from one user to the other.

    Returns False (and changes nothing) when user_from has no row or not
    enough balance; otherwise debits the sender, credits the receiver
    (creating the row if needed) and returns True.
    """
    amount = round(amount, 8)
    async with aiosqlite.connect("./balance.db") as db:
        db.row_factory = aiosqlite.Row
        async with db.execute("SELECT * FROM balance WHERE username = ?", [user_from]) as cursor:
            sender = await cursor.fetchone()
        if sender is None or sender['balance'] < amount:
            return False
        await _create_if_not_exists(db, user_to)
        # Debit then credit inside the same connection, committed together.
        await db.execute("UPDATE balance SET balance = balance - ? WHERE username = ?", [amount, user_from])
        await db.execute("UPDATE balance SET balance = balance + ? WHERE username = ?", [amount, user_to])
        await db.commit()
    return True
async def get_balance(user: str) -> int:
    """Return the user's balance rounded to a whole number.

    Creates a zero-balance row first if the user is unknown.
    """
    async with aiosqlite.connect("./balance.db") as db:
        db.row_factory = aiosqlite.Row
        await _create_if_not_exists(db, user)
        async with db.execute("SELECT * FROM balance WHERE username = ?", [user]) as cursor:
            record = await cursor.fetchone()
        return int(round(record['balance'], 0))
async def give(user: str, amount: float):
    """Give money to a single user (thin wrapper over bulk_give)."""
    return await bulk_give([user], amount)
async def take(user: str, amount: float):
    """Take money from a single user (thin wrapper over bulk_take)."""
    return await bulk_take([user], amount)
async def bulk_take(user_list: List[str], amount: float):
    """Subtract `amount` from every listed user.

    Note: unlike transfer(), this does not check balances, so accounts
    may go negative. Raises RuntimeError for a negative amount.
    """
    amount = round(amount, 8)
    if amount < 0:
        raise RuntimeError("Negative values are not allowed.")
    async with aiosqlite.connect("./balance.db") as db:
        db.row_factory = aiosqlite.Row
        for name in user_list:
            await _create_if_not_exists(db, name)
            # Apply the debit for this user.
            await db.execute("UPDATE balance SET balance = balance - ? WHERE username = ?", [amount, name])
        await db.commit()
    return True
async def bulk_give(user_list: List[str], amount: float):
    """Add `amount` to every listed user, creating missing rows first.

    Raises RuntimeError for a negative amount.
    """
    amount = round(amount, 8)
    if amount < 0:
        raise RuntimeError("Negative values are not allowed.")
    async with aiosqlite.connect("./balance.db") as db:
        db.row_factory = aiosqlite.Row
        for name in user_list:
            await _create_if_not_exists(db, name)
            # Apply the credit for this user.
            await db.execute("UPDATE balance SET balance = balance + ? WHERE username = ?", [amount, name])
        await db.commit()
    return True
# Internal helpers
async def _create_if_not_exists(db, user: str):
    """Insert a zero-balance row for `user` unless one already exists."""
    async with db.execute("SELECT * FROM balance WHERE username = ?", [user]) as cursor:
        existing = await cursor.fetchone()
    if existing is None:
        await db.execute("INSERT INTO balance (username, balance) VALUES (?, 0)", [user])
        await db.commit()
# Standard commands
@command_hook(['balance', 'bal', 'b'])
async def __cmd_balance(bot: Jenny, room: MatrixRoom, event: HookMessage):
    # Optional first argument selects the fiat currency for the conversion.
    currency = 'USD'
    if event.args:
        currency = event.args[0].upper()
    ubalance = await get_balance(event.sender)
    # NOTE(review): requests.get is a *blocking* call inside an async
    # handler; consider an async HTTP client (e.g. aiohttp) so the event
    # loop is not stalled while the price API responds.
    info = requests.get(f"https://min-api.cryptocompare.com/data/price?fsym=DOGE&tsyms={currency}").json()
    extra = ''
    # Crude error check: the API embeds an 'Error' marker in the payload on failure.
    if 'Error' not in str(info):
        extra = f" (\002{round(float(info[currency]) * ubalance, 8)}\002 {currency})"
    await bot.reply(f"Your balance: \002{round(ubalance, 8)}\002 DOGE{extra}.")
@command_hook(['tip'])
async def __cmd_tip(bot: Jenny, room: MatrixRoom, event: HookMessage):
    """Transfer a whole-number amount of DOGE from the sender to a user.

    Accepts either ".tip <amount> <user>" or ".tip <user> <amount>".
    """
    if not event.args:
        return await bot.say("Usage: .tip <amount> <user>")
    try:
        amount_f = float(event.args[0])
        amount = int(event.args[0])
        user = " ".join(event.args[1:])
    except ValueError:
        # Maybe the first arg is the nick?
        try:
            amount_f = float(event.args[-1])
            amount = int(event.args[-1])
            user = " ".join(event.args[0:-1])
        except ValueError:
            return await bot.reply("Invalid amount.")
    if amount <= 0:
        return await bot.reply("Invalid amount.")
    if amount != amount_f:
        return await bot.reply("Invalid amount. Must be a whole number")
    # (Removed the old 'Minimum tip is 0.01' check: it was unreachable,
    # since amount is an int already known to be >= 1 at this point, and
    # its message contradicted the whole-number rule above.)
    # Resolve the display name / matrix ID of the recipient.
    # TODO: Make this a helper function? Will be used everywhere..
    if user.startswith("@"):
        if user not in room.users:
            return await bot.say(f"'{user}' is not in the room")
        real_user = user
    else:
        potential_users = room.user_name_clashes(user)
        if len(potential_users) > 1:
            # Ambiguous display name: dissect the formatted (HTML) body to
            # recover the exact matrix ID the sender actually mentioned.
            poke_re = re.compile(r"\.tip ?.+?<a href=\"https://matrix.to/#/(.+?)\">.*")
            if match := poke_re.match(event.formatted_body):
                real_user = match.group(1)
            else:
                await bot.say(f"There is more than one {user}?!!")
                return
        else:
            if not potential_users:
                return await bot.say(f"I couldn't find any {user} here...")
            real_user = potential_users[0]
    if real_user == event.sender:
        return await bot.say("No tipping yourself.")
    if await get_balance(event.sender) < amount:
        return await bot.reply("Not enough balance!")
    if not event.sender or not real_user:
        return await bot.reply("Internal error! Big fuckup!")
    await transfer(event.sender, real_user, amount)
    tag = await bot.source_tag(real_user)
    await bot.message(room.room_id, f"Sent {amount} DOGE to {tag}.", p_html=True)
@command_hook(['baltop'])
async def __cmd_baltop(bot: Jenny, room: MatrixRoom, event: HookMessage):
    """Post the total balance plus the top balances of users in this room."""
    resp = ""
    async with aiosqlite.connect("./balance.db") as db:
        db.row_factory = aiosqlite.Row
        async with db.execute("SELECT SUM(balance) AS `total` FROM balance") as cursor:
            row = await cursor.fetchone()
            resp += f"Total balance: \002{round(row['total'], 8)}\002 DOGE.\n\nTop 11 balances:<ol>"
        async with db.execute("SELECT * FROM balance ORDER BY balance DESC") as cursor:
            rows = await cursor.fetchall()
            # amt counts listed entries; loop stops once 11 have been emitted.
            amt = 0
            for row in rows:
                # Only list users currently present in this room.
                if row['username'] not in room.users:
                    continue
                if amt > 10:
                    break
                amt += 1
                tag = await bot.source_tag(row['username'])
                resp += f"<li>{tag}: \002{int(round(row['balance'], 0))}\002 DOGE</li>"
    resp += "</ol>"
    await bot.message(room.room_id, resp, p_html=True)
|
# given a matrix of words and a dictionary
# [
# [ 'c, 'a', 't', 'e' ]
# [ 'a', 'r', 't', 's' ]
# [ 'r', 'e', 'n', 't' ]
# ]
#
# find all words: horizontal, veritcal and diagonal
#
# e.g. cat, ate, at, a, car, are, est, rent, arts, art
def find_words_in_matrix():
    # TODO: not implemented -- intended entry point that will scan the
    # matrix horizontally, vertically and diagonally via find_word_in_list.
    # lets compose first
    pass
# Module-level accumulator; currently unused by the functions below.
words = set()
def find_word_in_list(letters, dictionary, words=None):
    """Return every dictionary word occurring as a contiguous run in `letters`.

    Collects all prefix and suffix matches of the current list, then
    recurses with the first and the last letter stripped, so every
    substring is eventually checked.

    Fix: the accumulator default was a shared mutable ``set()`` (a classic
    Python pitfall); it is now ``None`` and allocated per call.
    """
    if words is None:
        words = set()
    if not letters:
        return words
    words = words.union(get_words_forward(dictionary, letters))
    words = words.union(get_words_backwards(dictionary, letters))
    return words.union(
        find_word_in_list(letters[1:], dictionary, words),
        find_word_in_list(letters[:-1], dictionary, words)
    )
def get_words_backwards(dictionary, letters):
    """Return every dictionary word that is a suffix of `letters`
    (including the full string).

    Fix: removed the unused local ``possible_list``.
    """
    words = set()
    # Drop the leading letter each round: "cate" -> "ate" -> "te" -> "e".
    while letters:
        possible_word = "".join(letters)
        if is_list_a_word(dictionary, possible_word):
            words.add(possible_word)
        letters = letters[1:]
    return words
def get_words_forward(dictionary, letters):
    """Return every dictionary word that is a prefix of `letters`."""
    found = set()
    prefix = ''
    # Grow the prefix one letter at a time and test each candidate.
    for ch in letters:
        prefix += ch
        if is_list_a_word(dictionary, prefix):
            found.add(prefix)
    return found
def is_list_a_word(dictionary, possible_word):
    """Membership test, kept as its own function for readability."""
    if possible_word in dictionary:
        return True
    return False
def test(actual, expected):
try:
assert expected == actual
print 'PASS!'
except Exception:
print 'FAIL!'
print 'expected %s to equal %s' % (actual, expected)
def rotate(matrix):
    """Placeholder for a matrix transpose; currently returns the input.

    Fixes: the debug print iterated over an int (``len(matrix[0])``),
    which raised TypeError -- it now iterates ``range(len(...))`` -- and
    uses print() so it also runs under Python 3.
    """
    print([i for i in range(len(matrix[0]))])
    return matrix
# Test finding a word in ['d','o','g']
word_set = {'a',
            'do',
            'dog',
            'ogre',
            'cat',
            'ate',
            'at'}
# Single letters: 'a' is a word, 'd' is not.
test(find_word_in_list(['a'], word_set), {'a'})
test(find_word_in_list(['d'], word_set), set())
test(find_word_in_list(['d', 'o'], word_set), {'do'})
test(find_word_in_list(['d', 'o', 'g'], word_set), {'do', 'dog'})
# Every substring is searched, so embedded words are found too.
test(
    find_word_in_list(['d', 'o', 'g', 'r', 'e'], word_set),
    {'do',
     'dog',
     'ogre'}
)
# cate
test(
    find_word_in_list(['c', 'a', 't', 'e'], word_set),
    {'cat', 'at', 'ate', 'a'}
)
# # Test transpose
# test(rotate([[1]]), [[1]])
# test(rotate([[1, 2], [3, 4]]), [[1, 3], [2, 4]])
|
# -*- coding: utf-8 -*-
import wx
import numpy as np
import matplotlib
matplotlib.use("WXAgg")
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar
from matplotlib.ticker import MultipleLocator, FuncFormatter
import pylab
from matplotlib import pyplot
class MPL_Panel_base(wx.Panel):
    """wx.Panel embedding a matplotlib Figure, canvas, navigation toolbar
    and a status-text line, plus thin wrappers around common axes calls.
    """

    def __init__(self, parent):
        wx.Panel.__init__(self, parent=parent, id=-1)
        self.Figure = matplotlib.figure.Figure(figsize=(4, 3))
        self.axes = self.Figure.add_axes([0.1, 0.1, 0.8, 0.8])
        self.FigureCanvas = FigureCanvas(self, -1, self.Figure)
        self.NavigationToolbar = NavigationToolbar(self.FigureCanvas)
        # FIX: ShowHelpString() referenced self.StaticText, which was never
        # created and raised AttributeError; create it here and lay it out.
        self.StaticText = wx.StaticText(self, -1, label='')
        self.SubBoxSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.SubBoxSizer.Add(self.NavigationToolbar, proportion=0, border=2, flag=wx.ALL | wx.EXPAND)
        self.SubBoxSizer.Add(self.StaticText, proportion=-1, border=2, flag=wx.ALL | wx.EXPAND)
        self.TopBoxSizer = wx.BoxSizer(wx.VERTICAL)
        self.TopBoxSizer.Add(self.SubBoxSizer, proportion=-1, border=2, flag=wx.ALL | wx.EXPAND)
        self.TopBoxSizer.Add(self.FigureCanvas, proportion=-10, border=2, flag=wx.ALL | wx.EXPAND)
        self.SetSizer(self.TopBoxSizer)
        # Convenience aliases so panel users can reach the plotting libraries.
        self.pylab = pylab
        self.pl = pylab
        self.pyplot = pyplot
        self.numpy = np
        self.np = np
        self.plt = pyplot

    def UpdatePlot(self):
        """Redraw the canvas; call after changing any figure property."""
        self.FigureCanvas.draw()

    def plot(self, *args, **kwargs):
        """Most common plotting call (wraps axes.plot) and refresh."""
        self.axes.plot(*args, **kwargs)
        self.UpdatePlot()

    def semilogx(self, *args, **kwargs):
        """Plot with log-scaled x axis and refresh."""
        self.axes.semilogx(*args, **kwargs)
        self.UpdatePlot()

    def semilogy(self, *args, **kwargs):
        """Plot with log-scaled y axis and refresh."""
        self.axes.semilogy(*args, **kwargs)
        self.UpdatePlot()

    def loglog(self, *args, **kwargs):
        """Plot with both axes log-scaled and refresh."""
        self.axes.loglog(*args, **kwargs)
        self.UpdatePlot()

    def grid(self, flag=True):
        """Show (default) or hide the grid."""
        if flag:
            self.axes.grid()
        else:
            self.axes.grid(False)

    def title_MPL(self, TitleString="wxMatPlotLib Example In wxPython"):
        """Set the axes title."""
        self.axes.set_title(TitleString)

    def xlabel(self, XabelString="X"):
        """Add xlabel to the plotting."""
        self.axes.set_xlabel(XabelString)

    def ylabel(self, YabelString="Y"):
        """Add ylabel to the plotting."""
        self.axes.set_ylabel(YabelString)

    def xticker(self, major_ticker=1.0, minor_ticker=0.1):
        """Set major/minor tick spacing on the x axis."""
        self.axes.xaxis.set_major_locator(MultipleLocator(major_ticker))
        self.axes.xaxis.set_minor_locator(MultipleLocator(minor_ticker))

    def yticker(self, major_ticker=1.0, minor_ticker=0.1):
        """Set major/minor tick spacing on the y axis."""
        self.axes.yaxis.set_major_locator(MultipleLocator(major_ticker))
        self.axes.yaxis.set_minor_locator(MultipleLocator(minor_ticker))

    def legend(self, *args, **kwargs):
        """Add a legend for the plotting."""
        self.axes.legend(*args, **kwargs)

    def xlim(self, x_min, x_max):
        """Set the displayed x range."""
        self.axes.set_xlim(x_min, x_max)

    def ylim(self, y_min, y_max):
        """Set the displayed y range."""
        self.axes.set_ylim(y_min, y_max)

    def savefig(self, *args, **kwargs):
        """Save the figure to a file."""
        self.Figure.savefig(*args, **kwargs)

    def cla(self):
        """Clear the axes; must be called before drawing a new plot."""
        self.axes.clear()
        self.Figure.set_canvas(self.FigureCanvas)
        self.UpdatePlot()

    def ShowHelpString(self, HelpString="Show Help String"):
        """Display a short help/status message (e.g. the mouse position)."""
        self.StaticText.SetLabel(HelpString)
################################################################
class MPL_Panel(MPL_Panel_base):
    """Ready-to-use panel; can be subclassed or instantiated directly."""

    def __init__(self, parent):
        MPL_Panel_base.__init__(self, parent=parent)
        # Draw something immediately so the panel is not blank.
        self.FirstPlot()

    def FirstPlot(self):
        """Demo/initialisation plot: y = sin(x) on [-5, 5)."""
        # self.rc('lines',lw=5,c='r')
        self.cla()
        xs = np.arange(-5, 5, 0.25)
        ys = np.sin(xs)
        self.yticker(0.5, 0.1)
        self.xticker(1.0, 0.2)
        self.xlabel('X')
        self.ylabel('Y')
        self.title_MPL("图像")
        self.grid()
        self.plot(xs, ys, '--^g')
###############################################################################
# MPL_Frame添加了MPL_Panel的1个实例
###############################################################################
class MPL_Frame(wx.Frame):
    """Frame hosting one MPL_Panel_base plus a right-hand button panel;
    can be subclassed, modified, or used directly."""

    def __init__(self, title="MPL_Frame Example In wxPython", size=(800, 500)):
        wx.Frame.__init__(self, parent=None, title=title, size=size)
        self.MPL = MPL_Panel_base(self)
        # Right-hand button column.
        self.FlexGridSizer = wx.FlexGridSizer(rows=9, cols=1, vgap=5, hgap=5)
        self.FlexGridSizer.SetFlexibleDirection(wx.BOTH)
        self.RightPanel = wx.Panel(self, -1)
        # Test button 1: refresh/redraw.
        self.Button1 = wx.Button(self.RightPanel, -1, "刷新", size=(100, 40), pos=(10, 10))
        self.Button1.Bind(wx.EVT_BUTTON, self.Button1Event)
        # Test button 2: about dialog.
        self.Button2 = wx.Button(self.RightPanel, -1, "关于", size=(100, 40), pos=(10, 10))
        self.Button2.Bind(wx.EVT_BUTTON, self.Button2Event)
        self.FlexGridSizer.Add(self.Button1, proportion=0, border=5, flag=wx.ALL | wx.EXPAND)
        self.FlexGridSizer.Add(self.Button2, proportion=0, border=5, flag=wx.ALL | wx.EXPAND)
        self.RightPanel.SetSizer(self.FlexGridSizer)
        self.BoxSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.BoxSizer.Add(self.MPL, proportion=-10, border=2, flag=wx.ALL | wx.EXPAND)
        self.BoxSizer.Add(self.RightPanel, proportion=0, border=2, flag=wx.ALL | wx.EXPAND)
        self.SetSizer(self.BoxSizer)
        # Status bar and centring.
        self.StatusBar()
        self.Centre(wx.BOTH)

    def Button1Event(self, event):
        """Demo redraw: plot cos(x) with new tickers and a help string."""
        self.MPL.cla()  # must clear the figure before drawing the next plot
        x = np.arange(-10, 10, 0.25)
        y = np.cos(x)
        self.MPL.plot(x, y, '--*g')
        self.MPL.xticker(2.0, 0.5)
        self.MPL.yticker(0.5, 0.1)
        self.MPL.title_MPL("MPL1")
        self.MPL.ShowHelpString("You Can Show MPL Helpful String Here !")
        self.MPL.grid()
        self.MPL.UpdatePlot()  # must refresh for the plot to appear

    def Button2Event(self, event):
        self.AboutDialog()

    def DoOpenFile(self):
        """Open a file picker and read the chosen file (demo helper).

        Fixes: wx.FileDialog requires an integer style flag (the old code
        passed style=''), the file is now opened with a context manager,
        and only I/O errors are caught instead of a bare except.
        """
        wildcard = r"Data files (*.dat)|*.dat|Text files (*.txt)|*.txt|ALL Files (*.*)|*.*"
        open_dlg = wx.FileDialog(self, message='Choose a file', wildcard=wildcard,
                                 style=wx.FD_OPEN)
        if open_dlg.ShowModal() == wx.ID_OK:
            path = open_dlg.GetPath()
            try:
                with open(path, 'r') as fh:
                    text = fh.read()
            except (IOError, OSError):
                dlg = wx.MessageDialog(self, 'Error opening file\n')
                dlg.ShowModal()
                dlg.Destroy()
        open_dlg.Destroy()

    def StatusBar(self):
        """Create a three-field status bar."""
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetFieldsCount(3)
        self.statusbar.SetStatusWidths([-2, -2, -1])

    def AboutDialog(self):
        """Show the About dialog."""
        dlg = wx.MessageDialog(self,
                               '\twxMatPlotLib\t\nMPL_Panel_base,MPL_Panel,MPL_Frame and MPL2_Frame \n Created by Wu Xuping\n Version 1.0.0 \n 2012-02-01',
                               'About MPL_Frame and MPL_Panel', wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()
###############################################################################
### MPL2_Frame添加了MPL_Panel的两个实例
###############################################################################
class MPL2_Frame(wx.Frame):
    """Frame with two MPL_Panel_base instances; can be subclassed, modified,
    or used directly."""
    def __init__(self, title="MPL2_Frame Example In wxPython", size=(850, 500)):
        wx.Frame.__init__(self, parent=None, title=title, size=size)
        self.BoxSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.MPL1 = MPL_Panel_base(self)
        self.BoxSizer.Add(self.MPL1, proportion=-1, border=2, flag=wx.ALL | wx.EXPAND)
        self.MPL2 = MPL_Panel_base(self)
        self.BoxSizer.Add(self.MPL2, proportion=-1, border=2, flag=wx.ALL | wx.EXPAND)
        self.RightPanel = wx.Panel(self, -1)
        self.BoxSizer.Add(self.RightPanel, proportion=0, border=2, flag=wx.ALL | wx.EXPAND)
        self.SetSizer(self.BoxSizer)
        # Create the FlexGridSizer for the button column.
        self.FlexGridSizer = wx.FlexGridSizer(rows=9, cols=1, vgap=5, hgap=5)
        self.FlexGridSizer.SetFlexibleDirection(wx.BOTH)
        # Test button 1
        self.Button1 = wx.Button(self.RightPanel, -1, "TestButton", size=(100, 40), pos=(10, 10))
        self.Button1.Bind(wx.EVT_BUTTON, self.Button1Event)
        # Test button 2
        self.Button2 = wx.Button(self.RightPanel, -1, "AboutButton", size=(100, 40), pos=(10, 10))
        self.Button2.Bind(wx.EVT_BUTTON, self.Button2Event)
        # Add the buttons to the sizer.
        self.FlexGridSizer.Add(self.Button1, proportion=0, border=5, flag=wx.ALL | wx.EXPAND)
        self.FlexGridSizer.Add(self.Button2, proportion=0, border=5, flag=wx.ALL | wx.EXPAND)
        self.RightPanel.SetSizer(self.FlexGridSizer)
        # Status bar.
        self.StatusBar()
        # Centre the MPL2_Frame window on screen.
        self.Centre(wx.BOTH)
    # Button event handler, used for testing.
    def Button1Event(self, event):
        self.MPL1.cla()  # the figure must be cleared before drawing the next plot
        x = np.arange(-5, 5, 0.2)
        y = np.cos(x)
        self.MPL1.plot(x, y, '--*g')
        self.MPL1.xticker(2.0, 1.0)
        self.MPL1.yticker(0.5, 0.1)
        self.MPL1.title_MPL("MPL1")
        self.MPL1.ShowHelpString("You Can Show MPL1 Helpful String Here !")
        self.MPL1.grid()
        self.MPL1.UpdatePlot()  # must refresh for the plot to appear
        self.MPL2.cla()
        self.MPL2.plot(x, np.sin(x), ':^b')
        self.MPL2.xticker(1.0, 0.5)
        self.MPL2.yticker(0.2, 0.1)
        self.MPL2.title_MPL("MPL2")
        self.MPL2.grid()
        self.MPL2.UpdatePlot()
    def Button2Event(self, event):
        self.AboutDialog()
    # Automatically create the status bar.
    def StatusBar(self):
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetFieldsCount(3)
        self.statusbar.SetStatusWidths([-2, -2, -1])
    # About dialog.
    def AboutDialog(self):
        dlg = wx.MessageDialog(self,
                               '\twxMatPlotLib\t\nMPL_Panel_base,MPL_Panel,MPL_Frame and MPL2_Frame \n Created by Wu Xuping\n Version 1.0.0 \n 2012-02-01',
                               'About MPL_Frame and MPL_Panel', wx.OK | wx.ICON_INFORMATION)
        dlg.ShowModal()
        dlg.Destroy()
########################################################################
# Main program (manual test): show the two-panel demo frame.
if __name__ == '__main__':
    app = wx.App()
    frame = MPL2_Frame()
    # frame = MPL_Frame()
    frame.Center()
    frame.Show()
    app.MainLoop()
# Read N, then apply N list commands ("insert i x", "print", "remove x",
# "append x", "sort", "pop", "reverse") to a single list.
if __name__ == '__main__':
    n_commands = int(input())
    lista = []
    for _ in range(n_commands):
        parts = input().split()
        op = parts[0]
        if op == "insert":
            lista.insert(int(parts[1]), int(parts[2]))
        elif op == "print":
            print(lista)
        elif op == "remove":
            lista.remove(int(parts[1]))
        elif op == "append":
            lista.append(int(parts[1]))
        elif op == "sort":
            lista.sort()
        elif op == "pop":
            lista.pop()
        elif op == "reverse":
            lista.reverse()
#!/home/blue/tf2/bin/python
import os
import pandas as pd
from functools import partial
import numpy as np
import SimpleITK as sitk # to read nii files
from sklearn.model_selection import train_test_split
import pickle
import random
#================ Environment variables ================
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
import horovod.tensorflow.keras as hvd
#=================== Set up Horovod =================
# comment out this chunk of code if you train with 1 gpu
hvd.init()
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
#================= Build Data pipeline =================
def normalize(img):
    """Standardise the whole array to zero mean / unit variance, then
    return its transpose."""
    centered = (img - np.mean(img)) / np.std(img)
    return centered.transpose()
# Dead code: the old generator-function DataGenerator, kept for reference
# inside a module-level string literal. DatasetReader below still targets
# this version, not the Sequence class that replaced it.
'''
def DataGenerator(file_list, y_list, shuffle_size, batch_size, random_seed = 42):
    random.seed(random_seed)
    def generator():
        while True:
            shuffle_pool = []
            y_pool = []
            for i in range(shuffle_size):
                file, y = random.choice(list(zip(file_list, y_list)))
                with open(file, 'rb') as f:
                    img = pickle.load(f)
                img = normalize(img)
                shuffle_pool.append(img)
                y_pool.append(y)
            while shuffle_pool:
                j = 0
                x_batch = []
                y_batch = []
                while j < batch_size and shuffle_pool:
                    x_batch.append(shuffle_pool.pop())
                    y_batch.append(y_pool.pop())
                    j += 1
                yield np.array(x_batch), np.array(y_batch)
    return generator
'''
def DatasetReader(file_list, y_list, shuffle_size, batch_size, random_seed = 42):
    # NOTE(review): stale -- this expects the generator-function version of
    # DataGenerator (commented out above); the current DataGenerator class
    # accepts no random_seed argument, so calling this raises TypeError.
    # Unused by the training code below; verify before reuse.
    generator = DataGenerator(file_list, y_list, shuffle_size, batch_size, random_seed=random_seed)
    dataset = tf.data.Dataset.from_generator(
        generator,
        output_types = (tf.float32, tf.float32),
        output_shapes = (tf.TensorShape((batch_size, 53, 63, 52, 53)), tf.TensorShape((batch_size, 5)))
    )
    return dataset.prefetch(tf.data.experimental.AUTOTUNE)
class DataGenerator(keras.utils.Sequence):
    """Keras Sequence yielding (X, y) batches of pickled 4-D volumes.

    Each sample is z-scored slice-by-slice along axis 0 and transposed
    before being stacked into the batch.

    Fixes: `shape` was accepted but ignored (the batch buffer was
    hard-coded to (53, 63, 52, 53)); it is now used, with the same
    default. The pickle file handle is closed via a context manager.

    NOTE: pass batch_size by *keyword* -- the third positional parameter
    is `shape`, so `DataGenerator(files, ys, 16)` sets shape, not
    batch_size.
    """

    def __init__(self, file_list, y_list, shape=(53, 63, 52, 53), batch_size=32):
        self.file_list = file_list
        self.y_list = y_list
        self.shape = shape          # per-sample shape, used when allocating batches
        self.batch_size = batch_size

    def __len__(self):
        # Number of full batches; the ragged tail is dropped.
        return int(np.floor(len(self.file_list) / self.batch_size))

    def __getitem__(self, index):
        batch_file = self.file_list[index*self.batch_size:(index+1)*self.batch_size]
        batch_y = self.y_list[index*self.batch_size:(index+1)*self.batch_size]
        return self.__data_generation(batch_file, batch_y)

    def __data_generation(self, batch_file, batch_y):
        'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros(shape=(self.batch_size,) + tuple(self.shape), dtype=np.float32)
        y = np.zeros(shape=(self.batch_size, 5), dtype=np.float32)
        # Generate data
        for i, (file, _y) in enumerate(zip(batch_file, batch_y)):
            # Load the sample; close the file handle promptly.
            with open(file, 'rb') as fh:
                x = pickle.load(fh)
            # Z-score each slice along axis 0; leave constant slices alone.
            for j in range(x.shape[0]):
                std = np.std(x[j, ])
                if std != 0.0:
                    x[j, ] = (x[j, ] - np.mean(x[j, ])) / std
            X[i, ] = x.transpose()
            y[i, ] = _y
        return X, y
# ---- Load labels and build train/val/eval splits ----
DATA_PATH = "../fMRI_train_pk"
df = pd.read_csv("train_scores.csv")
df = df.dropna()
file_ls = []
y_ls = []
for _, row in df.iterrows():
    file_ls.append(os.path.join(DATA_PATH, str(int(row["Id"])) + ".pk"))
    # row.items() replaces iteritems(), which is removed in modern pandas;
    # skip the Id column, keep the score targets.
    ys = [item for _, item in row.items()]
    y_ls.append(ys[1:])
y_ls = np.array(y_ls, dtype=np.float32)
# 70% train, 15% validation, 15% held-out evaluation.
train_f, test_f, train_label, test_label = train_test_split(
    file_ls, y_ls, test_size=0.3, random_state=42
)
val_f, evl_f, val_label, evl_label = train_test_split(
    test_f, test_label, test_size=0.5, random_state=42
)
BATCH_SIZE = 16
# FIX: pass batch_size by keyword. The third positional parameter of
# DataGenerator is `shape`, so the old positional calls silently ignored
# BATCH_SIZE and trained with the default batch size of 32.
train_set = DataGenerator(train_f, train_label, batch_size=BATCH_SIZE)
val_set = DataGenerator(val_f, val_label, batch_size=BATCH_SIZE)
evl_set = DataGenerator(evl_f, evl_label, batch_size=BATCH_SIZE)
#==================== Build model ====================
# Conv3D preset: 3x3x3 kernel, unit stride, SAME padding, L2 weight decay.
DefaultConv3D = partial(keras.layers.Conv3D, kernel_size=3, strides=(1,)*3,
                        padding="SAME", use_bias=True, kernel_regularizer = keras.regularizers.l2(0.01))
class ResidualUnit(keras.layers.Layer):
    """ResNet-style 3-D residual block: conv-BN-act-conv-BN plus a skip
    connection (1x1x1 conv + BN when the block downsamples).

    Construction (layer lists in __init__) is separated from execution
    (call); note the strides argument is a 3-tuple.

    Fix: get_config() previously omitted 'activation', so a model
    rebuilt from its config silently fell back to the default.
    """

    def __init__(self, filters, strides=(1,)*3, activation="relu", **kwargs):
        super().__init__(**kwargs)
        self.activation = keras.activations.get(activation)
        self.filters = filters
        self.strides = strides
        # Main path: a list of layers that is iterated in call().
        self.main_layers = [
            DefaultConv3D(self.filters, strides=self.strides, kernel_initializer="he_normal"),
            keras.layers.BatchNormalization(),
            self.activation,
            DefaultConv3D(self.filters, strides=(1,)*3, kernel_initializer="he_normal"),
            keras.layers.BatchNormalization()
        ]
        self.skip_layers = []
        if np.prod(self.strides) > 1:
            # Downsampling block: 1x1x1 conv matches shape for the skip path.
            #self.skip_layers = [keras.layers.MaxPool3D(pool_size=(2,)*3, strides=strides, padding="SAME")]
            self.skip_layers = [
                DefaultConv3D(self.filters, kernel_size=1, strides=self.strides, kernel_initializer="he_normal"),
                keras.layers.BatchNormalization()
            ]

    def call(self, inputs, **kwargs):
        x = inputs
        orig_x = inputs
        for layer in self.main_layers:
            x = layer(x)  # f(x)
        for layer in self.skip_layers:
            orig_x = layer(orig_x)
        return self.activation(x + orig_x)

    def get_config(self):
        config = super(ResidualUnit, self).get_config()
        # Include 'activation' (previously missing) so serialization round-trips.
        config.update({
            'filters': self.filters,
            'strides': self.strides,
            'activation': keras.activations.serialize(self.activation),
        })
        return config
# Widths and downsampling strides for the three residual stages.
filters = (16, 32, 64)
strides = (1, 2, 2)
#(1,1,1)
model = keras.models.Sequential()
model.add(keras.layers.Input(shape=(53, 63, 52, 53), dtype=tf.float32))
model.add(DefaultConv3D(filters[0], kernel_size=3, strides=(1,)*3,
                        input_shape=[53, 63, 52, 53], kernel_initializer="he_normal"))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Activation("relu"))
model.add(keras.layers.MaxPool3D(pool_size=(2,)*3, padding="SAME"))
# Two residual units per stage: the first downsamples, the second keeps size.
# (Loop variable renamed from `filter`, which shadowed the builtin.)
for n_filters, stride in zip(filters[1:], strides[1:]):
    model.add(ResidualUnit(n_filters, strides=(stride,)*3))
    model.add(ResidualUnit(n_filters, strides=(1,)*3))
model.add(keras.layers.GlobalAvgPool3D())
model.add(keras.layers.Flatten())  # 128
model.add(keras.layers.Dense(16, activation="relu", kernel_regularizer=keras.regularizers.l2(0.002)))
#model.add(keras.layers.Dropout(0.5 ))
model.add(keras.layers.Dense(5))
optimizer = keras.optimizers.RMSprop(0.001)
'''
# set up Horovod
optimizer = hvd.DistributedOptimizer(optimizer)
'''
model.compile(loss="mse",
              optimizer=optimizer,
              metrics=["mse", "mae"],
              experimental_run_tf_function=False)
#================== Configure Callbacks ==================
# Keep only the best model (lowest validation loss) on disk.
checkpoint_cb = keras.callbacks.ModelCheckpoint("./my_logs/First_try.h5",
                                                monitor = 'val_loss', mode = 'min',
                                                save_best_only=True
                                                )
class PrintValTrainRatioCallback(keras.callbacks.Callback):
    """After every epoch, print val_loss / train_loss (overfitting gauge)."""

    def on_epoch_end(self, epoch, logs):
        ratio = logs["val_loss"] / logs["loss"]
        print("\nval/train: {:.2f} \n".format(ratio))
# Base directory that collects all timestamped run logdirs.
root_logdir = os.path.join(os.curdir, "./my_logs/First_try")

def get_run_logdir(comment=""):
    """Return a fresh, timestamped run directory path under root_logdir."""
    import time
    stamp = time.strftime("run_%Y_%m_%d-%H_%M_%S{}".format(comment))
    return os.path.join(root_logdir, stamp)
run_logdir = get_run_logdir()
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
# Horovod: broadcast the initial variable state from rank 0 to all workers.
callbacks = [
    hvd.callbacks.BroadcastGlobalVariablesCallback(0)
]
# Only rank 0 writes logs/checkpoints, to avoid clobbering shared files.
if hvd.rank() == 0:
    callbacks.append(tensorboard_cb)
    callbacks.append(checkpoint_cb)
#================== Training ==================
# Model.fit handles keras.utils.Sequence objects directly; fit_generator
# is deprecated in TF2 and removed in newer releases.
history = model.fit(train_set, steps_per_epoch=128 // BATCH_SIZE, epochs=300,
                    validation_data=val_set,
                    validation_steps=800 // BATCH_SIZE,
                    max_queue_size=2,
                    workers=2,
                    use_multiprocessing=True
                    )
"""Managers for OAuth models"""
from __future__ import absolute_import
from readthedocs.privacy.loader import RelatedUserQuerySet
class RemoteRepositoryQuerySet(RelatedUserQuerySet):
    """Queryset for remote repositories; user-based filtering comes from the base."""
    pass
class RemoteOrganizationQuerySet(RelatedUserQuerySet):
    """Queryset for remote organizations; user-based filtering comes from the base."""
    pass
|
from django.shortcuts import get_object_or_404
from rest_framework import viewsets
from rest_framework.permissions import AllowAny
from rest_framework import mixins
from .models import View, Component
from .serializers import ComponentSerializer, ViewSerializer
class ComponentViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Read-only endpoint resolving a Component by (view name, component name)."""
    permission_classes = (AllowAny, )
    queryset = Component.objects.all()
    serializer_class = ComponentSerializer

    def get_object(self):
        queryset = self.filter_queryset(self.get_queryset())
        # Read (don't pop) the URL kwargs: popping mutated self.kwargs, so
        # any second get_object() call in the same request raised KeyError.
        view = self.kwargs['view']
        name = self.kwargs['name']
        return get_object_or_404(queryset, view__name=view, name=name)
class ViewViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Read-only endpoint resolving a View by its unique name."""
    permission_classes = (AllowAny, )
    queryset = View.objects.all()
    serializer_class = ViewSerializer
    # Look up by name instead of the default pk.
    lookup_field = 'name'
|
import unittest
from katas.kyu_6.give_me_diamond import diamond
class DiamondTestCase(unittest.TestCase):
    """Tests for the diamond kata: an ASCII diamond for valid sizes, None otherwise."""

    def test_equals(self):
        # 3-wide diamond: one star, three stars, one star.
        self.assertEqual(diamond(3), ' *\n***\n *\n')

    def test_none(self):
        # Even sizes are invalid.
        self.assertIsNone(diamond(6))

    def test_none_2(self):
        # Negative sizes are invalid.
        self.assertIsNone(diamond(-1))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import allure
def test_attach_text():
    """Attach a plain-text snippet to the Allure report."""
    body = "这是一个纯文本"
    allure.attach(body, name="这是测试文本附件", attachment_type=allure.attachment_type.TEXT)
def test_attach_html():
    """Attach an HTML fragment to the Allure report."""
    body = "<body>这是一段html块</body>"
    allure.attach(body, name="这是测试html附件", attachment_type=allure.attachment_type.HTML)
def test_attach_photo():
    """Attach a JPG file from disk to the Allure report."""
    photo_path = "C:\\Users\\Administrator.USER-20150816GZ\\Desktop\\test.jpg"
    allure.attach.file(photo_path, name="这是测试图片附件", attachment_type=allure.attachment_type.JPG)
|
import copy
from django.test import TestCase
from core.tests.test_helpers import ADDRESS_EXAMPLE_DATA
from custom_auth.forms import ClientUserCreationForm
from custom_auth.tests.test_helpers import create_client_example
from inquiry.forms import InquiryHomeForm, InquiryFirstForm, WizardClientUserCreationForm
from inquiry.tests.test_utils import INQUIRY_EXAMPLE_DATA
# Valid payload for InquiryFirstForm: address fields plus use-case flags
# and the client email.
FIRST_FORM_EXAMPLE_DATA = dict(
    ADDRESS_EXAMPLE_DATA, **{
        'use_case_debts': True,
        'use_case_diversify': True,
        'use_case_renovate': True,
        'use_case_education': False,
        'use_case_buy_home': True,
        'use_case_business': False,
        'use_case_emergency': False,
        'use_case_retirement': True,
        'use_case_other': 'party',
        "email": "test+client1@hometap.com",
    }
)
# Valid payload for InquiryHomeForm.
HOME_FORM_EXAMPLE_DATA = {
    'ten_year_duration_prediction': 'over_10',
    'home_value': 1000000,
    'primary_residence': 'True',
    'property_type': 'sf',
    'rent_type': 'no',
    'household_debt': 500000,
}
class WizardClientUserCreationFormTests(TestCase):
    """Tests for the wizard variant of the client user creation form."""

    def setUp(self):
        self.data = {
            "password1": "testpassword1",
            "password2": "testpassword1",
            "phone_number": "555-555-5555",
            "sms_opt_in": "on",
            "agree_to_terms": "on",
        }

    def test_is_valid_no_data(self):
        form = WizardClientUserCreationForm(data={})
        self.assertFalse(form.is_valid())
        # the wizard user creation form doesn't require the email -- it gets it from the first step
        required = [f for f in ClientUserCreationForm.Meta.fields if f != 'email']
        for field in required:
            self.assertEqual(form.errors[field], ["This field is required."])

    def test_is_valid_valid_data(self):
        self.assertTrue(WizardClientUserCreationForm(data=self.data).is_valid())

    def test_is_valid_phone_missing(self):
        partial = copy.deepcopy(self.data)
        del partial['phone_number']
        self.assertFalse(WizardClientUserCreationForm(data=partial).is_valid())
class InquiryHomeFormTests(TestCase):
    """Tests for InquiryHomeForm validation."""

    def test_is_valid_no_data(self):
        form = InquiryHomeForm(data={})
        self.assertFalse(form.is_valid())
        # Every declared field must report the missing-field error.
        for field in InquiryHomeForm.Meta.fields:
            self.assertEqual(form.errors[field], ["This field is required."])

    def test_is_valid(self):
        self.assertTrue(InquiryHomeForm(data=INQUIRY_EXAMPLE_DATA).is_valid())

    def test_clean_true(self):
        self.assertTrue(InquiryHomeForm(HOME_FORM_EXAMPLE_DATA).is_valid())
class InquiryFirstFormTests(TestCase):
    """Tests for InquiryFirstForm: required fields, email uniqueness, and
    the 'at least one use case' rule."""

    def test_is_valid_no_data(self):
        form = InquiryFirstForm(data={})
        self.assertFalse(form.is_valid())
        for required_field in InquiryFirstForm.Meta.fields:
            # use cases have their own test
            if required_field in [
                'referrer_name', 'use_case_debts', 'use_case_education', 'use_case_diversify',
                'use_case_buy_home', 'use_case_renovate', 'use_case_business', 'use_case_emergency',
                'use_case_retirement', 'use_case_other', 'notes'
            ]:
                continue
            self.assertEqual(form.errors[required_field], ["This field is required."])

    def test_is_valid(self):
        form = InquiryFirstForm(data=FIRST_FORM_EXAMPLE_DATA)
        self.assertTrue(form.is_valid())

    def test_is_valid_no_email(self):
        # Email is required by this form (unlike the wizard variant).
        data = copy.deepcopy(FIRST_FORM_EXAMPLE_DATA)
        data.pop('email')
        form = InquiryFirstForm(data=data)
        self.assertFalse(form.is_valid())

    def test_clean_email(self):
        form = InquiryFirstForm(data=FIRST_FORM_EXAMPLE_DATA)
        self.assertTrue(form.is_valid())

    def test_clean_email_existing_user(self):
        # A pre-existing client with the same email must fail validation.
        create_client_example()
        form = InquiryFirstForm(data=FIRST_FORM_EXAMPLE_DATA)
        self.assertFalse(form.is_valid())
        self.assertEqual(len(form.errors), 1)
        self.assertEqual(form.errors['email'], ['User with this Email address already exists.'])

    def test_no_use_case_selected(self):
        """ Tests error when no use case selected or input """
        data = copy.deepcopy(FIRST_FORM_EXAMPLE_DATA)
        form = InquiryFirstForm(data=data)
        self.assertTrue(form.is_valid())
        # Clear every use-case flag and the free-text field; the form must
        # then require use_case_other.
        for use_case in (
            'use_case_debts',
            'use_case_education',
            'use_case_diversify',
            'use_case_buy_home',
            'use_case_renovate',
            'use_case_business',
            'use_case_emergency',
            'use_case_retirement',
        ):
            data[use_case] = False
        data['use_case_other'] = ''
        form = InquiryFirstForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['use_case_other'], [
                "This field is required if no other use cases are selected."
            ]
        )
|
# Author:ambiguoustexture
# Date: 2020-02-05
#
# Count how often each name (the first tab-separated column) appears in
# hightemp.txt and print "name(count)" lines, most frequent first.
# Fixes vs. the original: the file handle is closed deterministically
# (it was leaked), the builtin name `file` is no longer shadowed, and the
# sort+groupby+sort pipeline is replaced by collections.Counter.
from collections import Counter

INPUT_FILE = 'hightemp.txt'

with open(INPUT_FILE) as handle:
    names = [line.split('\t')[0] for line in handle]

counts = Counter(names)
# Descending count, ties alphabetical — same order the original produced
# (alphabetical pre-sort followed by a stable sort on count).
for name, count in sorted(counts.items(), key=lambda kv: (-kv[1], kv[0])):
    print('{item}({count})'.format(item=name, count=count))
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Problem: the monkey-and-peaches puzzle. On day 1 a monkey picks some
peaches and immediately eats half of them plus one more. Each following
morning it eats half of what is left plus one. On the morning of day 10
only a single peach remains. How many peaches were picked on day 1?

Analysis: work backwards from the last day — if ``d`` peaches remain on
some morning, the previous morning there must have been ``(d + 1) * 2``.

(Original used Python-2 ``print`` statements; the single-argument
``print(x)`` form below behaves identically on Python 2 and 3.)
"""


def peaches_on_previous_day(remaining):
    """Return how many peaches existed one morning before `remaining` were left."""
    return (remaining + 1) * 2


tmp = 1  # peaches left on the morning of day 10
print(tmp)
# Step back 9 days (day 10 -> day 1), printing the count for each day.
for i in range(1, 10, 1):
    tmp = peaches_on_previous_day(tmp)
    print(tmp)
|
from django.urls import path
from quotes.api.views import QuoteDetailAPIview, QuoteListCreateAPIView

# URL routes for the quotes API:
#   quotes/          -> QuoteListCreateAPIView (list / create endpoint)
#   quotes/<int:pk>/ -> QuoteDetailAPIview (single-quote endpoint, keyed by pk)
urlpatterns = [
    path("quotes/", QuoteListCreateAPIView.as_view(), name="quote-list"),
    path("quotes/<int:pk>/", QuoteDetailAPIview.as_view(), name="quote-detail")
]
"""Implementation of treadmill admin ldap CLI schema plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import click
import pkg_resources
import six
from treadmill import cli
from treadmill import context
from treadmill import yamlwrapper as yaml
def init():
    """Schema CLI group.

    Returns the click command implementing `schema`, which prints the
    current LDAP schema and can optionally push a refreshed schema first.
    """
    # Formatter that renders the schema dict using the 'ldap-schema' layout.
    formatter = cli.make_formatter('ldap-schema')

    @click.command(name='schema')
    @click.option('-u', '--update', help='Refresh LDAP schema.', is_flag=True,
                  default=False)
    @cli.admin.ON_EXCEPTIONS
    def _schema(update):
        """View or update LDAP schema"""
        if update:
            # Schema modification needs the LDAP config-admin identity.
            context.GLOBAL.ldap.user = 'cn=Manager,cn=config'
            # resource_stream yields bytes; wrap it so yaml sees UTF-8 text.
            utf8_reader = codecs.getreader('utf8')
            schema_rsrc = utf8_reader(
                pkg_resources.resource_stream('treadmill',
                                              '/etc/ldap/schema.yml')
            )
            schema = yaml.load(stream=schema_rsrc)
            context.GLOBAL.ldap.conn.update_schema(schema)
        # Re-read the (possibly refreshed) schema from the server.
        schema_obj = context.GLOBAL.ldap.conn.schema()

        def dict_to_namevalue_list(item):
            """Translates name: value dict into [{name: $name, ...}]
            """
            result = []
            # Sort by key so the flattened list has deterministic order.
            for pair in sorted(six.iteritems(item)):
                entry = pair[1].copy()
                entry.update(
                    {'name': pair[0]}
                )
                result.append(entry)
            return result

        # Flatten the attribute/objectclass dicts for tabular output.
        schema_obj['attributeTypes'] = dict_to_namevalue_list(
            schema_obj['attributeTypes'])
        schema_obj['objectClasses'] = dict_to_namevalue_list(
            schema_obj['objectClasses'])
        cli.out(formatter(schema_obj))

    return _schema
|
import os
import numpy as np
import json
import cv2
import torch

# Directory the script was launched from (first sys.path entry).
basicPath = os.sys.path[0]
# Join path components portably instead of hard-coding Windows "\\"
# separators (the original paths only worked on Windows).
trainPath = os.path.join(basicPath, "Classification", "Data", "Train")
testPath = os.path.join(basicPath, "Classification", "Data", "Test")
classList = ['i2', 'i4', 'i5', 'io', 'ip', 'p11', 'p23', 'p26', 'p5', 'pl30',
             'pl40', 'pl5', 'pl50', 'pl60', 'pl80', 'pn', 'pne', 'po', 'w57']


def load_gray_images(folder, side=64):
    """Load every image in `folder` as grayscale resized to side x side.

    Returns a float tensor of shape (num_images, side, side).  Files that
    OpenCV cannot decode are skipped (their row stays all-zero) instead of
    crashing on resize.
    """
    names = os.listdir(folder)
    batch = torch.zeros([len(names), side, side])
    for i, name in enumerate(names):
        img = cv2.imread(os.path.join(folder, name), cv2.IMREAD_GRAYSCALE)
        if img is None:
            continue
        batch[i] = torch.tensor(cv2.resize(img, (side, side)))
    return batch


# ---- Training set: one sub-directory per class ----
data = []
label = []
for class_idx, d in enumerate(os.listdir(trainPath)):
    images_tensor = load_gray_images(os.path.join(trainPath, d))
    data.append(images_tensor)
    # One float label per image: the index of the class directory.
    label.append(torch.full((images_tensor.shape[0],), class_idx,
                            dtype=torch.float32))
train_data = torch.cat(data, 0)
train_label = torch.cat(label, 0)
torch.save(train_data, 'train_data.pth')
torch.save(train_label, 'train_label.pth')

# ---- Test set: a flat directory of images (no labels available) ----
test_data = load_gray_images(testPath)
torch.save(test_data, 'test_data.pth')
from flask import render_template, flash, request
from flask_login import login_user, logout_user, current_user
from models.Modelos import Usuario
from flask_sqlalchemy import SQLAlchemy
from flask_login import login_required
import sys
db = SQLAlchemy()
'''
Función que se ejecuta al entrar a la página de inicio de sesión
"/usuario/iniciar_sesion". Se encarga de buscar al usuario en la base de datos
(dependiéndo de si es comprador o vendedor), comprobar que la contraseña sea
correcta y lo inicializa en la sesión.
'''
def iniciar_sesion():
    """Handle the "/usuario/iniciar_sesion" page.

    On POST: if a user is already in session, send them to their landing
    page; otherwise look the user up by email, verify the password and log
    them in.  On GET: render the login form.
    """
    def _pagina_principal(es_vendedor):
        # Landing page after login: sellers get their dashboard, buyers get
        # the product listing (products joined with their images).
        if es_vendedor:
            return render_template('usuario/vendedor_principal.html')
        productos = db.engine.execute("""SELECT producto.correo_vendedor as correo_vendedor, producto.id_producto as id_producto,
            nombre ,precio, cantidad, detalles, descripcion, estado, ruta
            FROM producto
            Left JOIN imagen
            ON producto.id_producto = imagen.id_producto""")
        return render_template('usuario/inicio_usuario.html', productos=productos)

    if request.method == 'POST':
        # Already authenticated: skip the credential check entirely.
        if current_user.is_authenticated:
            return _pagina_principal(current_user.tipo)
        # Attempt login with the submitted credentials.
        correo = request.form.get('correo')
        contrasenia = request.form.get('contrasenia')
        usuario = Usuario.query.filter_by(correo=correo).first()
        # A user was found and the password matches.
        if usuario is not None and usuario.check_contrasenia(contrasenia):
            login_user(usuario)
            return _pagina_principal(usuario.tipo)
        flash("Correo o contraseña incorrectos")
        return render_template('usuario/iniciar_sesion.html', error=True)
    else:
        return render_template('usuario/iniciar_sesion.html', error=False)
'''
Función que se ejecuta al entrar a la página de cerrar sesión
"/usuario/cerrar_sesion". Se encarga de cerrar la sesión del usuario y
redireccionar a la página principal.
'''
@login_required
def cerrar_sesion():
    """Log the current user out and return to the landing page."""
    logout_user()
    return render_template('index.html')
# Renders the seller's main page (login required).
@login_required
def vendedor_principal():
    """Render the seller dashboard template."""
    return render_template('usuario/vendedor_principal.html')
# Renders the buyer's main page with the product listing (login required).
@login_required
def inicio_usuario():
    """Render the buyer home page listing every product with its image."""
    consulta = """SELECT producto.correo_vendedor as correo_vendedor, producto.id_producto as id_producto,
    nombre ,precio, cantidad, detalles, descripcion, estado, ruta
    FROM producto
    Left JOIN imagen
    ON producto.id_producto = imagen.id_producto"""
    filas = db.engine.execute(consulta)
    return render_template('usuario/inicio_usuario.html', productos=filas)
|
# -*- coding: utf-8 -*-
import sys
from c_answers import RunAnswer
from kivy.app import App
# kivy.require("1.9.1")
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty, StringProperty
# NOTE!!!! adjust this path so it points at the view (.kv) file of your install
dialog_question = Builder.load_file('/views/questions.kv')
class Controller(BoxLayout):
    """Root widget of the questionnaire screen (Python 2 / Kivy).

    Holds the 24 questions, collects one answer per question via the view
    defined in questions.kv, and hands the collected answers to
    salirPreguntas() after the last question.
    """
    # default label values for the views
    label_1 = 'Grado poco: 1 ,2, 3;\n'
    label_2 = 'Grado medio: 4;\n'
    label_3 = 'Grado mucho: 5, 6, 7;'
    label_leyenda = '[color=000000]' + label_1 + label_2 + label_3 +'[/color]'
    question_no = 1
    # 25 is the loop bound because index zero holds a placeholder entry
    last_question = 25
    # NOTE(review): these are class-level mutable lists, shared by every
    # Controller instance (name-mangled to _Controller__list_*) -- confirm
    # a single Controller instance is ever created.
    __list_pregunta = []
    __list_answer = []
    __list_answer.append('nada')
    respuesta_1 = 'grado 1'
    respuesta_2 = 'grado 2'
    respuesta_3 = 'grado 3'
    respuesta_4 = 'grado 4'
    respuesta_5 = 'grado 5'
    botton_exit = 'Salir del programa'
    botton_siguiente = 'Enviar Respuesta'
    # question list (index 0 is a dummy so real questions start at 1)
    __list_pregunta.append('nada, pregunta cero no existe')
    __list_pregunta.append('1 >> Presto atencion al tema de los sentimientos ??')
    __list_pregunta.append('2 >> Me preocupo mucho por lo que siento o dejo de sentir ??')
    __list_pregunta.append('3 >> Normalmente dedico tiempo a pensar en mis emociones ?? ')
    __list_pregunta.append('4 >> Pienso que merece la pena prestar atencion\na mis emociones y estados de animo ??')
    __list_pregunta.append('5 >> Dejo que mis sentimientos afecten mis pensamientos ??')
    __list_pregunta.append('6 >> Pienso en mi estado de animo constantemente ??')
    __list_pregunta.append('7 >> A menudo pienso en mis sentimientos ??')
    __list_pregunta.append('8 >> Presto mucha atencion a como me siento ??')
    __list_pregunta.append('9 >> Tengo claros mis sentimientos ??')
    __list_pregunta.append('10 >> Frecuentemente puedo definir mis sentimientos ??')
    __list_pregunta.append('11 >> Casi siempre se como me siento ??')
    __list_pregunta.append('12 >> Normalmente conosco lo que siento sobre alguna persona ??')
    __list_pregunta.append('13 >> A menudo me doy cuenta de mis sentimientos en diferentes situaciones ??')
    __list_pregunta.append('14 >> Siempre puedo decir con honestidad el como me siento ??')
    __list_pregunta.append('15 >> A veces puedo decir cuales son mis emociones ??')
    __list_pregunta.append('16 >> Puedo llegar a comprender mis sentimientos ??')
    __list_pregunta.append('17 >> Aunque a veces me siento triste, suelo tener una vision optimista ??')
    __list_pregunta.append('18 >> Aunque me sienta mal, procuro pensar en cosas agradables ??')
    __list_pregunta.append('19 >> Cuando estoy triste pienso en todos los placeres de la vida ??')
    __list_pregunta.append('20 >> Hago el intento de tener pensamientos positivos aunque me sienta mal ??')
    __list_pregunta.append('21 >> Si doy demasiadas vueltas a las cosas, complicandolas.\nTrato de calmarme ??')
    __list_pregunta.append('22 >> Me preocupo por tener un buen estado de animo ??')
    __list_pregunta.append('23 >> Tengo mucha energia cuando me siento feliz ??')
    __list_pregunta.append('24 >> Cuando estoy enfadado intento cambiar mi estado de animo ??')
    # first question shown on screen
    pregunta = __list_pregunta[1]
    # '0' means "no option selected yet"
    answer = '0'
    _list_questions = []
    # Kivy property objects, bound to widgets in the .kv view
    label_warning = ObjectProperty()
    dinamic_question = ObjectProperty()

    def get_answer(self, grado):
        """Store the selected degree; clear the warning once a choice is made."""
        self.answer = grado
        if self.answer != '0':
            self.label_warning.text = ''

    def next_question(self):
        """Record the current answer and advance to the next question.

        Shows a warning when nothing is selected; after the last question
        the collected answers are forwarded to salirPreguntas().
        """
        if self.answer == '0':
            self.label_warning.text = '[color=EFF552]Favor de elegir alguna de las opciones para continuar[/color]'
        else:
            # advance to the next question number
            self.question_no += 1
            self.__list_answer.append(self.answer)
            if self.question_no < self.last_question:
                self.dinamic_question.text = self.__list_pregunta[self.question_no]
            else:
                # leaving the questions screen (Python-2 print statement)
                print 'salimos de pantalla de preguntas ... '
                #RunAnswer._construir(self.__list_answer)
                salirPreguntas(self.__list_answer)

    def exit_program(self):
        """Terminate the application immediately."""
        sys.exit()
class SistemExpertoApp(App):
    """Kivy application whose root widget is the questionnaire Controller."""
    def build(self):
        """Return the root widget of the app."""
        return Controller()
class Corre:
    """Helper that launches the questionnaire app as soon as it is constructed."""
    def __init__(self):
        # Blocks until the Kivy event loop exits.
        SistemExpertoApp().run()
def salirPreguntas(list_answer):
    """Forward the collected answers to the answer-processing module."""
    RunAnswer._construir(list_answer)
# Programming project 1 - Zain Malik
"""Osprey car rentals: interactive rental-charge calculator.

Classification codes:
    B (budget): $40/day plus $0.25 per mile driven.
    D (daily):  $60/day; average miles over 100/day cost $0.25 each.
    W (weekly): $190/week, with surcharges above 900 and 1500 miles.

The charge rules live in small pure functions so they can be tested; the
interactive loop is guarded by __main__ so importing this module is safe
(the original ran input() at import time).
"""
import math


def compute_miles_driven(start_reading, end_reading):
    """Return miles driven from start/end odometer values (in miles).

    Odometers hold six digits (the last is tenths of a mile), so a reading
    can wrap past 99999.9; an end value below the start means a wrap.
    """
    if end_reading >= start_reading:
        return end_reading - start_reading
    # e.g. start 99999.7, end 0.5 -> 0.3 miles before the wrap + 0.5 after.
    return (100000 - start_reading) + end_reading


def budget_charge(num_days, miles):
    """Code B: $40 per day plus $0.25 for each mile driven."""
    return 40 * num_days + 0.25 * miles


def daily_charge(num_days, miles):
    """Code D: $60 per day; $0.25/mile on the average miles above 100/day."""
    base = 60 * num_days
    if miles > 100 * num_days:
        excess_per_day = (miles / num_days) - 100
        return base + (0.25 * excess_per_day * num_days)
    return base


def weekly_charge(num_weeks, miles):
    """Code W: $190/week; +$100/week if 900 < miles <= 1500; above 1500,
    +$200/week plus $0.25 per mile beyond 1500 per week."""
    base = 190 * num_weeks
    if miles <= 900:
        return base
    if miles <= 1500:
        return base + (100 * num_weeks)
    return base + (200 * num_weeks) + (miles - (1500 * num_weeks)) * 0.25


def main():
    """Run the prompt loop: read rental details, print a charge summary."""
    print("")
    print("Welcome to Osprey car rentals.")
    print("This program will make car rental calculations for you.\n")
    print("At the prompts, please enter the following:")
    print("\tYour customer classification code (a character: B (budget), D (daily), or W (weekly))")
    print("\tThe number of days the vehicle was rented:")
    print("\tOdometer reading at the start of the rental period:")
    print("\tOdometer reading at the end of the rental period:\n")
    print("")
    should_continue = input("Would you like to continue (Y/N) ?: ")
    while should_continue in ("y", "Y"):
        customer_code = input("\nCustomer code (B, D, or W): ")
        # Re-prompt until a recognised code (either case) is supplied.
        while customer_code not in ("B", "D", "W", "b", "d", "w"):
            print("\n\t*** Invalid customer code. Try again. ***")
            customer_code = input("\nCustomer code (B, D, or W): ")
        num_days = int(input("\nNumber of days: "))
        raw_start_reading = input("Odometer reading at the start: ")
        raw_end_reading = input("Odometer reading at the end: ")
        # Raw readings are six digits whose last digit is tenths of a mile,
        # so 100003 means 10000.3 miles.
        start_reading = int(raw_start_reading) / 10
        end_reading = int(raw_end_reading) / 10
        num_miles_driven = compute_miles_driven(start_reading, end_reading)
        # Partial weeks are billed as full weeks.
        num_weeks = math.ceil(num_days / 7)
        code = customer_code.upper()
        if code == "B":
            total_charge = budget_charge(num_days, num_miles_driven)
        elif code == "D":
            total_charge = daily_charge(num_days, num_miles_driven)
        else:
            total_charge = weekly_charge(num_weeks, num_miles_driven)
        print("")
        print("Customer Summary:")
        print("\tClassification Code: ", customer_code)
        print("\trental period (days): ", num_days)
        print("\todometer reading at start: ", raw_start_reading)
        print("\todometer reading at end: ", raw_end_reading)
        print("\tnumber of miles driven: ", format(num_miles_driven, '.1f'))
        print("Amount Due: $", format(total_charge, '.2f'))
        print("")
        should_continue = input("Would you like to continue (Y/N) ?: ")
    print("Farewell!")


if __name__ == "__main__":
    main()
'''
Francesco Giovanelli - March 2019
Utils for generator and discriminator networks
'''
import pandas as pd
import math
import numpy as np
import tensorflow as tf
import keras
from keras import backend as K
from keras.models import Model
from keras.layers import *
from keras.utils import to_categorical
from keras import optimizers
from keras.callbacks import TensorBoard
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from collections import Counter
import matplotlib.pylab as plt
import sys
import logging
from datetime import datetime
import generator
'''
Loads GENERATOR data from file and generates proper dataset (training + validation + test)
'''
def load_generator_data(training_data_file, test_data_file):
    """Build generator train/validation/test splits from two files.

    Each file holds lines of the form "<x>-<y>"; every character of a field
    becomes its own float32 column.  The training file is split 2/3 train,
    1/3 validation; the test file is used as-is.
    """
    def explode_bits(column):
        # One float32 column per character ('bit') of the string field.
        return column.apply(lambda s: pd.Series(list(s))).astype(dtype=np.float32)

    training_dataframe = pd.read_csv(training_data_file, delimiter="-", names=["x", "y"])
    test_dataframe = pd.read_csv(test_data_file, delimiter="-", names=["x", "y"])

    # TRAINING data: inputs and one-hot-style labels, bit-expanded.
    x_full = explode_bits(training_dataframe['x'])
    y_full = explode_bits(training_dataframe['y'])
    # TEST data, bit-expanded the same way.
    x_test = explode_bits(test_dataframe['x'])
    y_test = explode_bits(test_dataframe['y'])

    # Hold out one third of the training rows for validation.
    x_train, x_val, y_train, y_val = train_test_split(x_full, y_full, test_size=1/3)

    print("train, val, test set shapes:")
    print(x_train.shape)
    print(x_val.shape)
    print(x_test.shape)
    return x_train, y_train, x_val, y_val, x_test, y_test
'''
Loads DISCRIMINATOR data from file and generates proper dataset (training + validation + test)
Different from the original loader, beacause it uses two files where each file is a dataset containing both feasible and unfeasible solutions
'''
def load_discriminator_data_v2(train_dataset, test_dataset, validation=True):
    """Load discriminator data from two files, each holding a mix of
    feasible and unfeasible solutions.

    Inputs ("x") are bit-expanded into one float32 column per character;
    labels ("y") are plain floats.  With validation=True the training file
    is further split 2/3 train, 1/3 validation.
    """
    def explode_bits(column):
        # One float32 column per character ('bit') of the string field.
        return column.apply(lambda s: pd.Series(list(s))).astype(dtype=np.float32)

    # Load and shuffle both datasets.
    train_df = shuffle(pd.read_csv(train_dataset, delimiter="-", names=["x", "y"]))
    test_df = shuffle(pd.read_csv(test_dataset, delimiter="-", names=["x", "y"]))

    train_x_df = explode_bits(train_df['x'])
    train_y_df = train_df['y'].astype(dtype=np.float32)
    x_test = explode_bits(test_df['x'])
    y_test = test_df['y'].astype(dtype=np.float32)

    # Report dataframe structure.
    print("Train DS and Test DS shapes:")
    print(train_x_df.shape)
    print(train_y_df.shape)
    print(x_test.shape)
    print(y_test.shape)

    # Validation flag off -> return only train & test sets.
    if validation == False:
        return train_x_df, train_y_df, x_test, y_test

    x_train, x_val, y_train, y_val = train_test_split(train_x_df, train_y_df, test_size=1/3)
    print("Train, val, test set shapes:")
    print(x_train.shape)
    print(x_val.shape)
    print(x_test.shape)
    return x_train, y_train, x_val, y_val, x_test, y_test
'''
Loads discriminator data from file and generates a proper training set to use in a GAN network
'''
def load_GAN_data(dataset, smoothing=False):
    """Load a GAN training set: the "x" field of every "<x>-<y>" line,
    bit-expanded into one float32 column per character.

    :param dataset: path of the dataset file to read.
    :param smoothing: accepted for call-site compatibility — other code in
        this module calls load_GAN_data(..., smoothing=...), which raised
        TypeError with the old signature.  Currently a no-op; TODO:
        implement label smoothing or drop the flag at the call sites.

    dtype=str keeps the bit-strings as text: without it, read_csv coerces
    all-digit fields to numbers and list(x) then fails.
    """
    # load dataset from file
    train_df = pd.read_csv(dataset, delimiter="-", names=["x", "y"], dtype=str)
    # convert the single input column into one column per 'bit' of status
    x_train = train_df['x'].apply(lambda x: pd.Series(list(x))).astype(dtype=np.float32)
    return x_train
'''
Generates a training set for G, composed by noise mixed with sampled data from an existing dataset
A portion of the existing dataset is not used, and it is returned as test set for G
The full test set for G is returned as third element
'''
def create_noise_ds_generator(ds_feasible_gen, num_single_solutions):
    """Build the noise/training mix used to train the Generator network.

    Returns (noise, testset_gen, feasible_ds): a shuffled dataframe mixing
    random low-queen-count assignments with sampled feasible solutions, a
    held-out half of the feasible set, and the full feasible set.

    NOTE(review): with an odd-length feasible set, trainset_gen (first
    half) and testset_gen (last half) overlap by one row — confirm intended.
    """
    print("Creating noise datasets for Generator network...")
    feasible_ds = load_GAN_data(dataset=ds_feasible_gen)
    feasible_ds_len = feasible_ds.shape[0]
    num_feasible_samples = int(feasible_ds_len / 2)
    # Split in two parts the DS with feasible solutions, then the first part it is used as training set
    feasible_ds = shuffle(feasible_ds)
    trainset_gen = feasible_ds[:num_feasible_samples]
    testset_gen = feasible_ds[-num_feasible_samples:]
    # create collections for feasible solutions based on the number of queens
    feasible_solutions = {}
    for idx, feas_sol in feasible_ds.iterrows():
        # row sum == number of queens placed in this solution
        num_q = int(np.sum(feas_sol))
        feasible_solutions.setdefault(num_q, []).append(list(feas_sol))
    # Generate noise
    #noise = np.random.binomial(1, 0.065, (1000, 64)).astype(dtype=np.float32)
    noise_list = []
    for num_q in range(1,8):
        # define number of solutions to produce for "num_q" queens
        num_sol = num_single_solutions
        # generate a smaller amount of solutions with 1 or 2 queens, to have lower total feasibility
        if num_q == 1:
            num_sol = int(num_single_solutions / 3)
        elif num_q == 2:
            num_sol = int(num_single_solutions / 2)
        for num in range(num_sol):
            # random board with exactly num_q queens on 64 cells
            sol = shuffle(np.array([1] * num_q + [0] * (64-num_q)))
            noise_list.append(sol)
    # Mix feasible data with noise, so that feasibility is almost equal for the different solution types (num. queens)
    # Increase feasibility of solutions with more than 2 queens
    for num_q in range(3,8):
        count = 0
        # define number of solutions to produce for "num_q" queens
        num_sol = num_single_solutions
        # sample less elements from solutions with 6 or 7 queens: there are less of these feasible solutions, so we don't want to use them all or exceed the total number of solutions available
        if num_q == 6:
            num_sol = int(num_single_solutions / 2)
        elif num_q == 7:
            num_sol = int(num_single_solutions / 2)
        # solutions with more queens, thus low feasibility -> add feasible solutions
        while count < num_sol:
            noise_list.append(feasible_solutions[num_q][count])
            count += 1
    # create noise dataframe
    noise = pd.DataFrame(noise_list, dtype='float32')
    noise = shuffle(noise)
    print("Noise set shape:")
    print(noise.shape)
    return noise, testset_gen, feasible_ds
'''
The goal is to evaluate the ability of G to create solutions starting from empty ones
Arguments:
generator (model): the generator model to evaluate
full_sol_df (dataframe): dataframe containing the full 92 solutions, derived from the 12 base sol. expanded
full_sol_trainset_df (dataframe): dataframe containing the full solutions for the train set, derived from the 8 base sol. of the training set expanded
num_solutions (int): number of full solutions that G needs to generate
'''
def stochastic_generation(generator, full_sol_file, full_sol_trainset_file, num_solutions=1000, num_queens_to_generate=8):
    """Evaluate G's ability to build full solutions starting from empty boards.

    :param generator: trained generator model used for assignment predictions.
    :param full_sol_file: file with the 92 known full N-Queens solutions.
    :param full_sol_trainset_file: file with the training-set full solutions.
    :param num_solutions: number of full solutions G must generate.
    :param num_queens_to_generate: queens to place in each generated solution.

    Prints validity percentages for three criteria; returns None.
    """
    #load the 92 full solutions, for the N-Queens problem, into a dataframe
    full_solutions_df = pd.read_fwf(full_sol_file, widths=[1] * 64, header=None)
    #load the train set full solutions, expanded from the 8 base sol., into a dataframe
    full_solutions_trainset_df = pd.read_fwf(full_sol_trainset_file, widths=[1] * 64, header=None)
    # generate empty solutions (zeros in all positions)
    empty_solutions = np.zeros(shape=(num_solutions, full_solutions_df.shape[1]), dtype=np.float32)
    # collection of full solutions
    full_solutions_generated = []
    count = 0
    # iterate on each empty solution
    for empty_sol in empty_solutions:
        #progress bar
        perc = count / float(num_solutions)
        sys.stdout.write("\rSolutions generation - progress: [{0:50s}] {1:.1f}%".format('#' * int(perc * 50), perc * 100))
        # necessary to provide a single solution to G, for its prediction
        sol = np.array([empty_sol,])
        # counter for number of queens in current solution
        num_queens_in_sol = 0
        while num_queens_in_sol < num_queens_to_generate:
            # get assignments probabilities from G
            assignment_prob = generator.predict(sol)
            # choose assignment randomly, based on G's probabilities in output
            selected_assignment = np.random.choice(64, p=assignment_prob[0])
            # check if solutions doesn't already have a queen in the assignment position:
            # this is necessary even if G has a masking layer, because the outcome of G comes from the Softmax,
            # so even if the Softmax receives values set to 0 from the masking layer, it can assign to it a small probability
            if sol[0][selected_assignment] == 0:
                # apply assignment on original solution
                sol[0][selected_assignment] = 1
                # update queens counter
                num_queens_in_sol += 1
        # solution is complete (8 queens): save it
        full_solutions_generated.append(sol[0])
        # for progress bar
        count += 1
    print()
    print("Starting criterion evaluation...")
    print("----------------------------")
    print("1st criterion: n-Queens constraints (row, diagonal, column)")
    print("2nd criterion: n-Queens constraints + bias (row, diagonal, column, no queen in central 2x2 square)")
    print("3rd criterion: bias (no queen in central 2x2 square)")
    print("----------------------------")
    first_criterion_valid_count = 0
    second_criterion_valid_count = 0
    third_criterion_valid_count = 0
    # reset counter prog. bar
    count = 0
    # solutions Series list (used to count unique generated assignments)
    sol_collection = []
    found = False
    # eval 1st criterion (row, column, diagonal)
    for full_sol in full_solutions_generated:
        #progress bar
        perc = count / float(num_solutions)
        sys.stdout.write("\r1st criterion evaluation - progress: [{0:50s}] {1:.1f}%".format('#' * int(perc * 50), perc * 100))
        full_sol_series = pd.Series(full_sol)
        # linear scan for duplicates among already-collected solutions
        for sol_in_col in sol_collection:
            if full_sol_series.equals(sol_in_col):
                found = True
                break
        if not found:
            sol_collection.append(full_sol_series)
        found = False
        # check if solution is valid for the 1st criterion
        valid = check_single_solution_feasibility(full_sol_series, full_solutions_df)
        if valid:
            first_criterion_valid_count += 1
        count += 1
    # reset counter prog. bar
    count = 0
    print()
    print("Num. unique assignments produced: %d" % len(sol_collection))
    # eval 2nd criterion (row, column, diagonal + no queen in 2x2 central box)
    for full_sol in full_solutions_generated:
        #progress bar
        perc = count / float(num_solutions)
        sys.stdout.write("\r2nd criterion evaluation - progress: [{0:50s}] {1:.1f}%".format('#' * int(perc * 50), perc * 100))
        full_sol_series = pd.Series(full_sol)
        # check if solution is valid for the 2nd criterion
        valid = check_single_solution_feasibility(full_sol_series, full_solutions_trainset_df)
        if valid:
            second_criterion_valid_count += 1
        count += 1
    # reset counter prog. bar
    count = 0
    print()
    # eval 3rd criterion (no queen in 2x2 central box)
    for full_sol in full_solutions_generated:
        #progress bar
        perc = count / float(num_solutions)
        sys.stdout.write("\r3rd criterion evaluation - progress: [{0:50s}] {1:.1f}%".format('#' * int(perc * 50), perc * 100))
        full_sol_arr = np.array(full_sol)
        # check if solution doesn't have any queen in 2x2 central positions
        # (board cells 27, 28, 35, 36 on the flattened 8x8 grid)
        if full_sol_arr[27] == 0 and full_sol_arr[28] == 0 and full_sol_arr[35] == 0 and full_sol_arr[36] == 0:
            third_criterion_valid_count += 1
        count += 1
    print()
    print("Generated solutions validity - 1st criterion: %.2f%%" % (first_criterion_valid_count / num_solutions * 100))
    print("Generated solutions validity - 2nd criterion: %.2f%%" % (second_criterion_valid_count / num_solutions * 100))
    print("Generated solutions validity - 3rd criterion: %.2f%%" % (third_criterion_valid_count / num_solutions * 100))
'''
Prints values of a tensor
Arguments:
tensor (tensor): a tensor
To print a tensor in generator/discriminator use:
my_utils.print_tensor(K.eval(tensor), tf.Session())
'''
def print_tensor(tensor, sess):
    """Print every value of `tensor` (up to 64 per dimension) via tf.print.

    A throwaway add op is run with a control dependency on the print op so
    the print actually executes inside the given session.
    """
    with sess.as_default():
        printer = tf.print(tensor, summarize=64)
        with tf.control_dependencies([printer]):
            doubled = tf.add(tensor, tensor)
        sess.run(doubled)
'''
Prints output tensors of a layer
Usage: x = my_utils.print_layer(x, "x=")
'''
def print_layer(layer, message, first_n=2, summarize=64):
    """Wrap `layer` in a Lambda that logs its tensors with tf.Print.

    Usage: x = my_utils.print_layer(x, "x=")
    """
    def _log(tensors):
        return tf.Print(tensors, [tensors],
                        message=message,
                        first_n=first_n,
                        summarize=summarize)
    return keras.layers.Lambda(_log)(layer)
'''
Cheks if a partial solutions for the N-Queens completion problem is feasible or not
by multiplying together all the 92 full solutions and the partial ones.
Arguments:
full_solutions_df: dataframe containing all the 92 full solutions
partial_sol: series containing a single partial solution
Returns:
feasible: True/False, based on the feasibility of the partial solution
'''
def check_single_solution_feasibility(partial_sol, full_solutions_df):
    """Check whether a partial N-Queens solution is feasible.

    A partial solution is feasible when at least one full solution keeps all
    of its queens: multiplying the partial solution element-wise with a full
    solution must preserve the partial solution's queen count.

    :param partial_sol: Series holding a single partial solution.
    :param full_solutions_df: dataframe of all 92 full solutions.
    :return: True/False, the feasibility of the partial solution.
    """
    # Cells >= 0.9 count as queens (tolerates slightly-smoothed values).
    target_queens = partial_sol[(partial_sol >= 0.9)].count()
    for _, full_sol in full_solutions_df.iterrows():
        overlap = partial_sol.multiply(full_sol)
        # Same queen count after masking -> every queen survived: feasible.
        if overlap[(overlap >= 0.9)].count() == target_queens:
            return True
    return False
'''
Performs predictions on a given dataframe
Then evaluates each prediction to check if it's feasible or unfeasible
Returns: feasibility rate for the given dataframe
'''
def check_solutions_feasibility(model, target_df, full_solutions_dataset):
    """Predict one extra assignment for each partial solution and measure
    how often the extended solution remains feasible.

    :param model: model whose predictions supply the next assignment.
    :param target_df: dataframe of partial solutions to extend.
    :param full_solutions_dataset: file with the 92 full solutions.
    :return: (feasibility_rate_global, feas_ratios) — the overall ratio and
        a per-queen-count dict of ratios.

    NOTE(review): relies on predict_proba_v2, which is not defined in this
    section of the module — confirm it exists elsewhere in the file.
    """
    #counter for feasible and unfeasible solutions
    tot_unfeasible_count = 0
    tot_feasible_count = 0
    queens_count = Counter()
    #dict for feasibility ratios of different number of filled cells
    feas_ratios= {}
    #length of dataframe for progress bar
    size_df= len(target_df.index)
    #counter for progress bar and prediction selection
    count = 0
    #counter for illegal queens (useful if G does not have the masking layer)
    illegal_queens_count = Counter()
    #load the 92 full solutions, for the N-Queens problem, into a dataframe
    full_solutions_df = pd.read_fwf(full_solutions_dataset, widths=[1] * 64, header=None)
    # deep copy so the caller's dataframe is not mutated by the assignments below
    df_copy = target_df.copy(deep=True)
    #predict assignments for all partial solutions provided
    predictions = predict_proba_v2(model, df_copy)
    #TRAINING data
    for index, solution in df_copy.iterrows():
        #progress bar
        perc = count / float(size_df)
        sys.stdout.write("\rFeasibility check - progress: [{0:50s}] {1:.1f}%".format('#' * int(perc * 50), perc * 100))
        #get prediction corresponding to current solution
        prediction = predictions[count]
        #increase counter
        count +=1
        #count the total number of queens in the partial solution
        queens_number = solution[(solution >= 0.9)].count()
        #get argument with maximum value in prediction -> represents position of next assignment
        selected_assignment = np.argmax(prediction)
        if solution[selected_assignment] <= 0:
            #apply assignment to a copy of the partial solution
            solution[selected_assignment] = 1
        else:
            # predicted cell already holds a queen: count as illegal + unfeasible
            illegal_queens_count[str(queens_number)] += 1
            tot_unfeasible_count += 1
            queens_count["UF" + str(queens_number)] += 1
            continue
        #check if partial solution is feasible or not
        is_feasible = check_single_solution_feasibility(solution, full_solutions_df)
        if is_feasible:
            tot_feasible_count += 1
            queens_count["F" + str(queens_number)] += 1
        else:
            tot_unfeasible_count += 1
            queens_count["UF" + str(queens_number)] += 1
    for key, value in illegal_queens_count.items():
        print("*** Queens assigned to a busy position - %s queens: %d ***" % (key, value))
    #compute feasibility ratio
    tot_solutions = tot_feasible_count + tot_unfeasible_count
    feasibility_rate_global = (tot_feasible_count / tot_solutions)
    print()
    print("Feasibility ratio: %f" % feasibility_rate_global)
    #compute feasibility ratio for different number of filled cells
    for num_q in range(0,8):
        feas_n = queens_count["F" + str(num_q)]
        unfeas_n = queens_count["UF" + str(num_q)]
        tot = feas_n + unfeas_n
        print("Total n. of %d queens: %d" % (num_q, tot))
        if tot != 0:
            feas_ratio = (feas_n / tot)
        else:
            #not found any solution with num_q number of queens -> assume 1.0 ratio?
            feas_ratio = 0
        #add ratio to dict
        feas_ratios[num_q] = feas_ratio
        print("Feasibility ratio for %d queens: %f" % (num_q, feas_ratio))
        print("------------------")
    return feasibility_rate_global, feas_ratios
'''
Evaluates Generator and Full Generator (G+Lambda), to show how much the G can improve with the current Lambda layer
'''
def evaluate_generator_training_ability(generator, generator_full):
    """Measure how a short training run of the full model (G + Lambda)
    changes the generator's feasibility performance.

    :param generator: stand-alone generator model, evaluated via
        ``check_solutions_feasibility`` before and after training.
    :param generator_full: generator with the Lambda layer attached; this is
        the model actually trained (and evaluated) here.

    Disabled alternative data loading kept for reference::

        testset_gener = load_GAN_data(dataset="DS_GENERATOR/DS.A.NEW.UNIQUES.B.4.txt", smoothing=False)
        x_trains = load_GAN_data(dataset = "DS_GENERATOR/DS.A.NEW.UNIQUES.B.4.txt", smoothing=False)
        y_trains = load_GAN_data(dataset = "DS_GAN/DS.FEASIBLE.UNIQUES.B.txt", smoothing=False)
        # split dataset into training and validation set
        x_train, x_val, y_train, y_val = train_test_split(x_trains, y_trains, test_size=1/3)
    """
    x_train = load_GAN_data(dataset = "DS_GENERATOR/DS.A.NEW.UNIQUES.B.4.txt", smoothing=True)
    # every training sample is labelled 1 for the full model's binary target
    y_train = np.ones(shape=(x_train.shape[0], 1))
    # evaluate G BEFORE training:
    # check feasibility of assignments produced by the generator
    feasibility_rate, feas_ratios = check_solutions_feasibility(generator, pd.DataFrame(x_train[:1000]), "DS.FULL.SOLUTIONS.txt")
    # evaluate FULL G
    ### TODO: create proper method
    #feasibility_rate, feas_ratios = check_solutions_feasibility_fullg(generator_full, pd.DataFrame(testset_gener[:1000]), "DS.FULL.SOLUTIONS.txt")
    loss, acc = generator_full.evaluate(x_train, y_train)
    print("G loss: %f, G acc: %f" % (loss, acc))
    # TensorBoard callback for training diagnostics.
    # NOTE(review): validation_data below is a slice of the training data,
    # not a held-out set — confirm this is intentional.
    monitor_func = TensorBoard(log_dir='./Risultati/tensorboard_data',
                               histogram_freq=1, batch_size=32, write_graph=False, write_grads=True, write_images=False, update_freq='epoch')
    generator_full.fit(x_train, y_train, validation_data=(x_train[:200], y_train[:200]), epochs=5, callbacks=[monitor_func])
    loss, acc = generator_full.evaluate(x_train, y_train)
    print("G loss: %f, G acc: %f" % (loss, acc))
    # evaluate G AFTER training:
    # check feasibility of assignments produced by the generator
    feasibility_rate, feas_ratios = check_solutions_feasibility(generator, pd.DataFrame(x_train[:1000]), "DS.FULL.SOLUTIONS.txt")
    # evaluate FULL G
    ### TODO: create proper method
    #feasibility_rate, feas_ratios = check_solutions_feasibility_fullg(generator_full, pd.DataFrame(testset_gener[:1000]), "DS.FULL.SOLUTIONS.txt")
'''
Plots feasibility ratios for training, validation, and test sets in a single plot
'''
def plot_feasibility_ratios(feas_ratios_train, feas_ratios_val, feas_ratios_test):
    """Plot feasibility ratio vs. number of filled cells for three datasets.

    :param feas_ratios_train: dict {num filled cells -> ratio} for training data
    :param feas_ratios_val: same mapping for the validation data
    :param feas_ratios_test: same mapping for the test data
    """
    plt.xlabel("Num. filled cells")
    plt.ylabel("Feas. ratio")
    # One curve per dataset; sort by key so the x axis increases monotonically.
    for ratios, label, color in ((feas_ratios_train, "training", "blue"),
                                 (feas_ratios_val, "validation", "gold"),
                                 (feas_ratios_test, "test", "red")):
        pairs = sorted(ratios.items())   # sorted by key, list of (x, y) tuples
        xs, ys = zip(*pairs)             # unpack pairs into two tuples
        plt.plot(xs, ys, label=label, c=color)
    plt.legend()
    plt.grid()
    # FIX: the `ymin` keyword was removed from pyplot.ylim in Matplotlib 3.0;
    # `bottom` is the supported keyword with identical meaning.
    plt.ylim(bottom=0)
    plt.show()
'''
Creates logger for GAN network, that prints both on console and on file
File has timestamp as its name
'''
def create_logger_GAN():
    """Configure root logging to a timestamped file and echo INFO-level
    records to the console; returns the ``logging`` module for convenience."""
    stamp = datetime.now().strftime('%Y%m%d-%H.%M')
    logfile = "Risultati/Logfile GAN/GAN_test-" + stamp + ".txt"
    logging.basicConfig(level=logging.DEBUG,
                        format='%(message)s',
                        datefmt='%m-%d %H:%M',
                        filename=logfile,
                        filemode='w')
    # Mirror INFO-and-above records to the console via the root logger.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    logging.getLogger('').addHandler(console_handler)
    return logging
"""
Enhanced prediction method for SVM, takes in consideration also missing class in Y train set
"""
def predict_proba_v2(svm, dataset):
    """Return class probabilities padded to all 64 board positions.

    scikit-learn's ``predict_proba`` only emits columns for the classes seen
    during fitting; this pads the output with zero columns for absent classes
    so every prediction row has exactly one entry per board cell.

    :param svm: fitted classifier exposing ``predict_proba`` and ``classes_``.
    :param dataset: samples to score, forwarded unchanged to ``predict_proba``.
    :returns: ndarray of shape (n_samples, 64).
    """
    # Explicitly enumerate the possible class labels (cells 0..63); the model
    # will not output predictions for classes missing from its training data.
    all_classes = np.arange(64)
    probs = svm.predict_proba(dataset)
    # FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` (float64) is the documented replacement.
    proba_ordered = np.zeros((probs.shape[0], all_classes.size), dtype=float)
    sorter = np.argsort(all_classes)  # http://stackoverflow.com/a/32191125/395857
    # Map each fitted class to its column in the padded output.
    idx = sorter[np.searchsorted(all_classes, svm.classes_, sorter=sorter)]
    proba_ordered[:, idx] = probs
    return proba_ordered
'''
Test masking Lambda layer
'''
def test_masking():
    """Exercise the generator's masking Lambda layer on two hand-built boards."""
    # Simulated INPUT values: two flattened 8x8 boards, 1 marks a queen.
    board_a = np.zeros(64)
    board_a[[1, 28, 45, 56]] = 1
    board_b = np.zeros(64)
    board_b[[13, 63]] = 1
    # Conversion to a float32 tensor of shape (2, 64).
    inputs = tf.convert_to_tensor(np.stack([board_a, board_b]), dtype=tf.float32)
    # Simulated PARTIAL OUTPUT values: each row is simply 0..63.
    partial = np.array([list(range(64)), list(range(64))])
    par_outputs = tf.convert_to_tensor(partial, dtype=tf.float32)
    # Run the generator's masking layer and print the masked result.
    masked_out = generator.masking([inputs, par_outputs])
    masked_out = print_layer(masked_out, "masked_out=")
'''
Test merging Lambda layer
'''
def test_merging():
    """Exercise the merging Lambda layer on two hand-built example boards."""
    # Creation of simulated INPUT values: two flattened 8x8 boards where a 1
    # marks an already-assigned queen.
    val_in = np.array([[0, 1, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 1, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 1, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0,
                        1, 0, 0, 0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 1, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 1]])
    # Conversion to tensor.
    inputs = tf.convert_to_tensor(val_in, dtype=tf.float32)
    # Creation of simulated PARTIAL OUTPUT values: raw (unnormalised)
    # per-cell assignment scores for the same two boards.
    assignments = np.array([[0.45, 0.3, 0.0005, 0, 0, 0.6, 0, 0,
                             0, 0, 0, 0, 0, 0, 0, 0,
                             0, 0, 0, 0, 0, 0, 0, 0,
                             0, 0, 0, 0.5, 0, 0, 0, 0,
                             0, 0, 0, 0, 0, 0, 0, 0,
                             0, 0, 0, 0, 0, 0, 0, 0,
                             0, 0, 2.7, 0, 0, 0, 0, 0,
                             0, 1.4, 0, 0, 2.2, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0,
                             0, 0.55, 0, 0, 0, 0, 0.3, 0,
                             0, 0, 3.1, 0, 0, 0, 0, 0,
                             0, 0, 0, 0, 0, 0, 0.1, 0,
                             0, 0, 0, 2.5, 0, 0, 0, 0,
                             3.7, 0, 0, 0, 0, 0, 0, 0,
                             0, 0, 0, 0, 0, 0, 0, 0,
                             0, 0, 0, 0.00064, 0, 0, 0, 0]])
    # Conversion to tensor.
    assignments_tensor = tf.convert_to_tensor(assignments, dtype=tf.float32)
    # Run the merging layer under test and print the evaluated result.
    # NOTE(review): `merging_test`, `print_tensor` and `tf.Session` (TF1-style)
    # come from module scope — confirm they are defined before running.
    merged = merging_test([inputs, assignments_tensor])
    print_tensor(K.eval(merged), tf.Session())
""" The user enters a string of several words separated by spaces.
Print each word on its own line, with the lines numbered.
If a word is long, print only its first 10 letters. """
users_str = input('Введите строку из нескольких слов, рахделенных пробелами: ')
# split() with no argument collapses runs of whitespace, so repeated spaces
# do not yield empty "words" (split(' ') would).
words = users_str.split()
# FIX: the task requires the output lines to be numbered, which the original
# omitted; enumerate from 1 and truncate each word to 10 characters via the
# precision field of the format spec.
for number, word in enumerate(words, start=1):
    print(f'{number}. {word:.10}')
|
# Copyright (c) 2018 Amdocs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
import yaml
def get_definition_list():
    """Load every YAML file shipped alongside this package and return the
    parsed documents as a list."""
    package = __name__[:__name__.rfind(".")]
    definitions = []
    for resource in pkg_resources.resource_listdir(package, '.'):
        if not resource.endswith(".yaml"):
            continue
        # TODO(xiaohhui): Should add exception handler to inform user
        # of potential error.
        with pkg_resources.resource_stream(package, resource) as stream:
            definitions.append(yaml.safe_load(stream))
    return definitions
|
__author__ = 'Justin'
import googlemaps
from random import choice
import geojson
import networkx as nx
import math
from datetime import datetime, timedelta
import gmapsKeys
from geopy.distance import vincenty as latlondist

# Load API Keys
APIkeys = gmapsKeys.keys('keys.txt','keyusages.txt','keydates.txt',2500)
key = APIkeys.getKey()
APIkeys.updateKey(key,10)
APIkeys.saveKeys()
print('Given Key',key)

# Load Network from File (FIX: the original called `fh.close` without
# parentheses, which never closed the file; `with` handles it correctly)
with open("OSMIntersections.gexf", 'rb') as fh:
    G = nx.read_gexf(fh)
print('Number of Graph Nodes',len(G.nodes()))
print('Number of Graph Edges',len(G.edges()))

# Set Network Edge Weights
# for edgetuple in G.edges():
#     G[edgetuple[0]][edgetuple[1]]['weight'] = G[edgetuple[0]][edgetuple[1]]['basetime']

# Find k Unique Routes
k = 5
distancelimit = 3  # miles: minimum straight-line separation of the endpoints
lons = nx.get_node_attributes(G,'lon')
lats = nx.get_node_attributes(G,'lat')

# Draw random origin/destination pairs until they are far enough apart.
# list() makes the node collection indexable for random.choice on both
# networkx 1.x and 2.x.
nodesdist = 0
while nodesdist < distancelimit:
    randomnodes = [choice(list(G.nodes())), choice(list(G.nodes()))]
    origin = randomnodes[0]
    destination = randomnodes[1]
    nodesdist = latlondist([lats[origin],lons[origin]],[lats[destination],lons[destination]]).miles
print('Source:',[lats[randomnodes[0]],lons[randomnodes[0]]])
print('Destination',[lats[randomnodes[1]],lons[randomnodes[1]]])

# FIX: `H = G` only aliased the graph (the comment claimed a copy), so the
# node removals below silently mutated G; take a real copy.
H = G.copy()  # Make copy of Network
subgraphnodes = []
paths = []
times = []
for i in range(k):
    path = nx.shortest_path(H,source = randomnodes[0],target = randomnodes[1],weight = 'weight')
    pathsubgraph = G.subgraph(path)
    time = nx.shortest_path_length(pathsubgraph,path[0],path[-1],weight = 'basetime')
    times.append(time)
    paths.append(path)
    subgraphnodes.extend(path)
    # Knock out the middle third of this path so the next search is forced
    # onto a different route.
    mid = int(math.floor(len(path)/2))
    delta = int(math.floor(len(path)/6))
    H.remove_nodes_from(path[mid-delta:mid+delta])

# Print Paths
for path,time in zip(paths,times):
    print(path)
    print('Path Time:',time,'(s)')

# Export Routes (geoJSON format)
for index, path in enumerate(paths):
    print('Path'+str(index),path)
    Features = [geojson.Feature(geometry=geojson.Point((lons[node], lats[node])))
                for node in path]
    Collection = geojson.FeatureCollection(Features)
    dump = geojson.dumps(Collection)
    with open('Path'+str(index)+'.txt', "w") as text_file:
        text_file.write(dump)

# Sample Waypoints from Paths (max 23 allowed)
# NOTE: paths with <= numwaypoints nodes are skipped, as in the original.
numwaypoints = 3
sampledpaths = []
for path in paths:
    if len(path) > numwaypoints:
        newpath = []
        # FIX: range() requires an integer step; `len(path)/numwaypoints` is a
        # float on Python 3 (TypeError). Floor-divide and clamp to >= 1.
        step = max(1, len(path)//numwaypoints)
        for i in range(1, len(path), step):
            string = ('via:'+'%.5f' % lats[path[i]]) + ',' + ('%.5f' % lons[path[i]])
            newpath.append(string)
        # FIX: the original tested `~(destination in newpath)`, but `~` is
        # bitwise NOT (~True == -2, ~False == -1, both truthy) — use `not in`.
        if destination not in newpath:
            string = ('%.5f' % lats[destination]) + ',' + ('%.5f' % lons[destination])
            newpath.append(string)
        string = ('%.5f' % lats[origin]) + ',' + ('%.5f' % lons[origin])
        newpath.insert(0,string)
        sampledpaths.append(newpath)

# Analyze Paths
# Find Congestion and Distances for Each Path
Pathbasetimes = []
Pathfulltimes = []
Pathtraffictimes = []
Pathlengths = []
# SECURITY NOTE(review): an API key is hard-coded here even though gmapsKeys
# already supplied `key` above — confirm which credential should be used and
# move it out of source control.
gmaps = googlemaps.Client(key='AIzaSyAVf9cLmfR52ST0VZcFsf-L-HynMTCzZEM')
now = datetime.now()
for path in sampledpaths:
    origin = path[0]
    destination = path[-1]
    waypoints = path[1:-1]
    directions_result = gmaps.directions(origin,
                                         destination,
                                         mode="driving", waypoints = waypoints,
                                         departure_time=now, traffic_model = 'best_guess')
    # traveltime = directions_result[0]['legs'][0]['duration']['value']
    fulltime = directions_result[0]['legs'][0]['duration_in_traffic']['value'] #time in seconds
    # traffictime = fulltime-traveltime #time in seconds
    totaldistance = directions_result[0]['legs'][0]['distance']['value'] #distance in meters
    # Pathbasetimes.append(traveltime)
    # Pathtraffictimes.append(traffictime)
    Pathfulltimes.append(fulltime)
    Pathlengths.append(totaldistance)

# FIX: datetime(now.year, now.month, now.day+1, 4, 0) crashes on the last day
# of a month; timedelta arithmetic handles month/year rollover.
tomorrow = now + timedelta(days=1)
later = datetime(tomorrow.year, tomorrow.month, tomorrow.day, 4, 0)
for path in sampledpaths:
    origin = path[0]
    destination = path[-1]
    waypoints = path[1:-1]
    directions_result = gmaps.directions(origin,
                                         destination,
                                         mode="driving", waypoints = waypoints,
                                         departure_time=later, traffic_model = 'best_guess')
    basetime = directions_result[0]['legs'][0]['duration_in_traffic']['value'] #time in seconds
    Pathbasetimes.append(basetime)

for full,base in zip(Pathfulltimes,Pathbasetimes):
    traffictime = full-base
    Pathtraffictimes.append(traffictime)
print('Times:',Pathbasetimes)
print('Times due to traffic:', Pathtraffictimes)
print('Path Distance:',Pathlengths)
|
import random
import cal_time

ls = list(range(100000))
random.shuffle(ls)

@cal_time.run_time
def shell_sort(ls):
    """Sort `ls` in place with Shell sort, halving the gap each pass."""
    gap = len(ls) // 2
    while gap >= 1:
        # Gapped insertion sort: shift larger gap-neighbours right, then
        # drop the saved value into its slot.
        for j in range(gap, len(ls)):
            val = ls[j]
            k = j
            while k - gap >= 0 and val < ls[k - gap]:
                ls[k] = ls[k - gap]
                k -= gap
            ls[k] = val
        gap //= 2

shell_sort(ls)
|
from django.contrib import admin
from django.urls import path
from .views import *

app_name = "home"

# Route table for the shop: catalogue pages, search, auth, and cart actions.
urlpatterns = [
    path('', HomeView.as_view(), name='home'),
    path('product/<slug>', ProductDetailView.as_view(), name='product'),
    path('search', SearchView.as_view(), name='search'),
    path('category/<slug>', CategoryView.as_view(), name='category'),
    path('brand/<name>', BrandView.as_view(), name='brand'),
    path('signup', register, name='signup'),
    path('signin', signin, name='signin'),
    path('mycart', ViewCart.as_view(), name='mycart'),
    path('add-to-cart/<slug>', cart, name='add-to-cart'),
    path('delete-cart/<slug>', deletecart, name='delete-cart'),
    path('delete-single-cart/<slug>', delete_single_cart, name='delete-single-cart'),
]
def corrections(x):
    """Describe whether ``x`` is strictly positive.

    :param x: a number.
    :returns: "<x> is more than zero." when x > 0, otherwise
        "<x> is equal to or less than zero."
    """
    if x > 0:
        template = '{} is more than zero.'
    else:
        template = '{} is equal to or less than zero.'
    return template.format(x)
'''
Correct this code so that it takes one argument, x, and returns "x is more than zero"
if x is positive (and nonzero), and otherwise, returns "x is equal to or less than zero."
In both cases, replace x with the actual value of x.
'''
|
from tkinter import *
from tkinter import ttk
from PIL import Image, ImageTk #import Image, ImageTk

# Demo window: an image canvas plus a name entry, three checkbuttons and
# Okay/Cancel buttons, laid out with the grid geometry manager.
root = Tk()
content = ttk.Frame(root, padding=(3,3,12,12))
imageFrame = ttk.Frame(content, borderwidth=5, relief="sunken", width=200, height=100)

# NOTE(review): hard-coded absolute path — this script only runs on a machine
# where the file exists; consider making it relative or configurable.
imagePILPath = '/home/yuanchueh/Documents/git/measureFromImage/car.png'
imagePIL = Image.open(imagePILPath)
# Tk-compatible photo image, kept in a module-level name and drawn below.
image = ImageTk.PhotoImage(imagePIL)
canvas = Canvas(imageFrame, height=200, width=200)
# Disabled resize experiment kept for reference:
# basewidth = 150
# wpercent = (basewidth / float(image.size[0]))
# hsize = int((float(image.size[1]) * float(wpercent)))
# print(hsize)
# image = image.resize((basewidth, hsize), Image.ANTIALIAS)
# photo = ImageTk.PhotoImage(image)
# item4 = canvas.create_image(100, 80, image=photo)
# imagePIL = ImageTk.PhotoImage(Image.open(file))
# Draw the image anchored at the canvas's top-left corner, then size the
# scroll region to everything on the canvas.
canvas.create_image(0,0,image=image,anchor="nw")
canvas.config(scrollregion=canvas.bbox(ALL))

# Form widgets.
namelbl = ttk.Label(content, text="Name")
name = ttk.Entry(content)

# Checkbutton state variables with their initial values.
onevar = BooleanVar()
twovar = BooleanVar()
threevar = BooleanVar()
onevar.set(True)
twovar.set(False)
threevar.set(True)

one = ttk.Checkbutton(content, text="One", variable=onevar, onvalue=True)
two = ttk.Checkbutton(content, text="Two", variable=twovar, onvalue=True)
three = ttk.Checkbutton(content, text="Three", variable=threevar, onvalue=True)
ok = ttk.Button(content, text="Okay")
cancel = ttk.Button(content, text="Cancel")

# Grid layout: canvas spans the top-left 3x2 cells, the name form sits to its
# right, and the checkbuttons/buttons fill row 3.
content.grid(column=0, row=0, sticky=(N, S, E, W))
canvas.grid(column=0, row=0, columnspan=3, rowspan=2, sticky=(N, S, E, W))
# canvas.grid(row=0, column=0, sticky=N+S+E+W)
namelbl.grid(column=3, row=0, columnspan=2, sticky=(N, W), padx=5)
name.grid(column=3, row=1, columnspan=2, sticky=(N, E, W), pady=5, padx=5)
one.grid(column=0, row=3)
two.grid(column=1, row=3)
three.grid(column=2, row=3)
ok.grid(column=3, row=3)
cancel.grid(column=4, row=3)

# Resize weights: the image columns (0-2) stretch more than the form columns.
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
content.columnconfigure(0, weight=3)
content.columnconfigure(1, weight=3)
content.columnconfigure(2, weight=3)
content.columnconfigure(3, weight=1)
content.columnconfigure(4, weight=1)
content.rowconfigure(1, weight=1)
root.mainloop()
|
# Generated by Django 3.0.6 on 2020-05-07 10:32
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated schema migration for the blog app: drops Post's
    created/published/slug fields and adds created_date/published_date."""

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='post',
            options={},
        ),
        migrations.RemoveField(
            model_name='post',
            name='created',
        ),
        migrations.RemoveField(
            model_name='post',
            name='published',
        ),
        migrations.RemoveField(
            model_name='post',
            name='slug',
        ),
        migrations.AddField(
            model_name='post',
            name='created_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='post',
            name='published_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic.base import TemplateView
from django.utils.decorators import method_decorator
from django.shortcuts import render_to_response, RequestContext
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-02-01 15:50
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the quicklook app: creates the
    per-day UserQuickLook record plus its one-to-one detail tables."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Alcohol',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('alcohol_day', models.CharField(blank=True, max_length=4)),
                ('alcohol_week', models.FloatField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='BikeStats',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avg_speed', models.FloatField(blank=True, null=True)),
                ('avg_power', models.FloatField(blank=True, null=True)),
                ('avg_speed_per_mile', models.FloatField(blank=True, null=True)),
                ('avg_cadence', models.FloatField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='ExerciseAndReporting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('workout_easy_hard', models.CharField(blank=True, choices=[('easy', 'Easy'), ('medium', 'Medium'), ('hard', 'Hard')], max_length=10)),
                ('workout_type', models.CharField(blank=True, choices=[('trademil run', 'Trademil Run'), ('outdoor run', 'Outdoor Run'), ('bike', 'Bike'), ('swim', 'Swim'), ('elliptical', 'Elliptical')], max_length=20)),
                ('workout_time', models.CharField(blank=True, max_length=10)),
                ('workout_location', models.TextField(blank=True)),
                ('workout_duration', models.CharField(blank=True, max_length=10)),
                ('maximum_elevation_workout', models.IntegerField(blank=True, null=True)),
                ('minutes_walked_before_workout', models.CharField(blank=True, max_length=10)),
                ('distance_run', models.FloatField(blank=True, null=True)),
                ('distance_bike', models.FloatField(blank=True, null=True)),
                ('distance_swim', models.FloatField(blank=True, null=True)),
                ('distance_other', models.FloatField(blank=True, null=True)),
                ('pace', models.CharField(blank=True, max_length=10)),
                ('avg_heartrate', models.TextField(blank=True)),
                ('avg_exercise_heartrate', models.FloatField(blank=True, null=True)),
                ('elevation_gain', models.IntegerField(blank=True, null=True)),
                ('elevation_loss', models.IntegerField(blank=True, null=True)),
                ('effort_level', models.PositiveIntegerField(blank=True, null=True)),
                ('dew_point', models.FloatField(blank=True, null=True)),
                ('temperature', models.FloatField(blank=True, null=True)),
                ('humidity', models.FloatField(blank=True, null=True)),
                ('temperature_feels_like', models.FloatField(blank=True, null=True)),
                ('wind', models.FloatField(blank=True, null=True)),
                ('hrr', models.CharField(blank=True, max_length=10)),
                ('hrr_start_point', models.IntegerField(blank=True, null=True)),
                ('hrr_beats_lowered', models.IntegerField(blank=True, null=True)),
                ('sleep_resting_hr_last_night', models.IntegerField(blank=True, null=True)),
                ('vo2_max', models.FloatField(blank=True, null=True)),
                ('running_cadence', models.IntegerField(blank=True, null=True)),
                ('nose_breath_prcnt_workout', models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
                ('water_consumed_workout', models.FloatField(blank=True, null=True)),
                ('chia_seeds_consumed_workout', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(20)])),
                ('fast_before_workout', models.CharField(blank=True, choices=[('yes', 'Yes'), ('no', 'No')], max_length=3)),
                ('pain', models.CharField(blank=True, choices=[('yes', 'Yes'), ('no', 'No')], max_length=3)),
                ('pain_area', models.TextField(blank=True)),
                ('stress_level', models.CharField(blank=True, choices=[('low', 'low'), ('medium', 'medium'), ('high', 'high')], max_length=6)),
                ('sick', models.CharField(blank=True, choices=[('yes', 'Yes'), ('no', 'No')], max_length=3)),
                ('drug_consumed', models.CharField(blank=True, choices=[('yes', 'Yes'), ('no', 'No')], max_length=3)),
                ('drug', models.TextField(blank=True)),
                ('medication', models.TextField(blank=True)),
                ('smoke_substance', models.CharField(blank=True, choices=[('yes', 'Yes'), ('no', 'No')], max_length=3)),
                ('exercise_fifteen_more', models.CharField(blank=True, choices=[('yes', 'Yes'), ('no', 'No')], max_length=3)),
                ('workout_elapsed_time', models.CharField(blank=True, max_length=10)),
                ('timewatch_paused_workout', models.CharField(blank=True, max_length=10)),
                ('exercise_consistency', models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(7)])),
                ('heartrate_variability_stress', models.IntegerField(blank=True, null=True)),
                ('fitness_age', models.IntegerField(blank=True, null=True)),
                ('workout_comment', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Food',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('prcnt_non_processed_food', models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
                ('non_processed_food', models.TextField(blank=True)),
                ('processed_food', models.TextField(blank=True)),
                ('diet_type', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Grades',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('overall_health_grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('F', 'F'), ('N/A', 'N/A')], max_length=3)),
                ('overall_health_gpa', models.FloatField(blank=True, null=True)),
                ('movement_non_exercise_steps_grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('F', 'F'), ('N/A', 'N/A')], max_length=3)),
                ('movement_consistency_grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('F', 'F'), ('N/A', 'N/A')], max_length=3)),
                ('avg_sleep_per_night_grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('F', 'F'), ('N/A', 'N/A')], max_length=3)),
                ('avg_sleep_per_night_gpa', models.FloatField(blank=True, null=True)),
                ('exercise_consistency_grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('F', 'F'), ('N/A', 'N/A')], max_length=3)),
                ('exercise_consistency_score', models.FloatField(blank=True, null=True)),
                ('overall_workout_grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('F', 'F'), ('N/A', 'N/A')], max_length=3)),
                ('overall_workout_gpa', models.FloatField(blank=True, null=True)),
                ('workout_duration_grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('F', 'F'), ('N/A', 'N/A')], max_length=3)),
                ('workout_duration_gpa', models.FloatField(blank=True, null=True)),
                ('workout_effortlvl_grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('F', 'F'), ('N/A', 'N/A')], max_length=3)),
                ('workout_effortlvl_gpa', models.FloatField(blank=True, null=True)),
                ('avg_exercise_hr_grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('F', 'F'), ('N/A', 'N/A')], max_length=3)),
                ('avg_exercise_hr_gpa', models.FloatField(blank=True, null=True)),
                ('prcnt_unprocessed_food_consumed_grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('F', 'F'), ('N/A', 'N/A')], max_length=3)),
                ('alcoholic_drink_per_week_grade', models.CharField(blank=True, choices=[('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('F', 'F'), ('N/A', 'N/A')], max_length=3)),
                ('sleep_aid_penalty', models.FloatField(blank=True, null=True)),
                ('ctrl_subs_penalty', models.FloatField(blank=True, null=True)),
                ('smoke_penalty', models.FloatField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Sleep',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sleep_per_wearable', models.CharField(blank=True, max_length=10)),
                ('sleep_per_user_input', models.CharField(blank=True, max_length=10)),
                ('sleep_aid', models.CharField(blank=True, choices=[('yes', 'Yes'), ('no', 'No')], max_length=3)),
                ('sleep_bed_time', models.CharField(blank=True, max_length=20)),
                ('sleep_awake_time', models.CharField(blank=True, max_length=20)),
                ('deep_sleep', models.CharField(blank=True, max_length=10)),
                ('light_sleep', models.CharField(blank=True, max_length=10)),
                ('awake_time', models.CharField(blank=True, max_length=10)),
                ('sleep_comments', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Steps',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('non_exercise_steps', models.PositiveIntegerField(blank=True, null=True)),
                ('exercise_steps', models.PositiveIntegerField(blank=True, null=True)),
                ('total_steps', models.PositiveIntegerField(blank=True, null=True)),
                ('floor_climed', models.PositiveIntegerField(blank=True, null=True)),
                ('movement_consistency', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='SwimStats',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pace_per_100_yard', models.FloatField(blank=True, null=True)),
                ('total_strokes', models.IntegerField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='UserQuickLook',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateField()),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Attach each detail table to UserQuickLook via a OneToOneField.
        migrations.AddField(
            model_name='swimstats',
            name='user_ql',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='swim_stats_ql', to='quicklook.UserQuickLook'),
        ),
        migrations.AddField(
            model_name='steps',
            name='user_ql',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='steps_ql', to='quicklook.UserQuickLook'),
        ),
        migrations.AddField(
            model_name='sleep',
            name='user_ql',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='sleep_ql', to='quicklook.UserQuickLook'),
        ),
        migrations.AddField(
            model_name='grades',
            name='user_ql',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='grades_ql', to='quicklook.UserQuickLook'),
        ),
        migrations.AddField(
            model_name='food',
            name='user_ql',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='food_ql', to='quicklook.UserQuickLook'),
        ),
        migrations.AddField(
            model_name='exerciseandreporting',
            name='user_ql',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='exercise_reporting_ql', to='quicklook.UserQuickLook'),
        ),
        migrations.AddField(
            model_name='bikestats',
            name='user_ql',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='bike_stats_ql', to='quicklook.UserQuickLook'),
        ),
        # One quicklook record per user per day.
        migrations.AlterUniqueTogether(
            name='userquicklook',
            unique_together=set([('user', 'created_at')]),
        ),
    ]
|
#!/usr/local/bin/python3
import os
import re
import sys

# Make the parent directory importable so `download` resolves when run
# directly from this folder.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from download import app

if __name__ == '__main__':
    # Strip setuptools wrapper suffixes from argv[0] ("-script.pyw" / ".exe").
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    # "test"/"once" run a single pass; anything else starts the main loop.
    if len(sys.argv) > 1 and sys.argv[1] in ('test', 'once'):
        sys.exit(app.run_once())
    sys.exit(app.main())
# In Python, you can use the built-in function "reversed", which returns a
# new reversed iterator over the data, or call the .reverse() method, which
# modifies the original list in place and returns None.
a = ["apple", "bat", "cat"]
print(a)
# FIX: the original did `c = a.reverse()`, binding None to an unused name
# (list.reverse() returns None, not the list); call it for its side effect.
a.reverse()
print(a)
|
s = input()
# Append an even-parity bit: after this, the count of '1' characters is even.
parity_bit = '0' if s.count('1') % 2 == 0 else '1'
s += parity_bit
print(s)
# -*- coding: utf-8 -*-
"""Module responsible for setting up and handling API endpoints"""
from geopy.distance import great_circle
from flask import abort, jsonify, make_response
from flask_restful import Resource, reqparse, fields, marshal
from tour.database import db
from tour.extensions import auth
from tour.public.models import Point
from tour.user.models import User
# Marshalling template for point payloads returned by the list/detail APIs.
point_fields = {
    'name': fields.String,
    'category': fields.String,
    'public': fields.Boolean,
    'latitude': fields.String,
    'longitude': fields.String,
}

# Same template plus the computed distance, used by the "points near" API.
point_fields_near = {
    'name': fields.String,
    'category': fields.String,
    'public': fields.Boolean,
    'latitude': fields.String,
    'longitude': fields.String,
    'distance': fields.String
}
@auth.verify_password
def verify_password(username, password):
    """Validate HTTP basic-auth credentials against the user table."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        return False
    return bool(user.check_password(password))
@auth.error_handler
def unauthorized():
    """Build the JSON 403 response sent on failed authentication."""
    payload = jsonify({'message': 'Unauthorized access'})
    return make_response(payload, 403)
class PointsListAPI(Resource):
    """API resource handling the points collection (list and create)."""
    decorators = [auth.login_required]

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('name', type=str, required=True,
                                   help='No point name provided', location='json')
        self.reqparse.add_argument('category', type=str, required=True,
                                   help='No point category provided', location='json')
        self.reqparse.add_argument('public', type=bool, required=False, location='json')
        self.reqparse.add_argument('latitude', type=str, required=False, location='json')
        self.reqparse.add_argument('longitude', type=str, required=False, location='json')
        super(PointsListAPI, self).__init__()

    def get(self):
        """GET method handler: return every stored point."""
        points = [{
            'name': point.name,
            'category': point.category,
            'public': point.public,
            'latitude': point.latitude,
            'longitude': point.longitude
        } for point in Point.query.all()]
        return {'points': [marshal(point, point_fields) for point in points]}

    def post(self):
        """POST method handler: create a point and echo it back (201)."""
        args = self.reqparse.parse_args()
        # FIX: the original had trailing commas after the `name` and `category`
        # assignments, turning both into 1-tuples that were then stored in the
        # database (and unwrapped with name[0] in the response).
        name = args['name']
        category = args['category']
        public = args['public'] if args['public'] is not None else False
        latitude = args['latitude'] if args['latitude'] is not None else '0'
        longitude = args['longitude'] if args['longitude'] is not None else '0'
        point = Point(name=name,
                      category=category,
                      public=public,
                      latitude=latitude,
                      longitude=longitude)
        db.session.add(point)
        db.session.commit()
        point = {
            'name': name,
            'category': category,
            'public': public,
            'latitude': latitude,
            'longitude': longitude
        }
        return {'point': marshal(point, point_fields)}, 201
class PointsAPI(Resource):
    """API resource handling a single point addressed by id."""
    decorators = [auth.login_required]

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('name', type=str, location='json')
        self.reqparse.add_argument('category', type=str, location='json')
        self.reqparse.add_argument('public', type=bool, location='json')
        # CONSISTENCY FIX: coordinates are strings everywhere else in this
        # module (the model fields and PointsListAPI), not ints.
        self.reqparse.add_argument('latitude', type=str, location='json')
        self.reqparse.add_argument('longitude', type=str, location='json')
        super(PointsAPI, self).__init__()

    def get(self, id):
        """GET method handler: fetch one point by id, 404 if absent."""
        query = Point.query.filter_by(id=id).first()
        if query is None:
            abort(404)
        point = {
            'name': query.name,
            'category': query.category,
            'public': query.public,
            'latitude': query.latitude,
            'longitude': query.longitude
        }
        return {'point': marshal(point, point_fields)}

    def delete(self, id):
        """DELETE method handler: remove one point by id, 404 if absent."""
        query = Point.query.filter_by(id=id).first()
        if query is None:
            abort(404)
        db.session.delete(query)
        db.session.commit()
        return {'result': True}
class PointsNearAPI(Resource):
    """
    API resource to handle endpoints and http methods to find nearby points
    given a location.
    """
    decorators = [auth.login_required]

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('latitude', type=str, required=True, location='json')
        self.reqparse.add_argument('longitude', type=str, required=True, location='json')
        super(PointsNearAPI, self).__init__()

    def post(self):
        """POST method handler: return points within max_distance km of the
        posted location, each with its great-circle distance."""
        max_distance = 5.0  # search radius in kilometers
        args = self.reqparse.parse_args()
        latitude = args['latitude']
        longitude = args['longitude']
        current_location = (float(latitude), float(longitude))
        points = []
        for point in Point.query.all():
            target = (float(point.latitude), float(point.longitude))
            distance = great_circle(current_location, target).kilometers
            if distance <= max_distance:
                # BUG FIX: report each matched point's own coordinates; the
                # original echoed the queried location for every result.
                points.append({
                    'name': point.name,
                    'category': point.category,
                    'public': point.public,
                    'latitude': point.latitude,
                    'longitude': point.longitude,
                    'distance': distance
                })
        return {'points': [marshal(point, point_fields_near) for point in points]}
|
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
import sklearn.model_selection

# Load the abalone data set and encode the categorical Sex column numerically.
data = pd.read_csv('/data-out/abalone.csv')
data.replace({'Sex': {'M': 1, 'F': -1, 'I': 0}}, inplace=True)
y = data['Rings']
X = data.drop(columns=['Rings'])

# Find the smallest forest size whose 5-fold CV R^2 exceeds 0.52.
anw = ''
# The CV splitter is loop-invariant -- build it once.
gener = sklearn.model_selection.KFold(n_splits=5, random_state=42, shuffle=True)
for k in range(1, 51):
    clf = RandomForestRegressor(random_state=1, n_estimators=k)
    # No explicit clf.fit here: cross_val_score clones and fits the estimator
    # itself, so the original pre-fit was redundant work.
    accuracy = np.mean(sklearn.model_selection.cross_val_score(clf, cv=gener, X=X, y=y, scoring='r2'))
    if round(accuracy, 2) > 0.52:
        print(accuracy, k)
        anw = str(k)
        break

# Persist the winning n_estimators (empty string if none qualified);
# the with-block closes the file, so no explicit close() is needed.
with open('/data-out/RandomForestCLF.txt', 'w') as f:
    f.write(str(anw))
|
import os
from PIL import Image
import numpy as np
import random
import re
import tensorflow as tf
from augmentationHelper import get_random_augment
# Field separator used in dataset map files ('<path>$<label>').
SPLIT_FACTOR = "$"


def image_name(image_path):
    """Build a '<parent-dir>_<basename>' identifier from a .jpg file path."""
    pattern = ".*[\\/|\\\](.*)[\\/|\\\](.*).jpg"
    match = re.match(pattern, image_path)
    return "_".join((match.group(1), match.group(2)))
def read_image(path, resize_image=(), augment=False):
    """Load an image file as a float32 numpy RGB array.

    Optionally applies a random augmentation and/or resizes to the
    (width, height) given in *resize_image*.
    """
    img = Image.open(path, 'r')
    if img.mode != 'RGB':
        img = img.convert('RGB')
    if augment:
        img = get_random_augment(img, resize_image)
    if resize_image:
        img = img.resize(resize_image, Image.NEAREST)
    return np.array(img).astype(np.float32)
def read_dataset_map(data_map_path, shuffle=False):
    """Read a dataset map file of '<image_path>$<label>' lines.

    :param data_map_path: path to the map file
    :param shuffle: shuffle the lines in place before parsing
    :returns: (images, labels) -- a tuple of image paths and an int numpy array
    """
    with open(data_map_path, "r") as lf:
        lines_list = lf.read().splitlines()
    if shuffle:
        random.shuffle(lines_list)
    lines = [line.split(SPLIT_FACTOR) for line in lines_list]
    images, labels = [], []
    if len(lines) > 0:
        images, labels = zip(*lines)
        labels = [int(label) for label in labels]
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the supported (and equivalent) dtype spelling.
    return images, np.array(labels).astype(int)
class DataLoader:
    """Batched image/label loader driven by a separator-delimited map file.

    The map file is read once (optionally shuffled); raw labels are remapped
    onto the contiguous range [0, cls_num).  Every path/label served through
    read_batch is logged and written to '<output_path>/<name>.txt' when the
    loader is garbage-collected.
    """
    def __init__(self, name, dataset_file, cls_num, input_size, output_path, augment=False):
        # Expected number of distinct classes in the map file (asserted below).
        self.classes_num = cls_num
        # (height, width) that every image is resized to.
        self.input_size = input_size
        self.augment = augment
        self.name = name
        self.output_path = output_path
        # Running log of everything served; flushed to disk in __del__.
        self.paths_logger = []
        self.labels_logger = []
        self.batch_idx = 0
        # (paths, labels) tuple, shuffled once at construction time.
        self.datasets = read_dataset_map(dataset_file, shuffle=True)
        unique_labels = np.unique(self.datasets[1])
        assert len(unique_labels) == cls_num
        # Remap the raw label values onto 0..cls_num-1 (np.unique is sorted).
        new_labels = np.arange(0, len(unique_labels))
        self.labels_map = dict(zip(unique_labels, new_labels))
        print(self.labels_map)
        # Cursor into the dataset and completed-epoch counter.
        self.batches_idx = 0
        self.epochs = 0
    def read_batch_with_details(self, batch_size):
        """Return (images, one_hot_labels, paths, labels) for the next batch,
        wrapping to the start of the data when the epoch is exhausted."""
        all_paths, all_labels = self.datasets
        # takes the next batch, if it finish the epoch it'll start new epoch
        indices = list(range(self.batches_idx, min(self.batches_idx + batch_size, len(all_paths))))
        if len(indices) < batch_size: # new epoch
            self.batches_idx = 0
            rest = batch_size - len(indices)
            indices += list(range(self.batches_idx, min(self.batches_idx + rest, len(all_paths))))
            self.epochs += 1
        # NOTE(review): after a wrap-around only `rest` items were taken from
        # the new epoch, yet the cursor still advances by the full batch_size,
        # which appears to skip items at each epoch boundary -- confirm intended.
        self.batches_idx += batch_size
        batch_images = np.zeros((batch_size, self.input_size[0], self.input_size[1], 3))
        paths = []
        labels = []
        b_idx = 0
        for i in indices:
            batch_images[b_idx, :, :, :] = read_image(all_paths[i], self.input_size, augment=self.augment)
            paths.append(all_paths[i])
            labels.append(self.labels_map[all_labels[i]])
            b_idx += 1
        hot_vecs = tf.keras.utils.to_categorical(np.array(labels), num_classes=self.classes_num)
        return batch_images, hot_vecs, paths, labels
    def read_batch(self, batch_size):
        """Return (images, one_hot_labels) and log the served paths/labels."""
        batch_images, hot_vecs, paths, labels = self.read_batch_with_details(batch_size)
        self.paths_logger += paths
        self.labels_logger += labels
        return batch_images, hot_vecs
    def __del__(self):
        # Dump the full serving log on destruction; same separator format as
        # the input map file.
        with open(os.path.join(self.output_path, "{}.txt".format(self.name)), 'w') as f:
            for i in range(len(self.paths_logger)):
                f.write("{}{}{}\n".format(self.paths_logger[i], SPLIT_FACTOR, self.labels_logger[i]))
|
#ipconfig getifaddr en0 get just my local ip for mac
#hostname -I get just my local ip for linux
import subprocess
import time
HOST_AND_HOSTNAME = {}
HOST_AND_MAC = {}
CURRENT_IP = ""
GATWAY = ""
def get_local_ip():
    """Populate the CURRENT_IP and GATWAY globals.

    Uses `ipconfig getifaddr en0`, so this only works on macOS (see the
    header comments for the Linux equivalent).
    """
    global CURRENT_IP
    global GATWAY
    out = subprocess.Popen(['ipconfig','getifaddr','en0'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout,stderr = out.communicate()
    CURRENT_IP = str(stdout.decode()).strip()
    # BUG FIX: derive the conventional x.y.z.1 gateway by rewriting only the
    # final octet.  The old str.replace(last_octet, '1') replaced the FIRST
    # occurrence anywhere in the address, corrupting IPs whose last octet
    # also appears earlier (e.g. 192.168.1.192 -> 1.168.1.192).
    octets = CURRENT_IP.split('.')
    octets[-1] = '1'
    GATWAY = '.'.join(octets)
def get_nmap_ping_scan():
    """Ping-scan the local /24 subnet with nmap and record results.

    Fills the HOST_AND_HOSTNAME global with {hostname: ip} pairs (or
    'no host name': ip when nmap reports no hostname).  Requires nmap
    and grep on PATH.
    """
    get_local_ip()
    global HOST_AND_HOSTNAME
    # BUG FIX: build the network address by rewriting only the last octet;
    # str.replace could clobber an earlier octet with the same value.
    octets = CURRENT_IP.split('.')
    octets[-1] = '1'
    host = '.'.join(octets) + '/24'
    hosts = []
    batcmd="nmap -T5 -sn "+host+" | grep 'Nmap'"
    out = subprocess.Popen(batcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    time.sleep(.5)
    stdout,stderr = out.communicate()# gets output and errors
    for x in stdout.decode().split('\n'):#loops through each line of the decoded output
        if "Nmap" in x:
            hosts.append(host)
            try:
                parser = x.split('for ')[1]
                if '(' in parser:# if it has a hostname
                    HOST_AND_HOSTNAME[parser.split(' (')[0]]=parser.split(' (')[1][:-1]#adds {hostname:ip}
                else:
                    HOST_AND_HOSTNAME['no host name']=parser#if no hostname give it a value of not host name
            except IndexError:
                # Line mentions "Nmap" but has no "for <host>" clause (e.g.
                # the scan summary line) -- nothing to record.  The old bare
                # except silently swallowed every exception type.
                pass
def get_mac(ip):
    """Look up *ip* in the ARP table and store MAC-like tokens in HOST_AND_MAC."""
    command = "arp -n " + ip
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, _errors = proc.communicate()
    # Any whitespace-separated token containing ':' is treated as a MAC.
    HOST_AND_MAC[ip] = [token for token in output.decode().split(' ') if ':' in token]
#get_mac('192.168.86.37')
|
# JTSK-350112
# raise_exc.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
# three different exception classes derived from Exception
class OwnException1(Exception):
    """First demo exception type."""


class OwnException2(Exception):
    """Second demo exception type."""


class OwnException3(Exception):
    """Third demo exception type."""
def something(choice):
    """Raise OwnException1/2/3 for choice 1/2/3; any other value returns normally."""
    if choice == 3:
        raise OwnException3(1.23)
    if choice == 2:
        raise OwnException2("Error has happened")
    if choice == 1:
        raise OwnException1(999)
def main():
    """Call something() with several choices; the first raised exception wins."""
    try:
        # 4 and 5 raise nothing, so OwnException1 (from choice 1) is the
        # first exception to fire; the later calls are never reached.
        for choice in (4, 5, 1, 2, 3):
            something(choice)
    except (OwnException1, OwnException2, OwnException3) as exc:
        print(exc)
    else:
        print("No exception occured")
    finally:
        print("Execution is finished")


main()
import os
# basedir = os.path.abspath(os.path.dirname(__file__))
# /home/mushcat/webnotebook
class config:
    """Base application configuration shared by all environments."""
    # Fall back to a hard-coded secret when the environment provides none.
    SECRET_KEY = os.environ.get('SECRET_KEY') or "r4Nd0mS5cRe7"
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    # Remember-me cookie lifetime, in seconds (one day).
    REMEMBER_COOKIE_DURATION = 86400

    @staticmethod
    def init_app(app):
        """Hook for app-specific initialisation; no-op in the base config."""
        pass
class DevConfig(config):
    """Development configuration: debug mode with a local SQLite database."""
    SQLALCHEMY_DATABASE_URI = 'sqlite:///data-dev.db'
    DEBUG = True
class TestConfig(config):
    """Testing configuration: enables the framework's testing mode."""
    TESTING = True
class Production(config):
    """Production configuration; currently identical to the base config."""
    pass
# Registry mapping environment names to configuration classes.
# NOTE: rebinding the name `config` here shadows the base config class of
# the same name defined above (the subclasses were already created, so the
# program still works).
config = {
    'development': DevConfig,
    'testing': TestConfig,
    'production': Production,
    'default': DevConfig,
}
# strftime/strptime format strings for date handling.
DATE_FMT = "%Y-%m-%d"
YEAR_FMT = "%Y"
# Help text for the download tool's command-line arguments.
START_DATE_HELP_STRING = "The start date for the data download in format 'YYYY-MM-DD'"
END_DATE_HELP_STRING = "The end date for the data download in format 'YYYY-MM-DD'"
FILE_PATH_HELP_STRING = "The file path to store the downloaded data. Must include filename and .pkl file extension."
GRID_MODEL_HELP_STRING = (
    "A string supported by powersimdata.input.grid.Grid.SUPPORTED_MODELS"
)
GRID_MODEL_DEFAULT = "usa_tamu"
# Interconnection names accepted for the region argument.
REGION_CHOICES = ["Texas", "Eastern", "Western"]
|
"""This is a substantially improved version of the older Interpreter.py demo
It creates a simple GUI JPython console window with simple history
as well as the ability to interupt running code (with the ESC key).
Like Interpreter.py, this is still just a demo, and needs substantial
work before serious use.
"""
class C:
    # NOTE(review): this block is syntactically invalid Python -- `return`
    # is not allowed directly in a class body, and `a` is undefined at class
    # scope.  It looks like a mangled fragment of a method that was collapsed
    # into the class body; it needs reconstruction from the original source
    # rather than a mechanical rewrite.
    if a<1:
        a=1
    return a
    # Essentially static class variables
    # ignore the newline above me
    # blort
    # foo
    protocol_version = "HTTP/1.0"
|
from Cython.Build import cythonize
from distutils.core import setup

# Build configuration: compile hello_world.pyx into a C extension module.
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# migrating to setuptools when the toolchain allows.
extensions = cythonize("hello_world.pyx")
setup(
    name="Hello world app",
    ext_modules=extensions,
)
#!/usr/bin/env python
# this is modified csdata.py
from __future__ import print_function
import fastjet as fj
import fjcontrib
import fjext
import fjtools
import tqdm
import argparse
import os
import numpy as np
import array
import copy
import random
import uproot
import pandas as pd
import time
from pyjetty.mputils import logbins
from pyjetty.mputils import MPBase
from pyjetty.mputils import BoltzmannEvent
from pyjetty.mputils import CEventSubtractor
from pyjetty.mputils import RTreeWriter
from pyjetty.mputils import DataIO, DataBackgroundIO
from pyjetty.mputils import fill_tree_data, JetAnalysis, JetAnalysisWithRho
from pyjetty.mputils import ColorS, pwarning, perror, pinfo, pdebug
from alice_efficiency import AliceChargedParticleEfficiency
import ROOT
ROOT.gROOT.SetBatch(True)
class EmbeddingOutput(MPBase):
	"""Manages the ROOT output file and TTree writers for the embedding run.

	Three trees are written: detector-level jets (tdet), matched det/particle
	pp pairs (tpp), and det/particle/hybrid triplets (th).
	"""
	def __init__(self, **kwargs):
		self.configure_from_args(args=None)
		super(EmbeddingOutput, self).__init__(**kwargs)
		# Expose the parsed CLI args (output_filename, sd_zcut, jetR, ...)
		# directly as attributes of this object.
		self.copy_attributes(self.args)
		self.outf = None
		self.sd = fjcontrib.SoftDrop(0, self.sd_zcut, self.jetR)
	def initialize_output(self, output_name=None):
		"""(Re)open the ROOT output file and create the tree writers.

		If a file with a different name is already open it is flushed and
		closed first; if the requested file is already open, returns True
		and keeps the current trees.
		"""
		if output_name:
			if self.output_filename != output_name:
				self.output_filename = output_name
		if self.outf:
			if self.outf.GetName() != self.output_filename:
				pinfo('closing output file', self.outf.GetName())
				self.outf.Write()
				self.outf.Close()
				self.outf = None
			else:
				return True
		if self.outf is None:
			self.outf = ROOT.TFile(self.output_filename, 'recreate')
			self.outf.cd()
			self.tdet = ROOT.TTree('tdet', 'tdet')
			self.twdet = RTreeWriter(tree=self.tdet, name='Output Tree detector level pp simulation')
			self.tpp = ROOT.TTree('tpp', 'tpp')
			self.twpp = RTreeWriter(tree=self.tpp, name='Output Tree pp simulation')
			self.th = ROOT.TTree('th', 'th')
			self.twh = RTreeWriter(tree=self.th, name='Output Tree pp simulation embedded into PbPb')
			pinfo('new output file', self.outf.GetName())
	def close(self):
		"""Flush and close the output file if one is open."""
		if self.outf:
			pinfo('closing output file', self.outf.GetName())
			self.outf.Write()
			self.outf.Close()
			self.outf = None
	def _fill_det_level(self, jet):
		# Per-jet kinematics branches on the detector-level tree.
		self.twdet.fill_branch('pt_det', jet.pt())
		self.twdet.fill_branch('pt_phi', jet.phi())
		self.twdet.fill_branch('pt_eta', jet.eta())
	def fill_det_level(self, iev=-1, jets=[]):
		"""Fill one tdet entry for the event's detector-level jets (no-op if empty)."""
		if len(jets) > 0:
			self.twdet.fill_branch('iev', iev)
			_tmp = [self._fill_det_level(j) for j in jets]
			self.twdet.fill_tree()
	def fill_pp_pairs(self, iev, jm):
		"""Fill one tpp entry for a matched [detector, particle] jet pair *jm*."""
		self.twpp.fill_branch('iev', iev)
		self.twpp.fill_branch('det', jm[0])
		self.twpp.fill_branch('part', jm[1])
		self.twpp.fill_branch('dpt', jm[0].pt() - jm[1].pt())
		self.twpp.fill_branch('dR', jm[0].delta_R(jm[1]))
		self.twpp.fill_tree()
	def fill_emb_3(self, iev, jm):
		"""Fill one th entry for a matched [det, particle, hybrid] triplet *jm*,
		including SoftDrop observables for each of the three jets."""
		# pdebug('@fill: jm[0]', jm[0], 'jm[1]', jm[1], 'jm[2]', jm[2])
		self.twh.fill_branch('iev', iev)
		self.twh.fill_branch('det', jm[0])
		self.twh.fill_branch('part', jm[1])
		self.twh.fill_branch('hybr', jm[2])
		self.twh.fill_branch('dpt_pp', jm[0].pt() - jm[1].pt())
		self.twh.fill_branch('dpt_emb', jm[2].pt() - jm[0].pt())
		self.twh.fill_branch('dR_pp', jm[0].delta_R(jm[1]))
		self.twh.fill_branch('dR_emb', jm[0].delta_R(jm[2]))
		# SoftDrop on the detector-level jet and its two prongs.
		sd0 = self.sd.result(jm[0])
		self.twh.fill_branch('sd_det', sd0)
		sd0_pe1 = fj.PseudoJet()
		sd0_pe2 = fj.PseudoJet()
		sd0_has_parents = sd0.has_parents(sd0_pe1, sd0_pe2)
		self.twh.fill_branch('sd_det_p1', sd0_pe1)
		self.twh.fill_branch('sd_det_p2', sd0_pe2)
		sdi0 = fjcontrib.get_SD_jet_info(sd0)
		self.twh.fill_branch('sd_det_zg', sdi0.z)
		self.twh.fill_branch('sd_det_Rg', sdi0.dR)
		# SoftDrop on the particle-level jet.
		sd1 = self.sd.result(jm[1])
		self.twh.fill_branch('sd_part', sd1)
		sd1_pe1 = fj.PseudoJet()
		sd1_pe2 = fj.PseudoJet()
		sd1_has_parents = sd1.has_parents(sd1_pe1, sd1_pe2)
		self.twh.fill_branch('sd_part_p1', sd1_pe1)
		self.twh.fill_branch('sd_part_p2', sd1_pe2)
		sdi1 = fjcontrib.get_SD_jet_info(sd1)
		self.twh.fill_branch('sd_part_zg', sdi1.z)
		self.twh.fill_branch('sd_part_Rg', sdi1.dR)
		# SoftDrop on the hybrid (embedded) jet.
		sd2 = self.sd.result(jm[2])
		self.twh.fill_branch('sd_emb', sd2)
		sd2_pe1 = fj.PseudoJet()
		sd2_pe2 = fj.PseudoJet()
		sd2_has_parents = sd2.has_parents(sd2_pe1, sd2_pe2)
		self.twh.fill_branch('sd_emb_p1', sd2_pe1)
		self.twh.fill_branch('sd_emb_p2', sd2_pe2)
		sdi2 = fjcontrib.get_SD_jet_info(sd2)
		self.twh.fill_branch('sd_emb_zg', sdi2.z)
		self.twh.fill_branch('sd_emb_Rg', sdi2.dR)
		# Matched-pT fractions between hybrid and detector prongs; -1 when
		# either jet has no SoftDrop splitting.
		m02_1 = -1
		m02_2 = -1
		if sd0_has_parents and sd2_has_parents:
			m02_1 = fjtools.matched_pt(sd2_pe1, sd0_pe1)
			m02_2 = fjtools.matched_pt(sd2_pe2, sd0_pe2)
		self.twh.fill_branch('sd_det_emb_mpt1', m02_1)
		self.twh.fill_branch('sd_det_emb_mpt2', m02_2)
		self.twh.fill_branch('sd_det_split', sd0_has_parents)
		self.twh.fill_branch('sd_part_split', sd1_has_parents)
		self.twh.fill_branch('sd_emb_split', sd2_has_parents)
		self.twh.fill_tree()
# make it a class
class Embedding(MPBase):
	"""Drives the embedding analysis: detector-level pp jets are matched to
	particle-level jets, the pp event is embedded into PbPb data (optionally
	with constituent subtraction), and matched triplets are written out via
	EmbeddingOutput.
	"""
	# NOTE(review): missing `self`/@staticmethod -- this is only ever called
	# through the class (Embedding.add_arguments_to_parser(parser) in main),
	# which works in Python 3; adding @staticmethod would make that explicit.
	def add_arguments_to_parser(parser):
		parser.add_argument('-o', '--output-filename', default="output.root", type=str)
		parser.add_argument('datalistAA', help='run through a file list', default='', type=str)
		parser.add_argument('simulationpp', help='run through a file list', default='', type=str)
		parser.add_argument('--jetR', default=0.4, type=float)
		parser.add_argument('--alpha', default=0, type=float)
		parser.add_argument('--dRmax', default=0.25, type=float)
		parser.add_argument('--sd-zcut', default=0.1, type=float)
		parser.add_argument('--overwrite', help="overwrite output", default=False, action='store_true')
		parser.add_argument('--benchmark', help='benchmark pthat setting - 80 GeV', default=False, action='store_true')
		parser.add_argument('--jetptcut', help='remove jets below the cut', default=1.e-3, type=float)
		parser.add_argument('--nev', help='number of events to run', default=0, type=int)
		# NOTE(review): no type= given, so max-eta arrives as str when passed
		# on the command line (the default 0.9 stays a float) -- confirm.
		parser.add_argument('--max-eta', help='max eta for particles', default=0.9)
	def __init__(self, **kwargs):
		self.configure_from_args(tree_name='tree_Particle', tree_name_gen='tree_Particle_gen', args=None)
		super(Embedding, self).__init__(**kwargs)
		# Expose the parsed CLI args directly as attributes.
		self.copy_attributes(self.args)
		self.jet_def = fj.JetDefinition(fj.antikt_algorithm, self.jetR)
		if self.benchmark:
			# Benchmark mode: restrict to an 80-100 GeV jet pT window.
			self.jet_selector = fj.SelectorPtMin(80.0) & fj.SelectorPtMax(100.0) & fj.SelectorAbsEtaMax(self.max_eta - 1.05 * self.jetR)
			# jet_selector_cs = fj.SelectorPtMin(50.0) & fj.SelectorAbsEtaMax(max_eta - 1.05 * self.jetR)
		else:
			self.jet_selector = fj.SelectorAbsEtaMax(self.max_eta - 1.05 * self.jetR)
		self.parts_selector = fj.SelectorAbsEtaMax(self.max_eta)
		self.output = EmbeddingOutput(args=self.args)
		# self.output.copy_attributes(self)
		self.sd = fjcontrib.SoftDrop(0, self.sd_zcut, self.jetR)
		# Separate jet finders for particle-level, detector-level and hybrid events.
		self.ja_part = JetAnalysis( jet_R=self.jetR, jet_algorithm=fj.antikt_algorithm,
									jet_pt_min=5., particle_eta_max=self.max_eta)
		self.ja_det = JetAnalysis( jet_R=self.jetR, jet_algorithm=fj.antikt_algorithm,
									jet_pt_min=self.jetptcut, particle_eta_max=self.max_eta)
		self.ja_hybrid = JetAnalysis( jet_R=self.jetR, jet_algorithm=fj.antikt_algorithm,
									jet_pt_min=5., particle_eta_max=self.max_eta)
		# Data/simulation inputs; det and part level read the same pp file list.
		self.dataPbPb = DataBackgroundIO( name='Data PbPb', file_list=self.datalistAA)
		self.det_sim = DataIO( name='Sim Pythia Detector level', file_list=self.simulationpp, random_file_order=False)
		self.part_sim = DataIO( name='Sim Pythia Particle level', file_list=self.simulationpp, random_file_order=False,
								tree_name='tree_Particle_gen')
		# Constituent subtractor; only active when a positive dRmax is given.
		self.cs = None
		if self.dRmax > 0:
			self.cs = CEventSubtractor( alpha=self.alpha, max_distance=self.dRmax, max_eta=self.max_eta,
										bge_rho_grid_size=0.25, max_pt_correct=100)
	def run(self):
		"""Event loop: iterate detector-level pp events, match to particle
		level, embed into PbPb, and write matched jets to the output trees."""
		# need to change this for data to drive...
		delta_t = 0
		start_t = time.time()
		iev = 1
		# while self.det_sim.load_event() and self.part_sim.load_event():
		while self.det_sim.load_event():
			iev = iev + 1
			if self.nev > 0:
				if iev > self.nev:
					iev = iev - 1
					break
			if iev % 1000 == 0:
				delta_t = time.time() - start_t
				pinfo('processing event', iev, ' - ev/sec =', iev/delta_t, 'elapsed =', delta_t)
			# find jets on detector level
			if len(self.det_sim.particles) < 1:
				pwarning(iev, 'event skipped N detector parts', len(self.det_sim.particles))
				continue
			self.ja_det.analyze_event(self.det_sim.particles)
			_jets_det = self.ja_det.jets
			# _x = [pdebug(' -d ', j) for j in _jets_det]
			if len(_jets_det) < 1:
				continue
			# Reject events containing suspiciously hard (>100 GeV) constituents.
			_too_high_pt = [p.pt() for j in _jets_det for p in j.constituents() if p.pt() > 100.]
			if len(_too_high_pt) > 0:
				pwarning(iev, 'a likely fake high pT particle(s)', _too_high_pt, '- skipping whole event')
				continue
			# One output file per input file (slashes flattened to underscores).
			_output_fname = os.path.expanduser(os.path.expandvars(self.det_sim.file_io.file_input))
			_output_fname = _output_fname.replace("/", "_")
			self.output.initialize_output(_output_fname)
			self.output.fill_det_level(iev, _jets_det)
			# load the corresponding event on particle level
			self.part_sim.open_afile(afile=self.det_sim.file_io.file_input)
			if not self.part_sim.load_event_with_loc(self.det_sim.event.run_number, self.det_sim.event.ev_id, 0):
				perror('unable to load partL event run#:', self.det_sim.event.run_number, 'ev_id:', self.det_sim.event.ev_id)
				continue
			if self.det_sim.event.run_number != self.part_sim.event.run_number:
				perror('run# missmatch detL:', self.det_sim.event.run_number, 'partL:', self.part_sim.event.run_number)
				continue
			if self.det_sim.event.ev_id != self.part_sim.event.ev_id:
				perror('ev_id# missmatch detL:', self.det_sim.event.ev_id, 'partL:',self.part_sim.event.ev_id)
				continue
			# find jets on particle level
			if len(self.part_sim.particles) < 1:
				pwarning(iev, 'event skipped N particle parts', len(self.part_sim.particles))
				continue
			self.ja_part.analyze_event(self.part_sim.particles)
			_jets_part = self.ja_part.jets
			# _x = [pdebug(' -p ', j) for j in _jets_part]
			if len(_jets_part) < 1:
				continue
			# match in pp simulations
			_det_part_matches = []
			_n_matches = 0
			_part_psjv = self.ja_part.jets_as_psj_vector()
			for j_det in _jets_det:
				# Geometric matching within 0.6*R in (eta, phi).
				_mactches_pp = fjtools.matched_Reta(j_det, _part_psjv, 0.6 * self.jetR)
				#_mactches_pp = fjtools.matched_Ry(j_det, _part_psjv, 0.6 * self.jetR)
				_n_matches = _n_matches + len(_mactches_pp)
				if len(_mactches_pp) > 1:
					pwarning('event:', iev, 'jet pt=', j_det.pt(), 'more than one match in pp jets', [i for i in _mactches_pp])
				if len(_mactches_pp) == 1:
					j_part = _part_psjv[_mactches_pp[0]]
					# pinfo('j_det', j_det, 'j_part', j_part)
					_det_part_matches.append([ j_det, j_part])
					self.output.fill_pp_pairs(iev, [j_det, j_part])
			# NOTE(review): the duplicated nested `if _n_matches < 1` guard is
			# redundant -- looks like an editing leftover.
			if _n_matches < 1:
				if _n_matches < 1:
					pwarning('event:', iev, '- no matched jets in simulation!?', len(_det_part_matches))
			# here embedding to PbPb data
			_offset = 10000
			while _offset < len(self.det_sim.particles):
				_offset = _offset + 1000
				pwarning('increasing bg index offset to', _offset)
			# Retry loading PbPb background events until one has particles.
			_PbPb_loaded = 0
			while _PbPb_loaded == 0:
				if not self.dataPbPb.load_event(offset=_offset):
					perror('unable to load next PbPb event')
					_PbPb_loaded = -1
				else:
					_hybrid_event = self.dataPbPb.particles
					_nparts_hybrid_no_emb = len(_hybrid_event)
					if _nparts_hybrid_no_emb < 1:
						pwarning('hybrid event with no particles! trying another one')
						_PbPb_loaded = 0
					else:
						_PbPb_loaded = 1
			if _PbPb_loaded < 0:
				perror('unable to load PbPb event - permanent - bailing out here.')
				break
			# Embed: append all detector-level pp particles to the PbPb event.
			_tmp = [_hybrid_event.push_back(p) for p in self.det_sim.particles]
			if self.cs:
				# Constituent subtraction before jet finding on the hybrid event.
				cs_parts = self.cs.process_event(_hybrid_event)
				rho = self.cs.bge_rho.rho()
				self.ja_hybrid.analyze_event(cs_parts)
			else:
				self.ja_hybrid.analyze_event(_hybrid_event)
			_hybrid_matches = []
			_hybrid_psjv = self.ja_hybrid.jets_as_psj_vector()
			for m in _det_part_matches:
				j_det = m[0]
				j_part = m[1]
				_mactches_hybrid = fjtools.matched_Reta(j_det, _hybrid_psjv, 0.6 * self.jetR)
				if len(_mactches_hybrid) > 1:
					pwarning('event:', iev, 'jet pt=', j_det.pt(), 'more than one match in hybrid jets', [i for i in _mactches_hybrid])
				if len(_mactches_hybrid) == 1:
					# m.append(_hybrid_psjv[_mactches_hybrid[0]])
					j_hybr = _hybrid_psjv[_mactches_hybrid[0]]
					# pdebug('L302', 'j_det', j_det, 'j_part', j_part, 'j_hybr', j_hybr)
					_hybrid_matches.append([j_det, j_part, j_hybr])
					self.output.fill_emb_3(iev, [j_det, j_part, j_hybr])
			_n_matches_hybrid = len(_hybrid_matches)
			# NOTE(review): same duplicated guard as above.
			if _n_matches_hybrid < 1:
				if _n_matches_hybrid < 1:
					pwarning('event:', iev, '- no matched jets in embedding!?', _n_matches_hybrid)
		delta_t = time.time()-start_t
		pinfo('processed events', iev, ' - ev/sec =', iev/delta_t, 'elapsed =', delta_t)
		self.output.close()
def main():
	"""Command-line entry point: parse arguments, build an Embedding, run it."""
	parser = argparse.ArgumentParser(description='pythia8 fastjet on the fly', prog=os.path.basename(__file__))
	Embedding.add_arguments_to_parser(parser)
	args = parser.parse_args()
	# Derive a descriptive output name from the subtraction/grooming settings
	# when the user kept the default.
	if args.output_filename == 'output.root':
		args.output_filename = 'output_data_emb_CS_alpha_{}_dRmax_{}_SDzcut_{}.root'.format(args.alpha, args.dRmax, args.sd_zcut)
	if args.jetptcut > -100:
		args.output_filename = 'output_data_emb_CS_alpha_{}_dRmax_{}_SDzcut_{}_jpt_{}.root'.format(args.alpha, args.dRmax, args.sd_zcut, args.jetptcut)
	# print the banner first
	fj.ClusterSequence.print_banner()
	print()
	embd = Embedding(args=args)
	print(embd)
	embd.run()


if __name__ == '__main__':
	main()
|
"""
Copyright (C) 2020 SunSpec Alliance
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import csv
import json
import copy
import sunspec2.mdef as mdef
# Spreadsheet column headings for the SunSpec model-definition format.
ADDRESS_OFFSET = 'Address Offset'
GROUP_OFFSET = 'Group Offset'
NAME = 'Name'
VALUE = 'Value'
COUNT = 'Count'
TYPE = 'Type'
SIZE = 'Size'
SCALE_FACTOR = 'Scale Factor'
UNITS = 'Units'
ACCESS = 'RW Access (RW)'
MANDATORY = 'Mandatory (M)'
STATIC = 'Static (S)'
LABEL = 'Label'
DESCRIPTION = 'Description'
NOTES = 'Notes'
# Canonical column order for generated spreadsheets.
columns = [ADDRESS_OFFSET, GROUP_OFFSET, NAME, VALUE, COUNT, TYPE, SIZE, SCALE_FACTOR,
           UNITS, ACCESS, MANDATORY, STATIC, LABEL, DESCRIPTION, NOTES]
# Template for a blank spreadsheet row.
empty_row = [''] * len(columns)
# Pre-computed column indexes for fast row access.
ADDRESS_OFFSET_IDX = columns.index(ADDRESS_OFFSET)
GROUP_OFFSET_IDX = columns.index(GROUP_OFFSET)
NAME_IDX = columns.index(NAME)
VALUE_IDX = columns.index(VALUE)
COUNT_IDX = columns.index(COUNT)
TYPE_IDX = columns.index(TYPE)
SIZE_IDX = columns.index(SIZE)
SCALE_FACTOR_IDX = columns.index(SCALE_FACTOR)
UNITS_IDX = columns.index(UNITS)
ACCESS_IDX = columns.index(ACCESS)
MANDATORY_IDX = columns.index(MANDATORY)
STATIC_IDX = columns.index(STATIC)
LABEL_IDX = columns.index(LABEL)
DESCRIPTION_IDX = columns.index(DESCRIPTION)
NOTES_IDX = columns.index(NOTES)
def idx(row, attr, mandatory=False):
    """Return the column index of *attr* in *row*.

    Returns None when the attribute is absent and *mandatory* is False;
    raises ValueError when it is absent but required.
    """
    try:
        return row.index(attr)
    except ValueError:
        # list.index raises ValueError on a missing value; the previous bare
        # `except:` also swallowed unrelated errors such as KeyboardInterrupt.
        if mandatory:
            raise ValueError('Missing required attribute column: %s' % (attr))
        return None
def row_is_empty(row, idx):
    """Return True when every cell of *row* from position *idx* on is None or ''."""
    return all(cell is None or cell == '' for cell in row[idx:])
def find_name(entities, name):
    """Return the first entity dict whose name field equals *name*, else None."""
    return next((e for e in entities if e[mdef.NAME] == name), None)
def element_type(row):
    """Validate that *row* carries a Type column.

    NOTE(review): the computed index is discarded and the function returns
    None -- this looks unfinished or dead; its only observable effect is the
    ValueError raised by idx() when the Type column is missing.
    """
    type_idx = idx(row, TYPE, mandatory=True)
def from_spreadsheet(spreadsheet):
    """Parse spreadsheet rows (list of lists) into a model-definition dict.

    The first row is the header; each following row is classified by its
    Type cell into a point, a group, a symbol (name+value, no type), a
    comment (text in column 0 only), or a blank line.
    """
    model_def = {}
    # Resolve column positions from the header row; optional columns yield None.
    row = spreadsheet[0]
    address_offset_idx = idx(row, ADDRESS_OFFSET)
    group_offset_idx = idx(row, GROUP_OFFSET)
    name_idx = idx(row, NAME, mandatory=True)
    # NOTE(review): to_number_type is applied to the *index* returned by idx()
    # here (and for count/size/scale factor) -- harmless if it passes ints
    # through, but it looks like it was meant for the cell values instead.
    value_idx = mdef.to_number_type(idx(row, VALUE, mandatory=True))
    count_idx = mdef.to_number_type(idx(row, COUNT, mandatory=True))
    type_idx = idx(row, TYPE, mandatory=True)
    size_idx = mdef.to_number_type(idx(row, SIZE, mandatory=True))
    scale_factor_idx = mdef.to_number_type(idx(row, SCALE_FACTOR, mandatory=True))
    units_idx = idx(row, UNITS, mandatory=True)
    access_idx = idx(row, ACCESS, mandatory=True)
    mandatory_idx = idx(row, MANDATORY, mandatory=True)
    static_idx = idx(row, STATIC, mandatory=True)
    label_idx = idx(row, LABEL)
    description_idx = idx(row, DESCRIPTION)
    has_notes = False
    # if notes col not present, notes_idx will be None
    notes_idx = idx(row, NOTES)
    if notes_idx and row[notes_idx] == 'Notes':
        has_notes = True
    # Parser state: current group/point being built and pending comments that
    # attach to the next element.
    row_num = 1
    group = None
    point = None
    comments = []
    parent = ''
    for row in spreadsheet[1:]:
        row_num += 1
        name = row[name_idx]
        value = mdef.to_number_type(row[value_idx])
        etype = row[type_idx]
        label = description = notes = ''
        if len(row) > label_idx:
            label = row[label_idx]
        if len(row) > description_idx:
            description = row[description_idx]
        if has_notes:
            notes = row[notes_idx]
            if notes is None:
                notes = ''
        # point
        if etype in mdef.point_type_info:
            # point
            if group:
                if not group.get(mdef.POINTS):
                    group[mdef.POINTS] = []
                if find_name(group[mdef.POINTS], name) is not None:
                    raise Exception('Duplicate point definition in group %s: %s' % (group[mdef.NAME], name))
            else:
                raise Exception('Point %s defined outside of group' % name)
            # Strings carry an explicit size; other types have a fixed length.
            if etype == mdef.TYPE_STRING:
                size = mdef.to_number_type(row[size_idx])
            else:
                size = mdef.point_type_info[etype]['len']
            sf = mdef.to_number_type(row[scale_factor_idx])
            units = row[units_idx]
            access = row[access_idx]
            mandatory = row[mandatory_idx]
            static = row[static_idx]
            # Only populated attributes are stored on the point.
            point = {mdef.NAME: name}
            if etype:
                point[mdef.TYPE] = etype
            if size is not None and size != '':
                point[mdef.SIZE] = size
            if sf:
                point[mdef.SF] = sf
            if units:
                point[mdef.UNITS] = units
            if access:
                point[mdef.ACCESS] = access
            if mandatory:
                point[mdef.MANDATORY] = mandatory
            if static:
                point[mdef.STATIC] = static
            if label:
                point[mdef.LABEL] = label
            if description:
                point[mdef.DESCRIPTION] = description
            if has_notes:
                point[mdef.NOTES] = notes
            if value is not None and value != '':
                point[mdef.VALUE] = value
            if comments:
                point[mdef.COMMENTS] = list(comments)
            group[mdef.POINTS].append(point)
            # set the model id
            if not parent and name == mdef.MODEL_ID_POINT_NAME:
                model_def[mdef.ID] = value
            comments = []
        # group
        elif etype in mdef.group_types:
            # Dotted names address nested groups relative to the top group.
            path = name.split('.')
            group = model_def.get(mdef.GROUP)
            parent = ''
            if len(path) > 1:
                parent = group[mdef.NAME]
                for g in path[1:-1]:
                    group = find_name(group[mdef.GROUPS], g)
                    if group is None:
                        raise Exception('Unknown parent group id %s in group id %s' % (g, group))
                    parent += '.%s' % group[mdef.NAME]
            else:
                if group is not None:
                    raise Exception('Redefintion of top-level group %s with %s' % (group[mdef.ID], name))
            if parent:
                name = '%s.%s' % (parent, path[-1])
            else:
                name = path[-1]
            new_group = {mdef.NAME: path[-1], mdef.TYPE: etype}
            if label:
                new_group[mdef.LABEL] = label
            if description:
                new_group[mdef.DESCRIPTION] = description
            if has_notes:
                new_group[mdef.NOTES] = notes
            if comments:
                new_group[mdef.COMMENTS] = list(comments)
            comments = []
            count = mdef.to_number_type(row[count_idx])
            if count is not None and count != '':
                new_group[mdef.COUNT] = count
            # Attach as top-level group or as a child of the resolved parent.
            if group is None:
                model_def[mdef.GROUP] = new_group
            else:
                if not group.get(mdef.GROUPS):
                    group[mdef.GROUPS] = []
                group[mdef.GROUPS].append(new_group)
            group = new_group
        # symbol - has name and value with no type
        elif name and value is not None and value != '':
            if point is None:
                raise Exception('Unknown point for symbol %s' % name)
            if not point.get(mdef.SYMBOLS):
                point[mdef.SYMBOLS] = []
            if find_name(point[mdef.SYMBOLS], name) is not None:
                raise Exception('Duplicate symbol definition in point %s: %s' % (point[mdef.ID], name))
            symbol = {mdef.NAME: name, mdef.VALUE: value}
            point[mdef.SYMBOLS].append(symbol)
            if label:
                symbol[mdef.LABEL] = label
            if description:
                symbol[mdef.DESCRIPTION] = description
            if has_notes:
                symbol[mdef.NOTES] = notes
            if comments:
                symbol[mdef.COMMENTS] = list(comments)
            comments = []
        elif not row_is_empty(row, 1):
            raise ValueError('Invalid spreadsheet entry row %s: %s' % (row_num, row))
        # comment - no name, value, or type
        elif row[0]:
            comments.append(row[0])
        # blank line - comment with nothing in column 1
    return model_def
def to_spreadsheet(model_def):
    """Convert a model-definition dict into spreadsheet rows (list of lists).

    The 'Notes' column is included only when the model definition contains
    any "notes" attribute.
    """
    # Cheap containment test: serialize once and look for a "notes" key.
    mdef_str = json.dumps(model_def)
    has_notes = '\"notes\"' in mdef_str
    # BUG FIX: always copy the module-level column list for the header row.
    # The old code aliased `columns` itself when notes were present, so a
    # caller mutating the returned header corrupted the shared constant
    # (and it deep-copied a flat list of strings needlessly).
    header = list(columns)
    if not has_notes:
        header.remove('Notes')
    spreadsheet = [header]
    to_spreadsheet_group(spreadsheet, model_def[mdef.GROUP], has_notes, addr_offset=0)
    return spreadsheet
def to_spreadsheet_group(ss, group, has_notes, parent='', addr_offset=None):
    """Append rows for *group* -- its comments, header row, points and
    (recursively) subgroups -- to the spreadsheet *ss*.

    *addr_offset* tracks absolute register addresses; it is only propagated
    through the points of this group (see note below).
    """
    # process comments
    for c in group.get(mdef.COMMENTS, []):
        to_spreadsheet_comment(ss, c, has_notes=has_notes)
    # add group info
    row = None
    if has_notes:
        row = [''] * len(columns)
    else:
        row = [''] * (len(columns) - 1)
    # Group rows use the dotted parent.child name.
    name = group.get(mdef.NAME, '')
    if name:
        if parent:
            name = '%s.%s' % (parent, name)
        row[NAME_IDX] = name
    else:
        raise Exception('Group missing name attribute')
    row[TYPE_IDX] = group.get(mdef.TYPE, '')
    row[COUNT_IDX] = group.get(mdef.COUNT, '')
    row[LABEL_IDX] = group.get(mdef.LABEL, '')
    row[DESCRIPTION_IDX] = group.get(mdef.DESCRIPTION, '')
    if has_notes:
        row[NOTES_IDX] = group.get(mdef.NOTES, '')
    ss.append(row)
    # process points
    group_offset = 0
    for p in group.get(mdef.POINTS, []):
        plen = to_spreadsheet_point(ss, p, has_notes=has_notes, addr_offset=addr_offset, group_offset=group_offset)
        # Advance whichever offsets are being tracked by the point's length.
        if addr_offset is not None:
            addr_offset += plen
        if group_offset is not None:
            group_offset += plen
    # process groups
    # Subgroups get addr_offset=None, so they emit group-relative offsets
    # only -- presumably because repeating groups have no fixed absolute
    # address; confirm against the format spec.
    addr_offset = None
    for g in group.get(mdef.GROUPS, []):
        to_spreadsheet_group(ss, g, has_notes=has_notes, parent=name, addr_offset=addr_offset)
def to_spreadsheet_point(ss, point, has_notes, addr_offset=None, group_offset=None):
    """Append rows for *point* (comments, the point row, its symbols) to *ss*.

    Returns the point's register length so the caller can advance offsets.
    Raises when the point lacks a name or type, or when its type is unknown.
    """
    # process comments
    for c in point.get(mdef.COMMENTS, []):
        to_spreadsheet_comment(ss, c, has_notes=has_notes)
    # add point info
    row = None
    if has_notes:
        row = [''] * len(columns)
    else:
        row = [''] * (len(columns) - 1)
    name = point.get(mdef.NAME, '')
    if name:
        row[NAME_IDX] = name
    else:
        raise Exception('Point missing name attribute')
    ptype = point.get(mdef.TYPE, '')
    if ptype != '':
        row[TYPE_IDX] = ptype
    else:
        raise Exception('Point %s missing type attribute' % name)
    # Only one of the two offset columns is filled, preferring absolute.
    if addr_offset is not None:
        row[ADDRESS_OFFSET_IDX] = addr_offset
    elif group_offset is not None:
        row[GROUP_OFFSET_IDX] = group_offset
    # Access/mandatory/static cells are emitted only for non-default values.
    access = point.get(mdef.ACCESS, '')
    if access != mdef.ACCESS_RW:
        access = ''
    row[ACCESS_IDX] = access
    mandatory = point.get(mdef.MANDATORY, '')
    if mandatory != mdef.MANDATORY_TRUE:
        mandatory = ''
    row[MANDATORY_IDX] = mandatory
    static = point.get(mdef.STATIC, '')
    if static != mdef.STATIC_TRUE:
        static = ''
    row[STATIC_IDX] = static
    row[UNITS_IDX] = point.get(mdef.UNITS, '')
    row[SCALE_FACTOR_IDX] = mdef.to_number_type(point.get(mdef.SF, ''))
    # Strings carry an explicit size; other types have a fixed length.
    if ptype == mdef.TYPE_STRING:
        row[SIZE_IDX] = mdef.to_number_type(point.get(mdef.SIZE, ''))
    else:
        row[SIZE_IDX] = mdef.point_type_info[ptype]['len']
    row[VALUE_IDX] = mdef.to_number_type(point.get(mdef.VALUE, ''))
    row[LABEL_IDX] = point.get(mdef.LABEL, '')
    row[DESCRIPTION_IDX] = point.get(mdef.DESCRIPTION, '')
    if has_notes:
        row[NOTES_IDX] = point.get(mdef.NOTES, '')
    ss.append(row)
    # process symbols
    symbols = point.get(mdef.SYMBOLS, [])
    if symbols:
        # NOTE(review): sorts on the literal key 'value' rather than the
        # mdef.VALUE constant used elsewhere -- confirm they are the same
        # string.
        symbols = sorted(symbols, key=lambda sy: sy['value'])
        for s in symbols:
            to_spreadsheet_symbol(ss, s, has_notes=has_notes)
    # return point length
    try:
        plen = mdef.point_type_info[ptype]['len']
    except KeyError:
        raise Exception('Unknown point type %s for point %s' % (ptype, name))
    if not plen:
        # Variable-length types: fall back to the explicit Size cell.
        try:
            plen = int(row[SIZE_IDX])
        except ValueError:
            # BUG FIX: corrected 'iteger' typo in the error message.
            raise Exception('Point size is for point %s not an integer value: %s' % (name, row[SIZE_IDX]))
    return plen
def to_spreadsheet_symbol(ss, symbol, has_notes):
    """Append spreadsheet rows for a symbol: its comments, then the
    symbol row itself.

    :param ss: spreadsheet (list of rows) to append to.
    :param symbol: symbol definition dict.
    :param has_notes: whether the spreadsheet includes the notes column.
    :raises Exception: if the symbol has no name or no value.
    """
    # comments precede the symbol row
    for comment in symbol.get(mdef.COMMENTS, []):
        to_spreadsheet_comment(ss, comment, has_notes=has_notes)
    # the notes column is only present when has_notes is set
    width = len(columns) if has_notes else len(columns) - 1
    row = [''] * width
    name = symbol.get(mdef.NAME, '')
    if not name:
        raise Exception('Symbol missing name attribute')
    row[NAME_IDX] = name
    value = symbol.get(mdef.VALUE, '')
    if value == '':
        raise Exception('Symbol %s missing value' % name)
    row[VALUE_IDX] = value
    row[LABEL_IDX] = symbol.get(mdef.LABEL, '')
    row[DESCRIPTION_IDX] = symbol.get(mdef.DESCRIPTION, '')
    if has_notes:
        row[NOTES_IDX] = symbol.get(mdef.NOTES, '')
    ss.append(row)
def to_spreadsheet_comment(ss, comment, has_notes):
    """Append a comment row: the comment text in the first column, with
    all remaining columns left blank.

    :param ss: spreadsheet (list of rows) to append to.
    :param comment: comment text.
    :param has_notes: whether the spreadsheet includes the notes column.
    """
    width = len(columns) if has_notes else len(columns) - 1
    row = [''] * width
    row[0] = comment
    ss.append(row)
def spreadsheet_equal(ss1, ss2):
    """Compare two spreadsheets row by row.

    :param ss1: first spreadsheet (list of rows).
    :param ss2: second spreadsheet (list of rows).
    :return: True if equal.
    :raises Exception: describing the first difference found (length
        mismatch or differing row), rather than returning False.
    """
    if len(ss1) != len(ss2):
        raise Exception('Different length: %s %s' % (len(ss1), len(ss2)))
    for idx, (row1, row2) in enumerate(zip(ss1, ss2)):
        if row1 != row2:
            raise Exception('Line %s different: %s %s' % (idx + 1, row1, row2))
    return True
def from_csv(filename=None, csv_str=None):
    """Create a model definition from CSV content.

    :param filename: path to a CSV file.
    :param csv_str: CSV content as a string.
    :return: model definition produced by from_spreadsheet().
    """
    spreadsheet = spreadsheet_from_csv(filename=filename, csv_str=csv_str)
    return from_spreadsheet(spreadsheet)
def to_csv(model_def, filename=None, csv_str=None):
    """Write a model definition out as CSV.

    :param model_def: model definition to convert.
    :param filename: output CSV file path.
    :param csv_str: passed through to spreadsheet_to_csv().
    """
    spreadsheet = to_spreadsheet(model_def)
    spreadsheet_to_csv(spreadsheet, filename=filename, csv_str=csv_str)
def spreadsheet_from_csv(filename=None, csv_str=None):
    """Read a spreadsheet (list of rows) from CSV content.

    Fixes over the previous version: the file handle is closed via a
    context manager (it was previously leaked), a dead ``import sys`` was
    removed, and ``csv_str`` is now honored (it was previously accepted
    but silently ignored, returning an empty spreadsheet).

    :param filename: path to a CSV file; takes precedence over csv_str.
    :param csv_str: CSV content as a string.
    :return: list of rows with numeric-looking cells converted via
        mdef.to_number_type() and informative offset columns cleared.
    """
    import io
    spreadsheet = []
    if filename:
        source = open(filename)
    elif csv_str:
        source = io.StringIO(csv_str)
    else:
        source = None
    if source:
        with source:
            for row in csv.reader(source):
                if not row:
                    continue
                # filter out informative offset information from the normative model definition
                if row[TYPE_IDX] and row[TYPE_IDX] != TYPE:
                    row[ADDRESS_OFFSET_IDX] = ''
                    row[GROUP_OFFSET_IDX] = ''
                # normalize numeric-looking cells to numbers
                if row[VALUE_IDX]:
                    row[VALUE_IDX] = mdef.to_number_type(row[VALUE_IDX])
                if row[COUNT_IDX]:
                    row[COUNT_IDX] = mdef.to_number_type(row[COUNT_IDX])
                if row[SIZE_IDX]:
                    row[SIZE_IDX] = mdef.to_number_type(row[SIZE_IDX])
                if row[SCALE_FACTOR_IDX]:
                    row[SCALE_FACTOR_IDX] = mdef.to_number_type(row[SCALE_FACTOR_IDX])
                spreadsheet.append(row)
    return spreadsheet
def spreadsheet_to_csv(spreadsheet, filename=None, csv_str=None):
    """Write spreadsheet rows to a CSV file.

    Fixes over the previous version: the file is managed with ``with`` so
    it is closed even if a write fails (it previously leaked on error and
    crashed with TypeError when filename was omitted).

    :param spreadsheet: list of rows to write.
    :param filename: output CSV file path; if not given, nothing is written.
    :param csv_str: accepted for interface symmetry; string output is
        not implemented (matches prior behavior of the csv_str path).
    """
    if filename:
        with open(filename, 'w') as f:
            writer = csv.writer(f, lineterminator='\n')
            writer.writerows(spreadsheet)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.