text stringlengths 8 6.05M |
|---|
#!/opt/rocks/bin/python
#
# This is program is used to create disks for VMs. It serves as a helper
# program to rocks-pygrub.
#
# @Copyright@
#
# Rocks(r)
# www.rocksclusters.org
# version 5.6 (Emerald Boa)
# version 6.1 (Emerald Boa)
#
# Copyright (c) 2000 - 2013 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice unmodified and in its entirety, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. All advertising and press materials, printed or electronic, mentioning
# features or use of this software must display the following acknowledgement:
#
# "This product includes software developed by the Rocks(r)
# Cluster Group at the San Diego Supercomputer Center at the
# University of California, San Diego and its contributors."
#
# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,
# neither the name or logo of this software nor the names of its
# authors may be used to endorse or promote products derived from this
# software without specific prior written permission. The name of the
# software includes the following terms, and any derivatives thereof:
# "Rocks", "Rocks Clusters", and "Avalanche Installer". For licensing of
# the associated name, interested parties should contact Technology
# Transfer & Intellectual Property Services, University of California,
# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910,
# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:invent@ucsd.edu
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @Copyright@
#
# $Log: rocks-create-vm-disks,v $
# Revision 1.7 2012/11/27 00:49:42 phil
# Copyright Storm for Emerald Boa
#
# Revision 1.6 2012/05/06 05:49:53 phil
# Copyright Storm for Mamba
#
# Revision 1.5 2012/03/17 03:01:04 clem
# Now rocks-create-vm-disks support both kvm and xen
#
# Revision 1.4 2011/07/23 02:31:46 phil
# Viper Copyright
#
# Revision 1.3 2010/09/07 23:53:34 bruno
# star power for gb
#
# Revision 1.2 2009/05/01 19:07:35 mjk
# chimi con queso
#
# Revision 1.1 2009/04/08 19:18:57 bruno
# retooled rocks-pygrup to work with libvirt
#
#
import os
import os.path
import sys
import string
import tempfile
import urllib
import getopt
import pwd
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], '',
[ "hostname=" ] )
except getopt.GetoptError:
sys.exit(1)
#
# get the hostname
#
hostname = None
for o, a in opts:
if o in ("--hostname",):
hostname = a
if hostname == None:
print "must supply a host name"
#
# parameters from the config file that pertain to disks
#
vmParameters = { 'disk' : [], 'disksize' : [] }
vmKernelConfig = '/etc/xen/rocks/%s' % hostname

#
# Read the per-VM kernel config file. Lines look like "token = value";
# only 'disk' and 'disksize' tokens are collected. Parsing is
# deliberately best-effort: a missing or malformed file simply leaves
# the disk lists empty.
#
try:
    cfgfile = open(vmKernelConfig, 'r')
    for line in cfgfile.readlines():
        if not line or len(line) <= 1:
            # blank line
            token = ''
            val = ''
        elif line.find('=') >= 0 :
            # split only on the first '=' so values may contain '='
            (token, val) = line.split('=', 1)
        else:
            (token, val) = (line, '')
        token = token.strip()
        val = val.strip()
        if token in vmParameters:
            if token == 'disk':
                vmParameters[token].append(val)
            elif token == 'disksize':
                #
                # disk size is in GB
                #
                v = int(val) * 1000 * 1000 * 1000
                vmParameters[token].append(v)
            else:
                vmParameters[token] = val
    cfgfile.close()
except Exception:
    # best-effort by design: no config means no extra disks to create
    pass
#
# Create the disk file(s), if it doesn't exist
#
kvm=False
try:
pwd.getpwnam("qemu")[2]
#kvm we need to change ownership
kvm=True
except:
pass
i = 0
for diskfile in vmParameters['disk']:
if not os.path.exists(diskfile):
if not os.path.exists(os.path.dirname(diskfile)):
os.makedirs(os.path.dirname(diskfile), 0700)
if kvm:
cmd = "chown qemu " + os.path.dirname(os.path.dirname(diskfile))
os.system(cmd)
cmd = "chown qemu " + os.path.dirname(diskfile)
os.system(cmd)
cmd = 'dd if=/dev/zero of=%s bs=1 count=1 ' % diskfile
cmd += 'seek=%d ' % (vmParameters['disksize'][i] - 1)
cmd += '> /dev/null 2>&1'
os.system(cmd)
if kvm:
#qemu system we need to change file ownership
cmd = "chown qemu:qemu " + str(diskfile)
os.system(cmd)
i += 1
|
#!/usr/bin/python
import glob
from parse import parse
import lc_tools
from subprocess import call, Popen, PIPE
import sys, os, inspect
import cPickle
import cfg
import uuid
import shutil
class MissingRequiredParameterError(Exception):
    """Raised when a required parameter is absent from a function call."""

    def __init__(self, value):
        # keep the offending value/message available to handlers
        self.value = value

    def __str__(self):
        return str(self.value)
class MissingRequiredReturnKeyError(Exception):
    """Raised when a function's returned dict lacks a declared key."""

    def __init__(self, value):
        # keep the offending value/message available to handlers
        self.value = value

    def __str__(self):
        return str(self.value)
class myFeature(object):
    """Decorator that validates a feature function's inputs and outputs.

    Constructed with `requires` (names of the arguments the wrapped
    function must receive) and `provides` (keys its returned dictionary
    must contain). Both lists are stored as attributes.
    """

    def __init__(self, requires, provides):
        """Record the declared requirements and guarantees."""
        self.requires = requires
        self.provides = provides

    def __call__(self, f):
        """Wrap `f` so every call is checked against the declarations."""
        def wrapped_f(*args, **kwargs):
            # NOTE(review): for positional calls this tests whether the
            # parameter *name* appears among the argument *values*; only
            # keyword calls are checked reliably -- confirm callers
            # always pass these arguments by keyword.
            missing = [name for name in self.requires
                       if name not in args and name not in kwargs]
            if missing:
                raise MissingRequiredParameterError(
                    "Required arg %s not provided in function call." % missing[0])
            result_dict = f(*args, **kwargs)
            absent = [key for key in self.provides if key not in result_dict]
            if absent:
                raise MissingRequiredReturnKeyError(
                    "Key %s not present in function return value." % absent[0])
            return result_dict
        return wrapped_f
class DummyFile(object):
    """stdout stand-in that silently discards everything written to it."""

    def write(self, x):
        # intentionally a no-op: used to mute prints from user scripts
        pass
def execute_functions_in_order(script_fname='testfeature1.py',
                               features_already_known=None,
                               script_fpath="here"):
    '''Parse a custom-features script and execute its decorated functions
    in dependency order.

    The script must define functions decorated with @myFeature(requires=...,
    provides=...); functions whose outputs feed other functions are run
    first. Returns the dict of all extracted features.

    BUG FIXES vs. the original:
    - `features_already_known` had a mutable dict default shared across
      calls; it is now built fresh per call.
    - `funcnames` was mutated (`.remove`) while being iterated, which
      skipped entries and corrupted the func_rounds bookkeeping; the loop
      now iterates over a snapshot.
    - stdout is restored in a `finally` so an exception in a user function
      no longer leaves sys.stdout pointing at a DummyFile.
    '''
    if features_already_known is None:
        # same sample defaults as before, but no longer shared state
        features_already_known = {"t": [1, 2, 3], "m": [1, 23, 2],
                                  "e": [0.2, 0.3, 0.2], "coords": [22, 33]}
    # for docker container:
    import sys
    sys.path.append("/home/mltp")
    if script_fpath != "here":
        sys.path.append(script_fpath.replace("/" + script_fname, ""))
    else:
        script_fpath = script_fname
    thismodule = __import__(script_fname.replace(".py", ""))
    try:
        with open(script_fpath) as f:
            all_lines = f.readlines()
    except IOError:
        # fall back to the docker mount point
        with open("/home/mltp/" + script_fname) as f:
            all_lines = f.readlines()
    fnames_req_prov_dict = {}
    all_required_params = []
    all_provided_params = []
    for i in range(len(all_lines) - 1):
        if "@myFeature" in all_lines[i] and "def " in all_lines[i + 1]:
            reqs_provs_1 = parse("@myFeature(requires={requires}, provides={provides})", all_lines[i].strip())
            func_name = parse("def {funcname}({args}):", all_lines[i + 1].strip())
            # NOTE: eval() executes text taken from the uploaded script --
            # only safe because the script itself is imported and run anyway.
            requires = eval(reqs_provs_1.named["requires"])
            provides = eval(reqs_provs_1.named["provides"])
            fnames_req_prov_dict[func_name.named['funcname']] = {
                "requires": requires, "provides": provides}
            all_required_params = list(set(all_required_params + requires))
            all_provided_params = list(set(all_provided_params + provides))
    # anything already known does not need to be produced by a function
    all_required_params = [x for x in all_required_params
                           if x not in features_already_known]
    for reqd_param in all_required_params:
        if reqd_param not in all_provided_params:
            raise Exception("Not all of the required parameters are provided by the functions in this script (required parameter '%s')."%str(reqd_param))
    func_queue = []
    funcnames = list(fnames_req_prov_dict.keys())
    i = 0
    func_rounds = {}
    all_extracted_features = {}
    # redirect stdout temporarily so user-script prints stay quiet:
    save_stdout = sys.stdout
    sys.stdout = DummyFile()
    try:
        while len(funcnames) > 0:
            func_rounds[str(i)] = []
            # iterate over a snapshot: funcnames is mutated in the loop body
            for funcname in list(funcnames):
                reqs = fnames_req_prov_dict[funcname]['requires']
                provs = fnames_req_prov_dict[funcname]['provides']
                if len(set(all_required_params) & set(reqs)) > 0:
                    # still waiting on outputs of another function
                    func_queue.append(funcname)
                else:
                    func_rounds[str(i)].append(funcname)
                    all_required_params = [x for x in all_required_params if x not in provs]
                    arguments = {}
                    for req in reqs:
                        if req in features_already_known:
                            arguments[req] = features_already_known[req]
                        elif req in all_extracted_features:
                            arguments[req] = all_extracted_features[req]
                    func_result = getattr(thismodule, funcname)(**arguments)
                    # .update() replaces the py2-only dict(items + items)
                    all_extracted_features.update(func_result)
                    funcnames.remove(funcname)
            i += 1
    finally:
        sys.stdout = save_stdout
    return all_extracted_features
def docker_installed():
    """Return True if the `docker` executable can be launched here."""
    from subprocess import call, PIPE
    try:
        # we only care whether the binary exists, not its exit status
        call(["docker"], stdout=PIPE, stderr=PIPE)
    except OSError:
        # executable missing or not runnable
        return False
    return True
def docker_extract_features(script_fpath,features_already_known={},ts_datafile_path=None,ts_data=None):
'''
Spins up / runs a docker container which does all the script excecution/feature extraction inside,
and whose output is captured and returned here.
Input parameters:
- ts_datafile_path
- ts_data must be either list of lists or tuples each containing t,m(,e) for a single epoch or None,
in which case ts_datafile_path must not be None
'''
if "t" not in features_already_known or "m" not in features_already_known: ## get ts data and put into features_already_known
if ts_datafile_path is None and ts_data is None:
raise ValueError("No time series data provided! ts_datafile_path is None and ts_data is None !!")
tme = []
if ts_datafile_path: # path to ts data file
# parse ts data and put t,m(,e) into features_already_known
with open(ts_datafile_path) as f:
all_lines = f.readlines()
for i in range(len(all_lines)):
if all_lines[i].strip() == "":
continue
else:
tme.append(all_lines[i].strip().split(","))
else: # ts_data passed directly
# parse ts data and put t,m(,e) into features_already_known
if type(ts_data) == list:
if len(ts_data) > 0:
if type(ts_data[0]) in [list, tuple] and type(ts_data[0][0]) == float: # ts_data already in desired format
tme = ts_data
elif type(ts_data[0]) == str and "," in ts_data[0]:
for el in ts_data:
if el not in ["\n",""]:
tme.append(el.split(","))
else:
raise ValueError("ts_data is an empty list")
elif type(ts_data) == str:
all_lines = ts_data.strip().split("\n")
for i in range(len(all_lines)):
if all_lines[i].strip() == "":
continue
else:
tme.append(all_lines[i].strip().split(","))
if len(tme) > 0:
if all(len(this_tme) == 3 for this_tme in tme):
T,M,E = zip(*tme)
T = [float(el) for el in T]
M = [float(el) for el in M]
E = [float(el) for el in E]
features_already_known["t"] = T
features_already_known["m"] = M
features_already_known["e"] = E
elif all(len(this_tme) == 2 for this_tme in tme):
T,M = zip(*tme)
T = [float(el) for el in T]
M = [float(el) for el in M]
features_already_known["t"] = T
features_already_known["m"] = M
else:
raise Exception("custom_feature_tools.py - docker_extract_features() - not all elements of tme are the same length.")
container_name = str(uuid.uuid4())[:10]
path_to_tmp_dir = os.path.join("/tmp", container_name)
os.mkdir(path_to_tmp_dir)
# copy custom features defs script and pickle the relevant tsdata file into docker temp directory
status_code = call(["cp", script_fpath, os.path.join(path_to_tmp_dir, "custom_feature_defs.py")])
with open(os.path.join(path_to_tmp_dir, "features_already_known.pkl"), "wb") as f:
cPickle.dump(features_already_known,f)
try:
# the (linux) command to run our docker container which will automatically generate features:
cmd = ["docker", "run",
"-v", "%s:/home/mltp" % cfg.PATH_TO_PROJECT_DIRECTORY,
"-v", "%s:/home/mltp/copied_data_files" % path_to_tmp_dir,
"--name=%s" % container_name,
"extract_custom_features"]
# execute command
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
# grab outputs
stdout, stderr = process.communicate()
print "\n\ndocker container stdout:\n\n", stdout, "\n\ndocker container stderr:\n\n", stderr, "\n\n"
# copy all necessary files produced in docker container to host
cmd = ["docker", "cp", "%s:/tmp/results_dict.pkl" % container_name, path_to_tmp_dir]
status_code = call(cmd, stdout=PIPE, stderr=PIPE)
print "/tmp/results_dict.pkl", "copied to host machine - status code %s" % str(status_code)
# load results from copied .pkl file
with open(os.path.join(path_to_tmp_dir, "results_dict.pkl"), "rb") as f:
results_dict = cPickle.load(f)
except:
raise
finally:
# Delete used container
cmd = ["docker", "rm", "-f", container_name]
status_code = call(cmd)#, stdout=PIPE, stderr=PIPE)
print "Docker container deleted."
# Remove tmp dir
shutil.rmtree(path_to_tmp_dir,ignore_errors=True)
return results_dict
def test_new_script(script_fname='testfeature1.py', script_fpath="here",docker_container=False):
    # Exercise a custom-features script against one sample light curve
    # plus two small synthetic data sets; returns the list of extracted
    # feature dicts (one per input data set).
    if script_fpath == "here":
        # default to a script sitting next to this module
        script_fpath = os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), script_fname)
    features_already_known_list = []
    all_fnames = False
    try:
        all_fnames = glob.glob(os.path.join(cfg.PATH_TO_PROJECT_DIRECTORY, ".sample_lcs/dotastro_*.dat"))[:1]
    except:
        pass
    # NOTE(review): the trailing `and False` disables this fallback branch
    # entirely -- presumably left over from debugging; confirm before
    # removing or re-enabling it.
    if is_running_in_docker_container()==True and (not all_fnames or len(all_fnames)==0) and False:
        try:
            all_fnames = glob.glob("/home/mltp/.sample_lcs/dotastro_*.dat")[:1]
        except:
            all_fnames = False
    if not all_fnames or len(all_fnames)==0:
        print "all_fnames:", all_fnames
        raise Exception("No test lc files read in...")
    else:
        for fname in all_fnames:
            t,m,e = parse_csv_file(fname)
            features_already_known_list.append({"t":t,"m":m,"e":e,"coords":[0,0]})
    # two synthetic data sets; the second deliberately has a scalar coords
    features_already_known_list.append({"t":[1,2,3],"m":[50,51,52],"e":[0.3,0.2,0.4],"coords":[-11,-55]})
    features_already_known_list.append({"t":[1],"m":[50],"e":[0.3],"coords":2})
    all_extracted_features_list = []
    for known_featset in features_already_known_list:
        if docker_installed()==True:
            # prefer isolated execution inside a docker container
            print "Extracting features inside docker container..."
            newfeats = docker_extract_features(script_fpath=script_fpath,features_already_known=known_featset)
        else:
            newfeats = execute_functions_in_order(script_fname=script_fname,features_already_known=known_featset,script_fpath=script_fpath)
        all_extracted_features_list.append(newfeats)
    return all_extracted_features_list
def list_features_provided(script_fpath):
    """Parse a custom-features script and return the names of all features
    its @myFeature-decorated functions declare in their `provides` lists."""
    with open(script_fpath) as f:
        all_lines = f.readlines()
    fnames_req_prov_dict = {}
    all_required_params = []
    all_provided_params = []
    for i, line in enumerate(all_lines[:-1]):
        next_line = all_lines[i + 1]
        if "@myFeature" not in line or "def " not in next_line:
            continue
        decorated = parse("@myFeature(requires={requires}, provides={provides})", line.strip())
        signature = parse("def {funcname}({args}):", next_line.strip())
        # NOTE: eval() runs text from the uploaded script -- acceptable only
        # because the script itself is executed elsewhere anyway.
        requires = eval(decorated.named["requires"])
        provides = eval(decorated.named["provides"])
        fnames_req_prov_dict[signature.named["funcname"]] = {
            "requires": requires, "provides": provides}
        all_required_params = list(set(all_required_params) | set(requires))
        all_provided_params = list(set(all_provided_params) | set(provides))
    return all_provided_params
def parse_csv_file(fname, sep=',', skip_lines=0):
    """Parse a time-series CSV file into [t, m, e] lists of floats.

    Each data line must have 3 fields (t, m, e) or 2 fields (t, m);
    lines with any other field count do not advance the line counter
    and contribute no data. The first `skip_lines` counted lines are
    skipped.

    BUG FIX: the file handle was opened without a context manager, so a
    malformed float leaked the handle; `with` now guarantees closure.
    """
    t, m, e = [], [], []
    linecount = 0
    with open(fname) as f:
        for line in f:
            line = line.strip()
            if linecount >= skip_lines:
                fields = line.split(sep)
                if len(fields) == 3:
                    ti, mi, ei = fields
                    t.append(float(ti))
                    m.append(float(mi))
                    e.append(float(ei))
                elif len(fields) == 2:
                    ti, mi = fields
                    t.append(float(ti))
                    m.append(float(mi))
                else:
                    # malformed line: cancel the increment below so it
                    # does not count toward skip_lines accounting
                    linecount -= 1
            linecount += 1
    return [t, m, e]
def generate_custom_features(custom_script_path,path_to_csv,features_already_known,ts_data=None):
if path_to_csv not in [None,False]:
t,m,e = parse_csv_file(path_to_csv)
elif ts_data not in [None,False]:
if len(ts_data[0]) == 3:
t,m,e = zip(*ts_data)
if len(ts_data[0]) == 2:
t,m = zip(*ts_data)
else:
print "predict_class.predict:"
print "path_to_csv:", path_to_csv
print "ts_data:", ts_data
raise Exception("Neither path_to_csv nor ts_data provided...")
features_already_known['t'] = t
features_already_known['m'] = m
if e and len(e)==len(m):
features_already_known['e'] = e
if is_running_in_docker_container()==True:
all_new_features = execute_functions_in_order(script_fname=custom_script_path.split("/")[-1],features_already_known=features_already_known,script_fpath=custom_script_path)
else:
if docker_installed() == True:
print "Generating custom features inside docker container..."
all_new_features = docker_extract_features(script_fpath=custom_script_path,features_already_known=features_already_known)
else:
print "Generating custom features WITHOUT docker container..."
all_new_features = execute_functions_in_order(script_fname=custom_script_path.split("/")[-1],features_already_known=features_already_known,script_fpath=custom_script_path)
return all_new_features
def is_running_in_docker_container():
    """Return True when this process appears to run inside a docker
    container, judged by "/docker/" appearing in /proc/1/cgroup.

    BUG FIXES: the original shelled out to `cat` and crashed with an
    unhandled error on systems without /proc (e.g. macOS); it also
    printed the whole cgroup file to stdout, which pollutes the output
    that callers elsewhere deliberately silence. Reading the file
    directly fixes both, and a missing file simply means "not docker".
    """
    try:
        with open("/proc/1/cgroup") as f:
            output = f.read()
    except IOError:
        # no /proc/1/cgroup: not a linux container environment
        return False
    return "/docker/" in output
if __name__ == "__main__":
    # Smoke test: push the bundled test script through the full
    # feature-extraction pipeline and dump the results to stdout.
    import subprocess
    import sys
    # sys.stdout.encoding is None when stdout is piped; fall back to utf-8
    encoding = sys.stdout.encoding or 'utf-8'
    docker_container = is_running_in_docker_container()
    x = test_new_script(docker_container=docker_container)
    print(str(x).encode(encoding))
    sys.stdout.write( str(x).encode(encoding) )
    if docker_container:
        pass
|
#!/usr/bin/env python
"""
Contains a function which calculates the total mag for a ptf source.
Intended to be called by ptf_master.py and maintenance_ptf_events_add_column.py
"""
import os, sys
import numpy
def calculate_total_mag(candid_dict):
    """ Assumes candid_dict contains keys like:
    {'f_aper': 411.60500000000002,
    'filter': 'R',
    'flux_aper': 219.90899999999999,
    'lmt_mg_new': 21.305,
    'mag': 20.993200000000002,
    'mag_ref': 0.0,
    'pos_sub': True,
    'sub_zp': 27.899999999999999,
    'ub1_zp_ref': 25.699999999999999,
    'ujd': 2454938.7807800001}

    Returns the total magnitude combining the subtraction flux (scaled
    onto the reference zero point) with the reference aperture flux.
    The f_aper term is added for a positive subtraction and subtracted
    (assumed detection in the negative sub) otherwise.
    ### TODO: want some condition where detection in negative_sub == No and
    ### in pos_sub == No -> then total_mag = ref_mag. Limiting-magnitude
    ### handling was intentionally dropped because the downstream feature
    ### code cannot use upper limits.
    """
    # scale factor moving the subtraction flux onto the reference zero point
    zp_scale = numpy.power(10., (-0.4 * (candid_dict['sub_zp'] - candid_dict['ub1_zp_ref'])))
    if candid_dict['pos_sub']:
        total_flux = candid_dict['f_aper'] * zp_scale + candid_dict['flux_aper']
    else:
        # negative subtraction: the aperture flux is removed instead of added
        total_flux = -1. * candid_dict['f_aper'] * zp_scale + candid_dict['flux_aper']
    return -2.5 * numpy.log10(total_flux) + candid_dict['ub1_zp_ref']
|
# Generated by Django 3.1.7 on 2021-03-31 00:44
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.7): adds Entry.updated_date and alters
    # Entry.creation_date to a DateField with auto_now_add.

    dependencies = [
        ('blog', '0003_auto_20210330_1732'),
    ]

    operations = [
        migrations.AddField(
            model_name='entry',
            name='updated_date',
            # auto_now=True: refreshed on every save()
            field=models.DateTimeField(auto_now=True, verbose_name='updated_date'),
        ),
        migrations.AlterField(
            model_name='entry',
            name='creation_date',
            # auto_now_add=True: set once when the row is created
            field=models.DateField(auto_now_add=True, verbose_name='creation_date'),
        ),
    ]
|
from Pages.basic_forms_page import BasicForm
from Utils.locators import FormPageLocators
from Utils.test_data import Data
import time
from Utils.Logger import Logging
import allure
from allure_commons.types import AttachmentType
@allure.severity(allure.severity_level.NORMAL)
class Test_Form:
    """Selenium UI test for the basic input form (pytest + allure).

    Relies on a ``test_setup`` fixture that yields a configured WebDriver.
    """
    logger = Logging.loggen()

    ##################
    @allure.severity(allure.severity_level.BLOCKER)
    def test_input_form(self, test_setup):
        # Open the form page, enter two valid numbers, submit, and capture
        # screenshots for both the local report and the allure report.
        self.logger.info("*************** Test_001_Calculation *****************")
        self.logger.info("*************** Form Test Started *****************")
        self.driver = test_setup
        self.driver.get(FormPageLocators.FormUrl)
        self.obj = BasicForm(self.driver)
        self.obj.input_numbers(Data.get_valid_number1(), Data.get_valid_number2())
        time.sleep(2)  # fixed wait for the page -- explicit waits would be more robust
        self.obj.click_total_button()
        self.logger.info("**** Form Test Passed ****")
        time.sleep(3)
        self.driver.save_screenshot(".\\Screenshots\\" + "test_form.png")
        allure.attach(self.driver.get_screenshot_as_png(), name="testForm", attachment_type=AttachmentType.PNG)
        # close browser
        self.driver.close()
# pytest -v -s --alluredir=".\AllureReports" Tests\test_basic_form.py
# pytest -v --html=PytestReports\basic_form_report.html Tests\test_basic_form.py
|
import boto
import random
from boto.s3.key import Key
conn = boto.connect_s3()
bucket = conn.create_bucket('magicalunicorn')

# Read small, medium, and large payload variants.
# BUG FIX: the three files were opened into the same variable and never
# closed, leaking file handles; `with` now closes each promptly.
with open('small.txt', 'r') as fh:
    small = fh.read()
with open('medium.txt', 'r') as fh:
    medium = fh.read()
with open('large.txt', 'r') as fh:
    large = fh.read()

# Upload 4739 keys, each assigned one of the three payloads at random.
for i in range(0, 4739):
    value = random.randint(0, 2)
    k = Key(bucket)
    k.key = i
    if value == 0:
        str_to_set = small
    elif value == 1:
        str_to_set = medium
    else:
        str_to_set = large
    k.set_contents_from_string(str_to_set)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-10-20 18:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9.2): creates the `resolutions` table and
    # drops the scaffold `Greeting` model.

    dependencies = [
        ('resolution', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='resolutions',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('height', models.IntegerField()),
                ('width', models.IntegerField()),
                ('pixel_density', models.DecimalField(decimal_places=6, max_digits=9)),
                # auto_now_add: stamped once at row creation
                ('time_stamp', models.DateTimeField(auto_now_add=True, verbose_name=b'date created')),
            ],
        ),
        migrations.DeleteModel(
            name='Greeting',
        ),
    ]
|
# settings for falcony Rest api
APP_NAME = 'Falcony'

# PostgreSQL connection settings for the Falcony REST API.
dbConfig = {
    'DbName': 'falcony',
    'Username': 'falcony',
    'Password': 'falcony',
    'Host': 'localhost',
    'Port': 5432
}

# Connection URL assembled from the settings above.
# NOTE(review): 'Port' is not interpolated into the URL, so the driver's
# default port is used -- confirm this is intentional.
db_url = 'postgresql://%s:%s@%s/%s' % (dbConfig['Username'], dbConfig['Password'], dbConfig['Host'], dbConfig['DbName'])
|
from setuptools import setup, find_packages
# Single source of truth for the release version.
# Must be kept in sync with `stlist.__version__`.
version = "1.0.0"

setup(
    name="stlist",
    # BUG FIX: the version was hard-coded "1.0.0" here while download_url
    # used the `version` constant -- reuse the constant so they cannot drift.
    version=version,
    url="https://github.com/FoleyDiver/stlist",
    download_url=f"https://github.com/FoleyDiver/stlist/archive/v{version}.tar.gz",
    license="BSD",
    description="stlist is a library for reading/writing binary plists.",
    author="Steve Foley",
    author_email="sfoley1988@gmail.com",
    packages=find_packages(include=["stlist", "stlist.*"]),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: Markup",
    ],
)
|
from django.db import models
from django.utils import timezone
from django.conf import settings
from datetime import timedelta
# Create your models here.
class CaptchaStore(models.Model):
    """Persisted CAPTCHA challenge: a lookup key, the expected answer,
    and a timestamp used to expire stale challenges."""
    key = models.IntegerField(unique=True)    # client-facing challenge id
    answer = models.CharField(max_length=10)  # expected response text
    # NOTE(review): auto_now refreshes the timestamp on *every* save; if
    # the intent is "time the captcha was created", auto_now_add is the
    # usual choice -- confirm before changing (needs a migration).
    added_time = models.DateTimeField(auto_now=True)

    @classmethod
    def clean_expire(cls):
        """Delete captchas older than settings.CAPTCHA['age'] minutes.

        BUG FIX: the first parameter of this classmethod was named `self`;
        it receives the class, so it is now conventionally named `cls`.
        """
        cls.objects.filter(
            added_time__lte=timezone.now() - timedelta(minutes=settings.CAPTCHA['age'])
        ).delete()
|
'''
An anagram is the result of rearranging the letters of a word to produce
a new word (see wikipedia).
Note: anagrams are case insensitive
Complete the function to return true if the two arguments given
are anagrams of each other; return false otherwise.
Examples
"foefet" is an anagram of "toffee"
"Buckethead" is an anagram of "DeathCubeK"
'''
def is_anagram(test, original):
    """Return True if *test* and *original* are anagrams of each other.

    Comparison is case-insensitive: both words are lower-cased before
    their letters are compared.
    """
    def letter_signature(word):
        # identical multisets of letters produce identical sorted lists
        return sorted(word.lower())
    return letter_signature(test) == letter_signature(original)
|
import random
import numpy as np
from scipy import interpolate
from torch import distributions, nn
import torch.nn.functional as f
from torch.utils.data import Dataset
from config import *
class ImageSet(Dataset):
    # Thin Dataset wrapper pairing each input image with its ground-truth
    # result and the hole-boundary metadata produced by prepare_input().
    def __init__(self, input, result, boundaries):
        # NOTE: `input` shadows the builtin, but renaming it would break
        # keyword callers, so it is kept as-is.
        self.input = input
        self.result = result
        self.boundaries = boundaries

    def __getitem__(self, item):
        # one sample = (network input, target image, hole boundaries)
        return self.input[item], self.result[item], self.boundaries[item]

    def __len__(self):
        return len(self.input)
class Network(torch.nn.Module):
    """Small CNN predicting mixture-density parameters for inpainting.

    The final linear layer emits, in order: k mixture weights, k*n means,
    k*n*l low-rank covariance factors and k*n diagonal terms (see
    loss_function for the unpacking). k, n, l -- and presumably `torch`
    itself -- come in via the `config` star import.
    """
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(3, 30, kernel_size=5)
        self.bn_conv1 = nn.BatchNorm2d(30)
        self.conv2 = nn.Conv2d(30, 60, kernel_size=5)
        self.bn_conv2 = nn.BatchNorm2d(60)
        self.linear1 = nn.Linear(320 * 3, 1256)
        # output width = k + k*n + n*l*k + k*n (weights, means, factors, diag)
        self.linear2 = nn.Linear(1256, k + k * n + n * l * k + k * n)

    def forward(self, x: torch.Tensor):
        # two conv -> batchnorm -> max-pool -> relu stages
        x = f.relu(f.max_pool2d(self.bn_conv1(self.conv1(x)), kernel_size=2))
        x = f.relu(f.max_pool2d(self.bn_conv2(self.conv2(x)), kernel_size=2))
        x = x.view(-1, 320 * 3)  # flatten to the fixed feature width
        x = f.relu(self.linear1(x))
        x = self.linear2(x)
        # softmax only over the first k outputs (the mixture weights)
        x = torch.cat([torch.softmax(x[:, :k], dim=1), x[:, k:]], 1)
        return x
def prepare_input(image):
    # Build the network input: cut a randomly placed rectangle out of
    # `image`, warp the remaining pixels inward so they cover the hole,
    # and re-sample everything onto a regular 28x28 grid.
    # Returns (stacked [z, x_old, y_old] arrays, ((hx0, hx1), (hy0, hy1))).
    # Geometry constants (image_x, image_y, hole_size_*, max_random) come
    # from `config` via the star import.
    image_mod = image.copy()
    x = np.zeros((image_x * image_y,))
    y = np.zeros((image_x * image_y,))
    z = np.zeros((image_x * image_y,))
    counter = 0
    # flatten the image into parallel coordinate/value arrays
    for i, row in enumerate(image_mod):
        for j, a in enumerate(row):
            x[counter] = j
            y[counter] = i
            z[counter] = a
            counter += 1
    img_center_x = image_x / 2
    img_center_y = image_y / 2
    # jitter the hole position around the image center
    dx = random.randint(-max_random, max_random)
    dy = random.randint(-max_random, max_random)
    hole_beg_x = img_center_x - hole_size_x / 2 + dx
    hole_end_x = img_center_x + hole_size_x / 2 + dx
    hole_beg_y = img_center_y - hole_size_y / 2 + dy
    hole_end_y = img_center_y + hole_size_y / 2 + dy
    mask = []
    # remove center rectangle
    for a in range(image_x * image_y):
        if not hole_beg_x < x[a] < hole_end_x or not hole_beg_y < y[a] < hole_end_y:
            mask.append(a)
    x = x[mask]
    y = y[mask]
    z = z[mask]
    # move points to fill hole; keep the original coordinates so the
    # network can be told where each pixel came from
    x_old = np.copy(x)
    y_old = np.copy(y)
    # b1/b2 are intercepts of the two diagonals through the hole corners,
    # used below to decide toward which side each point is pulled
    b1 = hole_end_y - hole_end_x
    b2 = hole_end_y + hole_beg_x
    for a in range(len(x)):
        if hole_beg_x <= x[a] <= hole_end_x or hole_beg_y <= y[a] <= hole_end_y:
            if x[a] + b1 > y[a] and -x[a] + b2 > y[a]:
                # sector below the hole: squeeze points upward over it
                d = (hole_end_y + hole_beg_y) / (2 * hole_beg_y)
                c = (2 - 2 * d) / hole_size_x
                y[a] *= c * abs(x[a] - (hole_beg_x + hole_end_x) / 2) + d
            elif x[a] + b1 < y[a] and -x[a] + b2 < y[a]:
                # opposite sector: same squeeze along the x axis
                d = (hole_end_x + hole_beg_x) / (2 * hole_beg_x)
                c = (2 - 2 * d) / hole_size_y
                x[a] *= c * abs(y[a] - (hole_beg_y + hole_end_y) / 2) + d
            elif x[a] + b1 > y[a] > -x[a] + b2:
                d = (hole_end_y + hole_beg_y) / (2 * hole_beg_y)
                c = (2 - 2 * d) / hole_size_x
                # NOTE(review): unlike the `*=` branches above, `+ d` here is
                # added after the product rather than inside it -- possibly a
                # missing parenthesis; confirm the intended warp formula.
                y[a] = image_y - (image_y - y[a]) * c * abs(x[a] - (hole_beg_x + hole_end_x) / 2) + d
            elif x[a] + b1 < y[a] < -x[a] + b2:
                d = (hole_end_x + hole_beg_x) / (2 * hole_beg_x)
                c = (2 - 2 * d) / hole_size_y
                # NOTE(review): same `+ d` placement question as above.
                x[a] = image_x - (image_x - x[a]) * c * abs(y[a] - (hole_beg_y + hole_end_y) / 2) + d
    # regular 28x28 target grid for re-interpolation
    x_2 = np.arange(0, 28, 1)
    y_2 = np.arange(0, 28, 1)
    x_2, y_2 = np.meshgrid(x_2, y_2)
    z_new = interpolate.griddata((x, y), z, (x_2, y_2), method='linear')
    x_new = interpolate.griddata((x, y), x_old, (x_2, y_2), method='linear')
    y_new = interpolate.griddata((x, y), y_old, (x_2, y_2), method='linear')
    return np.stack([z_new, x_new, y_new]), ((hole_beg_x, hole_end_x), (hole_beg_y, hole_end_y))
def loss_function(x: torch.Tensor, orig, boundaries, k, l, n):
    # Negative log-likelihood of the original hole pixels under the
    # mixture of k low-rank multivariate normals predicted by Network.
    # x: network output batch; orig: original images; boundaries: per-sample
    # hole extents as returned by prepare_input(); k/l/n: mixture sizes.
    # Uses a module-level `device` (presumably from the config star import).
    hole_beg_x = boundaries[0][0].int()
    hole_end_x = boundaries[0][1].int()
    hole_beg_y = boundaries[1][0].int()
    hole_end_y = boundaries[1][1].int()
    x = x.view((len(x), -1))
    sum = torch.tensor(0).double().to(device)
    # unpack the flat output: weights, means, low-rank factors, diagonal
    p: torch.Tensor = x[:, :k].reshape(-1, k)
    m: torch.Tensor = x[:, k:k + k * n].reshape(-1, k, n)
    A: torch.Tensor = x[:, k + k * n:k + k * n + n * l * k].reshape(-1, k, n, l)
    d: torch.Tensor = x[:, k + k * n + n * l * k:].reshape(-1, k, n)
    # abs(d) keeps the diagonal covariance term positive as required
    dist = distributions.lowrank_multivariate_normal.LowRankMultivariateNormal(m, A, torch.abs(d))
    # crop the ground-truth hole region out of every sample in the batch
    layers = torch.stack([orig[i, hole_beg_x[i]:hole_end_x[i], hole_beg_y[i]:hole_end_y[i]] for i in range(len(x))])
    # accumulate -(log weight + component log-likelihood) over the batch
    sum = sum - (p.log() + dist.log_prob(layers.reshape(len(layers), 1, -1))).sum()
    return sum
|
# def sum(x, y):
# return x + y
# x = int(input('请输入x: '));
# y = int(input('请输入y: '));
# print('x + y = ', sum(x, y));
def sum(*numbers):
    """Return the sum of all positional arguments (0 when none are given).

    NOTE: shadows the builtin ``sum`` -- kept because this is the
    exercise's public name.
    """
    total = 0
    for value in numbers:
        total += value
    return total
print(sum(1, 2, 3, 4)) |
# !/usr/bin/python27
# coding: utf8
import re
from midpoint import midp
# f = open('E:\\test\\sac2.hoc', 'r')
# axon = ['0' for n in range(166)]
# dend = ['0' for n in range(29)]
# apic = ['0' for n in range(67)]
def compute_mid():
    # Consume pt3dadd(...) lines from the module-level file handle `f`
    # until the closing '}' of the current section, collect each (x, y, z)
    # triple, and return the section midpoint via midp().
    lst1 = []
    global f
    #global filepoint
    #f.seek(filepoint, 0)
    charl = f.readline()
    chars = ''.join(charl)
    charspattern1 = re.compile(r'.*\}') # charspattern1: }  (section end)
    m1 = re.match(charspattern1, chars)
    while not m1:
        p3dadd = re.compile('.*pt3dadd.*')
        if re.search(p3dadd, chars):
            # pull the argument list out of pt3dadd(x, y, z, diam)
            m = re.match(r'.+pt3dadd\((.+)\)', chars)
            lst = m.group(1).split(', ')
            lst = lst[:3]  # keep x, y, z; drop the diameter
            lst1.append(lst)
        charl = f.readline()
        chars = ''.join(charl)
        m1 = re.match(charspattern1, chars)
    #filepoint = f.tell()
    return midp(lst1[:])
def hocmidp(filename):
    # Parse a NEURON .hoc morphology file from E:\test\ and return the
    # midpoints of all dend, apic and axon sections, concatenated in that
    # order. Relies on module-level globals: `f` (file handle shared with
    # compute_mid) and the dend/axon/apic result lists.
    global f
    global dend
    global axon
    global apic
    # pre-size the per-type result lists ('0' marks an unfilled slot);
    # sizes 166/29/67 are hard-coded for the expected morphology files
    axon = ['0' for n in range(166)]
    dend = ['0' for n in range(29)]
    apic = ['0' for n in range(67)]
    fn = 'E:\\test\\%s' % filename
    print fn
    f = open(fn, 'r')
    try:
        while True :
            #global filepoint
            charl = f.readline()
            if '' == charl:
                # EOF
                break
            chars = ''.join(charl)
            chars = chars.replace(' ', '')
            # match section headers such as "dend[12]{pt3dclear()"
            charspattern = re.compile(r'''
            (dend|axon|apic)
            (\[.+\])? #[count]
            \{pt3dclear\(\)
            ''', re.VERBOSE)
            s = re.search(charspattern, chars)
            if s :
                m = re.match(charspattern,chars)
                if m.group(1) == 'dend':
                    s = re.compile(r'\[.+\]')
                    try:
                        if re.match(s, m.group(2)):
                            # indexed section, e.g. dend[i]
                            i = m.group(2).lstrip('\[')
                            i = i.rstrip('\]')
                            i = int(i)
                            dend[i] = compute_mid()
                    except TypeError:
                        # m.group(2) is None: unindexed "dend{...}" section
                        dend[0] = compute_mid()
                elif m.group(1) == 'axon':
                    s = re.compile(r'\[.+\]')
                    try:
                        if re.match(s, m.group(2)):
                            i = m.group(2).lstrip('\[')
                            i = i.rstrip('\]')
                            i = int(i)
                            axon[i] = compute_mid()
                    except TypeError:
                        axon[0] = compute_mid()
                elif m.group(1) == 'apic':
                    s = re.compile(r'\[.+\]')
                    try:
                        if re.match(s, m.group(2)):
                            i = m.group(2).lstrip('\[')
                            i = i.rstrip('\]')
                            i = int(i)
                            apic[i] = compute_mid()
                    except TypeError:
                        apic[0] = compute_mid()
    finally:
        f.close()
    # convert the numpy midpoints into plain lists for the caller.
    # NOTE(review): any slot still holding the '0' placeholder (section
    # absent from the file) has no .tolist() and raises AttributeError --
    # confirm inputs always populate every pre-sized slot.
    for i in range(len(dend)):
        dend[i] = dend[i].tolist()
    for i in range(len(axon)):
        axon[i] = axon[i].tolist()
    for i in range(len(apic)):
        apic[i] = apic[i].tolist()
    ret = dend[:]
    ret.extend(apic)
    ret.extend(axon)
    return ret
if __name__ == "__main__":
    print ('This is main of module "hocmidp.py"')
    # Python 2 print statement: dump all midpoints parsed from sac1.hoc.
    print hocmidp('sac1.hoc')
|
import matplotlib
import numpy
import pandas as pd
import scipy.signal as signal
# This module gets path of the file from command line. After that data is
# filtered, cleaned and processed.
#
# Created global data windows as list.
# Module-level accumulators; the windowing_* functions append into these,
# so results persist (and grow) across calls.
data_windows_ecg = []
data_windows_gsr = []
def filtering(datafile):
    """Low-pass filter raw sensor samples with a 1 Hz FIR filter.

    datafile: 1-D array-like of raw samples (assumed sampled at 500 Hz --
              TODO confirm against the acquisition code).
    Returns the filtered samples as a numpy array of the same length.
    """
    fs = 500  # sampling rate in Hz; also reused as the FIR tap count
    # BUG FIX: the original passed cutoff=1 with the default Nyquist of 1
    # (invalid) and used the removed "hanning" window alias; give firwin
    # the real sampling rate so the cutoff is 1 Hz, and use "hann".
    h = signal.firwin(fs, cutoff=1, window="hann", fs=fs)
    # BUG FIX: the original called the builtin filter(h, 1, datafile),
    # which is not a signal filter; apply the FIR taps with lfilter.
    filtered_data = signal.lfilter(h, 1, datafile)
    return filtered_data
def windowing_ecg(filtered_ecg, fs):
    """Collect sliding-window start offsets for the ECG signal.

    filtered_ecg: 1-D sequence of filtered samples.
    fs:           sampling rate in Hz.
    Appends window start offsets (60 s windows advanced in 10 s steps) to
    the module-level `data_windows_ecg` list and returns that list.

    NOTE(review): this appends start *indices*, not data slices -- kept
    from the original; confirm the caller expects indices.
    """
    interval = 60 * fs        # 60-second window length, in samples
    moving_length = 10 * fs   # 10-second hop, in samples
    # BUG FIX: the original called range() on the data sequence itself,
    # which raises TypeError; iterate over sample indices instead.
    n = len(filtered_ecg)
    for start in range(0, n, moving_length):
        for window in range(start, n, interval):
            data_windows_ecg.append(window)
    return data_windows_ecg
def windowing_gsr(filtered_gsr, fs):
    """Collect sliding-window start offsets for the GSR signal.

    filtered_gsr: 1-D sequence of filtered samples.
    fs:           sampling rate in Hz.
    Appends window start offsets (60 s windows advanced in 10 s steps) to
    the module-level `data_windows_gsr` list and returns that list.
    """
    interval = 60 * fs        # 60-second window length, in samples
    moving_length = 10 * fs   # 10-second hop, in samples
    # BUG FIX: the original called range() on the data sequence itself,
    # which raises TypeError; iterate over sample indices instead.
    n = len(filtered_gsr)
    for start in range(0, n, moving_length):
        for window in range(start, n, interval):
            data_windows_gsr.append(window)
    return data_windows_gsr
|
# Sales tax
# Computing the sale with taxes added
# Anatoli Penev
# 26.11.2017
# Main function
def main():
    """Prompt for the purchase amount and hand it to calctax()."""
    purchase = float(input("Enter the amount of purchase "))
    calctax(purchase)
# Calculation function and constants
def calctax(price):
    """Print the tax breakdown for `price` and return the grand total.

    Uses fixed state (4%) and county (2%) tax rates.
    Returns price plus the combined tax amount (the original returned
    None, so returning a value is backward compatible).
    """
    state_tax = 0.04    # state tax rate
    county_tax = 0.02   # county tax rate
    state_sale = price * state_tax      # state tax amount owed
    county_sale = price * county_tax    # county tax amount owed
    # BUG FIX: the original summed the *rates* (always 0.06) instead of
    # the tax amounts, so the printed totals were wrong for every price.
    total_tax = state_sale + county_sale
    total_sale = price + total_tax
    print("Amount of purchase = ", format(price, ',.2f'), "\n",
          "State tax = ", format(state_tax, ',.2f'), "\n",
          "County tax =", format(county_tax, ',.2f'), "\n",
          "State sale with tax= ", format(state_sale, ',.2f'), "\n",
          "County sale with tax=", format(county_sale, ',.2f'), "\n",
          "Total sale tax =", format(total_tax, ',.2f'), "\n",
          "Total of sale = ", format(total_sale, ',.2f'), "\n")
    return total_sale
# Guard the entry point so importing this module does not prompt for input.
if __name__ == "__main__":
    main()
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from functools import partial
from pathlib import Path
from textwrap import dedent
from typing import List, Optional, Tuple
from fix_deprecated_globs_usage import SCRIPT_RESTRICTIONS, generate_possibly_new_build, warning_msg
from pants.util.contextutil import temporary_dir
Result = Optional[List[str]]
def run_on_build_file(content: str) -> Tuple[Result, Path]:
    """Write `content` to a temporary BUILD file and run the rewriter on it.

    Returns the rewriter's result (None when no change is needed) and the
    BUILD file's path (the temp dir is gone once this returns; the path is
    only useful for message formatting).
    """
    with temporary_dir() as tmpdir:
        build_path = Path(tmpdir, "BUILD")
        build_path.write_text(content)
        rewritten = generate_possibly_new_build(build_path)
        return rewritten, build_path
def assert_rewrite(
    *, original: str, expected: str, include_field_after_sources: bool = True
) -> None:
    """Render a BUILD template with `original` as the sources field, run the
    rewriter, and assert it produces `expected` (or no-ops when equal).
    """
    template = dedent(
        """\
        # A stray comment
        python_library(
          name="lib",
          {sources_field}
          {dependencies_field}
        )

        python_binary(
          name="bin",
          dependencies=[
            ':lib',
          ],
        )
        """
    )
    # Optionally follow `sources` with another field to exercise the
    # trailing-comma handling.
    dependencies_field = (
        dedent(
            """\
            dependencies=[
              'src/python/pants/util',
            ],
            """
        )
        if include_field_after_sources
        else ""
    )
    result, _ = run_on_build_file(
        template.format(sources_field=original, dependencies_field=dependencies_field),
    )
    if original == expected:
        # Already valid: the script reports "no change" via None.
        assert result is None
    else:
        assert (
            result
            == template.format(
                sources_field=expected, dependencies_field=dependencies_field
            ).splitlines()
        )
def assert_warning_raised(
    caplog,
    *,
    warning_num: int,
    build_file_content: str,
    line_number: int,
    field_name: str,
    replacement: str,
    script_restriction: str,
) -> None:
    """Assert the script no-ops on `build_file_content` and logs the
    expected warning as record `warning_num`."""
    rewrite_result, build = run_on_build_file(build_file_content)
    # The script must refuse to modify the BUILD file.
    assert rewrite_result is None
    expected_msg = warning_msg(
        build_file=build,
        lineno=line_number,
        field_name=field_name,
        replacement=replacement,
        script_restriction=script_restriction,
    )
    assert caplog.records[warning_num].msg == expected_msg
def test_no_op_when_already_valid() -> None:
    """Already-valid `sources` lists must be left untouched."""
    for entry in (
        "sources=['foo.py'],",
        "sources=['!ignore.py'],",
        "sources=[],",
        "sources=['foo.py', '!ignore.py'],",
    ):
        assert_rewrite(original=entry, expected=entry)
def test_includes() -> None:
    """Include arguments of globs()/zglobs() become plain list entries."""
    # globs and zglobs are rewritten identically for bare includes.
    for fn in ("globs", "zglobs"):
        assert_rewrite(original=f"sources={fn}(),", expected="sources=[],")
        assert_rewrite(original=f"sources={fn}('foo.py'),", expected="sources=['foo.py'],")
        assert_rewrite(
            original=f"sources={fn}('foo.py', 'bar.py'),",
            expected="sources=['foo.py', 'bar.py'],",
        )
def test_excludes() -> None:
    """Exclude arguments become `!`-prefixed entries, whether given as
    strings, nested globs, lists, or a mix, and compose with includes."""
    assert_rewrite(original="sources=globs(exclude=[]),", expected="sources=[],")
    # `exclude` elements are strings
    assert_rewrite(
        original="sources=globs(exclude=['ignore.py']),", expected="sources=['!ignore.py'],"
    )
    assert_rewrite(
        original="sources=globs(exclude=['ignore.py', 'ignore2.py']),",
        expected="sources=['!ignore.py', '!ignore2.py'],",
    )
    # `exclude` elements are globs
    assert_rewrite(
        original="sources=globs(exclude=[globs('ignore.py')]),", expected="sources=['!ignore.py'],"
    )
    assert_rewrite(
        original="sources=globs(exclude=[globs('ignore.py'), globs('ignore2.py')]),",
        expected="sources=['!ignore.py', '!ignore2.py'],",
    )
    # `exclude` elements are lists
    assert_rewrite(
        original="sources=globs(exclude=[['ignore.py']]),", expected="sources=['!ignore.py'],"
    )
    assert_rewrite(
        original="sources=globs(exclude=[[globs('ignore.py')]]),",
        expected="sources=['!ignore.py'],",
    )
    # `exclude` elements are all of the above
    assert_rewrite(
        original="sources=globs(exclude=['ignore1.py', globs('ignore2.py'), ['ignore3.py'], [globs('ignore4.py')]]),",
        expected="sources=['!ignore1.py', '!ignore2.py', '!ignore3.py', '!ignore4.py'],",
    )
    # check that `exclude` plays nicely with includes
    assert_rewrite(
        original="sources=globs('foo.py', 'bar.py', exclude=['ignore1.py', 'ignore2.py']),",
        expected="sources=['foo.py', 'bar.py', '!ignore1.py', '!ignore2.py'],",
    )
def test_normalizes_rglobs() -> None:
    """rglobs() entries whose path component starts with `*` gain a
    recursive `**/` prefix; other entries are passed through."""
    # Expand when the path component starts with a `*`
    assert_rewrite(original="sources=rglobs('*'),", expected="sources=['**/*'],")
    assert_rewrite(original="sources=rglobs('*.txt'),", expected="sources=['**/*.txt'],")
    assert_rewrite(original="sources=rglobs('test/*.txt'),", expected="sources=['test/**/*.txt'],")
    assert_rewrite(original="sources=rglobs('*/*/*.txt'),", expected="sources=['**/*/*/**/*.txt'],")
    # Don't expand in these cases
    assert_rewrite(original="sources=rglobs('foo.py'),", expected="sources=['foo.py'],")
    assert_rewrite(original="sources=rglobs('test_*'),", expected="sources=['test_*'],")
    assert_rewrite(original="sources=rglobs('**/*'),", expected="sources=['**/*'],")
    # Check the intersection with the `exclude` clause
    assert_rewrite(
        original="sources=rglobs('foo.py', exclude=['*']),", expected="sources=['foo.py', '!*'],"
    )
    assert_rewrite(
        original="sources=globs('foo.py', exclude=[rglobs('*')]),",
        expected="sources=['foo.py', '!**/*'],",
    )
def test_correctly_formats_rewrite() -> None:
    """The rewrite preserves the `sources` prefix spelling (spacing around
    `=`), strips trailing whitespace, normalizes quotes, and always emits a
    trailing comma."""
    # Preserve the original `sources` prefix, including whitespace
    assert_rewrite(original="sources=globs('foo.py'),", expected="sources=['foo.py'],")
    assert_rewrite(original="sources = globs('foo.py'),", expected="sources = ['foo.py'],")
    assert_rewrite(original="sources =globs('foo.py'),", expected="sources =['foo.py'],")
    assert_rewrite(original="sources= globs('foo.py'),", expected="sources= ['foo.py'],")
    assert_rewrite(original="sources = globs('foo.py'),", expected="sources = ['foo.py'],")
    assert_rewrite(original="sources = globs('foo.py'),", expected="sources = ['foo.py'],")
    assert_rewrite(original=" sources=globs('foo.py'),", expected=" sources=['foo.py'],")
    # Strip stray trailing whitespace
    assert_rewrite(original="sources=globs('foo.py'), ", expected="sources=['foo.py'],")
    # Preserve whether the original used single quotes or double quotes
    assert_rewrite(original="""sources=globs("foo.py"),""", expected="""sources=["foo.py"],""")
    assert_rewrite(
        original="""sources=globs("double.py", "foo.py", 'single.py'),""",
        expected="""sources=["double.py", "foo.py", "single.py"],""",
    )
    # Always use a trailing comma
    assert_rewrite(
        original="sources=globs('foo.py')",
        expected="sources=['foo.py'],",
        include_field_after_sources=False,
    )
    # Maintain insertion order for includes
    assert_rewrite(
        original="sources=globs('dog.py', 'cat.py'),", expected="sources=['dog.py', 'cat.py'],"
    )
def test_warns_when_sources_shares_a_line(caplog) -> None:
    """The script only warns (no rewrite) when `sources` shares its line
    with other fields or the target call."""
    assert_warning = partial(
        assert_warning_raised,
        caplog,
        field_name="sources",
        replacement="['foo.py']",
        script_restriction=SCRIPT_RESTRICTIONS["sources_must_be_distinct_line"],
    )
    assert_warning(
        build_file_content="files(sources=globs('foo.py'))", warning_num=0, line_number=1
    )
    assert_warning(
        build_file_content=dedent(
            """\
            files(
              name='bad', sources=globs('foo.py'),
            )
            """
        ),
        warning_num=1,
        line_number=2,
    )
def test_warns_when_sources_is_multiline(caplog) -> None:
    """The script only warns (no rewrite) when the globs() call spans
    multiple lines."""
    assert_warning = partial(
        assert_warning_raised,
        caplog,
        field_name="sources",
        replacement='["foo.py", "bar.py"]',
        script_restriction=SCRIPT_RESTRICTIONS["sources_must_be_single_line"],
        line_number=3,
    )
    assert_warning(
        build_file_content=dedent(
            """\
            files(
              name='bad',
              sources=globs(
                "foo.py",
                "bar.py",
              ),
            )
            """
        ),
        warning_num=0,
        # We can't easily infer whether to use single vs. double quotes
        replacement='["foo.py", "bar.py"]',
    )
    assert_warning(
        build_file_content=dedent(
            """\
            files(
              name='bad',
              sources=globs('foo.py',
                'bar.py'),
            )
            """
        ),
        warning_num=1,
        replacement="['foo.py', 'bar.py']",
    )
def test_warns_on_comments(caplog) -> None:
    """A trailing comment on the `sources` line blocks the rewrite and
    produces a warning instead."""
    assert_warning_raised(
        caplog,
        build_file_content=dedent(
            """\
            files(
              sources=globs('foo.py'), # a comment
            )
            """
        ),
        warning_num=0,
        line_number=2,
        replacement="['foo.py']",
        field_name="sources",
        script_restriction=SCRIPT_RESTRICTIONS["no_comments"],
    )
def test_warns_on_bundles(caplog) -> None:
    """bundle(fileset=...) entries are never rewritten: valid ones no-op
    silently, glob-based ones produce one warning per bad bundle."""
    def assert_no_op(build_file_content: str) -> None:
        # Valid bundles: the script leaves the file alone without warning.
        result, _ = run_on_build_file(build_file_content)
        assert result is None
    assert_no_op(
        dedent(
            """\
            jvm_app(
              bundles=[],
            )
            """
        )
    )
    assert_no_op(
        dedent(
            """\
            jvm_app(
              bundles=[
                bundle(fileset=[]),
              ],
            )
            """
        )
    )
    assert_no_op(
        dedent(
            """\
            jvm_app(
              bundles=[
                bundle(fileset=['foo.java', '!ignore.java']),
              ],
            )
            """
        )
    )
    assert_warning_raised(
        caplog,
        build_file_content=dedent(
            """\
            jvm_app(
              bundles=[
                bundle(fileset=globs('foo.java')),
              ],
            )
            """
        ),
        warning_num=0,
        field_name="bundle(fileset=)",
        line_number=3,
        replacement="['foo.java']",
        script_restriction=SCRIPT_RESTRICTIONS["no_bundles"],
    )
    def check_multiple_bad_bundle_entries(
        build_file_content: str,
        warning_slice: slice,
        *,
        replacements_and_line_numbers: List[Tuple[str, int]],
    ) -> None:
        # One warning is expected per bad bundle entry, in order.
        result, build = run_on_build_file(build_file_content)
        assert result is None
        for record, replacement_and_line_number in zip(
            caplog.records[warning_slice], replacements_and_line_numbers
        ):
            replacement, line_number = replacement_and_line_number
            assert record.message == warning_msg(
                build_file=build,
                lineno=line_number,
                field_name="bundle(fileset=)",
                replacement=replacement,
                script_restriction=SCRIPT_RESTRICTIONS["no_bundles"],
            )
    check_multiple_bad_bundle_entries(
        dedent(
            """\
            jvm_app(
              bundles=[
                bundle(fileset=globs('foo.java')),
                bundle(fileset=globs('bar.java')),
              ],
            )
            """
        ),
        warning_slice=slice(1, 3),
        replacements_and_line_numbers=[("['foo.java']", 3), ("['bar.java']", 4)],
    )
    check_multiple_bad_bundle_entries(
        dedent(
            """\
            jvm_app(
              bundles=[bundle(fileset=globs('foo.java')), bundle(fileset=globs('bar.java'))],
            )
            """
        ),
        warning_slice=slice(3, 5),
        replacements_and_line_numbers=[("['foo.java']", 2), ("['bar.java']", 2)],
    )
def test_warns_on_variables(caplog) -> None:
    """Globs built from variables cannot be evaluated; the script must
    no-op and log a parse warning for both includes and excludes."""
    result, build = run_on_build_file(
        dedent(
            """\
            files(
              sources=globs(VARIABLE, VAR2),
            )
            """
        )
    )
    assert result is None
    assert f"Could not parse the globs in {build} at line 2." in caplog.records[0].message
    result, build = run_on_build_file(
        dedent(
            """\
            files(
              sources=globs('foo.py', exclude=[VAR1, [VAR2], glob(VAR3)]),
            )
            """
        )
    )
    assert result is None
    assert f"Could not parse the exclude globs in {build} at line 2." in caplog.records[1].message
|
# Read an employee number, hours worked, and hourly rate from stdin,
# then print the employee number and the computed salary.
numero = int(input())
horas = int(input())
valor_hora = float(input())
salario = horas * valor_hora
print(f'NUMBER = {numero}\nSALARY = U$ %.2f' % salario)
|
import os
def clear_previous_map(path_to_clear: str) -> None:
    """Delete the file at `path_to_clear` if it exists; no-op otherwise.

    Uses EAFP instead of the original exists()+remove() pair, which could
    raise if the file disappeared between the check and the removal.
    """
    try:
        os.remove(path_to_clear)
    except FileNotFoundError:
        pass
|
from eve import Eve
from machine import more_than
# def before_insert(resource_name, documents):
# if resource_name == 'request':
# for document in documents:
# print(document)
# document['more_than_one_hour'] = more_than(1)
# document['more_than_two_hours'] = more_than(2)
def before_get(resource_name, response):
    """Eve on_fetched_resource hook: replace the `request` payload with
    the computed more_than flags."""
    print(response)
    if resource_name != 'request':
        return
    flags = {'more_than_one_hour': more_than(1), 'more_than_two_hours': more_than(2)}
    response['_items'] = [flags]
# Build the Eve app from the local settings module, attach the
# fetched-resource hook, and serve in debug mode.
app = Eve(settings='settings.py')
# app.on_insert += before_insert
app.on_fetched_resource += before_get
app.run(debug=True)
|
import os
import telebot
from telebot import *
from random import randint
import subprocess
# Telegram bot credentials; replace with a real token before running.
API_KEY = "Your API Key"
bot = telebot.TeleBot(API_KEY, parse_mode=None)
@bot.message_handler(commands=['start'])
def start(message):
    # Greet the user when they issue /start.
    bot.reply_to(message, 'Hey there, I am AppGenie!')
@bot.message_handler(commands=['greet', 'hi', 'hey', 'hello'])
def greet(message):
    """Reply to a greeting command with one random canned response."""
    responses = ['Howdy', 'Hey there', 'Hello', 'Hi back!', 'Hi hi', 'Hello there']
    pick = randint(0, len(responses) - 1)
    bot.send_message(message.chat.id, responses[pick])
def die():
    """Terminate the bot process.

    Raises SystemExit directly instead of calling exit(): the exit()
    helper is installed by the `site` module and is not guaranteed to
    exist (e.g. under `python -S` or in frozen builds).
    """
    raise SystemExit
@bot.message_handler(commands=['killme'])
def killme(message):
    # Acknowledge the command, then terminate the whole bot process.
    bot.reply_to(message, 'Good Bye.' )
    die()
@bot.message_handler(commands=['open'])
def open_app(message):
    """Show a one-time reply keyboard of launchable desktop apps and
    route the user's choice to start_app()."""
    markup = types.ReplyKeyboardMarkup()
    markup.one_time_keyboard=True
    firefox = types.KeyboardButton('FireFox 🌐')
    vscode = types.KeyboardButton('VS Code 💻')
    terminal = types.KeyboardButton('Terminal 📟')
    spotify = types.KeyboardButton('Spotify 🎵')
    whatsapp = types.KeyboardButton('WhatsApp 🟢')
    discord = types.KeyboardButton('Discord 🎮')
    # NOTE(review): the button is labelled 'FDM ⬇️' although the variable
    # is named `telegram` — confirm which app was intended.
    telegram = types.KeyboardButton('FDM ⬇️')
    netflix = types.KeyboardButton('Netflix 📺')
    vlc = types.KeyboardButton('VLC 💿')
    # fdm = types.KeyboardButton('FDM 📲')
    markup.row(firefox, vscode, terminal)
    markup.row(spotify, whatsapp, discord)
    markup.row(telegram, netflix, vlc)
    app_to_open = bot.send_message(message.chat.id, 'Which app would like to open?', reply_markup=markup)
    # The user's next message (button press) is handled by start_app().
    bot.register_next_step_handler(app_to_open, start_app)
def start_app(message):
    """Launch the desktop application matching the chosen keyboard button.

    Unknown button texts are silently ignored, matching the original
    if/elif chain's fall-through behavior.
    """
    print(message.text)
    # Button caption -> executable path. Netflix is a Store app and must
    # be launched through its URI protocol instead of a file path.
    app_paths = {
        'FireFox 🌐': r'C:\Program Files\Mozilla Firefox\firefox.exe',
        'VS Code 💻': r'C:\Program Files\Microsoft VS Code\Code.exe',
        'Terminal 📟': r'C:\Users\JasonPC\AppData\Local\Microsoft\WindowsApps\wt.exe',
        'Spotify 🎵': r'C:\Users\JasonPC\AppData\Local\Microsoft\WindowsApps\Spotify.exe',
        'WhatsApp 🟢': r'C:\Users\JasonPC\AppData\Local\WhatsApp\WhatsApp.exe',
        'Discord 🎮': r'C:\Users\JasonPC\AppData\Local\Discord\app-1.0.9001\Discord.exe',
        'FDM ⬇️': r'C:\Program Files\FreeDownloadManager.ORG\Free Download Manager\fdm.exe',
        'VLC 💿': r'C:\Program Files\VideoLAN\VLC\vlc.exe',
    }
    if message.text == 'Netflix 📺':
        os.system('Start netflix:')
    elif message.text in app_paths:
        subprocess.call(app_paths[message.text])
# Start long-polling; blocks forever handling incoming updates.
bot.polling()
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################################
# #
# plot_grating_focus.py: update grating focus plots #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 12, 2021 #
# #
#################################################################################################
import os
import sys
import re
import random
import numpy
import time
import Chandra.Time
import matplotlib as mpl
if __name__ == '__main__':
    # Select the non-interactive Agg backend before pylab/pyplot are
    # imported, so the script can run without a display.
    mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
#
#--- reading directory list
#
path = '/data/mta/Script/Grating/Focus/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    # Each line looks like "<value> : <name>"; define <name> = <value>.
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE(review): exec on file contents defines module globals such as
    # data_dir/web_dir/bin_dir/mta_dir. Only run against the trusted
    # house_keeping file — exec on untrusted input is unsafe.
    exec("%s = %s" %(var, line))
#
#--- append pathes to private folders to a python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
import mta_common_functions as mcf
import find_moving_average as mavg #---- contains moving average routine
#----------------------------------------------------------------------------------------------
#-- update_focus_data_plot: update grating focus plots ---
#----------------------------------------------------------------------------------------------
def update_focus_data_plot():
    """
    update grating focus plots
    input:  none, but <data_dir>/acis_hetg, acis_letg, hrc_letg
    output: <web_dir>/Plots/<d_file>_<ax_lrf/streak_lrf>_focus.png
    """
    for d_file in ['acis_hetg', 'acis_letg', 'hrc_letg']:
        out = read_focus_data(d_file)
        # Renamed from `time` to avoid shadowing the imported time module.
        t_data = out[0]
        #
        #--- axis LRF: 10%/50% widths, gaussian fwhm, plus the error column
        #
        try:
            a_set = [out[1], out[2], out[5], out[7]]
        except IndexError:
            continue
        [ctime, c_set] = remove_non_data(t_data, a_set)
        titles = ['AX LRF at 10% Peak', 'AX LRF at 50% Peak', 'Gaussian FWHM']
        outname = web_dir + 'Plots/' + d_file + '_ax_lrf_focus.png'
        y_label = 'Width (microns)'
        y_limits = [[50,110], [20, 60], [20,60]]
        plot_data(ctime, c_set, titles, outname, y_label, y_limits)
        #
        #--- streak LRF: same layout from the streak columns
        #
        try:
            s_set = [out[3], out[4], out[6], out[8]]
        except IndexError:
            continue
        # BUG FIX: the original passed a_set here, so the streak plots
        # silently redrew the axis-LRF data; pass s_set instead.
        [ctime, c_set] = remove_non_data(t_data, s_set)
        titles = ['Streak LRF at 10% Peak', 'Streak LRF at 50% Peak', 'Gaussian FWHM']
        outname = web_dir + 'Plots/' + d_file + '_streak_lrf_focus.png'
        y_label = 'Width (microns)'
        y_limits = [[50, 110], [20, 60], [20, 60]]
        plot_data(ctime, c_set, titles, outname, y_label, y_limits)
#----------------------------------------------------------------------------------------------
#-- remove_non_data: removing non (-999) data and data outside of the useable valeus --
#----------------------------------------------------------------------------------------------
def remove_non_data(x, t_set):
    """Drop entries whose values fall outside the usable ranges.

    input:  x     --- time list
            t_set --- four parallel lists; the first three hold values
                      and the fourth holds errors
    output: [x, [v1, v2, v3]] --- filtered time array plus the three
            cleaned value lists (the error column is dropped)
    """
    x = numpy.array(x)
    cols = [numpy.array(col) for col in t_set]
    # Keep rows where both of the first two value columns lie in (0, 100);
    # applying one combined mask is equivalent to two sequential passes.
    keep = (cols[0] > 0) & (cols[0] < 100) & (cols[1] > 0) & (cols[1] < 100)
    x = x[keep]
    cols = [col[keep] for col in cols]
    # Then keep rows whose error column is below 10.
    keep = cols[3] < 10
    x = x[keep]
    cols = [col[keep] for col in cols]
    return [x, [list(cols[0]), list(cols[1]), list(cols[2])]]
#----------------------------------------------------------------------------------------------
#-- plot_data: plot data --
#----------------------------------------------------------------------------------------------
def plot_data(xdata, y_set, titles, outname, y_label, y_limits):
    """
    plot data
    input:  xdata    --- x data (fractional years)
            y_set    --- a list of three y data lists, one per panel
            titles   --- panel title strings
            outname  --- output plot file; assume it is png
            y_label  --- y axis label (drawn on the middle panel only)
            y_limits --- [ymin, ymax] pair per panel
    output: outname png with three stacked panels (data + moving-average
            envelopes from mavg.find_moving_average)
    """
    #
    #--- set sizes
    #
    fsize = 18
    color = 'blue'
    color2 = 'red'
    marker = '.'
    psize = 8
    lw = 3
    alpha = 0.3
    width = 10.0
    height = 10.0
    resolution = 200
    # x range runs from 1999 to just past the last data point; a point in
    # the final 30% of a year pushes the limit one extra year out.
    xmin = 1999
    xmax = max(xdata)
    diff = xmax - int(xmax)
    if diff > 0.7:
        xmax = int(xmax) + 2
    else:
        xmax = int(xmax) + 1
    diff = xmax - xmin
    xpos = xmin + 0.02 * diff
    #
    #--- close everything opened before
    #
    plt.close('all')
    #
    #--- set font size
    #
    mpl.rcParams['font.size'] = fsize
    props = font_manager.FontProperties(size=fsize)
    plt.subplots_adjust(hspace=0.08)
    #
    #--- set plotting range
    #
    for k in range(0, 3):
        plt.subplots_adjust(hspace=0.08)
        ymin = y_limits[k][0]
        ymax = y_limits[k][1]
        diff = ymax - ymin
        ypos = ymax - 0.1 * diff
        # '31<k+1>': three rows, one column, panel k+1.
        panel = '31' + str(k+1)
        ax = plt.subplot(panel)
        ax.set_autoscale_on(False)
        ax.set_xbound(xmin,xmax)
        ax.set_xlim(left=xmin, right=xmax, auto=False)
        ax.set_ylim(bottom=ymin, top=ymax, auto=False)
        plt.plot(xdata, y_set[k], color=color, marker=marker, markersize=psize, lw=0)
        plt.tight_layout()
        # Drop out-of-range points before fitting the moving average.
        [x, y] = remove_extreme(xdata, y_set[k])
        [xv, movavg, sigma, min_sv, max_sv, ym, yb, yt, y_sig] \
                    = mavg.find_moving_average(x, y, 1.0, 3, nodrop=0)
        #
        #--- plot envelopes
        #
        plt.plot(xv, yb, color=color2, marker=marker, markersize=0, lw=lw, alpha=alpha)
        plt.plot(xv, ym, color=color2, marker=marker, markersize=0, lw=lw, alpha=alpha)
        plt.plot(xv, yt, color=color2, marker=marker, markersize=0, lw=lw, alpha=alpha)
        #
        #--- add label
        #
        plt.text(xpos, ypos, titles[k], color=color)
        if k == 2:
            plt.xlabel('Time (year)')
        else:
            # Only the bottom panel shows x tick labels.
            plt.setp(ax.get_xticklabels(), visible=False)
        if k == 1:
            plt.ylabel(y_label)
    fig = matplotlib.pyplot.gcf()
    fig.set_size_inches(width, height)
    plt.tight_layout()
    plt.savefig(outname, format='png', dpi=resolution)
    plt.close('all')
#----------------------------------------------------------------------------------------------
#-- remove_extreme: remove extreme data points --
#----------------------------------------------------------------------------------------------
def remove_extreme(x, y):
    """Drop data points whose y value is outside (0, 300).

    input:  x --- a list of x data
            y --- a list of y data
    output: [x, y] as numpy arrays with out-of-range points removed

    NOTE: the original also computed a 3-sigma band (mean +/- 3*std) but
    never used it; the fixed 0 < y < 300 cut was always the real filter,
    so the dead computation was removed.
    """
    x = numpy.array(x)
    y = numpy.array(y)
    index = (y > 0) & (y < 300)
    return [x[index], y[index]]
#----------------------------------------------------------------------------------------------
#-- read_focus_data: read data file and extract data needed --
#----------------------------------------------------------------------------------------------
def read_focus_data(infile):
    """
    read data file and return lists of times and values
    input:  infile --- data file name (relative to data_dir)
    output: t_list  --- a list of time data (fractional years)
            c1_list --- a list of data (ax slf 10%)
            c2_list --- a list of data (ax slf 50%)
            c3_list --- a list of data (streak slf 10%)
            c4_list --- a list of data (streak slf 50%)
            c5_list --- a list of data (ax slf fwhm)
            c6_list --- a list of data (streak slf fwhm)
            c7_list --- a list of data (ax slf fwhm error)
            c8_list --- a list of data (streak slf fwhm error)
    """
    infile = data_dir + infile
    print("Data: " + str(infile))
    data = mcf.read_data_file(infile)
    t_list = []
    c1_list = []
    c2_list = []
    c3_list = []
    c4_list = []
    c5_list = []
    c6_list = []
    c7_list = []
    c8_list = []
    for ent in data:
        atemp = re.split('\s+', ent)
        try:
            t = mcf.chandratime_to_fraq_year(float(atemp[0]))
            v1 = float(atemp[1])
            v2 = float(atemp[2])
            v3 = float(atemp[3])
            v4 = float(atemp[4])
            v5 = float(atemp[5])
            v6 = float(atemp[6])
            # NOTE(review): v7 and v8 re-read column 6, but the docstring
            # says they are the fwhm *errors* (likely atemp[7]/atemp[8]).
            # Confirm the data file's column layout before changing --
            # if those columns are absent, the bare except below would
            # start skipping every row.
            v7 = float(atemp[6])
            v8 = float(atemp[6])
        except:
            # Skip header lines and malformed rows.
            continue
        t_list.append(t)
        c1_list.append(v1)
        c2_list.append(v2)
        c3_list.append(v3)
        c4_list.append(v4)
        c5_list.append(v5)
        c6_list.append(v6)
        c7_list.append(v7)
        c8_list.append(v8)
    return [t_list, c1_list, c2_list, c3_list, c4_list, c5_list, c6_list, c7_list, c8_list]
#---------------------------------------------------------------------------------------------
# Entry point: regenerate all grating focus plots.
if __name__ == "__main__":
    update_focus_data_plot()
|
from .text_writer import *
|
# Count down from 6 to 1, printing one number per line.
for value in range(6, 0, -1):
    print(value)
import random
import os
import definitions
import wsdm.ts.helpers.persons.persons as p_lib
import wsdm.ts.helpers.nationalities.nationalities as nat_lib
import wsdm.ts.helpers.professions.professions as prof_lib
import wsdm.ts.helpers.train.common_train as common_train
# Number of examples to sample per nationality/profession bucket.
NEGATIVE_EXAMPLES_COUNT = 10
# Full person pool, loaded once at import time.
persons = common_train.init_persons()
def init_negative_nationalities():
    """Sample NEGATIVE_EXAMPLES_COUNT distinct negative-example persons
    for each nationality.

    NOTE(review): loops until enough negatives are found; if the person
    pool cannot supply them this never terminates — confirm upstream data.
    """
    global persons
    result = common_train.init_nationalities_empty_dict()
    for nationality in result:
        chosen = result[nationality]
        while len(chosen) < NEGATIVE_EXAMPLES_COUNT:
            candidate = random.choice(persons)
            if candidate in chosen:
                continue
            if not common_train.is_nationality_negative(candidate, nationality):
                continue
            chosen.append(candidate)
            print(nationality, candidate)
    return result
def init_positive_nationalities():
    """Sample positive nationality examples until the overall total reaches
    len(buckets) * NEGATIVE_EXAMPLES_COUNT (buckets may fill unevenly).

    NOTE(review): terminates only if enough distinct positive persons
    exist; otherwise this loops forever — confirm upstream data.
    """
    global persons
    result = common_train.init_nationalities_empty_dict()
    total_count = 0
    target = len(result) * NEGATIVE_EXAMPLES_COUNT
    while total_count < target:
        candidate = random.choice(persons)
        nationality = common_train.get_positive_nationality(candidate)
        if nationality is None or candidate in result[nationality]:
            continue
        result[nationality].append(candidate)
        total_count += 1
        print(total_count, nationality, candidate)
    return result
def init_negative_professions():
    """Sample NEGATIVE_EXAMPLES_COUNT distinct negative-example persons
    for each profession.

    NOTE(review): loops until enough negatives are found; if the person
    pool cannot supply them this never terminates — confirm upstream data.
    """
    global persons
    result = common_train.init_professions_empty_dict()
    for profession in result:
        # The original fetched prof_lib.get_similarity_words(profession)
        # here but never used the value; the dead call was removed.
        while len(result[profession]) < NEGATIVE_EXAMPLES_COUNT:
            person = random.choice(persons)
            if person not in result[profession] and common_train.is_profession_negative(person, profession):
                result[profession].append(person)
                print(profession, person)
    return result
def init_positive_professions():
    """Sample positive profession examples until the overall total reaches
    len(buckets) * NEGATIVE_EXAMPLES_COUNT (buckets may fill unevenly).

    NOTE(review): terminates only if enough distinct positive persons
    exist; otherwise this loops forever — confirm upstream data.
    """
    global persons
    result = common_train.init_professions_empty_dict()
    total_count = 0
    target = len(result) * NEGATIVE_EXAMPLES_COUNT
    while total_count < target:
        candidate = random.choice(persons)
        profession = common_train.get_positive_profession(candidate)
        if profession is None or candidate in result[profession]:
            continue
        result[profession].append(candidate)
        total_count += 1
        print(total_count, profession, candidate)
    return result
if __name__ == '__main__':
    # Build and persist the nationality training data...
    positive_nationalities = init_positive_nationalities()
    negative_nationalities = init_negative_nationalities()
    common_train.save_train_data(positive_nationalities, negative_nationalities, os.path.join(definitions.TRAINING_DIR, "custom_nationality.train"))
    # ...then the profession training data.
    positive_professions = init_positive_professions()
    negative_professions = init_negative_professions()
    common_train.save_train_data(positive_professions, negative_professions, os.path.join(definitions.TRAINING_DIR, "custom_profession.train"))
|
from pyelasticsearch import ElasticSearch
from pyelasticsearch.client import ConnectionError
from django.conf import settings
from collections import defaultdict
import hashlib
import math
from .base import BaseBackend
class ElasticSearchBackend(BaseBackend):
    """Search backend that indexes and queries site pages in ElasticSearch.

    Documents are buffered per site and bulk-indexed once `batch_size`
    pages have accumulated (or on an explicit commit). Python 2 code.
    """
    def __init__(self, es_url='http://localhost:9200/', batch_size=10, **kwargs):
        """
        Do what is necessary to create/open the index.

        kwargs: fast  --- skip the delete-before-reindex pass in commit()
                noisy --- mirror pyelasticsearch debug logging to stdout
        """
        self.batch_size = batch_size
        self.batch_count = 0
        self.es_url = es_url
        self.fast = kwargs.get('fast', False)
        if kwargs.get('noisy', False):
            from logging import getLogger, StreamHandler, DEBUG
            import sys
            logger = getLogger('pyelasticsearch')
            logger.setLevel(DEBUG)
            logger.addHandler(StreamHandler(sys.stdout))
        self.es = ElasticSearch(self.es_url)
        # Probe the server early so a bad URL fails fast.
        try:
            self.es.count('*')
        except ConnectionError:
            print "Error connecting to ElasticSearch server!"
            raise
        self.urls = defaultdict(set) #track urls to be deleted before committing new content
        self.batches = defaultdict(list) #site: [list of docs]
    def create_index(self, name):
        # Create the per-site index and install its mapping.
        # Index names must be lowercase in ElasticSearch.
        name = name.lower()
        try:
            self.es.create_index(name)
            self.update_mapping(name)
        except Exception, e:
            # Index may already exist; log and carry on.
            print e
            return
    def update_mapping(self, name):
        #update the ES mapping, which is roughly equivalent to a schema
        # Titles and headings are boosted over body content; url is stored
        # both analyzed (for search) and not_analyzed (for exact deletes).
        mapping = {"page":
                    {"properties":
                        {"content":{"type":"string", 'boost':1.0},
                         "hash":{"type":"string"},
                         "headings":{"type":"string", 'boost':3.0},
                         "title":{"type":"string", 'boost': 5.0},
                         "site":{'type':'string', 'index':'not_analyzed'},
                         "url":{"type":"multi_field",
                                "fields": {
                                    "url": {"type":"string", "index":"analyzed"},
                                    "exact": {"type":"string", "index":"not_analyzed"}
                                    }
                                }
                         }
                     }
                   }
        self.es.put_mapping(name, 'page', mapping)
    def add_page(self, url, title, content, site, headings='', commit=False):
        """
        Adds a page to the index and commits the batch if specified.
        """
        hsh = self._hash(content)
        doc = {'url': url,
               'site': site,
               'title': title,
               'content': content,
               'headings':headings,
               'hash':hsh}
        self.urls[site].add(url)
        self.batches[site].append(doc)
        self.batch_count += 1
        # Flush automatically once the batch is full.
        if commit or self.batch_count > self.batch_size:
            self.commit()
    def delete_by_url(self, site, url):
        """
        Hack for inability to specify more than one key field.
        """
        #@todo: When pyelasticsearch's delete_by_query works, use it here
        results = self.es.search(index=site.lower(), doc_type='page', query={'query':{'term':{'url.exact':url}}})
        ids = [hit['_id'] for hit in results['hits']['hits']]
        for id in ids:
            print "Deleting %s" % id
            self.es.delete(index=site.lower(), doc_type='page', id=id)
    def commit(self):
        """Flush buffered docs: optionally delete stale copies, bulk-index
        each site's batch, then reset the buffers."""
        print "Committing."
        if not self.fast:
            #nuke things in this batch in case the content changed
            for site, urls in self.urls.items():
                for url in urls:
                    self.delete_by_url(site, url)
        for site, docs in self.batches.items():
            self.es.bulk_index(index=site.lower(),
                               doc_type='page',
                               docs=docs,
                               id_field='hash') #id_field=hash ensure uniqueness
        self.batches = defaultdict(list) #reset docs
        self.urls = defaultdict(set) #reset urls
        self.batch_count = 0
    def _hash(self, content):
        # SHA-1 of the UTF-8 content; doubles as the document id.
        content = unicode(content)
        hsh = "%s" % (hashlib.sha1(content.encode('utf-8')).hexdigest())
        return hsh
    def search(self, query, *args, **kwargs):
        """
        Performs a search and returns results.
        @todo: Need to figure out how to wildcard indexes (aka sites) or otherwise provide them here.
        Expected format of results:
        {'pagecount': int,
         'hits': [
            {'title': string,
             'highlights': string,
             'url': string
            }
         ]}
        """
        pagenum = kwargs.pop('pagenum', 1)
        per_page = kwargs.pop('pagelen', 100)
        sites = kwargs.pop('sites', None)
        # Restrict to the given site indexes, or search everything.
        if sites is not None:
            site_names = ','.join([site.lower() for site in sites])
        else:
            site_names = '_all'
        start = (pagenum-1)*per_page
        size = per_page
        es_query = {'from':start,
                    'size':size,
                    'query':
                        {'filtered':
                            {'query':
                                {'query_string':
                                    {'query':query}
                                }
                            }
                        },
                    'highlight':
                        {'pre_tags':['<b>', '<font color="blue">'],
                         'post_tags':['</b>', '</font>'],
                         'fields':
                            {'content': {}}
                        }
                    }
        results = self.es.search(index=site_names, query=es_query)
        ret = {'hits':[]}
        total_hits = results['hits']['total']
        for res in results['hits']['hits']:
            row = res['_source']
            row['score'] = res['_score']
            row['highlight'] = '... '.join([h for h in res['highlight']['content']])
            ret['hits'].append(row)
        ret['pagecount'] = int(math.ceil(float(total_hits) / float(per_page)))
        ret['total_hits'] = total_hits
        return ret
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 15 21:51:51 2016
@author: Utkarsh Rastogi
"""
from setWallpaper import setWallpaper
from bs4 import BeautifulSoup
import requests
import datetime
import urllib
import os
from time import sleep
# Bing's picture-of-the-day metadata endpoint (XML, most recent image).
bing_path = 'http://www.bing.com'
bing_url = 'http://www.bing.com/HPImageArchive.aspx?format=xml&idx=0&n=1&mkt=ru-RU'
def main():
    """Download today's Bing picture-of-the-day and set it as the
    Windows desktop wallpaper (Python 2 script)."""
    try:
        #extracting url of bing image
        req = requests.get(bing_url)
        soup = BeautifulSoup(req.text,'lxml')
        soup_url = soup.find('url').text
        url = bing_path + soup_url
    # NOTE(review): bare except hides all errors (not just network ones).
    except:
        print "Connection Error. Check your connection and try again."
        sleep(5)
        exit()
    #extracting time and setting name of bing image
    time = datetime.datetime.now()
    wp_name = 'bing ' + time.strftime('%d-%m-%y') + '.jpg'
    #setting target directory as a folder in desktop
    username = os.environ.get( "USERNAME" )
    target_dir = 'C:\Users\\' + username + '\Desktop\\bing\\'
    #creating target directory if not present
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    pic_path = target_dir + wp_name
    try:
        print "Downloading Bing Wallpaper."
        #downloading image to set path
        urllib.urlretrieve(url, pic_path)
        print "Setting Bing Wallpaper as Desktop Background."
        #setting downloaded pic as wallpaper
        setWallpaper(pic_path)
        print "Desktop Backgroung is set."
    except:
        print "Unable to retrieve Bing image."
        sleep(5)
        exit()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 14 19:30:19 2019
@author: Osama
"""
import matplotlib.pyplot as plt
import random
import os
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
def plotResults(history):
    """Plot training vs. validation accuracy and loss from a Keras History.

    Draws two figures: accuracy curves first, then loss curves, and shows
    both. Training values are dots ('bo'), validation values a line ('b').
    """
    hist = history.history
    epoch_axis = range(len(hist['acc']))
    # Accuracy curves on the first figure
    plt.plot(epoch_axis, hist['acc'], 'bo', label='Training acc')
    plt.plot(epoch_axis, hist['val_acc'], 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    # Start a fresh figure for the loss curves
    plt.figure()
    plt.plot(epoch_axis, hist['loss'], 'bo', label='Training loss')
    plt.plot(epoch_axis, hist['val_loss'], 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
def preprocessImdb(dataDirectory,
                   maxWords,
                   maxLength,
                   IsValidationDataNeeded=False,
                   trainingSamplesNo=0,
                   ValidationSamplesNo=0):
    """Load the IMDB review folders and return tokenized, padded sequences.

    Reads every .txt file under <dataDirectory>/neg and <dataDirectory>/pos,
    labels them 0 and 1 respectively, shuffles texts and labels together,
    then tokenizes and pads to fixed length.

    Parameters:
        dataDirectory -- root folder containing 'neg' and 'pos' subfolders
        maxWords -- vocabulary size for the Tokenizer
        maxLength -- sequence length to pad/truncate to
        IsValidationDataNeeded -- if True, also split into train/validation
        trainingSamplesNo, ValidationSamplesNo -- split sizes (used only
            when IsValidationDataNeeded is True)

    Returns:
        (word_index, data, labels), or when a validation split is requested
        (word_index, trainingData, trainingLabels, validationData, validationLabels)
    """
    labels = []
    texts = []
    for label_type in ['neg', 'pos']:
        dir_name = os.path.join(dataDirectory, label_type)
        for fname in os.listdir(dir_name):
            if fname[-4:] != '.txt':
                continue
            try:
                # 'with' guarantees the handle closes even if read() fails;
                # the original leaked the handle on a read error.
                with open(os.path.join(dir_name, fname), encoding="utf8") as f:
                    texts.append(f.read())
            except (OSError, UnicodeDecodeError):
                # Skip unreadable files. The original bare `except:` also
                # hid genuine programming errors.
                continue
            labels.append(0 if label_type == 'neg' else 1)
    # Shuffle texts and labels together so the pairing is preserved.
    combined = list(zip(texts, labels))
    random.shuffle(combined)
    texts[:], labels[:] = zip(*combined)
    tokenizer = Tokenizer(num_words=maxWords)
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)
    word_index = tokenizer.word_index
    data = pad_sequences(sequences, maxlen=maxLength)
    labels = np.asarray(labels)
    if IsValidationDataNeeded:
        trainingData = data[:trainingSamplesNo]
        trainingLabels = labels[:trainingSamplesNo]
        validationData = data[trainingSamplesNo: trainingSamplesNo + ValidationSamplesNo]
        validationLabels = labels[trainingSamplesNo: trainingSamplesNo + ValidationSamplesNo]
        return word_index, trainingData, trainingLabels, validationData, validationLabels
    return word_index, data, labels
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 11:17:30 2019
@author: clair
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 17:29:27 2019
@author: clair
"""
from imutils.video import VideoStream
import argparse
import imutils
import cv2
from imutils.video import FPS
from random import randint
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
                help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
                help="OpenCV object tracker type")
args = vars(ap.parse_args())
# map CLI tracker names to their OpenCV factory functions
trackers = {
    "kcf": cv2.TrackerKCF_create,
    "boosting": cv2.TrackerBoosting_create,
    "mil": cv2.TrackerMIL_create
}
#tracker = trackers[args["tracker"]]()
fps = None
bboxes = []  # one user-drawn ROI per tracked object
colors = []  # matching random display colour per ROI
multiTracker = cv2.MultiTracker_create()
vs = cv2.VideoCapture(args["video"])
while True:
    # VideoCapture.read() returns (grabbed, frame); keep only the frame
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    if frame is None:
        # end of stream
        break
    #resize frame
    frame = imutils.resize(frame, width=500)
    (H, W) = frame.shape[:2]
    if bboxes != []:
        if len(bboxes) >= 1:
            # (success, boxes) = tracker.update(frame)
            # if success:
            # (x, y, w, h) = [int(v) for v in boxes]
            # cv2.rectangle(frame, (x,y), (x+w,y+h), colors[0], 2)
            # else:
            # advance every tracker one frame and draw its current box
            (success, boxes) = multiTracker.update(frame)
            if success:
                for i, box in enumerate(boxes):
                    (x, y, w, h) = [int(v) for v in box]
                    cv2.rectangle(frame, (x,y), (x+w,y+h), colors[i], 2)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("s"):
        # 's': pause and let the user draw a new ROI to start tracking
        bb = cv2.selectROI("Frame", frame, fromCenter=False, showCrosshair=True)
        print(bb)
        bboxes.append(bb)
        color = (randint(64, 255), randint(64, 255), randint(64, 255))
        colors.append(color)
        multiTracker.add(trackers[args["tracker"]](), frame, bb)
        # tracker.init(frame, bb)
        fps = FPS().start()
    elif key == ord("q"):
        # 'q': quit
        break
# cleanup: webcam streams use stop(), file captures use release()
if not args.get("video", False):
    vs.stop()
else:
    vs.release()
cv2.destroyAllWindows()
cv2.waitKey(1)
|
#!/usr/bin/env python3
# Author: John Sigmon
# Last updated: April 14, 2018 by Kate Baumli
import pandas as pd
def main():
    """Clean the raw TED transcripts CSV and write clean_transcripts.csv.

    Strips non-ASCII characters, breaks sentences onto their own lines,
    removes quotes/punctuation, turns "(Laughter)"/"(Applause)" into
    standalone tokens, and lower-cases everything.
    """
    print('Cleaning your data...')
    filename = 'transcripts.csv'
    target_filename = 'clean_transcripts.csv'
    filepath = '../data/kaggle-data/'
    df = pd.read_csv(filepath + filename)
    # Remove non-ascii first (this replacement really is a regex)
    df.transcript.replace({r'[^\x00-\x7F]+': ''}, regex=True, inplace=True)
    # Literal (pattern, replacement) pairs applied in order.
    # regex=False is the bug fix: with pandas' regex engine (the historical
    # default for str.replace), '.' matched every character and the bare
    # '(' raised "missing ), unterminated subpattern". Order matters:
    # "(Laughter)"/"(Applause)" must be rewritten before the lone
    # parentheses are stripped.
    replacements = [
        ('.', '\n'),
        ('?', '\n'),
        ('!', '\n'),
        ('\'', ''),
        ('(Laughter)', 'Laughter\n'),
        ('(Applause)', 'Applause\n'),
        ('(', ''),
        (')', ''),
        ('"', ''),
        ('\n ', ''),
        ('-', ''),
        (';', ''),
        (':', ''),  # defined but never applied in the original -- now used
    ]
    clean = df.transcript
    for patt, repl in replacements:
        clean = clean.str.replace(patt, repl, regex=False)
    # Make everything lower case
    df['clean_transcripts'] = clean.str.lower()
    df.to_csv(filepath + target_filename)
    print('Your new .csv has been written to {}'.format(filepath
                                                        + target_filename))
if __name__ == "__main__":
main()
|
import paho.mqtt.client as mqtt
class IoT():
    """MQTT callback holder: logs connections and reacts to 'on'/'off'
    payloads on the 'Publish/#' topic tree by publishing a lights status
    back to '/Subscribe'."""

    def __init__(self):
        pass

    def on_connect(self, client, userdata, flags, rc):
        # rc is the broker's connection result code (0 == accepted)
        print('Connected with result code:' + str(rc))
        client.subscribe('Publish/#')

    def on_message(self, client, userdata, msg):
        text = str(msg.payload.decode('utf-8'))
        print(text)
        ## print(client.publish("/Subscribe",'from pc'))
        if text == 'on':
            print(client.publish("/Subscribe", 'Lights on'))
        elif text == 'off':
            print(client.publish("/Subscribe", 'Lights off'))
if __name__=='__main__':
    # Renamed from `IoT = IoT()` -- the original shadowed the class name,
    # making the class unreachable afterwards.
    iot_handler = IoT()
    client = mqtt.Client()
    client.on_connect = iot_handler.on_connect
    client.on_message = iot_handler.on_message
    # Bug fix: credentials must be registered *before* connect(); the
    # original called username_pw_set() after connect(), so the CONNECT
    # packet was sent without authentication.
    # NOTE(review): hard-coded broker credentials should move to an env
    # var or config file -- they are exposed in source control here.
    client.username_pw_set("xilebdfu", "MknOzEMGsFs0")
    client.connect('m16.cloudmqtt.com', 12939, 60)
    # Block forever, dispatching network traffic and callbacks.
    client.loop_forever()
|
class Items(object):
    """Container for two item dictionaries.

    Both attributes start empty and are populated by callers; their
    schemas are not visible from this file.
    """
    def __init__(self):
        # flight-related fields, keyed by field name (populated externally)
        self.flight_item = {}
        # product-related fields, keyed by field name (populated externally)
        self.product_item = {}
# Demonstrate Python's standard-library math module.
import math

# math.sqrt is one of the module's many precompiled functions.
root = math.sqrt(16)
print("The square root of 16 is", root)
# Modules can also expose useful constants, such as pi.
print("Pi is:", math.pi)
# coding=utf-8
import unittest
from . import interface_factory
interface = interface_factory.get_interface_obj()
class ButtonTests(unittest.TestCase):
    """
    These test the BM button API available in Payments Standard and up. This
    is the cheapest and most direct route towards accepting payments.
    """
    def test_create_button(self):
        """
        Tests the creation of a simple button. This particular one is not
        stored on the PayPal account.
        """
        # Minimal BMCreateButton parameters: an encrypted BUYNOW button
        # for a $5.00 service item, with IPN callbacks to notify_url.
        button_params = {
            'BUTTONCODE': 'ENCRYPTED',
            'BUTTONTYPE': 'BUYNOW',
            'BUTTONSUBTYPE': 'SERVICES',
            'BUYNOWTEXT': 'PAYNOW',
            'L_BUTTONVAR0': 'notify_url=http://test.com',
            'L_BUTTONVAR1': 'amount=5.00',
            'L_BUTTONVAR2': 'item_name=Testing',
            'L_BUTTONVAR3': 'item_number=12345',
        }
        response = interface.bm_create_button(**button_params)
        # PayPal signals call success via the ACK response field.
        self.assertEqual(response.ACK, 'Success')
|
Git is a collaboration tool that is universally used by programmers
Below are the commands in git we will be using the most:
git init - initialize a new git repository in the current folder so git can start tracking it
git add - stage your updates/changes so they are included in the next commit
git commit -m "message here" - record the staged changes as a commit with a descriptive message
git pull origin branch_name - pull down the updates/changes from a repository
git push origin branch_name - push up your changes to a repository
git checkout -b branch_name - change to a different branch on your local machine, or create a new branch on your local machine
git status - tells you what changes are in this folder and what branch you are on
common commands
ls - list the files in the folder
mkdir - make directory, make a folder
touch - make a file
pwd - print working directory, this will give you the path in your computer where you currently are
cd - change directory
cd by itself will bring you to your root folder
cd .. will bring you up one folder
cd folder_name will take you to that folder
Instance Methods
.get - safely look up a key, returning None (or a supplied default) when the key is not in the dictionary
.values - gives a list of values
.keys - gives a list of keys
.items - gives the keys and values as tuples in a list
Python's built in functions
len()
range()
split()
pop()
append()
DateTime
import datetime
from datetime import datetime
Built in functions:
zip()
recursive functions
with open(file_name) as name_for_code: - open a file and have it closed automatically when the block ends
Read Files: Identify them
.read()
.readline()
.readlines()
|
from discord import Game
from discord.ext import commands
dbot_version = "1.6"
command_prefix = '+'
startup_extensions = ["conversion", "general", "interaction", "maths", "misc", "people", "rng"]
bot = commands.Bot(command_prefix)
@bot.event
async def on_message(message):
    """Lower-case the command trigger word, then dispatch to the command
    processor.

    Ignores the bot's own messages so it never responds to itself.
    """
    if message.author == bot.user:
        return
    else:
        try:
            # Trigger word plus arguments: lower-case only the trigger,
            # preserving the argument text as typed.
            triggerinmsg, contentofmsg = message.content.split(' ', 1)
            triggerinmsg_l = triggerinmsg.lower()
            message.content = triggerinmsg_l + " " + contentofmsg
        except ValueError:
            # Single-word message: nothing after the trigger to preserve.
            triggerinmsg = message.content.split(' ', 1)[0]
            triggerinmsg_l = triggerinmsg.lower()
            message.content = triggerinmsg_l
        await bot.process_commands(message)
@bot.command(hidden=True,
             description="Can load additional extensions into DTbot (devs, mods and admins only)",
             brief="Load an extension")
@commands.has_any_role("The Dark Lords", "Administrator", "Dbot Dev", "DTbot Dev", "Tanya")
async def load(extension_name : str):
    """Load the named extension, reporting import failures to the channel."""
    try:
        bot.load_extension(extension_name)
    except (AttributeError, ImportError) as e:
        # Report the failure in a code block instead of crashing the bot.
        await bot.say("```py\n{}: {}\n```".format(type(e).__name__, str(e)))
        return
    await bot.say("{} loaded.".format(extension_name))
@bot.command(hidden=True,
             description="Unload an extension (devs, mods, and admins only)",
             brief="Unload an extension")
@commands.has_any_role("The Dark Lords", "Administrator", "Dbot Dev", "DTbot Dev", "Tanya")
async def unload(extension_name : str):
    """Unload the named extension and confirm in the channel."""
    bot.unload_extension(extension_name)
    await bot.say("{} unloaded.".format(extension_name))
@bot.command(hidden=True,
             description="First unload and then immediately reload a module",
             brief="Reload an extension")
@commands.has_any_role("The Dark Lords", "Administrator", "Dbot Dev", "DTbot Dev", "Tanya")
async def reload(extension_name : str):
    """Unload then immediately reload the named extension.

    Consistency fix: the role list now includes "DTbot Dev", matching the
    sibling `load` and `unload` commands -- the original omitted it here,
    so DTbot devs could load/unload but not reload.
    """
    bot.unload_extension(extension_name)
    await bot.say("{} unloaded.".format(extension_name))
    try:
        bot.load_extension(extension_name)
    except (AttributeError, ImportError) as e:
        # Report the failure in a code block instead of crashing the bot.
        await bot.say("```py\n{}: {}\n```".format(type(e).__name__, str(e)))
        return
    await bot.say("{} loaded.".format(extension_name))
@bot.command(hidden=True,
             description='Shutdown command for the bot, only usable by developer roles',
             brief='Shutdown the bot')
@commands.has_any_role("Dbot Dev", "DTbot Dev", "Tanya")
async def shutdownbot(passcode: str):
    """Log the bot out when the correct passcode is supplied.

    NOTE(review): the passcode was blanked for the public release, so in
    this form an empty-string argument triggers logout -- confirm a real
    passcode is restored before deployment.
    """
    if passcode == '':
        # passcode not in public release
        await bot.logout()
    else:
        pass
# online confirmation
@bot.event
async def on_ready():
    """Set the 'playing' status to the help command and log identity to
    stdout once the bot is connected."""
    await bot.change_presence(game=Game(name=command_prefix + "help (v. " + dbot_version + ")"))
    print('Logged in as')
    print(bot.user.name)
    print(bot.user.id)
    print('------')
if __name__ == "__main__":
for extension in startup_extensions:
try:
bot.load_extension(extension)
except Exception as e:
exc = '{}: {}'.format(type(e).__name__, e)
print('Failed to load extension {}\n{}'.format(extension, exc))
bot.run('')
|
# -*- coding: utf-8 -*-
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, \
merge, Reshape, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from sklearn.metrics import log_loss
def conv2d_bn(x, nb_filter, nb_row, nb_col,
              border_mode='same', subsample=(1, 1),
              name=None):
    """
    Utility function to apply conv + BN for Inception V3.

    Parameters (Keras 1.x API):
        x - input tensor
        nb_filter - number of convolution filters
        nb_row, nb_col - kernel height and width
        border_mode - padding mode ('same' or 'valid')
        subsample - stride tuple
        name - optional base name; '<name>_conv'/'<name>_bn' are derived

    Returns the batch-normalized ReLU activation tensor.
    """
    if name is not None:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    else:
        bn_name = None
        conv_name = None
    # axis=1: feature axis for channels-first (Theano dim-ordering) tensors
    bn_axis = 1
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      activation='relu',
                      border_mode=border_mode,
                      name=conv_name)(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name)(x)
    return x
def inception_v3_model(img_rows, img_cols, channel=1, num_classes=None):
    """
    Inception-V3 Model for Keras
    Model Schema is based on
    https://github.com/fchollet/deep-learning-models/blob/master/inception_v3.py
    ImageNet Pretrained Weights
    https://github.com/fchollet/deep-learning-models/releases/download/v0.2/inception_v3_weights_th_dim_ordering_th_kernels.h5
    Parameters:
    img_rows, img_cols - resolution of inputs
    channel - 1 for grayscale, 3 for color
    num_classes - number of class labels for our classification task
        NOTE(review): num_classes is currently unused -- the replacement
        head below is a fixed 1-unit sigmoid (binary/regression output).
    """
    # Channels-first input: (channel, rows, cols), matching the TH weights
    channel_axis = 1
    img_input = Input(shape=(channel, img_rows, img_cols))
    # Stem: strided convs + pooling downsample the input before the
    # inception blocks begin.
    x = conv2d_bn(img_input, 32, 3, 3, subsample=(2, 2), border_mode='valid')
    x = conv2d_bn(x, 32, 3, 3, border_mode='valid')
    x = conv2d_bn(x, 64, 3, 3)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = conv2d_bn(x, 80, 1, 1, border_mode='valid')
    x = conv2d_bn(x, 192, 3, 3, border_mode='valid')
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    # mixed 0, 1, 2: 35 x 35 x 256
    # Each block concatenates 1x1, 5x5, double-3x3 and pooled branches.
    for i in range(3):
        branch1x1 = conv2d_bn(x, 64, 1, 1)
        branch5x5 = conv2d_bn(x, 48, 1, 1)
        branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
        branch3x3dbl = conv2d_bn(x, 64, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
        branch_pool = AveragePooling2D(
            (3, 3), strides=(1, 1), border_mode='same')(x)
        branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
        x = merge([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                  mode='concat', concat_axis=channel_axis,
                  name='mixed' + str(i))
    # mixed 3: 17 x 17 x 768 (grid-size reduction)
    branch3x3 = conv2d_bn(x, 384, 3, 3, subsample=(2, 2), border_mode='valid')
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3,
                             subsample=(2, 2), border_mode='valid')
    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = merge([branch3x3, branch3x3dbl, branch_pool],
              mode='concat', concat_axis=channel_axis,
              name='mixed3')
    # mixed 4: 17 x 17 x 768 (factorized 7x7 convolutions)
    branch1x1 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
              mode='concat', concat_axis=channel_axis,
              name='mixed4')
    # mixed 5, 6: 17 x 17 x 768
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)
        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
        branch_pool = AveragePooling2D(
            (3, 3), strides=(1, 1), border_mode='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                  mode='concat', concat_axis=channel_axis,
                  name='mixed' + str(5 + i))
    # mixed 7: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
    branch7x7dbl = conv2d_bn(x, 160, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
              mode='concat', concat_axis=channel_axis,
              name='mixed7')
    # mixed 8: 8 x 8 x 1280 (grid-size reduction)
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
                          subsample=(2, 2), border_mode='valid')
    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3,
                            subsample=(2, 2), border_mode='valid')
    branch_pool = AveragePooling2D((3, 3), strides=(2, 2))(x)
    x = merge([branch3x3, branch7x7x3, branch_pool],
              mode='concat', concat_axis=channel_axis,
              name='mixed8')
    # mixed 9: 8 x 8 x 2048 (expanded-filter-bank blocks)
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)
        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = merge([branch3x3_1, branch3x3_2],
                          mode='concat', concat_axis=channel_axis,
                          name='mixed9_' + str(i))
        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = merge([branch3x3dbl_1, branch3x3dbl_2],
                             mode='concat', concat_axis=channel_axis)
        branch_pool = AveragePooling2D(
            (3, 3), strides=(1, 1), border_mode='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = merge([branch1x1, branch3x3, branch3x3dbl, branch_pool],
                  mode='concat', concat_axis=channel_axis,
                  name='mixed' + str(9 + i))
    # Fully Connected Softmax Layer (original ImageNet head, built only so
    # the pretrained weights can be loaded)
    x_fc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
    x_fc = Flatten(name='flatten')(x_fc)
    x_fc = Dense(1000, activation='softmax', name='predictions')(x_fc)
    # Create model
    model = Model(img_input, x_fc)
    # Load ImageNet pre-trained data
    model.load_weights('imagenet_models/inception_v3_weights_th_dim_ordering_th_kernels.h5')
    # Truncate and replace softmax layer for transfer learning
    # Cannot use model.layers.pop() since model is not of Sequential() type
    # The method below works since pre-trained weights are stored in layers but not in the model
    x_newfc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
    x_newfc = Flatten(name='flatten')(x_newfc)
    # x_newfc = Dense(num_classes, activation='softmax', name='predictions')(x_newfc)
    # Replacement head: a single sigmoid output (used for steering-angle
    # style regression/binary output downstream)
    x_newfc = Dense(1, activation='sigmoid', name='predictions')(x_newfc)
    # Create another model with our customized softmax
    model = Model(img_input, x_newfc)
    # # Learning rate is changed to 0.001
    # sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    # model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
import keras
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
from keras import models, optimizers, backend
from keras.layers import core, convolutional, pooling, Dropout
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
def preprocess(img):
    """Rotate, crop and resize a raw camera frame for the network.

    Rotates -90 degrees about the image centre with a 1.4x zoom, trims the
    top 59 rows and the last row, and resizes to 299x299.
    """
    rows, cols, _ = img.shape
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -90, 1.4)
    dst = cv2.warpAffine(img, M, (cols, rows), flags=cv2.INTER_AREA)
    crop_img = dst[59:-1, :]
    x = cv2.resize(crop_img, (299, 299))
    # Bug fix: the original ended with a bare `return`, so every caller
    # received None and the computed image was discarded.
    return x
def augment_brightness_camera_images(image):
    """Randomly jitter the brightness (HSV value channel) of an RGB image.

    The V channel is scaled by a factor drawn uniformly from [0.5, 1.5)
    and clipped to the valid 8-bit range before converting back to RGB.
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    # work in float so the scaling does not wrap around uint8
    hsv = np.array(hsv, dtype=np.float64)
    factor = .5 + np.random.uniform()
    scaled = hsv[:, :, 2] * factor
    scaled[scaled > 255] = 255
    hsv[:, :, 2] = scaled
    hsv = np.array(hsv, dtype=np.uint8)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
def preprocess_image_file_train(line_data):
    """Load one training image and its steering angle from a file path.

    The steering angle is encoded in the filename between '--' markers.
    The image gets random brightness jitter, the standard preprocess()
    transform, and a 50% chance of a horizontal flip (with the angle
    negated to match).

    Returns (image, y_steer).
    """
    image = cv2.imread(line_data)
    # Parse the angle embedded in the filename (removed the unused
    # `jpg_index` local from the original).
    first_index = line_data.find('--') + 3
    last_index = line_data.find('--', first_index) + 2
    y_steer = float(line_data[first_index:last_index - 2])
    image = augment_brightness_camera_images(image)
    image = preprocess(image)
    image = np.array(image)
    # Randomly mirror the frame; the steering angle flips sign with it.
    ind_flip = np.random.randint(2)
    if ind_flip == 0:
        image = cv2.flip(image, 1)
        y_steer = -y_steer
    return image, y_steer
def generate_train_from_PD_batch(data, batch_size=32):
    """Infinite generator yielding (images, steering) training batches.

    Samples file paths uniformly at random from `data`, loads/augments
    them, and under-samples near-zero steering angles (examples with
    |angle| <= 0.1 are dropped with probability 0.6) to reduce the
    straight-road bias.

    Yields (batch_images, batch_steering) with shapes
    (batch_size, 299, 299, 3) and (batch_size,).
    """
    batch_images = np.zeros((batch_size, 299, 299, 3))
    batch_steering = np.zeros(batch_size)
    while 1:
        filled = 0
        # Bug fix: the original looped `while a < batch_size - 1` and
        # incremented the index *before* storing, so slot 0 was never
        # written (stayed all-zeros) and only batch_size-1 fresh samples
        # were produced. Store first, then advance.
        while filled < batch_size:
            i_line = np.random.randint(len(data))
            x, y = preprocess_image_file_train(data[i_line])
            if -0.1 <= y <= 0.1 and np.random.random_sample() < 0.6:
                # drop most near-zero-angle samples
                continue
            batch_images[filled] = x
            batch_steering[filled] = y
            filled += 1
        yield batch_images, batch_steering
# ---- module-level training driver ----
finalset = []
new_size_row = 320
new_size_col = 320
# NOTE(review): the model is built for 320x320 inputs here, while the batch
# generator produces 299x299 images -- confirm which size is intended.
model = inception_v3_model(new_size_col, new_size_row, channel=3)
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
# NOTE(review): optimizer='adam' (string) uses Keras' default Adam; the
# configured `adam` instance above is never passed in.
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()
# Collect training image paths; keep only frames recorded at speed 's1'.
# Filenames encode angle and speed between '--' markers.
for x in glob.glob('LaptopController/GroundFloor?/*'):
    jpg_index = x.find('.jpg')
    first_index = x.find('--') + 3
    last_index = x.find('--', first_index) + 2
    speed = x[last_index:jpg_index]
    if speed == 's1':
        angle = x[first_index:last_index - 2]
        finalset.append(x)
model.fit_generator(generate_train_from_PD_batch(finalset, 64), samples_per_epoch=1200, verbose=1)
model_json = './model_small.json'
model_h5 = './model_small.h5'
# NOTE(review): Model.save() takes (filepath, overwrite=...); the second
# argument here is interpreted as `overwrite`, not a second output path --
# verify the intended save targets.
model.save(model_json, model_h5)
|
line = "/file/name/
|
from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.signals import post_delete
from django.dispatch import receiver
# Create your models here.
class Album(models.Model):
    """An album: artist/title/year metadata plus an uploaded logo file."""
    #user = models.ForeignKey(User, default = 1)
    artist = models.CharField(max_length = 250)
    title = models.CharField(max_length = 500)
    year = models.CharField(max_length = 4)
    logo = models.FileField()
    is_favorite = models.BooleanField(default = False)
    def get_absolute_url(self):
        """URL of this album's detail page (used by Django after create/update)."""
        return reverse('music:detail', kwargs={'pk': self.pk})
    def __str__(self):
        # e.g. "Some Title - Some Artist"
        return self.title + ' - ' + self.artist
class Song(models.Model):
    """A track belonging to an Album; deleting the album cascades here."""
    album = models.ForeignKey(Album, on_delete = models.CASCADE)
    title = models.CharField(max_length = 250)
    file = models.FileField(default = '')
    track_number = models.IntegerField(default = 1)
    is_favorite = models.BooleanField(default = False)
    def get_absolute_url(self):
        """URL of the parent album's detail page."""
        return reverse('music:detail', kwargs={'pk': self.album.id})
    def __str__(self):
        return self.title
@receiver(post_delete, sender = Song)
def song_post_delete(sender, **kwargs):
    """Remove a deleted Song's audio file from storage so files don't orphan."""
    song = kwargs['instance']
    storage = song.file.storage
    path = song.file.path
    storage.delete(path)
@receiver(post_delete, sender = Album)
def album_post_delete(sender, **kwargs):
    """Remove a deleted Album's logo file from storage so files don't orphan."""
    album = kwargs['instance']
    storage = album.logo.storage
    path = album.logo.path
    storage.delete(path)
|
#!/usr/bin/env python3
'''
Label image.
Usage : ./manual_labeler.py --meme_dir=./meme_cut/ --output_dir=./output_xml/
'''
import sys
from subprocess import call
import subprocess
import json
import argparse
import io
import os
import re
from pathlib import Path
from lxml import objectify
def get_args_parser():
    """Define and parse the command-line options for the labeling run."""
    arg_parser = argparse.ArgumentParser(description='Directories for processing')
    arg_parser.add_argument('-i', '--meme_dir', type=str, required=True,
                            help='Directory of a input images.')
    arg_parser.add_argument('-o', '--output_dir', type=str, required=True,
                            help='Directory of a output xml.')
    arg_parser.add_argument('-w', '--overwrite', default=False,
                            help='Overwrite xml.')
    arg_parser.add_argument('-e', '--auto_empty_label', default=False,
                            help='Label with empty sentence automatically.')
    # parse_args() reads sys.argv, so this must run from the command line.
    return arg_parser.parse_args()
def json2xml(json_obj, line_padding=""):
result_list = list()
json_obj_type = type(json_obj)
if json_obj_type is list:
for sub_elem in json_obj:
result_list.append(json2xml(sub_elem, line_padding))
return "\n".join(result_list)
if json_obj_type is dict:
for tag_name in json_obj:
sub_obj = json_obj[tag_name]
result_list.append("%s<%s>" % (line_padding, tag_name))
result_list.append(json2xml(sub_obj, "\t" + line_padding))
result_list.append("%s</%s>" % (line_padding, tag_name))
return "\n".join(result_list)
return "%s%s" % (line_padding, json_obj)
def rm_gbg_from_xml(value):
    """Flatten an XML text node to a single-line, left-trimmed string.

    Tabs and newlines are removed entirely, then any leading whitespace
    is stripped; the remainder is returned verbatim.
    """
    flattened = str(value).replace('\t', '').replace('\n', '')
    return re.search(r'\s{0,}(.*)', flattened).group(1)
def run_tagger(args):
    """Interactively label every image under args.meme_dir, writing one
    VOC-style XML annotation per image into args.output_dir.

    Per-image keyboard protocol: '0' skips the image, '9' skips the rest
    of the episode, Ctrl+C aborts the run. With --auto_empty_label no
    prompt is shown. With --overwrite, existing XML is re-labeled and the
    previous label is displayed as a reference.
    """
    in_dir = os.path.abspath(args.meme_dir) + '/'
    out_dir = os.path.abspath(args.output_dir) + '/'
    overwrite_flag = args.overwrite
    auto_flag = args.auto_empty_label
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    episodes = os.listdir(in_dir)
    episodes.sort()
    # iterate meme dir.
    for episode in episodes:
        # xml episode folders should not have whitespace in name.
        images = os.listdir(str(in_dir)+'/'+str(episode))
        epi_name = episode.replace(' ', '_')
        if not os.path.exists(out_dir+'/'+epi_name):
            os.makedirs(out_dir +'/'+epi_name)
        if episode == '.ipynb_checkpoints':
            continue
        print('\n## Episode : ',episode)
        images.sort()
        for image in images:
            path = in_dir +episode+ '/' +image
            # only label actual image files
            if not path.lower().endswith(('.png', '.jpg', '.jpeg')):
                continue
            # output xml path mirrors the image filename
            x_path = out_dir + epi_name +'/'+ image
            pre, ext = os.path.splitext(x_path)
            x_path = pre + '.xml'
            xml_file = Path(x_path)
            ori_txt = ''
            if xml_file.exists():
                if not overwrite_flag:
                    print('xml already exist : %s ' %( x_path.rsplit('/',1)[1]))
                    continue
                else :
                    # overwrite mode: show the existing label as a reference
                    with open(x_path,'r') as f:
                        xml_str = f.read()
                    if xml_str:
                        xml_root = objectify.fromstring(xml_str)
                        ori_txt = rm_gbg_from_xml(xml_root['object']['name'])
            print('Label -> %s, \noriginal label : [%s]\n: ' %(image, ori_txt) , end='')
            res_txt = None
            if not auto_flag:
                try:
                    res_txt = input()
                    if res_txt == '0':
                        print('skipped.')
                        continue
                    elif res_txt == '9':
                        print('episode skipped.')
                        break
                    # collapse tabs/newlines/runs of spaces, strip the lead
                    res_txt = re.sub(r'\t{1,}', ' ', res_txt)
                    res_txt = re.sub(r'\n{1,}', ' ', res_txt)
                    res_txt = re.sub(r'\s{1,}', ' ', res_txt)
                    res_txt = re.search(r'\s{0,}(.*)', res_txt).group(1)
                except KeyboardInterrupt:
                    print('\n## Cancled : %s ' %(epi_name+'/' +x_path.rsplit('/',1)[1]))
                    sys.exit()
            else:
                # NOTE(review): in auto mode res_txt stays None, so the
                # string concatenation below would raise TypeError (or
                # write the literal 'None') -- confirm auto mode is used.
                print('Auto empty labeling')
            print('label :[%s] ' %(res_txt), end='')
            # build the annotation as JSON, then serialize to XML
            with open(x_path, 'w') as f:
                s = '{"annotation" : {"folder" : "'+ episode +'", "filename" : "'+ image +'", "segmented": 0, "object" : {"name" : "'+res_txt+'", "pose" : "Unspecified", "truncated" : 0, "occluded" : 0, "difficult" : 0, "vector" : 0} }}'
                j = json.loads(s)
                f.write(json2xml(j))
                f.close()
            print('saved.')
def main():
    """Entry point: print the keyboard help, parse args, run the tagger."""
    print("\n# Manual labeler \n\n Input '0' to skip label.")
    print(" '9' to skip episode.\n Ctrl+c to cancel.")
    args = get_args_parser()
    run_tagger(args) # xml
    print('\nLabeling done.')
    print('overwrite mode : %s' %(args.overwrite))
    print('Labeling & Generate .xml done.\n')
if __name__ == '__main__':
main()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import os
from textwrap import dedent
from typing import Tuple
import pytest
from pants.backend.codegen.protobuf.python import additional_fields as protobuf_additional_fields
from pants.backend.codegen.protobuf.python.python_protobuf_module_mapper import (
rules as protobuf_module_mapper_rules,
)
from pants.backend.codegen.protobuf.python.python_protobuf_subsystem import (
rules as protobuf_subsystem_rules,
)
from pants.backend.codegen.protobuf.python.rules import rules as protobuf_python_rules
from pants.backend.codegen.protobuf.target_types import ProtobufSourcesGeneratorTarget
from pants.backend.codegen.protobuf.target_types import rules as protobuf_target_types_rules
from pants.backend.python import target_types_rules
from pants.backend.python.dependency_inference import rules as dependency_inference_rules
from pants.backend.python.goals import package_dists
from pants.backend.python.goals.run_python_source import PythonSourceFieldSet
from pants.backend.python.goals.run_python_source import rules as run_rules
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.target_types import (
PythonDistribution,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
)
from pants.backend.python.util_rules import local_dists, pex_from_targets
from pants.build_graph.address import Address
from pants.core.goals.run import RunDebugAdapterRequest, RunRequest
from pants.engine.process import InteractiveProcess
from pants.engine.rules import QueryRule
from pants.engine.target import Target
from pants.testutil.debug_adapter_util import debugadapter_port_for_testing
from pants.testutil.pants_integration_test import run_pants
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import mock_console
@pytest.fixture
def rule_runner() -> PythonRuleRunner:
    """Rule runner wired with run, dependency-inference, packaging, and
    protobuf codegen rules so tests can `run` python_source targets that
    depend on generated code and local dists."""
    return PythonRuleRunner(
        rules=[
            *run_rules(),
            *dependency_inference_rules.rules(),
            *target_types_rules.rules(),
            *local_dists.rules(),
            *pex_from_targets.rules(),
            *package_dists.rules(),
            *protobuf_subsystem_rules(),
            *protobuf_target_types_rules(),
            *protobuf_python_rules(),
            *protobuf_additional_fields.rules(),
            *protobuf_module_mapper_rules(),
            # Queries the tests issue against the engine:
            QueryRule(RunRequest, (PythonSourceFieldSet,)),
            QueryRule(RunDebugAdapterRequest, (PythonSourceFieldSet,)),
        ],
        target_types=[
            ProtobufSourcesGeneratorTarget,
            PythonSourcesGeneratorTarget,
            PythonRequirementTarget,
            PythonDistribution,
        ],
        objects={"python_artifact": PythonArtifact},
    )
def run_run_request(
    rule_runner: PythonRuleRunner,
    target: Target,
    test_debug_adapter: bool = True,
) -> Tuple[int, str, str]:
    """Build a RunRequest for *target*, execute it interactively, and
    return (exit_code, stdout, stderr).

    When *test_debug_adapter* is True, also executes the corresponding
    RunDebugAdapterRequest and asserts it exits with the same code as the
    plain run.
    """
    run_request = rule_runner.request(RunRequest, [PythonSourceFieldSet.create(target)])
    run_process = InteractiveProcess(
        argv=run_request.args,
        env=run_request.extra_env,
        input_digest=run_request.digest,
        run_in_workspace=True,
        immutable_input_digests=run_request.immutable_input_digests,
        append_only_caches=run_request.append_only_caches,
    )
    with mock_console(rule_runner.options_bootstrapper) as mocked_console:
        result = rule_runner.run_interactive_process(run_process)
        # mocked_console[1] exposes the captured output streams
        stdout = mocked_console[1].get_stdout()
        stderr = mocked_console[1].get_stderr()
    if test_debug_adapter:
        debug_adapter_request = rule_runner.request(
            RunDebugAdapterRequest, [PythonSourceFieldSet.create(target)]
        )
        debug_adapter_process = InteractiveProcess(
            argv=debug_adapter_request.args,
            env=debug_adapter_request.extra_env,
            input_digest=debug_adapter_request.digest,
            run_in_workspace=True,
            immutable_input_digests=debug_adapter_request.immutable_input_digests,
            append_only_caches=debug_adapter_request.append_only_caches,
        )
        with mock_console(rule_runner.options_bootstrapper) as mocked_console:
            debug_adapter_result = rule_runner.run_interactive_process(debug_adapter_process)
            # The debug-adapter run must exit the same way as the plain run.
            assert debug_adapter_result.exit_code == result.exit_code, mocked_console[
                1
            ].get_stderr()
    return result.exit_code, stdout, stderr
@pytest.mark.parametrize(
    "global_default_value, field_value, run_uses_sandbox",
    [
        # Nothing set -> True
        (None, None, True),
        # Field set -> use field value
        (None, True, True),
        (None, False, False),
        # Global default set -> use default
        (True, None, True),
        (False, None, False),
        # Both set -> use field
        (True, True, True),
        (True, False, False),
        (False, True, True),
        (False, False, False),
    ],
)
def test_run_sample_script(
    global_default_value: bool | None,
    field_value: bool | None,
    run_uses_sandbox: bool,
    rule_runner: PythonRuleRunner,
) -> None:
    """Test that we properly run a `python_source` target.

    This checks a few things:
    - We can handle source roots.
    - We run in-repo when requested, and handle codegen correctly.
    - We propagate the error code.

    The field/global-option matrix above checks `run_goal_use_sandbox`
    precedence: the field wins when set, then the global default, then True.
    """
    sources = {
        "src_root1/project/app.py": dedent(
            """\
            import sys
            from utils.strutil import my_file
            from codegen.hello_pb2 import Hi
            def main():
                print("Hola, mundo.", file=sys.stderr)
                print(my_file())
                sys.exit(23)
            if __name__ == "__main__":
                main()
            """
        ),
        "src_root1/project/BUILD": dedent(
            f"""\
            python_sources(
                {("run_goal_use_sandbox=" + str(field_value)) if field_value is not None else ""}
            )
            """
        ),
        "src_root2/utils/strutil.py": dedent(
            """\
            import os.path
            def my_file():
                return os.path.abspath(__file__)
            """
        ),
        "src_root2/utils/BUILD": "python_sources()",
        "src_root2/codegen/hello.proto": 'syntax = "proto3";\nmessage Hi {string name = 1;}',
        "src_root2/codegen/BUILD": dedent(
            """\
            protobuf_sources()
            python_requirement(name='protobuf', requirements=['protobuf'])
            """
        ),
    }
    rule_runner.write_files(sources)
    args = [
        "--backend-packages=pants.backend.python",
        "--backend-packages=pants.backend.codegen.protobuf.python",
        "--source-root-patterns=['src_root1', 'src_root2']",
        f"--debug-adapter-port={debugadapter_port_for_testing()}",
        *(
            (
                "--python-default-run-goal-use-sandbox"
                if global_default_value
                else "--no-python-default-run-goal-use-sandbox",
            )
            if global_default_value is not None
            else ()
        ),
    ]
    rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    target = rule_runner.get_target(Address("src_root1/project", relative_file_path="app.py"))
    exit_code, stdout, stderr = run_run_request(rule_runner, target)
    assert "Hola, mundo.\n" in stderr
    file = stdout.strip()
    if run_uses_sandbox:
        # In-sandbox run: the path points inside a pants-sandbox tempdir.
        assert file.endswith("src_root2/utils/strutil.py")
        assert "pants-sandbox-" in file
    else:
        # In-repo run: the path points at the real workspace file.
        assert file == os.path.join(rule_runner.build_root, "src_root2/utils/strutil.py")
    assert exit_code == 23
def test_no_strip_pex_env_issues_12057(rule_runner: PythonRuleRunner) -> None:
    """Regression test for issue #12057: the environment must survive a re-exec.

    The script re-execs itself after setting PANTS_ISSUES_12057=42; it can
    only exit with 42 if PEX does not strip the environment.
    """
    sources = {
        "src/app.py": dedent(
            """\
            import os
            import sys
            if __name__ == "__main__":
                exit_code = os.environ.get("PANTS_ISSUES_12057")
                if exit_code is None:
                    os.environ["PANTS_ISSUES_12057"] = "42"
                    os.execv(sys.executable, [sys.executable, *sys.argv])
                sys.exit(int(exit_code))
            """
        ),
        "src/BUILD": dedent(
            """\
            python_sources()
            """
        ),
    }
    rule_runner.write_files(sources)
    args = [
        "--backend-packages=pants.backend.python",
        "--source-root-patterns=['src']",
    ]
    rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    target = rule_runner.get_target(Address("src", relative_file_path="app.py"))
    exit_code, _, stderr = run_run_request(rule_runner, target, test_debug_adapter=False)
    assert exit_code == 42, stderr
@pytest.mark.parametrize("run_in_sandbox", [False, True])
def test_pex_root_location(rule_runner: PythonRuleRunner, run_in_sandbox: bool) -> None:
    # See issues #12055 and #17750.
    # PEX_ROOT must live under the sandbox when running sandboxed, and under
    # the named-caches dir when running in the workspace.
    read_config_result = run_pants(["help-all"])
    read_config_result.assert_success()
    config_data = json.loads(read_config_result.stdout)
    # Last ranked value is the effective value of each global advanced option.
    global_advanced_options = {
        option["config_key"]: [
            ranked_value["value"] for ranked_value in option["value_history"]["ranked_values"]
        ][-1]
        for option in config_data["scope_to_help_info"][""]["advanced"]
    }
    named_caches_dir = global_advanced_options["named_caches_dir"]
    sources = {
        "src/app.py": "import os; print(__file__ + '\\n' + os.environ['PEX_ROOT'])",
        "src/BUILD": dedent(
            f"""\
            python_sources(run_goal_use_sandbox={run_in_sandbox})
            """
        ),
    }
    rule_runner.write_files(sources)
    args = [
        "--backend-packages=pants.backend.python",
        "--source-root-patterns=['src']",
    ]
    rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    target = rule_runner.get_target(Address("src", relative_file_path="app.py"))
    exit_code, stdout, _ = run_run_request(rule_runner, target, test_debug_adapter=False)
    assert exit_code == 0
    app_file, pex_root = stdout.splitlines(keepends=False)
    # The app file lives two levels below the sandbox root (src/app.py).
    sandbox = os.path.dirname(os.path.dirname(app_file))
    expected_pex_root = (
        os.path.join(sandbox, ".", ".cache", "pex_root")
        if run_in_sandbox
        else os.path.join(named_caches_dir, "pex_root")
    )
    assert expected_pex_root == pex_root
def test_local_dist(rule_runner: PythonRuleRunner) -> None:
    """Run a `python_source` whose only path to a dependency is a local dist.

    `main.py` force-excludes the direct dep on `bar.py`, so the import can
    only succeed if the locally-built wheel is on the path.
    """
    sources = {
        "foo/bar.py": "BAR = 'LOCAL DIST'",
        "foo/setup.py": dedent(
            """\
            from setuptools import setup
            setup(name="foo", version="9.8.7", packages=["foo"], package_dir={"foo": "."},)
            """
        ),
        "foo/main.py": "from foo.bar import BAR; print(BAR)",
        "foo/BUILD": dedent(
            """\
            python_sources(name="lib", sources=["bar.py", "setup.py"])
            python_distribution(
                name="dist",
                dependencies=[":lib"],
                provides=python_artifact(name="foo", version="9.8.7"),
                sdist=False,
                generate_setup=False,
            )
            python_sources(
                sources=["main.py"],
                # Force-exclude any dep on bar.py, so the only way to consume it is via the dist.
                dependencies=[":dist", "!:lib"],
            )
            """
        ),
    }
    rule_runner.write_files(sources)
    args = [
        "--backend-packages=pants.backend.python",
        "--source-root-patterns=['/']",
    ]
    rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    target = rule_runner.get_target(Address("foo", relative_file_path="main.py"))
    exit_code, stdout, stderr = run_run_request(rule_runner, target)
    assert exit_code == 0
    assert stdout == "LOCAL DIST\n", stderr
def test_runs_in_venv(rule_runner: PythonRuleRunner) -> None:
    # NB: We aren't just testing an implementation detail, users can and should expect their code to
    # be run just as if they ran their code in a virtualenv (as is common in the Python ecosystem).
    # The script exits 0 only if VIRTUAL_ENV is present in its environment.
    sources = {
        "src/app.py": dedent(
            """\
            import os
            import sys
            if __name__ == "__main__":
                sys.exit(0 if "VIRTUAL_ENV" in os.environ else 1)
            """
        ),
        "src/BUILD": dedent(
            """\
            python_sources()
            """
        ),
    }
    rule_runner.write_files(sources)
    args = [
        "--backend-packages=pants.backend.python",
        "--source-root-patterns=['src']",
    ]
    rule_runner.set_options(args, env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    target = rule_runner.get_target(Address("src", relative_file_path="app.py"))
    exit_code, stdout, _ = run_run_request(rule_runner, target)
    assert exit_code == 0, stdout
|
#!/usr/bin/python
# coding=UTF-8
# Send a test email through QQ's SMTP server over SSL.
#
# Fixes over the original: Python-2 `print` statement replaced with the
# function form, the intended `except` clause (previously commented out) is
# restored, and `s.quit()` in `finally` no longer raises NameError when
# SMTP_SSL() itself fails (s was unbound in that case).
import smtplib
from email.mime.text import MIMEText

msg_Sender = '3522271681@qq.com'    # sender address
msg_code = 'aymhwvxiumgjdbcj'       # sender's SMTP authorization code (not the account password)
msg_Receiver = '7277710@qq.com'     # recipient address
subject = "python邮件测试"           # subject line
content = "这是我使用python smtplib及email模块发送的邮件sss"  # body text

msg = MIMEText(content, _charset="utf-8")
msg['Subject'] = subject
msg['From'] = msg_Sender
msg['To'] = msg_Receiver

s = None
try:
    s = smtplib.SMTP_SSL("smtp.qq.com", 465)  # mail server and SSL port
    s.login(msg_Sender, msg_code)
    s.sendmail(msg_Sender, msg_Receiver, msg.as_string())
    print("发送成功")
except smtplib.SMTPException:
    print("发送失败")
finally:
    # Only quit if the connection was actually established.
    if s is not None:
        s.quit()
|
import shelve
import boxscore
# Data layout in rawData:
#
# { game_id: game_data }
# game_data = (player_data, goalie_data)
# player_data = [#, name, pos, G, A, P, +-, PIM, S, Hits, BKS, GVA, TKA, FO%, PPTOI, SHTOI, TOI]
#
# On-disk shelve file holding a single key 'raw' -> {game_id: game_data}.
shelf_name = 'raw-redwings-data'


# Adds the data to the dictionary stored in shelved-stats
def updateShelf(game_id, newData):
    """Insert or replace the entry for `game_id` in the shelved 'raw' dict."""
    # Context manager guarantees the shelf is closed even if the read or
    # write raises (the original leaked the handle on error).
    with shelve.open(shelf_name) as d:
        data = d['raw']
        data[game_id] = newData
        d['raw'] = data
def initData():
    """Fetch all box scores and (re)initialize the shelf's 'raw' dict.

    NOTE: keys are stored as int(game_id); readers must look them up with
    integer keys.
    """
    ids = boxscore.getAllBoxScoreIds()
    rawData = {int(game_id): boxscore.getBoxScoreData(game_id) for game_id in ids}
    # Store the data; `with` closes the shelf even on error.
    with shelve.open(shelf_name) as d:
        d['raw'] = rawData
def getRawData():
    """Return the {game_id: game_data} dict stored under 'raw' in the shelf."""
    # `with` closes the shelf even if the lookup raises.
    with shelve.open(shelf_name) as d:
        return d['raw']
def main():
    """Ad-hoc smoke test: print the stored data for one game."""
    rawData = getRawData()
    # initData() stores keys as int(game_id); the original looked up the
    # *string* '2015020014', which can only raise KeyError.
    print(rawData[2015020014])
    # initData()


if __name__ == '__main__':
    main()
# Representation of a fluid quantity which has an ion index.
import matplotlib.pyplot as plt
from . FluidQuantity import FluidQuantity
from . UnknownQuantity import UnknownQuantity
class IonSpeciesFluidQuantity(UnknownQuantity):
    """Fluid quantity carrying an ion-species index (NT x NI x NR data)."""

    def __init__(self, name, data, attr, grid, output):
        """
        Constructor.

        :param name:   name of the quantity.
        :param data:   raw data array, either 3-D (time, ion, radius) or
                       2-D (time, radius) for a single species.
        :param attr:   extra attributes forwarded to derived FluidQuantity objects.
        :param grid:   grid on which the quantity is defined.
        :param output: parent output object; provides the ion metadata.
        """
        super().__init__(name=name, data=data, attr=attr, grid=grid, output=output)
        self.attr = attr
        # Ion metadata (names, charges) from the parent output.
        self.ions = output.ionmeta

    def __repr__(self):
        """
        Convert this object to an "official" string.
        """
        s = self.__str__()
        if hasattr(self, 'description') and hasattr(self, 'description_eqn'):
            s += "\n:: {}\n:: Evolved using: {}\n".format(self.description, self.description_eqn)
        return s

    def __str__(self):
        """
        Convert this object to a string.
        """
        if self.data.ndim == 3:
            nt, ni, nr = self.data.shape
        else:
            # 2-D data means a single ion species; remaining axes are (time, radius).
            ni, nt, nr = 1, *self.data.shape
        s = '({}) Ion species fluid quantity of size NI x NT x NR = {} x {} x {}\n'.format(self.name, ni, nt, nr)
        for i in range(len(self.ions.Z)):
            # assumes self.ions[i] unpacks to (name, Z) -- TODO confirm against ionmeta
            s += " {:2s} (Z = {:3d})\n".format(*self.ions[i])
        return s

    def __getitem__(self, name):
        """
        Direct access to data: return a FluidQuantity for the named ion.
        """
        idx = self.ions.getIndex(name)
        if self.data.ndim == 3:
            data = self.data[:,idx,:]
        else:
            # Only one species stored; the index is irrelevant.
            data = self.data[:]
        return FluidQuantity(name='{}_{}'.format(self.name, name), data=data, grid=self.grid, output=self.output, attr=self.attr)

    def dumps(self, ion=None, r=None, t=None):
        """
        Print the data in this quantity.
        """
        return self.get(ion=ion, r=r, t=t).__str__()

    def get(self, ion=None, r=None, t=None):
        """
        Returns data for the specified ion, or in the specified time
        interval or radial point. If none of the indices are given, returns
        the full evolution of the quantity.
        """
        # None selects everything along that axis.
        sion = ion if ion is not None else slice(None)
        sr = r if r is not None else slice(None)
        st = t if t is not None else slice(None)
        if self.data.ndim == 3:
            return self.data[st,sion,sr]
        else:
            return self.data[st,sr]

    def plot(self, ion=None, ax=None, show=None, r=None, t=None, *args, **kwargs):
        """
        Plot data for all members of this IonSpeciesFluidQuantity in the
        same figure.
        """
        # Prevent trying to plot multiple 2D plots in the same window...
        if ion is None:
            if (r is None and self.grid.r.size != 1) and (t is None):
                raise Exception('Cannot plot ion temperature for all ions simultaneously when nr > 1.')
        if ion is not None:
            q = self[ion]
            ax = q.plot(ax=ax, show=show, r=r, t=t, *args, **kwargs)
        else:
            # One curve per species, labelled by ion name.
            for i in self.ions.names:
                q = self[i]
                ax = q.plot(ax=ax, show=show, r=r, t=t, label=i, *args, **kwargs)
            plt.legend()
        return ax
|
# Sikuli (Jython 2) automation script: plays an Angry Birds level until the
# win screen appears. The .png arguments are screenshot patterns captured in
# the Sikuli IDE; find/click/dragDrop/exists/popup come from the Sikuli API.
# Starts Angry Birds
s = find("1315859303639.png")
doubleClick(s)
#Clicks the play button
play = "PTIJW.png"
wait(play, 20)
click(play)
# Selects Game Region
r = selectRegion("Select the app area")
r.highlight(2)
wait(5)
# Starts level 1-5
click(r.find("1320348602390.png"))
wait(5)
click(r.find("1320348830291.png"))
wait("1315865530307.png", FOREVER)
pause = r.find("1315936491114.png")
tries = 1
# Main game loop: x sweeps the vertical drag offset to vary the launch angle.
for x in range(60, 300):
    print tries
    # Fling the bird
    bird = r.find("1318266373260.png")
    dragDrop(bird, [bird.x - 120, bird.y + x])
    wait(2)
    # Wait for the shot to resolve (0.87 similarity to tolerate animation).
    wait(Pattern("ml.png").similar(0.87), 25)
    wait(5)
    # wait(25)
    # Level-failed screen: click retry and count the attempt.
    if exists("1315939723295.png"):
        click(r.find("1315939750314.png"))
        tries = tries + 1
    # Win screen: report and stop.
    if exists("HJ9EL.png"):
        popup("Winner! - after " + str(tries) + " tries")
        exit()
# Read integers until the user answers 'n', then report how many values were
# typed, the values in descending order, and the position of the value 5.
valores = []
while True:
    v = int(input('Digite um valor: '))
    valores.append(v)
    # Re-prompt until the answer starts with 's' or 'n'. Using [:1] instead
    # of [0] avoids the IndexError the original raised on an empty answer.
    resp = ''
    while resp not in ('s', 'n'):
        resp = str(input('Quer continuar? [S/N] ')).lower().strip()[:1]
    if resp == 'n':
        break
print('-=' * 30)
print(f'Você digitou {len(valores)} elementos.')
print(f'Os valores em ordem decrescente são {sorted(valores, reverse=True)}')
print('O valor 5 ', end='')
if 5 in valores:
    # Report a 1-based position of the first occurrence.
    pos = (valores.index(5)) + 1
    print(f'é o {pos}ª valor da lista!')
else:
    print('não se encontra na lista!')
|
from sys import argv
from scss import Compiler
def compile_scss_file(filename: str) -> None:
    """Compile `filename` (an .scss file) to CSS, writing next to it with the
    '.scss' suffix dropped (e.g. 'style.css.scss' -> 'style.css').

    The original ignored `filename` and always compiled the hard-coded
    'static/style.css.scss' (opened in binary mode, handing bytes to the
    compiler), and leaked both file handles.
    """
    assert '.scss' in filename, filename
    with open(filename, encoding='utf-8') as src:
        css = Compiler().compile_string(src.read())
    with open(filename.replace('.scss', ''), 'wt') as fh:
        fh.write(css)


if __name__ == '__main__':
    compile_scss_file(argv[1])
|
'''
Create a program that asks the user for a number and then prints out a list of all the divisors of that number.
(If you don’t know what a divisor is, it is a number that divides evenly into another number. For example,
13 is a divisor of 26 because 26 / 13 has no remainder.)
'''
def divisors(n: int) -> list:
    """Return all positive divisors of `n` in ascending order.

    The original loop only checked 1..n//2 and therefore omitted `n` itself,
    which is always a divisor.
    """
    divs = [d for d in range(1, n // 2 + 1) if n % d == 0]
    if n >= 1:
        divs.append(n)
    return divs


if __name__ == '__main__':
    # Guarding the prompt lets the helper be imported/tested without stdin.
    number = int(input('Please insert a number: '))
    print('The divisors of ' + str(number) + ' are: ', divisors(number))
#!/usr/bin/python
from pymongo import MongoClient
from random import randint
from http.server import BaseHTTPRequestHandler, HTTPServer
import os
from azure.keyvault import KeyVaultClient
from msrestazure.azure_active_directory import MSIAuthentication, ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient, SubscriptionClient
#url=os.environ['MONGODB_URI']
KEY_VAULT_URI = "https://angelocontinodevqy2a.vault.azure.net/"
def get_key_vault_credentials():
    """Return Key Vault credentials.

    Uses Managed Service Identity when running inside an Azure Web App
    (detected via APPSETTING_WEBSITE_SITE_NAME), otherwise falls back to a
    service principal read from the AZURE_* environment variables.

    The original contained an unreachable triple-quoted block of old code
    between the `return` and the `else`; it has been removed.
    """
    if "APPSETTING_WEBSITE_SITE_NAME" in os.environ:
        return MSIAuthentication(
            resource='https://vault.azure.net'
        )
    else:
        return ServicePrincipalCredentials(
            client_id=os.environ['AZURE_CLIENT_ID'],
            secret=os.environ['AZURE_CLIENT_SECRET'],
            tenant=os.environ['AZURE_TENANT_ID'],
            resource='https://vault.azure.net'
        )
def get_secret():
    """Fetch the 'cosmostest' secret (the MongoDB URI) from Key Vault."""
    # Get credentials (MSI inside App Service, service principal otherwise).
    credentials = get_key_vault_credentials()
    print (credentials)
    # Create a KeyVault client
    key_vault_client = KeyVaultClient(
        credentials
    )
    # The vault URL may be overridden through the environment.
    key_vault_uri = os.environ.get("KEY_VAULT_URI", KEY_VAULT_URI)
    secret = key_vault_client.get_secret(
        key_vault_uri,  # Your KeyVault URL
        "cosmostest",   # Name of your secret. If you followed the README 'secret' should exists
        ""              # The version of the secret. Empty string for latest
    )
    return secret.value
# Resolve the MongoDB connection string from Key Vault once at startup.
url=get_secret()
print (url)
PORT_NUMBER = 8080
class MyHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        """Handler for GET requests.

        Routes:
          /        -> liveness text
          /mongo   -> insert 500 random business reviews into MongoDB
          /dropdb  -> drop the 'reviews' collection

        Fixes: plain-text bodies were sent with Content-type image/png, the
        /dropdb branch wrote a body without any status line or headers
        (a malformed HTTP response), and the progress messages claimed
        "of 100" while the loop creates 500 documents.
        """
        if self.path=="/":
            self.send_response(200)
            self.send_header('Content-type','text/plain')
            self.end_headers()
            self.wfile.write(bytes("ROOT\n",'UTF-8'))
            print ("ROOT")
        if self.path=="/mongo":
            self.send_response(200)
            self.send_header('Content-type','text/plain')
            self.end_headers()
            self.wfile.write(bytes("MONGODB\n",'UTF-8'))
            print ("MONGO")
            client = MongoClient(url)
            print ("Client:"+str(client))
            db=client.business
            #Step 2: Create sample data
            names = ['Kitchen','Animal','State', 'Tastey', 'Big','City','Fish', 'Pizza','Goat', 'Salty','Sandwich','Lazy', 'Fun']
            company_type = ['LLC','Inc','Company','Corporation']
            company_cuisine = ['Pizza', 'Bar Food', 'Fast Food', 'Italian', 'Mexican', 'American', 'Sushi Bar', 'Vegetarian']
            for x in range(1, 501):
                business = {
                    'name' : names[randint(0, (len(names)-1))] + ' ' + names[randint(0, (len(names)-1))] + ' ' + company_type[randint(0, (len(company_type)-1))],
                    'rating' : randint(1, 5),
                    'cuisine' : company_cuisine[randint(0, (len(company_cuisine)-1))]
                }
                #Step 3: Insert business object directly into MongoDB via insert_one
                result=db.reviews.insert_one(business)
                #Step 4: Print to the console the ObjectID of the new document
                print('Created {0} of 500 as {1}'.format(x,result.inserted_id))
            #Step 5: Tell us that you are done
            print('finished creating 500 business reviews')
        if self.path=="/dropdb":
            self.send_response(200)
            self.send_header('Content-type','text/plain')
            self.end_headers()
            self.wfile.write(bytes("DROPDB\n",'UTF-8'))
            print ("dropdb")
            client = MongoClient(url)
            db = client.business
            mycol = db["reviews"]
            mycol.drop()
# Run the HTTP server until interrupted with Ctrl-C.
try:
    server = HTTPServer(('', PORT_NUMBER), MyHandler)
    print('Started httpserver on port', PORT_NUMBER)
    server.serve_forever()
except KeyboardInterrupt:
    server.server_close()
    print('Stopping server')
|
import time
def now():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string.

    The original passed a struct_time as the *format* argument of
    time.strftime(), which always raised TypeError.
    """
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
def time2str(t):
    """Format a datetime-like object as 'YYYY-MM-DD HH:MM:SS'."""
    formatted = t.strftime("%Y-%m-%d %H:%M:%S")
    return formatted
def str2date(date_str):
    """Parse a 'YYYY-MM-DD' string into a time.struct_time.

    Note: the parameter was renamed from `str`, which shadowed the builtin.
    """
    return time.strptime(date_str, '%Y-%m-%d')
|
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse_lazy
from django.views.generic import CreateView, ListView, UpdateView, DeleteView
from django.views.generic.dates import MonthArchiveView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import *
from .forms import *
#####################CREACION DE LOS POSTS###############################
#INICIO: PostsViews
class PostCreateView(LoginRequiredMixin, CreateView):  # renders PostForm (from forms.py) into post_create.html
    """Create a Post; requires an authenticated user."""
    model = Post
    form_class = PostForm
    template_name = 'posts/post_create.html'
    success_url = reverse_lazy('home')
    def form_valid(self,form):
        # Stamp the logged-in user as the author before saving.
        form.instance.author = self.request.user
        return super().form_valid(form)
def CategoryView(request, cats):
    """Render all posts belonging to the category encoded in the slug `cats`."""
    category_name = cats.replace('-', ' ')
    category_posts = Post.objects.filter(categoria=category_name).order_by('-fecha_publicacion')
    context = {
        'cats': cats.title().replace('-', ' '),
        'category_posts': category_posts,
    }
    return render(request, 'posts/categorias.html', context)
def FechasView(request):
    """Render posts published up to now, oldest first.

    The original body referenced `self`, `timezone`, and `posts` — none of
    which were defined here (it was copy/pasted from a class-based view) —
    and called render() with the arguments in the wrong order, so it always
    raised at runtime.
    """
    from django.utils import timezone  # not imported at module level
    fechas = Post.objects.filter(fecha_publicacion__lte=timezone.now()).order_by('fecha_publicacion')
    # assumes FormFecha comes from `.forms import *` — TODO confirm
    context = {'posts': fechas, 'fecha': FormFecha}
    return render(request, 'posts/categorias.html', context)
def post_detail(request, pk):
    """Show a single post, returning 404 when `pk` does not exist."""
    the_post = get_object_or_404(Post, pk=pk)
    context = {'post': the_post}
    return render(request, 'posts/post_detail.html', context)
class PostUpdateView(UpdateView):
    """Edit an existing Post.

    NOTE(review): unlike PostCreateView this has no LoginRequiredMixin —
    confirm whether anonymous edits are intended.
    """
    form_class = PostForm
    model = Post
    template_name = 'posts/post_create.html'
    success_url = reverse_lazy('home')
    def form_valid(self,form):
        # Re-stamp the current user as author on every edit.
        form.instance.author = self.request.user
        return super().form_valid(form)
class PostDeleteView(DeleteView):
    """Confirm and delete a Post, then return home."""
    model = Post
    template_name = 'posts/post_confirm_delete.html'
    success_url = reverse_lazy('home')
#FIN: PostsViews
#-------------------------------------------------------------
#INICIO: CommentsViews
class AddCommentView(CreateView):
    """Create a comment attached to the post identified by the URL's pk."""
    model = Comentarios
    form_class = CommentForm
    template_name = 'posts/add_comment.html'
    success_url = reverse_lazy('home')
    def form_valid(self,form):
        # Bind the comment to the current user and to the post from the URL.
        form.instance.user = self.request.user
        form.instance.post_id = self.kwargs['pk']
        return super().form_valid(form)
class UpDateCommentView(UpdateView):
    """Edit an existing comment."""
    form_class = CommentForm
    model = Comentarios
    template_name = 'posts/add_comment.html'
    success_url = reverse_lazy('home')
    def form_valid(self,form):
        # Re-stamp the current user as the comment's owner.
        form.instance.user = self.request.user
        return super().form_valid(form)
class DelCommentView(DeleteView):
    """Confirm and delete a comment, then return home."""
    model = Comentarios
    template_name = 'posts/comment_delete.html'
    success_url = reverse_lazy('home')
#FIN: CommentsViews
#-------------------------------------------------------------
#INICIO: Filtro FechasView
class PostMonthArchiveView(MonthArchiveView):
    """Monthly archive of posts."""
    queryset = Post.objects.all()
    # NOTE(review): other views in this file filter/order on
    # 'fecha_publicacion'; confirm the Post model really defines 'pub_date'.
    date_field = "pub_date"
    allow_future = True
#INICIO: Filtro FechasView
|
import boke
import timeit
def time_estimator(data,
                   function,
                   units='seconds',
                   sampling_fraction=1000):
    '''
    Estimates the time it takes to perform a given `boke` function on the
    full dataset by timing it on a 1/sampling_fraction sample and scaling up.

    :param data:              the dataset the function will eventually process.
    :param function:          name of the boke attribute to call.
    :param units:             'seconds' (default) or 'minutes' for the report.
    :param sampling_fraction: fraction of the data used for the sample run.

    Fixes: the original compared `units is 'minutes'` — string identity is an
    implementation detail and warns on CPython 3.8+ — and contained a no-op
    self-assignment plus an unused local.
    '''
    # load the method
    method_to_call = getattr(boke, function)
    estimate_corrector = 1.1  # fudge factor for sub-linear startup costs
    sample_size = round(len(data) / sampling_fraction)
    start_time = timeit.default_timer()
    method_to_call(data[:sample_size])
    end_time = timeit.default_timer()
    # Scale the sample time up to the full dataset, rounded to tens.
    estimate_seconds = (end_time - start_time) * sampling_fraction
    estimated_time = round(estimate_seconds * estimate_corrector, -1)
    if units == 'minutes':
        estimated_time = estimated_time / 60
        print("It will take roughly %d minutes" % estimated_time)
    else:
        print("It will take roughly %d seconds" % estimated_time)
|
import asyncio
def int2bytes(x):
    """Return the minimal big-endian byte representation of a non-negative int.

    The original went through hex and bytes.fromhex(), which raises
    ValueError whenever the hex form has odd length (e.g. 10 -> 'a') and
    for 0; int.to_bytes handles every non-negative value.
    """
    return x.to_bytes(max(1, (x.bit_length() + 7) // 8), 'big')
def bytes2int(x):
    """Interpret `x` as a big-endian unsigned integer (inverse of int2bytes).

    int.from_bytes also accepts empty input (-> 0), which the original
    int(x.hex(), 16) rejected with ValueError.
    """
    return int.from_bytes(x, 'big')
def run(*args):
    """Run the given awaitables to completion; return their results as a list.

    Uses asyncio.run(), which creates and tears down a fresh event loop per
    call; asyncio.get_event_loop() outside a running loop is deprecated.
    Returning the gathered results is backward-compatible (the original
    returned None).
    """
    async def _gather_all():
        return await asyncio.gather(*args)

    return asyncio.run(_gather_all())
|
from rest_framework import serializers
from authentication.models import FriendShip
from authentication.serializers import CustomUserSerializer
from movie.models import Movie, Genre, Director, Actor, UseService, Service, Review, Mark, Quote, Watcher
from functools import reduce
class GenreSerialize(serializers.ModelSerializer):
    """Serialize every field of a Genre."""
    class Meta:
        model = Genre
        fields = '__all__'
class DirectorSerializer(serializers.ModelSerializer):
    """Serialize every field of a Director."""
    class Meta:
        model = Director
        fields = '__all__'
class ActorSerializers(serializers.ModelSerializer):
    """Serialize every field of an Actor."""
    class Meta:
        model = Actor
        fields = '__all__'
class MovieListSerializer(serializers.ModelSerializer):
    """Compact movie representation for list endpoints, with average rating."""
    genres = serializers.SlugRelatedField(slug_field='name', read_only=True, many=True)
    rating = serializers.SerializerMethodField()
    class Meta:
        model = Movie
        fields = ['id', 'title', 'slug', 'posterUrl', 'genres', 'year', 'rating', 'country']
    def get_rating(self, instance):
        """Mean of all Mark values for this movie as a '#.#' string ('0.0' if unrated)."""
        marks = Mark.objects.filter(movie=instance)
        return f'{reduce(lambda pm, m: pm + m.value, marks, 0) / len(marks):.1f}' if len(marks) > 0 else '0.0'
class MovieSerializer(serializers.ModelSerializer):
    """Full movie detail with nested director, genres, actors, and rating."""
    director = DirectorSerializer(read_only=True)
    company = serializers.SlugRelatedField(slug_field='name', read_only=True)
    genres = GenreSerialize(read_only=True, many=True)
    actors = ActorSerializers(read_only=True, many=True)
    rating = serializers.SerializerMethodField()
    class Meta:
        model = Movie
        fields = ['id', 'director', 'company', 'genres', 'actors', 'title', 'slug',
                  'description', 'short_description', 'posterUrl', 'year', 'country', 'trailer', 'age', 'rating']
    def get_rating(self, instance):
        """Mean of all Mark values for this movie as a '#.#' string ('0.0' if unrated)."""
        marks = Mark.objects.filter(movie=instance)
        return f'{reduce(lambda pm, m: pm + m.value, marks, 0) / len(marks):.1f}' if len(marks) > 0 else '0.0'
class ServiceSerializer(serializers.ModelSerializer):
    """Streaming service name and logo only."""
    class Meta:
        model = Service
        fields = ['name', 'logo']
class ServicesSerializer(serializers.ModelSerializer):
    """A movie's availability on one service (type, link, price)."""
    service = ServiceSerializer(read_only=True)
    class Meta:
        model = UseService
        fields = ['id', 'service', 'type', 'link', 'money']
class CreateQuoteListSerializer(serializers.ModelSerializer):
    """Quote representation returned right after creation; type is always 'self'."""
    author = CustomUserSerializer(read_only=True)
    type = serializers.SerializerMethodField()
    class Meta:
        model = Quote
        fields = ['id', 'hero', 'author', 'content', 'type', 'permissions']
    def get_type(self, instance):
        # The creator is viewing their own quote by definition.
        return 'self'
class ReviewListSerializer(serializers.ModelSerializer):
    """Review plus the author's mark and the viewer's relation to the author.

    Fix: get_rating used a bare `except:` that silently swallowed *every*
    error (including programming errors); it now catches only the expected
    missing-Mark case.
    """
    author = CustomUserSerializer(read_only=True)
    date = serializers.DateField(format="%d.%m.%Y")
    rating = serializers.SerializerMethodField()
    type = serializers.SerializerMethodField()

    class Meta:
        model = Review
        fields = ['id', 'title', 'author', 'date', 'content', 'rating', 'type', 'permissions']

    def get_rating(self, instance):
        """Mark value the review's author gave this movie, or None."""
        try:
            return Mark.objects.get(movie=instance.movie, user=instance.author).value
        except Mark.DoesNotExist:
            return None

    def get_type(self, instance):
        """'self' | 'friend' | 'default' relative to the requesting user."""
        request_user = self.context['request'].user
        if not request_user.is_authenticated:
            return 'default'
        if request_user == instance.author:
            return 'self'
        # Friendship is directional in the model; accept either direction.
        if FriendShip.objects.filter(sender=request_user,
                                     dester=instance.author,
                                     status=1).first() or \
                FriendShip.objects.filter(sender=instance.author,
                                          dester=request_user,
                                          status=1).first():
            return 'friend'
        return 'default'
class QuoteListSerializer(serializers.ModelSerializer):
    """Quote plus the viewer's relation to its author ('self'/'friend'/'default')."""
    author = CustomUserSerializer(read_only=True)
    date = serializers.DateField(format="%d.%m.%Y")
    type = serializers.SerializerMethodField()
    class Meta:
        model = Quote
        fields = ['id', 'author', 'hero', 'date', 'content', 'type', 'permissions']
    def get_type(self, instance):
        """Classify the requesting user relative to the quote's author."""
        if not self.context['request'].user.is_authenticated:
            return 'default'
        if self.context['request'].user == instance.author:
            return 'self'
        # Friendship rows are directional; accept an accepted (status=1)
        # friendship in either direction.
        if FriendShip.objects.filter(sender=self.context['request'].user,
                                     dester=instance.author,
                                     status=1).first() or \
                FriendShip.objects.filter(
                    sender=instance.author,
                    dester=self.context['request'].user,
                    status=1).first():
            return 'friend'
        return 'default'
class WatcherListSerializer(serializers.ModelSerializer):
    """A watcher row plus friendship status and watch-overlap statistics.

    Fix: get_rating used a bare `except:` that silently swallowed every
    error; it now catches only the expected missing-Mark case.
    """
    user = CustomUserSerializer(read_only=True)
    status = serializers.SerializerMethodField()
    movies = serializers.SerializerMethodField()
    rating = serializers.SerializerMethodField()

    class Meta:
        model = Watcher
        fields = ['id', 'user', 'status', 'movies', 'rating']

    def get_status(self, instance):
        """-1 for the viewer's own row, 1 for any friendship row, else 0."""
        request_user = self.context['request'].user
        if not request_user.is_authenticated:
            return 0
        if instance.user == request_user:
            return -1
        # Friendship rows are directional; check both directions.
        if FriendShip.objects.filter(sender=request_user,
                                     dester=instance.user).exists() or \
                FriendShip.objects.filter(sender=instance.user,
                                          dester=request_user).exists():
            return 1
        return 0

    def get_movies(self, instance):
        """Counts of the watcher's movies and how many the viewer also watches."""
        user_watchs = Watcher.objects.filter(user=instance.user)
        match = 0
        if self.context['request'].user.is_authenticated:
            for movie in user_watchs:
                if Watcher.objects.filter(movie=movie.movie, user=self.context['request'].user).exists():
                    match += 1
        return {
            'all': user_watchs.count(),
            'match': match
        }

    def get_rating(self, instance):
        """Mark value this user gave the movie, or 0 when unrated."""
        try:
            return Mark.objects.get(user=instance.user, movie=instance.movie).value
        except Mark.DoesNotExist:
            return 0
class MovieCommonSerializer(serializers.Serializer):
    """Composite payload: movie detail, services, and the viewer's state."""
    info = MovieSerializer(read_only=True)
    services = ServicesSerializer(read_only=True, many=True)
    rating = serializers.IntegerField()
    review = serializers.BooleanField()
    watched = serializers.BooleanField()
|
import boto3
import hashlib
import logging
from botocore.exceptions import ClientError
from cfn_resource_provider import ResourceProvider
from cryptography.hazmat.backends import default_backend as crypto_default_backend
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
import ssm_parameter_name
log = logging.getLogger()
# JSON schema used by cfn_resource_provider.ResourceProvider to validate and
# default the CloudFormation custom-resource properties.
request_schema = {
    "type": "object",
    "required": ["Name"],
    "properties": {
        "Name": {
            "type": "string",
            "minLength": 1,
            "pattern": "[a-zA-Z0-9_/]+",
            "description": "the name of the private key in the parameters store",
        },
        "KeySize": {
            "type": "integer",
            "default": 2048,
            "description": "number of bits in the key",
        },
        "KeyFormat": {
            "type": "string",
            "enum": ["PKCS8", "TraditionalOpenSSL"],
            "default": "PKCS8",
            "description": "encoding type of the private key",
        },
        "Description": {
            "type": "string",
            "default": "",
            "description": "the description of the key in the parameter store",
        },
        "KeyAlias": {
            "type": "string",
            "default": "alias/aws/ssm",
            "description": "KMS key to use to encrypt the key",
        },
        "RefreshOnUpdate": {
            "type": "boolean",
            "default": False,
            "description": "generate a new secret on update",
        },
        "Version": {"type": "string", "description": "opaque string to force update"},
    },
}
class RSAKeyProvider(ResourceProvider):
    """CloudFormation custom-resource provider that stores an RSA private key
    as an SSM SecureString parameter and exposes the public key as attributes."""

    def __init__(self):
        super(RSAKeyProvider, self).__init__()
        self.request_schema = request_schema
        self.ssm = boto3.client("ssm")
        self.iam = boto3.client("iam")
        self.region = boto3.session.Session().region_name
        self.account_id = (boto3.client("sts")).get_caller_identity()["Account"]

    def convert_property_types(self):
        """Coerce string-typed CFN properties to their schema types."""
        self.heuristic_convert_property_types(self.properties)

    @property
    def allow_overwrite(self):
        # Only overwrite when the update targets the same parameter this
        # resource already owns (compared Arn-insensitively).
        return ssm_parameter_name.equals(self.physical_resource_id, self.arn)

    @property
    def arn(self):
        """ARN of the SSM parameter named by the 'Name' property."""
        return ssm_parameter_name.to_arn(self.region, self.account_id, self.get("Name"))

    def name_from_physical_resource_id(self):
        """Parameter name extracted from the stored physical resource id."""
        return ssm_parameter_name.from_arn(self.physical_resource_id)

    @property
    def key_format(self):
        """Private-key serialization format selected by the 'KeyFormat' property."""
        if self.get("KeyFormat", "") == "TraditionalOpenSSL":
            return crypto_serialization.PrivateFormat.TraditionalOpenSSL
        else:
            # Schema default is PKCS8.
            return crypto_serialization.PrivateFormat.PKCS8

    def get_key(self):
        """Load the existing key from SSM; return (private_pem, public_openssh).

        Re-serializes the stored key so the returned PEM honors the currently
        requested KeyFormat.
        """
        response = self.ssm.get_parameter(
            Name=self.name_from_physical_resource_id(), WithDecryption=True
        )
        private_key = response["Parameter"]["Value"].encode("ascii")
        key = crypto_serialization.load_pem_private_key(
            private_key, password=None, backend=crypto_default_backend()
        )
        private_key = key.private_bytes(
            crypto_serialization.Encoding.PEM,
            self.key_format,
            crypto_serialization.NoEncryption(),
        )
        public_key = key.public_key().public_bytes(
            crypto_serialization.Encoding.OpenSSH,
            crypto_serialization.PublicFormat.OpenSSH,
        )
        return private_key.decode("ascii"), public_key.decode("ascii")

    def create_key(self):
        """Generate a fresh RSA key; return (private_pem, public_openssh)."""
        key = rsa.generate_private_key(
            backend=crypto_default_backend(),
            public_exponent=65537,
            key_size=self.get("KeySize"),
        )
        private_key = key.private_bytes(
            crypto_serialization.Encoding.PEM,
            self.key_format,
            crypto_serialization.NoEncryption(),
        )
        public_key = key.public_key().public_bytes(
            crypto_serialization.Encoding.OpenSSH,
            crypto_serialization.PublicFormat.OpenSSH,
        )
        return private_key.decode("ascii"), public_key.decode("ascii")

    def public_key_to_pem(self, private_key):
        """Derive the SubjectPublicKeyInfo PEM from a private-key PEM string."""
        key = crypto_serialization.load_pem_private_key(
            private_key.encode("ascii"), password=None, backend=crypto_default_backend()
        )
        public_key = key.public_key().public_bytes(
            crypto_serialization.Encoding.PEM,
            crypto_serialization.PublicFormat.SubjectPublicKeyInfo,
        )
        return public_key.decode("ascii")

    def create_or_update_secret(self, overwrite=False, new_secret=True):
        """Write the key into SSM and publish Arn/PublicKey/Hash/Version attributes.

        :param overwrite:  allow replacing an existing parameter value.
        :param new_secret: generate a new key (True) or re-store the existing one.
        """
        try:
            if new_secret:
                private_key, public_key = self.create_key()
            else:
                private_key, public_key = self.get_key()
            kwargs = {
                "Name": self.get("Name"),
                "KeyId": self.get("KeyAlias"),
                "Type": "SecureString",
                "Overwrite": overwrite,
                "Value": private_key,
            }
            if self.get("Description") != "":
                kwargs["Description"] = self.get("Description")
            response = self.ssm.put_parameter(**kwargs)
            # Older API responses may omit Version; treat those as version 1.
            version = response["Version"] if "Version" in response else 1
            self.set_attribute("Arn", self.arn)
            self.set_attribute("PublicKey", public_key)
            self.set_attribute("PublicKeyPEM", self.public_key_to_pem(private_key))
            # md5 here is a change-detection fingerprint, not a security control.
            self.set_attribute(
                "Hash", hashlib.md5(public_key.encode("utf-8")).hexdigest()
            )
            self.set_attribute("Version", version)
            if not ssm_parameter_name.equals(self.physical_resource_id, self.arn):
                # prevent CFN deleting a resource with identical Arns in different formats.
                self.physical_resource_id = self.arn
            self.set_attribute("ParameterName", self.name_from_physical_resource_id())
        except ClientError as e:
            self.physical_resource_id = "could-not-create"
            self.fail(str(e))

    def create(self):
        """CFN Create: always generates a brand-new key."""
        self.create_or_update_secret(overwrite=False, new_secret=True)

    def update(self):
        """CFN Update: regenerate only when RefreshOnUpdate is set."""
        self.create_or_update_secret(
            overwrite=self.allow_overwrite, new_secret=self.get("RefreshOnUpdate")
        )

    def delete(self):
        """CFN Delete: remove the parameter; tolerate it already being gone."""
        # physical_resource_id is an ARN like arn:aws:ssm:...:parameter/name;
        # the part after the first '/' is the parameter name.
        name = self.physical_resource_id.split("/", 1)
        if len(name) == 2:
            try:
                self.ssm.delete_parameter(Name=name[1])
            except ClientError as e:
                if e.response["Error"]["Code"] != "ParameterNotFound":
                    return self.fail(str(e))
            # NOTE(review): `name` here is the 2-element split list, so the
            # message renders the list, not the parameter name — confirm intent.
            self.success("System Parameter with the name %s is deleted" % name)
        else:
            self.success(
                "System Parameter with the name %s is ignored"
                % self.physical_resource_id
            )
# Module-level singleton reused across warm Lambda invocations.
provider = RSAKeyProvider()


def handler(request, context):
    """AWS Lambda entry point: delegate to the custom-resource provider."""
    return provider.handle(request, context)
|
from helpers import *
def main():
    """Run the benchmark suite via helpers.do_test (name, function, args)."""
    print("Running benchmarks...")
    # Merge sort 10000000 elements, 100 - random seed
    do_test("Merge sort", 'merge_sort', [10000000, 100])
    # Insertion sort 100000 elements, 100 - random seed
    do_test("Insertion sort", 'insertion_sort', [100000, 100])
    # Prime sum 10000 elements (the old comment said 20000)
    do_test("Prime sum", 'prime_sum', [10000])
    # Tag 300 elements
    do_test("Tag", 'tag', [300])
    # String permutations on ABCDEFGHIJ
    do_test("String permutations", 'perm', ["ABCDEFGHIJ"])
    # Prime count on 100000000
    do_test("Prime count", 'prime_count', [100000000])
    # Hash table on 1000000 elements, 13 (the old comment said 100000)
    do_test("Hash table", 'hash_table', [1000000, 13])


if __name__ == "__main__":
    main()
#!/usr/bin/python
import time
import serial
import rospy
from sensor_msgs.msg import NavSatFix
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Twist
from usma_novatel_parser import *
from os.path import expanduser
# Start the ROS node and create the ROS publishers for GPS fixes and IMU data.
gpsPub = rospy.Publisher('gps/fix', NavSatFix, queue_size=1)
imuPub = rospy.Publisher('imu_data', Imu, queue_size=1)
#novaPub = rospy.Publisher(???,???, queue_size=1)
rospy.init_node('novatel_CNS5000', anonymous=True)
rate = rospy.Rate(5) # 5 Hz loop rate (old comment incorrectly said 10hz)
# Replay NovAtel CNS-5000 log lines from a CSV capture, parse each message
# type and republish it as ROS NavSatFix / Imu messages.
with open("/home/user1/box_inspvaa_raw.csv") as insData:
    try:
        while not rospy.is_shutdown():
            kvh5000_output = insData.readline()  # read one logged line
            #outFile.write(kvh5000_output) # Option to log data to file
            print(kvh5000_output)
            #TODO print once when gets into different mode like initializing, finesteering, etc
            msg_id = kvh5000_output.split(",")[0]  # message identifier field
            if msg_id == "#BESTGPSPOSA":  # GPS position message
                nova_Data = kvh5000_output.split(';')[1]  # split header from body
                nova_Data = nova_Data.split(',')          # split body into fields
                gps_out = parse_novatelGPS(nova_Data)     # returns a NavSatFix message
                gpsPub.publish(gps_out)
            elif msg_id == "%RAWIMUSA":  # raw IMU message
                nova_Data = kvh5000_output.split(';')[1]
                nova_Data = nova_Data.split(',')
                imu_out = parse_novatelIMU(nova_Data)
                imuPub.publish(imu_out)
            elif msg_id == "[COM1]#INSPVAA":  # combined INS position/velocity/attitude
                nova_Data = kvh5000_output.split(';')[1]
                nova_Data = nova_Data.split(',')
                inspva_out = parse_novatelINSPVA(nova_Data)
                gpsPub.publish(inspva_out[1])
                imuPub.publish(inspva_out[0])
            rate.sleep()
    except KeyboardInterrupt:
        # BUG FIX: the original handler wrote 'unlogall' to `ser` and called
        # ser.close(), but no serial port is ever opened in this file-replay
        # script, so Ctrl-C raised NameError instead of exiting cleanly.
        #outFile.close()
        raise
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Module version string.
__version__ = '1.0.1'
# NOTE: dead code intentionally kept for reference — this state is now
# calculated on the PostgreSQL side, as the string below explains.
''' NOT USABLE CODE STATE CALCULATED ON POSTGRESQL SIDE
create_hw_module_command_state_element_query = """
INSERT INTO public.hw_module_command_state AS hmcs
(hw_module_id, sound_buzzer_state, sound_buzzer_lock, stop_engine_state, stop_engine_lock,
disable_engine_start_state, disable_engine_start_lock, active, deleted, created_on, updated_on)
VALUES
($1, $2, $3, $4, $5, $6, $7, FALSE, now(), now()) RETURNING *;
"""
'''
import abc
from grant.settings import (
SITE_URL,
GITHUB_CLIENT_ID,
GITHUB_CLIENT_SECRET,
TWITTER_CLIENT_ID,
TWITTER_CLIENT_SECRET,
LINKEDIN_CLIENT_ID,
LINKEDIN_CLIENT_SECRET
)
from requests_oauthlib import OAuth1Session, OAuth2Session
class VerifySocialException(Exception):
    # Raised when a social service is unsupported or verification fails.
    pass
class SocialItem(abc.ABC):
    """Abstract interface for a third-party social-profile service."""

    @abc.abstractmethod
    def url_pattern(self, username):
        """Return the profile URL for *username*, or the raw pattern if falsy."""

    @abc.abstractmethod
    def get_login_url(self):
        """Return the URL a user should visit to start the OAuth login flow."""

    @abc.abstractmethod
    def verify_and_get_user(self, code):
        """Exchange OAuth *code* for a token and return the account's username."""
class Github(SocialItem):
    """GitHub profile verification via the OAuth2 web flow."""

    def url_pattern(self, username):
        """Profile URL for *username*; the bare pattern when username is falsy."""
        pattern = 'https://github.com/{}'
        if not username:
            return pattern
        return pattern.format(username)

    def get_login_url(self):
        """Authorization URL requesting read-only user scope."""
        return 'https://github.com/login/oauth/authorize?scope=read:user&client_id={}'.format(GITHUB_CLIENT_ID)

    def verify_and_get_user(self, code):
        """Trade *code* for an access token and return the GitHub login name."""
        session = OAuth2Session(GITHUB_CLIENT_ID)
        session.fetch_token(
            'https://github.com/login/oauth/access_token',
            client_secret=GITHUB_CLIENT_SECRET,
            code=code,
        )
        return session.get('https://api.github.com/user').json()['login']
class Twitter(SocialItem):
    """Twitter profile verification via the OAuth1 three-legged flow."""

    def url_pattern(self, username):
        """Profile URL for *username*; the bare pattern when username is falsy."""
        pattern = 'https://twitter.com/{}'
        if not username:
            return pattern
        return pattern.format(username)

    def get_login_url(self):
        """Obtain a request token, then return the user-facing authorize URL."""
        session = OAuth1Session(client_key=TWITTER_CLIENT_ID, client_secret=TWITTER_CLIENT_SECRET)
        # fetch_request_token stores the request token on the session, which
        # authorization_url needs to build the redirect.
        session.fetch_request_token('https://api.twitter.com/oauth/request_token')
        return session.authorization_url('https://api.twitter.com/oauth/authorize')

    def verify_and_get_user(self, code):
        """*code* is 'oauth_token:oauth_verifier'; return the screen name."""
        oauth_token, oauth_verifier = code.split(':')
        session = OAuth1Session(
            client_key=TWITTER_CLIENT_ID,
            client_secret=TWITTER_CLIENT_SECRET,
            resource_owner_key=oauth_token)
        token = session.fetch_access_token(
            'https://api.twitter.com/oauth/access_token',
            verifier=oauth_verifier)
        return token['screen_name']
class Linkedin(SocialItem):
    """LinkedIn profile verification via OAuth2."""

    def url_pattern(self, username=None):
        """Profile URL for *username*; the bare pattern when username is falsy."""
        url = 'http://www.linkedin.com/in/{}'
        return url if not username else url.format(username)

    def get_login_url(self):
        """Return the LinkedIn OAuth2 authorization URL with our callback."""
        authorization_url = 'https://www.linkedin.com/uas/oauth2/authorization'
        redirect_uri = '{}/callback/linkedin'.format(SITE_URL)
        linkedin = OAuth2Session(LINKEDIN_CLIENT_ID, redirect_uri=redirect_uri)
        url = linkedin.authorization_url(authorization_url)
        return url

    def verify_and_get_user(self, code):
        """Exchange *code* for a token, then return the public-profile username."""
        redirect_uri = '{}/callback/linkedin'.format(SITE_URL)
        linkedin = OAuth2Session(LINKEDIN_CLIENT_ID, redirect_uri=redirect_uri)
        token_url = 'https://www.linkedin.com/uas/oauth2/accessToken'
        user_url = 'https://api.linkedin.com/v1/people/~:(public-profile-url)?format=json'
        linkedin.fetch_token(token_url, client_secret=LINKEDIN_CLIENT_SECRET, code=code)
        user = linkedin.get(user_url).json()
        profile_url = user['publicProfileUrl']
        # BUG FIX: the previous code stripped the http:// pattern base with
        # str.replace(), which is a no-op when the API returns an https URL,
        # so the full URL leaked out as the "username".  Take the last path
        # segment of the profile URL instead, which is robust to scheme and
        # trailing slashes.
        username = profile_url.rstrip('/').rsplit('/', 1)[-1]
        return username
# Registry of supported services, keyed by the uppercase service name used
# by get_social().
social_items = {
    'GITHUB': Github(),
    'TWITTER': Twitter(),
    'LINKEDIN': Linkedin(),
}
def get_social(service):
    """Return the SocialItem registered for *service*.

    Raises VerifySocialException when the service is not supported.
    """
    try:
        return social_items[service]
    except KeyError:
        raise VerifySocialException('Social service "{}" is not supported.'.format(service))
def generate_social_url(service, username):
    """Return the public profile URL for *username* on *service*."""
    return get_social(service).url_pattern(username)
def verify_social(service, code):
    """Verify an OAuth *code* against *service* and return the username."""
    return get_social(service).verify_and_get_user(code)
def get_social_login_url(service):
    """Return the OAuth login URL for *service*."""
    return get_social(service).get_login_url()
|
from contextlib import nullcontext
import matplotlib
import numpy as np
import pandas as py
import seaborn as sns
from pandas import read_csv
import matplotlib.pyplot as pyplot
from sklearn.metrics import r2_score
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
# Load the dataset and drop the city-name column, keeping only numeric features.
dataset = read_csv("D:\\dersler\\oruntu\\oruntufinal\\18110131042\\kod\\data.csv")
data = dataset.drop(columns = ['SehirAdi'])
array = data.values
X1 = array[:,0:1] # tree density ("Agac Yogunlugu")
X2 = array[:,1:2] # population density ("Nufus Yogunlugu")
X3 = array[:,2:3] # air pollution ("Hava Kirliligi")
Y = array[:,3:4] # target label ("Sonuc")
# 70/30 train/validation split, one independent split per feature.
X1_train, X1_validation, Y1_train, Y1_validation = train_test_split(X1, Y, test_size=0.30) # split for feature 1
X2_train, X2_validation, Y2_train, Y2_validation = train_test_split(X2, Y, test_size=0.30) # split for feature 2
X3_train, X3_validation, Y3_train, Y3_validation = train_test_split(X3, Y, test_size=0.30) # split for feature 3
#print(data)
print("###########################")
print("veri seti incelemesi")
print(data.info()) # inspect the dataset structure
print("###########################")
print("veri seti istatistikler")
print(data.describe()) # summary statistics
print("###########################")
print("veri seti isNull?")
print(data.isnull().sum()) # count of missing values per column
print("###########################")
print("veri seti kor degerleri")
print(data.corr()) # correlation matrix
print("###########################")
print("veri seti histogram")
print(data.hist()) # histograms of each column
######################################################################
######################################################################
### SCATTER PLOTS: each feature against the target ###################
data.plot(x="AgacYogunlugu", y="Sonuc", kind='scatter', subplots=True, layout=(3,3), sharex=False, sharey=False)
data.plot(x="NufusYogunlugu", y="Sonuc", kind='scatter', subplots=True, layout=(3,3), sharex=False, sharey=False)
data.plot(x="HavaKirliligi", y="Sonuc", kind='scatter', subplots=True, layout=(3,3), sharex=False, sharey=False)
pyplot.show()
###############################################################################################################
### K-NEAREST NEIGHBOURS ######################################################################################
###############################################################################################################
# Define one model per feature
komsuModeli1 = KNeighborsClassifier(n_neighbors = 5, n_jobs = -1)
komsuModeli2 = KNeighborsClassifier(n_neighbors = 5, n_jobs = -1)
komsuModeli3 = KNeighborsClassifier(n_neighbors = 5, n_jobs = -1)
# Train the models
komsuModeli1.fit(X1_train, Y1_train)
komsuModeli2.fit(X2_train, Y2_train)
komsuModeli3.fit(X3_train, Y3_train)
# Predict on the validation sets
Y_predictKomsu1 = komsuModeli1.predict(X1_validation)
Y_predictKomsu2 = komsuModeli2.predict(X2_validation)
Y_predictKomsu3 = komsuModeli3.predict(X3_validation)
# Build and plot the confusion matrices
komsuModeliMatrix1 = confusion_matrix(Y1_validation, Y_predictKomsu1)
f, ax = pyplot.subplots(figsize = (5, 5))
sns.heatmap(komsuModeliMatrix1, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('KNN Algoritması - Hata Matrisi(Ağaç Yoğunluğu)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
komsuModeliMatrix2 = confusion_matrix(Y2_validation, Y_predictKomsu2)
f, ax = pyplot.subplots(figsize = (5, 5))
sns.heatmap(komsuModeliMatrix2, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('KNN Algoritması - Hata Matrisi(Nüfus Yoğunluğu)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
komsuModeliMatrix3 = confusion_matrix(Y3_validation, Y_predictKomsu3)
f, ax = pyplot.subplots(figsize = (5, 5))
sns.heatmap(komsuModeliMatrix3, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('KNN Algoritması - Hata Matrisi(Hava Kirliliği)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
# Accuracy scores on the validation sets
komsuSkoru1 = komsuModeli1.score(X1_validation, Y1_validation)
komsuSkoru2 = komsuModeli2.score(X2_validation, Y2_validation)
komsuSkoru3 = komsuModeli3.score(X3_validation, Y3_validation)
print("Komşuluk Algoritması Skoru(Ağaç Yoğunluğu): ", komsuSkoru1)
print("Komşuluk Algoritması Skoru(Nüfus Yoğunluğu): ", komsuSkoru2)
print("Komşuluk Algoritması Skoru(Hava Kirliliği): ", komsuSkoru3)
###############################################################################################################
### DECISION TREE #############################################################################################
###############################################################################################################
# Define one model per feature
kararAgaci1 = DecisionTreeClassifier(random_state = 9)
kararAgaci2 = DecisionTreeClassifier(random_state = 9)
kararAgaci3 = DecisionTreeClassifier(random_state = 9)
# Train the models
kararAgaci1.fit(X1_train, Y1_train)
kararAgaci2.fit(X2_train, Y2_train)
kararAgaci3.fit(X3_train, Y3_train)
# Predict on the validation sets
Y_predictAgac1 = kararAgaci1.predict(X1_validation)
Y_predictAgac2 = kararAgaci2.predict(X2_validation)
Y_predictAgac3 = kararAgaci3.predict(X3_validation)
# Build and plot the confusion matrices
agacMatrix1 = confusion_matrix(Y1_validation, Y_predictAgac1)
f, ax = pyplot.subplots(figsize = (5, 5))
sns.heatmap(agacMatrix1, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('Karar Ağacı Algoritması - Hata Matrisi (Ağaç Yoğunluğu)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
# BUG FIX: matrices 2 and 3 previously compared predictions against
# Y1_validation; each must use the labels from its own split (Y2/Y3).
agacMatrix2 = confusion_matrix(Y2_validation, Y_predictAgac2)
f, ax = pyplot.subplots(figsize = (5, 5))
sns.heatmap(agacMatrix2, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('Karar Ağacı Algoritması - Hata Matrisi (Nüfus Yoğunluğu)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
agacMatrix3 = confusion_matrix(Y3_validation, Y_predictAgac3)
f, ax = pyplot.subplots(figsize = (5, 5))
sns.heatmap(agacMatrix3, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('Karar Ağacı Algoritması - Hata Matrisi (Hava Kirliliği)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
# Accuracy scores on the validation sets
kararAgaciSkor1 = kararAgaci1.score(X1_validation, Y1_validation)
kararAgaciSkor2 = kararAgaci2.score(X2_validation, Y2_validation)
kararAgaciSkor3 = kararAgaci3.score(X3_validation, Y3_validation)
print("Karar Ağacı Skoru(Ağaç Yoğunluğu): ", kararAgaciSkor1)
print("Karar Ağacı Skoru(Nüfus Yoğunluğu): ", kararAgaciSkor2)
print("Karar Ağacı Skoru(Hava Kirliliği): ", kararAgaciSkor3)
###############################################################################################################
### RANDOM FOREST #############################################################################################
###############################################################################################################
# Define one model per feature
rOrman1 = RandomForestClassifier(n_estimators = 100, random_state = 9, n_jobs = -1)
rOrman2 = RandomForestClassifier(n_estimators = 100, random_state = 9, n_jobs = -1)
rOrman3 = RandomForestClassifier(n_estimators = 100, random_state = 9, n_jobs = -1)
# Train the models
rOrman1.fit(X1_train, Y1_train)
rOrman2.fit(X2_train, Y2_train)
rOrman3.fit(X3_train, Y3_train)
# Predict on the validation sets
Y_predictOrman1 = rOrman1.predict(X1_validation)
Y_predictOrman2 = rOrman2.predict(X2_validation)
Y_predictOrman3 = rOrman3.predict(X3_validation)
# Build and plot the confusion matrices
ormanMatrix1 = confusion_matrix(Y1_validation, Y_predictOrman1)
f, ax = pyplot.subplots(figsize = (5, 5))
sns.heatmap(ormanMatrix1, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('Rasgele Orman Algoritması - Hata Matrisi (Ağaç Yoğunluğu)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
ormanMatrix2 = confusion_matrix(Y2_validation, Y_predictOrman2)
f, ax = pyplot.subplots(figsize = (5, 5))
sns.heatmap(ormanMatrix2, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('Rasgele Orman Algoritması - Hata Matrisi (Nüfus Yoğunluğu)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
ormanMatrix3 = confusion_matrix(Y3_validation, Y_predictOrman3)
f, ax = pyplot.subplots(figsize = (5, 5))
sns.heatmap(ormanMatrix3, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('Rasgele Orman Algoritması - Hata Matrisi (Hava Kirliliği)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
# Accuracy scores on the validation sets.
# BUG FIX: scores 2 and 3 previously called rOrman1.score(...), so models 2
# and 3 were never evaluated; score each model on its own split.
ormanSkor1 = rOrman1.score(X1_validation, Y1_validation)
ormanSkor2 = rOrman2.score(X2_validation, Y2_validation)
ormanSkor3 = rOrman3.score(X3_validation, Y3_validation)
print("Rasgele Orman Algoritması Skoru(Ağaç Yoğunluğu): ", ormanSkor1)
print("Rasgele Orman Algoritması Skoru(Nüfus Yoğunluğu): ", ormanSkor2)
print("Rasgele Orman Algoritması Skoru(Hava Kirliliği): ", ormanSkor3)
###############################################################################################################
### NAIVE BAYES ###############################################################################################
###############################################################################################################
# Define one model per feature
nBayes1 = GaussianNB()
nBayes2 = GaussianNB()
nBayes3 = GaussianNB()
# Train the models.
# BUG FIX: models 2 and 3 previously fit against Y1_train (labels from the
# first split); each must use the labels from its own split (Y2/Y3).
nBayes1.fit(X1_train, Y1_train)
nBayes2.fit(X2_train, Y2_train)
nBayes3.fit(X3_train, Y3_train)
# Predict on the validation sets
Y_predictBayes1 = nBayes1.predict(X1_validation)
Y_predictBayes2 = nBayes2.predict(X2_validation)
Y_predictBayes3 = nBayes3.predict(X3_validation)
# Build and plot the confusion matrices
bayesMatrix1 = confusion_matrix(Y1_validation, Y_predictBayes1)
f, ax = pyplot.subplots(figsize=(5,5))
sns.heatmap(bayesMatrix1, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('Naive Bayes Algoritması - Hata Matrisi (Ağaç Yoğunluğu)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
bayesMatrix2 = confusion_matrix(Y2_validation, Y_predictBayes2)
f, ax = pyplot.subplots(figsize=(5,5))
sns.heatmap(bayesMatrix2, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('Naive Bayes Algoritması - Hata Matrisi (Nüfus Yoğunluğu)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
bayesMatrix3 = confusion_matrix(Y3_validation, Y_predictBayes3)
f, ax = pyplot.subplots(figsize=(5,5))
sns.heatmap(bayesMatrix3, annot = True, linewidth = 0.9, linecolor = 'red', fmt = 'g', ax = ax, cmap = 'rocket_r')
pyplot.title('Naive Bayes Algoritması - Hata Matrisi (Hava Kirliliği)')
pyplot.xlabel('Y Tahmin')
pyplot.ylabel('Y Test')
pyplot.show()
# Accuracy scores on the validation sets
bayesSkor1 = nBayes1.score(X1_validation, Y1_validation)
bayesSkor2 = nBayes2.score(X2_validation, Y2_validation)
bayesSkor3 = nBayes3.score(X3_validation, Y3_validation)
print("N. Bayes Algoritması Skoru(Ağaç Yoğunluğu): ", bayesSkor1)
print("N. Bayes Algoritması Skoru(Nüfus Yoğunluğu): ", bayesSkor2)
print("N. Bayes Algoritması Skoru(Hava Kirliliği): ", bayesSkor3)
###############################################################################################################
### SCORE TABLE: bar chart of each algorithm's score, per feature #############################################
###############################################################################################################
# NOTE(review): the three model-name lists are identical copies; one shared
# list would suffice — kept as-is to preserve the original structure.
list_models1 = ["Komşuluk Algoritması", "Karar Ağacı", "Rasgele Orman Algoritması", "N. Bayes Algoritması"]
list_models2 = ["Komşuluk Algoritması", "Karar Ağacı", "Rasgele Orman Algoritması", "N. Bayes Algoritması"]
list_models3 = ["Komşuluk Algoritması", "Karar Ağacı", "Rasgele Orman Algoritması", "N. Bayes Algoritması"]
list_scores1 = [komsuSkoru1, kararAgaciSkor1, ormanSkor1, bayesSkor1]
list_scores2 = [komsuSkoru2, kararAgaciSkor2, ormanSkor2, bayesSkor2]
list_scores3 = [komsuSkoru3, kararAgaciSkor3, ormanSkor3, bayesSkor3]
pyplot.figure(figsize = (12, 4))
pyplot.bar(list_models1, list_scores1, width = 0.2, color = ['red', 'blue', 'brown', 'purple', 'orange'])
pyplot.title('Algoritma - Skor Oranı (Ağaç Yoğunluğu)')
pyplot.xlabel('Algoritmalar')
pyplot.ylabel('Skorlar')
pyplot.show()
pyplot.figure(figsize = (12, 4))
pyplot.bar(list_models2, list_scores2, width = 0.2, color = ['red', 'blue', 'brown', 'purple', 'orange'])
pyplot.title('Algoritma - Skor Oranı (Nüfus Yoğunluğu)')
pyplot.xlabel('Algoritmalar')
pyplot.ylabel('Skorlar')
pyplot.show()
pyplot.figure(figsize = (12, 4))
pyplot.bar(list_models3, list_scores3, width = 0.2, color = ['red', 'blue', 'brown', 'purple', 'orange'])
pyplot.title('Algoritma - Skor Oranı (Hava Kirliliği)')
pyplot.xlabel('Algoritmalar')
pyplot.ylabel('Skorlar')
pyplot.show()
|
'''
copyright: Shilong Bao
email: baoshilong@iie.ac.cn
'''
import getpass
import logging
import sys
import torch
import numpy as np
# import torch
import random
import os
class MyLog(object):
    """Thin wrapper around the logging module.

    Writes INFO-and-above records (with an empty formatter, i.e. the bare
    message) to *init_file* and echoes every record to the console.
    """
    def __init__(self, init_file=None, name=None):
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging.DEBUG)
        if init_file is None:
            # BUG FIX: was `assert False` — assertions are stripped under
            # `python -O`; raise an explicit error for missing input instead.
            raise ValueError("init_file (log file path) is required")
        logFile = init_file
        # Empty format string: log records are written verbatim.
        formatter = logging.Formatter('')
        logHand = logging.FileHandler(logFile, encoding="utf8")
        logHand.setFormatter(formatter)
        logHand.setLevel(logging.INFO)
        logHandSt = logging.StreamHandler()
        logHandSt.setFormatter(formatter)
        self.logger.addHandler(logHand)
        self.logger.addHandler(logHandSt)

    def debug(self, msg):
        """Log *msg* at DEBUG level (console only; file handler is INFO+)."""
        self.logger.debug(msg)

    def info(self, msg):
        """Log *msg* at INFO level."""
        self.logger.info(msg)

    def warn(self, msg):
        """Log *msg* at WARNING level."""
        self.logger.warning(msg)

    def error(self, msg):
        """Log *msg* at ERROR level."""
        self.logger.error(msg)

    def critical(self, msg):
        """Log *msg* at CRITICAL level."""
        self.logger.critical(msg)
def setup_seed(seed):
    """Seed every RNG in use (random, numpy, torch CPU and CUDA) and pin
    cuDNN to deterministic kernels so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Fixed (deterministic) cuDNN algorithms; benchmark autotuning disabled
    # so the algorithm choice cannot vary between runs.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.enabled = True
|
# Reader of fcs files
# (cc) 2017 Ali Rassolie
# Formagna
from pandas import DataFrame as df
import fcsparser
import numpy as np
__version__ = "0.1"
class fcsReader:
    """Reader for .fcs flow-cytometry files, backed by fcsparser.

    Parses the file once at construction; rdata()/rmeta() re-parse and
    return the event data / metadata respectively.
    """
    def __init__(self, path, **kwargs):
        self.path = path
        # Parse immediately so data/meta are available right away.
        self.meta, self.data = fcsparser.parse(path,
                                               meta_data_only=False,
                                               reformat_meta=True)

    def _reparse(self, path):
        # Resolve the effective path and re-parse the file.
        # BUG FIX: the original used `raise "..."` (a string is not an
        # exception — that itself raises TypeError); raise ValueError instead.
        # BUG FIX: an explicitly passed *path* was silently ignored whenever
        # self.path was already set; the explicit argument now wins.
        if path:
            self.path = path
        elif self.path:
            path = self.path
        else:
            raise ValueError("Path to .fcs file has not been provided")
        self.meta, self.data = fcsparser.parse(path,
                                               meta_data_only=False,
                                               reformat_meta=True)

    def rdata(self, path=None, **kwargs):
        """Return the event data from the .fcs file as a pandas.DataFrame,
        one column per channel."""
        self._reparse(path)
        return self.data

    def rmeta(self, path=None, **kwargs):
        """Return the metadata of the .fcs file (channel descriptions etc.)
        needed for later analysis; see the README for channel details."""
        self._reparse(path)
        return self.meta

    def save_data(self):
        # Not implemented yet.
        pass
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from datetime import datetime as dt
from time import sleep
from ping3 import ping
import asyncio
COLOR_RESET = "\033[0m"
COLOR_WHITE1 = "\033[38;5;255m"
COLOR_WHITE2 = "\033[38;5;251m"
COLOR_RED = "\033[38;5;009m"
COLOR_GREEN = "\033[38;5;002m"
def verbose_ping(host, args):
    """Ping *host* forever, printing one colored line per reply or timeout.

    args supplies size, timeout, ttl, interval and alter_fg (alternate the
    foreground color on even sequence numbers).
    """
    c = 0
    while True:
        time = ping(host, size=args.size, timeout=args.timeout, ttl=args.ttl, unit="ms")
        ts = dt.now()
        c += 1
        COLOR_WHITE = COLOR_WHITE2 if c % 2 == 0 and args.alter_fg else COLOR_WHITE1
        if time is not None:
            time = round(time, 2)
            if time < 0:
                time = 1
            print(f"{COLOR_WHITE}{ts} {COLOR_GREEN}OK{COLOR_RESET}{COLOR_WHITE} {host} seq={c} size={args.size} time={time:.2f} ms{COLOR_RESET}")
            # BUG FIX: when the RTT exceeded the interval the old code passed
            # a negative value to sleep(), raising ValueError; clamp at 0 so
            # slow replies simply trigger the next request immediately.
            sleep(max(args.interval - time / 1000, 0))
        else:
            # No sleep here: the ping timeout already consumed the interval.
            print(f"{COLOR_WHITE}{ts} {COLOR_RED}NG{COLOR_RESET}{COLOR_WHITE} {host} seq={c} size={args.size} timeout{COLOR_RESET}")
def main():
    """Parse ping-style CLI options and run the verbose ping loop."""
    parser = ArgumentParser()
    parser.add_argument("-l", dest="size", type=int, default=56, help="payload size (bytes)")
    parser.add_argument("-w", dest="timeout", type=float, default=1, help="request timeout (sec)")
    parser.add_argument("-i", dest="interval", type=float, default=1, help="request interval (sec)")
    parser.add_argument("-t", dest="ttl", type=int, default=56, help="TTL")
    parser.add_argument("-a", dest="alter_fg", action="store_true", default=False, help="color alternate line")
    parser.add_argument("host", help="destination addresses")
    args = parser.parse_args()
    verbose_ping(args.host, args)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to end the endless ping loop; exit quietly.
        pass
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from internal_backend.utilities.register import PantsReleases
from pants.base.revision import Revision
def _branch_name(revision_str):
    """Return the release branch name for a semver version string."""
    return PantsReleases._branch_name(Revision.semver(revision_str))
class ReleasesTest(unittest.TestCase):
    """Branch-name derivation from semver version strings."""

    # BUG FIX: assertEquals is a long-deprecated alias of assertEqual and was
    # removed in Python 3.12; use the canonical name.
    def test_branch_name_master(self):
        self.assertEqual('master', _branch_name('1.1.0-pre1'))

    def test_branch_name_stable(self):
        self.assertEqual('1.1.x', _branch_name('1.1.0-rc1'))
        self.assertEqual('2.1.x', _branch_name('2.1.0'))

    def test_branch_name_unknown_suffix(self):
        with self.assertRaises(ValueError):
            _branch_name('1.1.0-anything1')
|
#import pynput
import time
from graphics import *
import moons
from random import randint, seed
import physics as phy
import ships
from math import *
import towers
import readchar
# Scales the metal awarded per destroyed ship.
LOOT_MODIFIER = 3
# Global speed-up factor applied to the simulation clock.
TIME_MODIFIER = 3
seed()  # seed the RNG from system entropy
class Game_Engine():
    """Tower-defense game engine: moons orbit a planet and carry towers that
    shoot incoming ships.  Owns the graphics window, all game state, and the
    build/onslaught game loop."""

    def __init__(self):
        """Create the window, side panel, planet and twelve orbiting moons."""
        self.paused = False
        self.timeThisFrame = 0      # scaled duration of the previous frame
        self.hasStarted = False
        self.planet_health = 127
        self.wave_num = 1
        self.metal = 200            # player currency
        self.active = []            # ships currently on the field
        self.shots = []             # (Line, timestamp) pairs of beam graphics
        self.win = GraphWin('Test Wandow', 1000, 800, autoflush = False)
        self.backround = Circle(Point(500, 400), 700)
        self.backround.setFill("Black")
        self.backround.draw(self.win)
        # Divider between the play field and the side panel.
        line = Line(Point(800, 0), Point(800, 799))
        line.setFill("Cyan")
        line.draw(self.win)
        title = Text(Point(900, 40), 'Gravitational Defense')
        title.setSize(20)
        title.setTextColor('Cyan')
        title.draw(self.win)
        stats = Text(Point(840, 90), 'Wave:\n Ships Left:\nMetal:')
        stats.setSize(20)
        stats.setTextColor('Cyan')
        stats.draw(self.win)
        self.exit = Text(Point(900, 740), 'Press N to start wave')
        self.exit.setSize(20)
        self.exit.setTextColor('Cyan')
        self.exit.draw(self.win)
        self.defBuildText = 'Build Menu:\nMoon A\nMoon B\nMoon C\nMoon D\nMoon E\nMoon F\nMoon G\nMoon H\nMoon I\nMoon J\nMoon K\nMoon L'
        self.buildMenu = Text(Point(900, 400), 'Build Menu:\nMoon A\nMoon B\nMoon C\nMoon D\nMoon E\nMoon F\nMoon G\nMoon H\nMoon I\nMoon J\nMoon K\nMoon L')
        self.buildMenu.setSize(20)
        self.buildMenu.setTextColor('Cyan')
        self.buildMenu.draw(self.win)
        self.metalText = Text(Point(894, 113), str(self.metal))
        self.metalText.setSize(20)
        self.metalText.setTextColor('Cyan')
        self.metalText.draw(self.win)
        self.planet = Circle(Point(400,400), 25)
        self.planet.setFill('Green')
        self.planet.draw(self.win)
        self.moonlist = []
        for i in range(12):
            size = randint(0,3)
            if size == 0:
                size = 2
            self.moonlist.append(moons.Moon(75 + i * 25, size))
            self.moonlist[i].image.draw(self.win)
        update()

    def run(self):
        """Main game loop: alternate build phase and wave, forever."""
        while True:
            self.mode_build()
            self.mode_onslaught()
            self.wave_num += 1

    def shoot(self, moon, ship):
        """Fire *moon*'s tower at *ship*, draw the beam, destroy on kill."""
        ship.damage(moon.tower.damage, moon.tower.category)
        moon.tower.last_shot = time.time()
        newline = Line(Point(moon.position[0] + 400,moon.position[1] + 400), Point(ship.position[0] + 400, ship.position[1] + 400))
        newline.setFill(moon.tower.color)
        newline.draw(self.win)
        self.shots.append((newline, time.time()))
        # BUG FIX: was `< 0`, which left ships alive at exactly 0 health.
        if ship.health <= 0:
            self.destroy(ship, True)

    def destroy(self, ship, reward = False):
        """Remove *ship* from play; award metal when *reward* is set."""
        if reward:
            self.metal += int(ship.power * LOOT_MODIFIER)
            self.metalText.setText(str(self.metal))
        ship.image.undraw()
        self.active.remove(ship)

    def moon_tick(self):
        """Advance every moon and let armed towers fire at ships in range."""
        for m in self.moonlist:
            phy.move_moon(m, self.timeThisFrame)
            m.image.move(m.last_move[0], m.last_move[1])
            if m.tower is not None and m.tower.last_shot + m.tower.cooldown / TIME_MODIFIER < time.time():
                for s in self.active:
                    if phy.distance(s, m) < m.tower.range:
                        # shoot() may remove s from self.active; the break
                        # immediately after makes that mutation safe.
                        self.shoot(m, s)
                        break

    def ship_tick(self):
        """Advance every ship; handle escapes, planet hits and collisions."""
        # BUG FIX: iterate over a copy — destroy() removes from self.active,
        # and removing while iterating the same list skips ships.
        for s in list(self.active):
            phy.move_ship(s, self.moonlist, self.timeThisFrame)
            s.image.move(s.last_move[0], s.last_move[1])
            dist = phy.norm_sqr(s.position)
            if dist > 320000:
                # Escaped the field: gone, no reward.
                self.destroy(s)
            elif dist <= 625:
                # Reached the planet: damage it and recolor green -> red.
                self.planet_health -= s.power
                self.planet.setFill(color_rgb(127 - max(self.planet_health, 0), max(self.planet_health, 0), 0))
                self.destroy(s)
            elif phy.collided(s, self.moonlist):
                # Crashed into a moon: destroyed with salvage reward.
                self.destroy(s, True)
            else:
                s.shieldgen(self.timeThisFrame)

    def mode_onslaught(self):
        """Run one wave: build a ship budget, spawn ships, tick until clear."""
        self.active = []
        upcoming = []
        remaining_points = floor(4 + .4 * pow(self.wave_num, 2.4))
        while remaining_points > 0:
            print('Wave points: %d' % remaining_points)
            min_possible = min(ceil(log(remaining_points / 200, 2)), 4)
            if remaining_points >= 32:
                max_possible = 4
            else:
                max_possible = min(floor(log(remaining_points, 2)), 3)
            # NOTE(review): randint(min, 4) % (max+1) skews the class
            # distribution; kept as-is to preserve gameplay balance.
            num = randint(min_possible, 4) % (max_possible + 1)
            if num == 0:
                upcoming.append(ships.Corvette())
                remaining_points -= 1
            elif num == 1:
                upcoming.append(ships.Destroyer())
                remaining_points -= 2
            elif num == 2:
                upcoming.append(ships.Cruiser())
                remaining_points -= 4
            elif num == 3:
                upcoming.append(ships.Battleship())
                remaining_points -= 8
            else:
                upcoming.append(ships.Dreadnought())
                remaining_points -= 32
            print(upcoming[-1].name)
        self.active.append(upcoming.pop())
        self.active[0].image.draw(self.win)
        last_spawn = time.time()
        while len(self.active) != 0 or len(upcoming) != 0:
            startTime = time.time()
            try:
                fps = min(int(TIME_MODIFIER/self.timeThisFrame), 999)
            # BUG FIX: was a bare `except:`; only division by zero (first
            # frame, timeThisFrame == 0) is expected here.
            except ZeroDivisionError:
                pass
            else:
                print('FPS: %d' % fps)
            if len(upcoming) != 0 and startTime > last_spawn + 5 / TIME_MODIFIER:
                arriving = upcoming.pop()
                self.active.append(arriving)
                arriving.image.draw(self.win)
                last_spawn = startTime
            # BUG FIX: iterate a copy — removing from self.shots while
            # iterating it skipped every other expired beam.
            for s in list(self.shots):
                if s[1] + .1 < time.time():
                    s[0].undraw()
                    self.shots.remove(s)
            self.moon_tick()
            self.ship_tick()
            update()
            self.timeThisFrame = (time.time() - startTime) * TIME_MODIFIER

    def mode_build(self):
        """Interactive build phase: keys a-l select a moon, n starts the wave."""
        print('Metal: %d' % self.metal)
        while True:
            print(self.metal)
            key = ord(readchar.readchar())
            if key == 110:
                # 'n' starts the next wave.
                return
            elif key < 97 or key > 108:
                # Only 'a'..'l' select a moon.
                continue
            temp_moon = self.moonlist[key - 97]
            if temp_moon.tower == None:
                # Empty moon: offer the tower catalogue.
                self.buildMenu.setSize(15)
                self.buildMenu.setText('Moon ' + chr(key) + '\n\n(1)Mass Driver: 90\n\n(2)Laser Turret: 100\n\n(3)Plasma Launcher: 110\n\n(4)Tesla Arc: 120\n\n(5)Kinetic Artillery: 140\n\n(6)Fusion Cannon: 160')
                self.exit.setText('Press B to Exit')
                update()
                key2 = ord(readchar.readchar())
                temp_tower = None
                if key2 == 49:
                    temp_tower = towers.Mass_Driver()
                elif key2 == 50:
                    temp_tower = towers.Laser_Turret()
                elif key2 == 51:
                    temp_tower = towers.Plasma_Launcher()
                elif key2 == 52:
                    temp_tower = towers.Tesla_Arc()
                elif key2 == 53:
                    temp_tower = towers.Kinetic_Artillery()
                elif key2 == 54:
                    temp_tower = towers.Fusion_Cannon()
                if temp_tower != None and self.metal >= temp_tower.cost:
                    self.metal -= temp_tower.cost
                    self.buildMenu.setSize(20)
                    self.buildMenu.setText('Moon %c\n\n%s\n\n Level 1' % (key, temp_tower.name))
                    temp_moon.tower = temp_tower
                #assign tower or return back to moon selection
            else:
                # Occupied moon: offer upgrade (8) or scrap-for-refund (9).
                self.buildMenu.setSize(20)
                self.buildMenu.setText('Moon %c\n\n%s\n\n Level %d\n\n Press 8 to upgrade \n\n Press 9 to destroy' % (key, temp_moon.tower.name, temp_moon.tower.level))
                update()
                key2 = ord(readchar.readchar())
                cost = temp_moon.tower.upgrade_cost
                if key2 == 56 and self.metal >= cost and temp_moon.tower.upgrade():
                    self.metal -= cost
                elif key2 == 57:
                    self.metal += cost
                    temp_moon.tower = None
            # Reset the side panel for the next selection.
            self.buildMenu.setSize(20)
            self.buildMenu.setText(self.defBuildText)
            self.exit.setText('Press N to start wave')
            update()
if __name__ == '__main__':
    # Launch the game when run as a script.
    ge = Game_Engine()
    ge.run()
|
# Dataset / cross-validation configuration constants.
TARGET_COL = 'class'  # name of the label column
ID_COL = 'id'         # name of the row-identifier column
N_FOLD = 5            # number of cross-validation folds
N_CLASS = 3           # number of target classes
SEED = 42             # global random seed
|
# -*- coding: utf-8 -*-
import uuid
import inject
import hashlib
from model.users.users import User
import model.users.users
from model.mail.mail import Mail
from model.registry import Registry
class Ingreso:
    """Registration ('ingreso') workflow helpers: error reporting, final
    credentials mail, and email-confirmation code handling."""
    # Resolved at import time from the DI container.
    registry = inject.instance(Registry)
    reg = registry.getRegistry('ingreso')
    mail = inject.attr(Mail)

    @classmethod
    def _findEmail(cls, con, eid):
        # Shared lookup: fetch the Mail record for *eid* or fail loudly.
        emails = model.users.users.Mail.findById(con, eid)
        if emails is None or len(emails) <= 0:
            raise Exception()
        return emails[0]

    @classmethod
    def sendErrorEmail(cls, error, names, dni, email, tel):
        """Mail a registration error report to the configured address."""
        From = cls.reg.get('error_mail_from')
        subject = "{} - {} - {} - {} - {}".format(error, dni, names, email, tel)
        To = cls.reg.get('error_mail_to')
        mail = cls.mail.createMail(From, To, subject)
        text = cls.mail.getTextPart("error: {}\ndni: {}\nNombres: {}\nEmail: {}\nTel: {}".format(error, dni, names, email, tel))
        mail.attach(text)
        cls.mail._sendMail(From, [To], mail)
        return True

    @classmethod
    def sendFinalEmail(cls, user, password, email):
        """Mail the final credentials to the user (CC the 'red' mailbox)."""
        From = cls.reg.get('final_mail_from')
        subject = cls.reg.get('final_mail_subject')
        template = cls.reg.get('final_mail_template')
        To = email
        replace = [
            ('###NAME###', user.name),
            ('###LASTNAME###', user.lastname),
            ('###PASSWORD###', password),
            ('###USERNAME###', user.dni),
            ('###EMAIL###', email)
        ]
        cls.mail.sendMail(From, [To, "red@econo.unlp.edu.ar"], subject, replace, html=template)
        return True

    @classmethod
    def sendEmailConfirmation(cls, con, name, lastname, eid):
        """Generate a 5-char confirmation code, store it and mail it."""
        email = cls._findEmail(con, eid)
        # Renamed from `hash`, which shadowed the builtin of the same name.
        digest = hashlib.sha1(str(uuid.uuid4()).encode('utf-8')).hexdigest()
        code = digest[:5]
        email.hash = code
        email.persist(con)
        From = cls.reg.get('confirm_mail_from')
        subject = cls.reg.get('confirm_mail_subject')
        template = cls.reg.get('confirm_mail_template')
        To = email.email
        replace = [
            ('###CODE###', code),
            ('###NAME###', name),
            ('###LASTNAME###', lastname)
        ]
        cls.mail.sendMail(From, [To], subject, replace, html=template)
        return True

    @classmethod
    def checkEmailCode(cls, con, eid, code):
        """Return True when *code* matches the stored confirmation code."""
        return (cls._findEmail(con, eid).hash == code)

    @classmethod
    def confirmEmail(cls, con, eid, code):
        """Mark the email confirmed when *code* matches; return success."""
        email = cls._findEmail(con, eid)
        if email.hash == code:
            email.confirmed = True
            email.persist(con)
            return True
        return False
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from pants.engine.target import (
COMMON_TARGET_FIELDS,
FieldSet,
MultipleSourcesField,
Target,
generate_multiple_sources_field_help_message,
)
from pants.util.strutil import help_text
class CuePackageSourcesField(MultipleSourcesField):
    """Source-file globs for a CUE package; matches `*.cue` by default."""
    default = ("*.cue",)
    help = generate_multiple_sources_field_help_message(
        "Example: `sources=['schema.cue', 'lib/**/*.cue']`"
    )
class CuePackageTarget(Target):
    """Target type representing one directory's worth of CUE package files."""
    alias = "cue_package"
    core_fields = (
        *COMMON_TARGET_FIELDS,
        CuePackageSourcesField,
    )
    help = help_text(
        """
        The `cue_package` target defines a CUE package. Within a module, CUE organizes files grouped
        by package. A package can be defined within the module or externally. Definitions and
        constraints can be split across files within a package, and even organized across
        directories.
        CUE docs: https://cuelang.org/docs/concepts/packages/
        """
    )
@dataclass(frozen=True)
class CueFieldSet(FieldSet):
    """FieldSet used to run CUE tooling against a `cue_package` target."""
    required_fields = (CuePackageSourcesField,)
    sources: CuePackageSourcesField
|
from django.shortcuts import render , HttpResponse
# Create your views here.
def index(request):
    """Render the landing page."""
    return render(request, "index.html")
def about(request):
    """Render the about page."""
    return render(request, "about.html")
"""
def detail(request,id):
return HttpResponse("Detail : " + str(id))
"""
|
# Config file for easily changing app behaviour
# data_filepath = 'Data/e0103.csv'
data_filepath = 'Data/paf-prediction-challenge-database-1.0.0/n08'  # path to the input recording
signal_duration = 20  # Signal duration in seconds
p_window_center = 0.12109375  # Initial P-window centre, in seconds back from the QRS peak. THIS CAN BREAK THINGS
p_window_duration = 0.15625  # Duration of the P window in seconds. THIS CAN BREAK THINGS
premature_signal_coefficient = 0.99  # Threshold for RR interval i relative to RR interval (i-1) for premature state=1
upsample_coefficient = 2  # Upsample by this factor for better interfacing with heartpy
"""
Inference model with Gaussian process for sampling isoform proportions.
"""
import numpy as np
def erf(x):
    """Polynomial approximation of the error function (max error ~1.5e-7).

    Implements Abramowitz & Stegun formula 7.1.26 and uses the identity
    erf(-x) = -erf(x) to handle negative arguments.
    """
    sign = -1 if x < 0 else 1
    x = abs(x)
    # A&S 7.1.26 coefficients (a1..a5) and p.
    a = (0.254829592, -0.284496736, 1.421413741, -1.453152027, 1.061405429)
    p = 0.3275911
    t = 1.0 / (1.0 + p * x)
    # Horner-style evaluation of the degree-4 polynomial in t.
    poly = ((((a[4]*t + a[3])*t) + a[2])*t + a[1])*t + a[0]
    y = 1.0 - poly * t * np.exp(-x * x)
    return sign * y
def gamma_pdf(x, k, theta, log=True):
    """
    Calculate the probability density of Gamma distribution.

    Parameters
    ----------
    x : float
        The variable for calculating the probability density.
    k : float
        The shape of the Gamma distribution (must be non-zero).
    theta : float
        The scale of the Gamma distribution.
    log : bool
        If true, the return value is at log scale.

    Returns
    -------
    pdf : numpy float
        The probability density of x, or None when k == 0.
    """
    # Local import: the module-level imports only bring in numpy.
    import math

    if k == 0:
        print("The shape of Gamma distribution cannot be 0, please check!")
        return None
    # log-density: -lgamma(k) - k*log(theta) + (k-1)*log(x) - x/theta.
    # math.lgamma replaces np.math.lgamma, an alias removed in NumPy 2.0.
    pdf = -math.lgamma(k) - k*np.log(theta)
    pdf += (k-1)*np.log(x) - x/theta
    if not log:
        pdf = np.exp(pdf)
    return pdf
def normal_pdf(x, mu, cov, log=True):
    """
    Probability density of a (multivariate) Gaussian distribution.

    Parameters
    ----------
    x : float, 1-D array_like (K, ), or 2-D array_like (K, N)
        The variable(s) at which to evaluate the density.
    mu : float or 1-D array_like, (K, )
        The mean of the Gaussian distribution.
    cov : float or 2-D array_like, (K, K)
        The variance or covariance matrix.
    log : bool
        If true, return the density at log scale.

    Returns
    -------
    pdf : numpy float (N == 1) or 1-D array (N > 1)
        The probability density of x; None when the covariance determinant
        is not positive.
    """
    # Normalise x into an (N, K) matrix of residuals around the mean.
    if np.ndim(mu) == 0:
        x = np.reshape(x, (-1, 1))
    elif np.ndim(x) <= 1:
        x = np.reshape(x, (1, -1))
    x = x - np.array(mu)
    N, K = x.shape

    cov = np.array(cov)
    if cov.ndim < 2:
        cov = cov.reshape(-1, 1)
    cov_inv = np.linalg.inv(cov)
    cov_det = np.linalg.det(cov)
    if cov_det <= 0:
        print("Warning: the det of covariance is not positive!")
        return None

    # Quadratic form x_i^T Sigma^{-1} x_i for every row at once.
    quad = np.einsum('ij,jk,ik->i', x, cov_inv, x)
    pdf_all = -(K*np.log(2*np.pi) + np.log(cov_det)) / 2.0 - quad / 2.0
    if not log:
        pdf_all = np.exp(pdf_all)
    return pdf_all[0] if N == 1 else pdf_all
def trun_normal_pdf(x, mu, sigma, a, b, log=True):
    """
    Probability density of a Normal(mu, sigma^2) truncated to [a, b].

    Parameters
    ----------
    x : float
        The variable at which to evaluate the density.
    mu : float
        The mean of the underlying Gaussian.
    sigma : float
        The standard deviation of the underlying Gaussian.
    a, b : float
        Lower and upper truncation bounds.
    log : bool
        If true, return the density at log scale.

    Returns
    -------
    pdf : float
        The (possibly log) probability density of x.
    """
    # Shift everything so the distribution is centred at zero.
    x, a, b = x - mu, a - mu, b - mu
    # Untruncated Gaussian density at x.
    dens = np.exp(-0.5 * (x / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
    # Probability mass inside [a, b] via the erf-based Normal CDF:
    # ((1 + erf(b/..)) - (1 + erf(a/..))) / 2 == (erf(b/..) - erf(a/..)) / 2.
    root2 = np.sqrt(2)
    mass = (erf(b / sigma / root2) - erf(a / sigma / root2)) / 2.0
    pdf = dens / abs(mass)
    return np.log(pdf) if log else pdf
def GP_K(X, theta):
    """
    Covariance of Gaussian process generator.
    It is based on a common squared-exponential kernel, with two parameters.

    Parameters
    ----------
    X : 1-D array_like, (N, )
        The x-axis of the Gaussian process, e.g., time points.
    theta : 1-D array_like, (2,)
        The array of the two parameters of the squared-exponential kernel:
        theta[0] is the signal variance, theta[1] the squared length-scale.

    Returns
    -------
    K : 2-D array_like, (N, N)
        The covariance matrix of the N points at x-axis.
    """
    X = np.asarray(X, dtype=float)
    # All pairwise differences via broadcasting: replaces the original
    # O(N^2) Python double loop with one vectorized expression that
    # computes the identical values.
    diff = X[:, None] - X[None, :]
    return theta[0] * np.exp(-0.5 * diff**2 / theta[1])
def Geweke_Z(X, first=0.1, last=0.5):
    """
    Geweke diagnostics for MCMC chain convergence.

    See Geweke J. Evaluating the accuracy of sampling-based approaches to the
    calculation of posterior moments[M]. Minneapolis, MN, USA: Federal Reserve
    Bank of Minneapolis, Research Department, 1991.
    and https://pymc-devs.github.io/pymc/modelchecking.html#formal-methods

    Parameters
    ----------
    X : 1-D array_like, (N, )
        The uni-variate MCMC sampled chain for convergence diagnostic.
    first : float
        The proportion of first part in Geweke diagnostics.
    last : float
        The proportion of last part in Geweke diagnostics.

    Returns
    -------
    Z : float or None
        The Z score of Geweke diagnostics; None when both segments have
        zero variance (the score would be undefined).
    """
    n = X.shape[0]
    head = X[:int(first * n)]
    tail = X[int(last * n):]
    spread = np.sqrt(np.var(head) + np.var(tail))
    if spread == 0:
        return None
    return abs(head.mean() - tail.mean()) / spread
def Psi_GP_MH(R_mat, len_isos, prob_isos, X=None, Ymean=None, var=None,
              theta1=3.0, theta2=None, M=20000, initial=1000, gap=500,
              randomS=None, theta2_std=1.0, theta2_low=0.00001, theta2_up=100):
    """
    Estimate the proportion of C isoforms at T time points with all reads
    by MCMC samplling (MH algorithm) combined with a GP prior.

    Parameters
    ----------
    R_mat : list of 2-D array_like, of length T
        A set of reads identities of belonging to C isoforms
    len_isos : list of 2-D array_like, of length T
        A set of effective length of C isoforms
    prob_isos : list of 2-D array_like, of length T
        A set of probablity for isoform specific reads
    X : 1-D array_like, (T, )
        An array of time points.
    Ymean : 2-D array_like, (C, T)
        The means for Y.
    var : 1-D array_like, (C-1, )
        An array of variation of each y.
    theta1 : float
        The fixed hyper-parameter theta1
    theta2 : float
        The fixed hyper-parameter theta2. If it is None, then sample it.
    theta2_std : float
        The jump std of hyper-parameter theta2 for each dimension, default=1.0
    theta2_low : float
        The lower bound of Truncated Normal distribution for sampling theta2.
    theta2_up : float
        The upper bound of Truncated Normal distribution for sampling theta2.
    randomS : float
        The fixed seeds for random number generator. None means ignoring it.
    M : int
        the maximum iterations of in MCMC sampler, default=20000
    initial : int
        the minmum iterations of in MCMC sampler, default=1000
    gap : int
        the gap iterations of in MCMC sampler, default=500

    Returns
    -------
    Psi_all : 3-D array_like, (m, C, T)
        The the proposed m proportion of C isoforms of T time points
    Y_all : 3-D array_like, (m, C, T)
        The the proposed m latent y for C isoforms of T time points
    theta2_all : 2-D array_like, (m, C-1)
        The the proposed m hyper-parameter theta2 for C-1 isoforms
    Pro_all : 1-D array_like, (m,)
        The the probability for accepted proposals
    Lik_all : 1-D array_like, (m,)
        The the probability for accepted proposals
    cnt : int
        The number of acceptances
    m : int
        The number of iterations
    """
    T = len(len_isos)
    C = len(len_isos[0])
    if X is None: X = np.arange(T)
    if Ymean is None: Ymean = np.zeros((C,T))
    if randomS is not None: np.random.seed(randomS)
    # Scrub NaNs (detected via x != x) from the inputs and drop reads with
    # no usable isoform assignment. NOTE: mutates the caller's arrays.
    for t in range(T):
        idx = (len_isos[t] != len_isos[t])
        len_isos[t][idx] = 0.0
        prob_isos[t][:,idx] = 0.0
        R_mat[t][:,idx] = False
        idx = np.where(R_mat[t] != R_mat[t])
        R_mat[t][idx] = False
        idx = np.where(prob_isos[t] != prob_isos[t])
        prob_isos[t][idx] = 0.0
        idx = (R_mat[t].sum(axis=1) > 0) * (prob_isos[t].sum(axis=1) > 0)
        R_mat[t] = R_mat[t][idx,:]
        prob_isos[t] = prob_isos[t][idx,:]

    # step 0: MCMC fixed initializations
    if var is None:
        var = 0.05 * np.ones(C-1)
    theta_now = np.zeros((C-1, 2))
    theta_now[:,0] = theta1
    if theta2 is not None:
        theta_now[:,1] = theta2
    else:
        # NOTE(review): np.max(T)-np.min(T) of the scalar T is always 0, so
        # this reduces to 0.1*0.001**2 = 1e-7 — presumably np.max(X)-np.min(X)
        # was intended; confirm against the upstream source.
        theta_now[:,1] = 0.1 * (np.max(T)-np.min(T)+0.001)**2 #0.75
    Y_now = Ymean + 0.0
    Ymean = np.zeros((C,T))
    psi_now = np.zeros((C, T))
    fsi_now = np.zeros((C, T))
    for t in range(T):
        # psi: softmax of Y; fsi: effective-length-weighted proportions.
        psi_now[:,t] = np.exp(Y_now[:,t]) / np.sum(np.exp(Y_now[:,t]))
        fsi_now[:,t] = len_isos[t]*psi_now[:,t]/np.sum(len_isos[t]*psi_now[:,t])
    P_now, L_now = 0, 0
    cov_now = np.zeros((T, T, C-1))
    for c in range(C-1):
        cov_now[:,:,c] = GP_K(X, theta_now[c,:])
        P_now += normal_pdf(Y_now[c,:], Ymean[c,:], cov_now[:,:,c])
    for t in range(T):
        P_now += np.log(np.dot(R_mat[t]*prob_isos[t], fsi_now[:, t])).sum()
        L_now += np.log(np.dot(R_mat[t]*prob_isos[t], fsi_now[:, t])).sum()

    # MCMC running
    Y_try = np.zeros((C, T))
    Y_all = np.zeros((M, C, T))
    psi_try = np.zeros((C, T))
    fsi_try = np.zeros((C, T))
    Psi_all = np.zeros((M, C, T))
    cov_try = np.zeros((T, T, C-1))
    theta_try = np.zeros((C-1, 2))
    theta2_all = np.zeros((M, C-1))
    theta_try[:, 0] = theta1
    if theta2 is not None:
        theta_try[:,1] = theta2
        cov_try[:,:,:] = GP_K(X, theta_try[0,:]).reshape(T,T,1)
    cnt = 0
    Pro_all = np.zeros(M)
    Lik_all = np.zeros(M)
    for m in range(M):
        P_try, L_try, Q_now, Q_try = 0, 0, 0, 0

        # step 1: propose a value
        for c in range(C-1):
            # sample single theta2 for all isoforms
            if theta2 is None and c==0:
                theta_try[:,1] = np.random.normal(theta_now[c,1], theta2_std)
                # Reject-and-resample until theta2 falls inside its bounds.
                while theta_try[c,1]<theta2_low or theta_try[c,1]>theta2_up:
                    theta_try[:,1] = np.random.normal(theta_now[c,1],theta2_std)
                # NOTE(review): only slice c==0 of cov_try is refreshed when
                # theta2 is sampled; for C > 2 the other slices stay stale —
                # confirm against the upstream source.
                cov_try[:,:,c] = GP_K(X, theta_try[c,:])
                # Proposal (jump) densities for the Hastings correction.
                Q_now += trun_normal_pdf(theta_now[c,1], theta_try[c,1],
                                         theta2_std, theta2_low, theta2_up)
                Q_try += trun_normal_pdf(theta_try[c,1], theta_now[c,1],
                                         theta2_std, theta2_low, theta2_up)
            # Jump covariance: a scaled-down copy of the prior covariance.
            cov_jmp = cov_try[:,:,c] * var[c] * 5 / (T * C * theta1)
            Y_try[c,:] = np.random.multivariate_normal(Y_now[c,:], cov_jmp)
            Q_now += normal_pdf(Y_now[c,:], Y_try[c,:], cov_jmp)
            Q_try += normal_pdf(Y_try[c,:], Y_now[c,:], cov_jmp)
            P_try += normal_pdf(Y_try[c,:], Ymean[c,:], cov_try[:,:,c])
        for t in range(T):
            psi_try[:,t] = np.exp(Y_try[:,t]) / np.sum(np.exp(Y_try[:,t]))
            fsi_try[:,t] = (len_isos[t]*psi_try[:,t] /
                            np.sum(len_isos[t]*psi_try[:,t]))
            _lik_list = np.dot(R_mat[t]*prob_isos[t], fsi_try[:,t])
            # if min(_lik_list) <= 0:
            #     P_try, L_try = -np.inf, -np.inf
            # else:
            #     P_try += np.log(_lik_list).sum()
            #     L_try += np.log(_lik_list).sum()
            P_try += np.log(_lik_list).sum()
            L_try += np.log(_lik_list).sum()

        # step 2: calculate the MH ratio; accept or reject the proposal
        alpha = np.exp(min(P_try+Q_now-P_now-Q_try, 0))
        # NOTE(review): np.exp never returns None, so this guard is dead code.
        if alpha is None:
            print("alpha is none!")
        elif np.random.rand(1) < alpha:
            #print alpha
            cnt += 1
            P_now = P_try + 0.0
            L_now = L_try + 0.0
            Y_now = Y_try + 0.0
            cov_now = cov_try + 0.0
            psi_now = psi_try + 0.0
            fsi_now = fsi_try + 0.0
            theta_now = theta_try + 0.0
        Pro_all[m] = P_now
        Lik_all[m] = L_now
        Y_all[m,:,:] = Y_now
        Psi_all[m,:,:] = psi_now
        theta2_all[m,:] = theta_now[:,1]

        #step 3. convergence diagnostics
        if m >= initial and m % gap == 0:
            conv = 1
            for c in range(C-1):
                for t in range(T):
                    # Z = Geweke_Z(Y_all[:m,c,t])
                    Z = Geweke_Z(Psi_all[:m,c,t])
                    if Z is None or Z > 2:
                        conv = 0
                        break
                #print("psi converged!")
                if theta2 is None:
                    Z = Geweke_Z(theta2_all[:m, c])
                    if Z is None or Z > 2:
                        conv = 0
                        break
                if conv == 0: break
            if conv == 1:
                # Converged: truncate all chains to the samples drawn so far.
                Pro_all = Pro_all[:m,]
                Lik_all = Lik_all[:m,]
                Y_all = Y_all[:m,:,:]
                Psi_all = Psi_all[:m,:,:]
                theta2_all = theta2_all[:m,:]
                break
    # if m >= initial and conv == 0:
    #     print("Warning: Not converged. Need a longer MCMC chain.")
    return Psi_all, Y_all, theta2_all, Pro_all, Lik_all, cnt, m
|
# Demonstrate complex-number arithmetic: add two literals and show the
# result together with its type.
c1 = 4 + 5j
c2 = 10 + 5j
c = c1 + c2
print(c, type(c))
"""Init file"""
from . import batchflow
from .src import * # pylint: disable=wildcard-import
__version__ = '0.1.0'
|
# Repeatedly append a 1 MiB block of 'A's to meta.info.txt. The loop has no
# exit condition: it runs until a write fails (e.g. the disk fills up), at
# which point the OSError propagates and the `with` block closes the file.
chunk = 'A' * 1024 * 1024  # renamed from `str`, which shadowed the builtin
with open('meta.info.txt', 'w') as fout:
    while True:
        fout.write(chunk)
|
try:
from . import Ver
from . import DataType
from . import Log
from . import i18n
from . import ConnectCore
except ModuleNotFoundError:
import Ver
import DataType
import Log
import i18n
import ConnectCore
# Library version string, taken from the Ver module.
Version = Ver.V
# Reconnect after RetryWaitTime seconds.
RetryWaitTime = 3
# After ScreenLongTimeOut seconds, decide the current screen has no
# recognizable target. For situations that need extra waiting, e.g. kicking
# another login session or posting an article.
# Recommended not to go below 10 seconds: kicking another login session can
# take around six to seven seconds.
ScreenLongTimeOut = 10.0
# After ScreenTimeOut seconds, decide the current screen has no
# recognizable target.
ScreenTimeOut = 3.0
# Default language.
Language = i18n.Language.Chinese
# Default connection mode.
ConnectMode = ConnectCore.ConnectMode.WebSocket
# Default log level.
LogLevel = Log.Level.INFO
# By default, do not kick other login sessions.
KickOtherLogin = False
# By default, log in to PTT1.
Host = DataType.Host.PTT1
|
import sys
import numpy as np
from area import *
MAX_SHADE = 256
# All Img properties are initialised on declaration of object.
class Img:
    """A greyscale image plus the connected areas extracted from it."""

    def __init__(self, name, height, width, test=None):
        self.name = name
        self.height = height
        self.width = width
        # The image is converted from string of bytes to a 2D numpy array.
        self.array = bytes2arr(self.name, height, width)
        # List of each individual area with each one's colour and co-ordinate set.
        self.areas = get_areas(self.array)

    def print_areas(self):
        """Print, one line per shade 0..MAX_SHADE-1, how many areas have that colour."""
        from collections import Counter
        # Counter yields identical per-shade counts in a single O(n) pass,
        # replacing MAX_SHADE separate O(n) list.count() scans.
        counts = Counter(area.colour for area in self.areas)
        for shade in range(MAX_SHADE):
            print(counts[shade])
# Converts the input byte string into a numpy 2D array with input dimensions. Returns the array.
def bytes2arr(file_name, height, width):
    """Read `file_name` as raw bytes and return a (height, width) numpy array.

    Exits the process (status 1) on read failure or when the byte count does
    not match the requested dimensions.
    """
    try:
        with open(file_name, 'rb') as f:
            arr = np.array([list(f.read())])
    except IOError:
        print("Could not read file:", file_name)
        sys.exit(1)
    try:
        arr.shape = (height, width)
    except ValueError:
        # Fixed message: the dimensions are height and *width*, not "weight".
        print("ValueError: Your image does not fit to your given height and width dimensions.")
        sys.exit(1)
    return arr
# Every pixel in the image belongs to a particular area, and no pixel belongs to multiple areas.
# Therefore we can iterate over every single pixel given that it has not already been found to belong to another area.
# Returns a list containing all areas in the image, including their colours and set of all co-ordinates.
def get_areas(img_arr):
    """Partition `img_arr` into its contiguous areas and return them as a list."""
    areas = []
    seen = set()
    height, width = img_arr.shape
    for row in range(height):
        for col in range(width):
            if (row, col) in seen:
                continue
            # Any single coordinate lying inside the area is enough to build it.
            area = Area(img_arr, (row, col))
            areas.append(area)
            # Claim every coordinate of the new area so it is not rebuilt.
            seen.update(area.coords)
    return areas
|
def testData():
    """Run the solver on test.txt and compare against answer.txt.

    Returns True when the computed result matches the expected answer.
    """
    # Context managers close both handles; the original leaked both
    # file descriptors.
    with open('test.txt', 'r') as otest:
        test = otest.readlines()
    with open('answer.txt', 'r') as oanswer:
        answer = oanswer.readline()
    status = False
    print("Runs test data")
    result = runCode(test)
    if result == int(answer):  # not always int
        status = True
    print("Correct answer: " + answer + "My answer: " + str(result))
    return status
def runCode(data):
    """Replay every navigation command and return the final Manhattan distance."""
    print("Runs code")
    facing = 'E'
    ship = [0, 0]
    waypoint = [10, -1]
    print("Ship: ", ship)
    print("Waypoint: ", waypoint)
    # Each line is one command: an action letter followed by an integer.
    for line in data:
        print(line.strip())
        action = line[0]
        amount = int(line[1:])
        facing, ship, waypoint = execute_command(facing, ship, action, amount, waypoint)
        print("Ship: ", ship)
        print("Waypoint: ", waypoint)
    distance = abs(ship[0]) + abs(ship[1])
    print(distance)
    return distance
def execute_command(shipdirection, ship, heading, steps, waypoint):
    """Apply one navigation command (AoC 2020 day 12, part 2 rules).

    `ship` and `waypoint` are [east-west, north-south] pairs in a y-down
    frame: S adds to index 1, N subtracts (matching the rest of this
    script). Both lists are mutated in place and returned along with the
    (unchanged) ship direction.
    """
    # Rotation amount in quarter turns. The original used `steps % 89`,
    # which only coincidentally maps multiples of 90 to the right count;
    # integer division states the intent and is correct for any multiple
    # of 90 degrees.
    degrees = (steps // 90) % 4
    # Ship moves toward the waypoint `steps` times; the waypoint stays.
    if heading == 'F':
        ship[0] += waypoint[0]*steps
        ship[1] += waypoint[1]*steps
    elif heading == 'L':
        # Re-write a left turn as the equivalent right turn...
        degrees = (4-degrees)%4
    # Ship stays, waypoint turns (both directions)
    if heading == 'R' or heading == 'L':
        # ...so one set of rotation rules covers both turn directions.
        if degrees == 1:
            x = waypoint[0]
            y = waypoint[1]
            waypoint[0] = -y
            waypoint[1] = x
        elif degrees == 2:
            waypoint[0] = -waypoint[0]
            waypoint[1] = -waypoint[1]
        elif degrees == 3:
            x = waypoint[0]
            y = waypoint[1]
            waypoint[0] = y
            waypoint[1] = -x
    # Plain compass letters move the waypoint, not the ship.
    if heading == 'E':
        waypoint[0] += steps
    elif heading == 'W':
        waypoint[0] -= steps
    elif heading == 'S':
        waypoint[1] += steps
    elif heading == 'N':
        waypoint[1] -= steps
    return shipdirection, ship, waypoint
#Runs testdata
# Run the sample first; only attempt the real puzzle input when it passes.
testResult = testData()
if testResult == True:
    print("Test data parsed. Tries to run puzzle.")
    # NOTE(review): this handle is never closed; tolerable for a one-shot script.
    opuzzle = open('input.txt', 'r')
    puzzle = opuzzle.readlines()
    finalResult = runCode(puzzle)
    print(finalResult)
else:
    print("Test data failed. Code is not correct. Try again.")
|
import numpy as np
from vpython import *
def valores (particula, velocidad_inicial,angulo_inicial,largo_suelo, campo_electrico):
    """Animate a charged particle launched in a uniform electric field.

    particula: [charge (C), mass (kg)]; velocidad_inicial: launch speed;
    angulo_inicial: launch angle in degrees; largo_suelo: ground length
    (scaled x100 for display); campo_electrico: field strength.
    """
    largo_suelo = largo_suelo*100
    # Velocity components:
    velocidad_X = velocidad_inicial * np.cos(np.deg2rad(angulo_inicial))
    velocidad_Y = velocidad_inicial * np.sin(np.deg2rad(angulo_inicial))
    # Acceleration from the electric force: a = qE/m.
    aceleracion = ((particula[0]*campo_electrico)/particula[1])
    # Total flight time of the parabolic trajectory.
    tiempo_total = (2*velocidad_Y)/(aceleracion*-1)
    # Ground.
    suelo = box(
        pos= vector(largo_suelo/2,-1,0),
        size=vector(largo_suelo,1,10),
        color= color.green
    )
    # Cannon.
    canyon = cylinder(
        pos = vector(0,0,0),
        axis= vector(
            2 * np.cos(np.deg2rad(angulo_inicial)),
            2 * np.sin(np.deg2rad(angulo_inicial)),
            0
        )
    )
    # The moving particle.
    bola = sphere(pos = vector(0,0,0))
    bola.trail = curve(color=bola.color)
    # Arrow showing the particle's velocity.
    flecha = arrow(
        pos= vector(0,0,0),
        axis = vector(velocidad_X/1000000,velocidad_Y/1000000,0),
        color = color.yellow
    )
    # Y-position label (the original comments had X and Y swapped here).
    text_Y = label(
        pos= bola.pos,
        text = "posicion y = 0 m",
        xoffset=1,
        yoffset = 80,
        space = bola.radius,
        font = "sans",
        box = False,
        height = 10
    )
    # X-position label.
    text_X = label(
        pos= bola.pos,
        text = "posicion x = 0 m",
        xoffset=1,
        yoffset = 40,
        space = bola.radius,
        font = "sans",
        box = False,
        height = 10
    )
    # Elapsed-time label.
    text_tiempo = label(
        pos = bola.pos,
        text = "tiempo = 0 s",
        xoffset = 1,
        yoffset = 60,
        space = bola.radius,
        font = "sans",
        box = False,
        height = 10
    )
    tiempo_inicial = 0
    while tiempo_inicial <= tiempo_total:
        if ((velocidad_X*tiempo_inicial*100)<= largo_suelo):
            # Ball position, x100 so the motion is visible on screen.
            bola.pos = vector (
                (velocidad_X*tiempo_inicial)*100,
                (velocidad_Y*tiempo_inicial+0.5*aceleracion*tiempo_inicial**2)*100,
                0
            )
            # Arrow position, x100 to keep tracking the ball.
            flecha.pos = vector(
                velocidad_X*tiempo_inicial*100,
                (velocidad_Y*tiempo_inicial+0.5*aceleracion*tiempo_inicial**2)*100,
                0
            )
            # Object updates:
            # particle trail
            bola.trail.append(pos=bola.pos)
            # velocity arrow
            flecha.axis = vector(velocidad_X/1000000, (velocidad_Y+aceleracion*tiempo_inicial)/1000000,0)
            # Labels:
            # x position
            text_X.pos = bola.pos
            text_X.text = "posicion x = %s m" % str(velocidad_X * tiempo_inicial)
            # y position
            # NOTE(review): this uses -0.5*a*t**2 while bola.pos uses
            # +0.5*a*t**2 — one of the two signs is presumably wrong; confirm.
            text_Y.pos = bola.pos
            text_Y.text= "posicion y = %s m" % str(velocidad_Y * tiempo_inicial - 0.5 * aceleracion * tiempo_inicial**2)
            # elapsed time
            text_tiempo.pos = bola.pos
            text_tiempo.text= "tiempo = %s s" % str(tiempo_inicial)
        # Advance time in 100 equal steps.
        tiempo_inicial = tiempo_inicial + tiempo_total/100
        # Frames per second / animation speed.
        rate(10)
"""
#----------------------------------------------------------------------------#
#particulas = carga , masa
particula = [-1.6*(10**-19),9.1*(10**-31)]
el plano esta en centimetros
#variables
velocidad_inicial=5*(10**6)
angulo_inicial = 90
campo_electrico = 714
en centimetros
largo_suelo = 0.06
#----------------------------------------------------------------------------#
"""
"""
particula = [1.6*(10**-19), 9.1*(10**-27), "blue"]
velocidad_inicial=5*(10**4)
angulo_inicial = 85
campo_electrico = -514
largo_suelo = 0.6
valores(particula,velocidad_inicial,angulo_inicial,largo_suelo,campo_electrico)
"""
|
import pytest
from runlog import testLog
from page.call_page import Call
import logging
class TestCase_Call():
    """Dialer-app test class; only methods from the Call page class may be used."""

    def setup_class(self):
        # One device connection for the whole class.
        self.tc = Call()
        self.tc.mconnect()

    def setup(self):
        # Launch the dialer app before every test.
        self.tc.mapp_start(self.tc.call_info['packagename'])

    def test_make_call(self):
        # Place and then end a call.
        self.tc.make_call()
        self.tc.end_call()

    # Menu fixtures loaded from call.yaml: (secondary, third) menu pairs.
    data=Call().get_data('call.yaml')
    meun_data = [(x,y) for x,y in
                 zip(data['secondary_meun'],data['third_meun'])]

    @pytest.mark.parametrize('s_meun,t_meun',meun_data)
    def test_meun(self,s_meun,t_meun):
        """Walk through every menu entry."""
        self.tc.click_all_meun(s_meun,t_meun)

    def teardown(self):
        # Stop the app after each test.
        self.tc.mapp_stop(self.tc.call_info['packagename'])
        print('执行完成')
print('执行完成')
if __name__ == "__main__":
testLog.startLog()
pytest.main([r'D:\mytools\SmokingTestCase\testcase\test_call.py::TestCase_Call::test_meun'])
|
from common import *
def partition(sortable, low, pivot):
    """Partition sortable[low..pivot] around the value at index `pivot`.

    Scans inward from both ends, swapping out-of-place pairs, then swaps the
    pivot value into position when needed. Returns the index where the two
    scan pointers met.

    NOTE(review): correctness hinges on the exact <=/>= scan conditions and
    the `low != high` stop test; verify against the test-suite before
    changing anything here.
    """
    high = pivot-1
    # Both pointers stop as soon as they meet.
    def keep_going(): return low != high
    while keep_going():
        # Advance `low` past elements already <= the pivot value.
        while keep_going() and sortable[low] <= sortable[pivot]:
            low += 1
        # Retreat `high` past elements already >= the pivot value.
        while keep_going() and sortable[high] >= sortable[pivot]:
            high -= 1
        swapsVarsInArray(sortable, low, high)
    # Place the pivot value at the meeting point when it is smaller.
    if sortable[pivot] < sortable[high]:
        swapsVarsInArray(sortable, high, pivot)
    return high
def quick_sort(sortable, left=0, right=None):
    """In-place quicksort of sortable[left..right]; returns the list.

    `right` defaults to the last index when omitted.
    """
    if right is None:  # `is None`, not `== None` (identity test, PEP 8)
        right = len(sortable)-1
    if right <= left:
        return sortable
    # Partition around the right element, then sort both halves.
    pivot = partition(sortable, left, right)
    quick_sort(sortable, left, pivot)
    quick_sort(sortable, pivot+1, right)
    return sortable
if __name__=='__main__':
    # Self-check via the project's sorting test-suite.
    import ztesting.sorting_test.sorting_test as test
    test.test_quick_sort()
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class account_invoice(models.Model):
    _inherit = 'account.invoice'

    # VAT total computed for the QWeb printed report.
    x_impuesto = fields.Monetary(string='Impuesto', store=True, readonly=True, compute='_compute_impuesto')
    # Printed-document sequence parts for the company (sliced from `number`).
    x_company_establecimiento = fields.Char(string='Mi Establecimiento', store=True, readonly=True, compute='_compute_establecimiento',\
        help = "Este campo se llena automaticamente cuando valida el documento, por favor configurar el Secuencial con el formato 001-001-000000001")
    x_company_emision = fields.Char(string='Mi Punto de Emision', store=True, readonly=True, compute='_compute_emision',\
        help = "Este campo se llena automaticamente cuando valida el documento, por favor configurar el Secuencial con el formato 001-001-000000001")
    x_company_secuencial = fields.Char(string='Mi Secuencial', store=True, readonly=True, compute='_compute_secuencial',\
        help = "Este campo se llena automaticamente cuando valida el documento, por favor configurar el Secuencial con el formato 001-001-000000001")
    x_company_autorizacion = fields.Char('Mi Autorización', required=False,\
        help = "Seleccionar 'Open Debug Menu => Set Defaults' y establecer el número de autorizacion por defecto")
    # Printed-document sequence parts for the partner.
    x_partner_establecimiento = fields.Char('Partner Establecimiento', required=False)
    x_partner_punto_emision = fields.Char('Partner Punto de Emisión', required=False)
    x_partner_secuencial = fields.Char('Partner Secuencial', required=False)
    x_partner_autorizacion = fields.Char('Partner Autorización', required=False)
    # Tax fields for electronic invoicing.
    # x_tipo_emision = fields.Char('TIpo de Emisión')
    x_tipo_sustento = fields.Many2one('tipo.sustento', 'Código de Sustento')
    x_tipo_comprobante = fields.Many2one('tipo.comprobante', 'Tipo de Comprobante', required=False)
    x_fac_clave_acceso = fields.Char('Clave de Acceso Fac.', required=False)
    x_fac_codigo_documento = fields.Char('Código de Documento - Fact. Elec', required=False)
    # Tax fields for electronic withholding.
    # x_fac_ambiente = fields.Char('Ambiente')
    # x_tipo_emision = fields.Char('TIpo de Emisión')
    x_ret_clave_acceso = fields.Char('Clave de Acceso Ret.', required=False)
    x_ret_codigo_documento = fields.Char('Código de Documento - Ret. Elec', required=False)
    x_ret_fecha_emision = fields.Date('Fecha Retención', required=False)
    # Payment-method lines.
    x_forma_pago_id = fields.One2many('forma.pago.line', 'invoice_id', string='Transacción', copy=True)

    @api.one
    @api.depends('number')
    def _compute_establecimiento(self):
        # Characters 0-2 of '001-001-000000001' = establishment code.
        if self.number:
            self.x_company_establecimiento = self.number[0:3]

    @api.one
    @api.depends('number')
    def _compute_emision(self):
        # Characters 4-6 = emission point.
        if self.number:
            self.x_company_emision = self.number[4:7]

    @api.one
    @api.depends('number')
    def _compute_secuencial(self):
        # Characters 8-16 = sequential number.
        if self.number:
            self.x_company_secuencial = self.number[8:17]

    @api.one
    @api.depends('tax_line_ids.amount')
    def _compute_impuesto(self):
        # Sum only the non-negative tax lines.
        self.x_impuesto = sum([line.amount for line in self.tax_line_ids if line.amount >= 0.0])
class FormaPagoLine(models.Model):
    """One payment-method line attached to an invoice."""
    _name = 'forma.pago.line'

    name = fields.Many2one('forma.pago', 'Transacción')
    invoice_id = fields.Many2one('account.invoice', string='Invoice Reference',
        ondelete='cascade', index=True)
    # A payment method may appear at most once per invoice.
    _sql_constraints = [
        ('name_invoice_uniq', 'unique (name,invoice_id)', 'El método de pago no puede repetirse!')
    ]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
#!/usr/bin/env python
# require: parmed
# python ./add_flourine_to_AG.py you_pdb.pdb
# TODO: change this filename and methods' name
import sys
import parmed as pmd
import pytraj as pt
# Alias for the pytraj helper that splits a structure by residue.
split_res = pt.tools.split_parmed_by_residues
your_pdb = sys.argv[1]
p = pmd.load_file(your_pdb)
# NOTE(review): split('.')[0] truncates basenames containing extra dots;
# os.path.splitext would be safer — confirm expected input names first.
root_name = your_pdb.split('.')[0]
# Write one PDB per residue: <root>_res1.pdb, <root>_res2.pdb, ...
for idx, s in enumerate(split_res(p)):
    s.save("".join((root_name, '_', "res", str(idx + 1), ".pdb")))
|
# Read one line from stdin and echo it back with a trailing period.
st = input()
# '.' is already a string; the original's str('.') wrapper was redundant.
print(st + '.')
|
# Generated by Django 2.1.2 on 2018-12-02 10:48
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete `is_stuff_plus` flag from the Product model."""

    dependencies = [
        ('products', '0026_auto_20181202_1227'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='product',
            name='is_stuff_plus',
        ),
    ]
|
# Generated by Django 3.0.4 on 2020-06-27 07:08
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the pluralised model names to their singular forms."""

    dependencies = [
        ('admin', '0003_logentry_add_action_flag_choices'),
        ('auth', '0011_update_proxy_permissions'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('devcamper_api', '0006_reviews'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Bootcamps',
            new_name='Bootcamp',
        ),
        migrations.RenameModel(
            old_name='Courses',
            new_name='Course',
        ),
        migrations.RenameModel(
            old_name='Reviews',
            new_name='Review',
        ),
        migrations.RenameModel(
            old_name='Users',
            new_name='User',
        ),
    ]
|
#!/usr/bin/env python
from os.path import join
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import netCDF4 as nc4
from e3sm_case_output import day_str, time_str
# Day range (inclusive) and model time step (seconds) for the files to read.
START_DAY = 1
END_DAY = 1
TIME_STEP = 1800
assert 86400 % TIME_STEP == 0, "cannot fit even number of time steps in day"
# Number of history files written per simulated day.
times_per_day = 86400 // TIME_STEP
# Simulation case names; commented entries can be re-enabled for other plots.
CASE_NAMES = [
    "timestep_ctrl",
    # "timestep_MG2_10s",
    "timestep_CLUBB_10s_MG2_10s",
    "timestep_CLUBB_MG2_10s",
    # "timestep_CLUBB_MG2_10s_ftype1",
    "timestep_all_10s",
    # "timestep_all_300s",
    # "timestep_all_900s",
    # "timestep_ctrl_notms",
    # "timestep_ctrl_niter_change",
    # "timestep_ctrl_niter_change_smooth",
    # "timestep_smooth14",
    # "timestep_smooth35",
    # "timestep_300s_niter_change",
    # "timestep_dyn_10s",
    # "timestep_presaer_ctrl",
    # "timestep_presaer_CLUBB_MG2_10s",
    # "timestep_presaer_CLUBB_MG2_10s_ZM_10s",
    # "timestep_presaer_cld_10s",
    # "timestep_presaer_cld_10s_ftype1",
    # "timestep_presaer_all_10s",
    # "timestep_presaer_all_10s_lower_tau",
    # "timestep_presaer_all_10s_cld_5s",
]
# Legend abbreviations; order must stay in sync with CASE_NAMES.
SHORT_CASE_NAMES = [
    "CTRL",
    # "MICRO10",
    "CLUBB10MICRO10",
    "CLUBBMICRO10",
    # "CLUBBMICRO10FTYPE1",
    "ALL10",
    # "ALL300",
    # "ALL900",
    # "CTRLFLXAVG",
    # "CTRLNITERS",
    # "CTRLNITERSSMOOTH35",
    # "SMOOTH14",
    # "SMOOTH35",
    # "ALL300NITERS",
    # "DYN10",
    # "CTRLPA",
    # "CLUBBMICRO10PA",
    # "CLUBBMICRO10ZM10PA",
    # "CLD10PA",
    # "CLD10FTYPE1PA",
    # "ALL10PA",
    # "ALL10PALT",
    # "ALL10CLD5PA",
]
# Matplotlib (color, linestyle) per short case name.
STYLES = {
    "CTRL": ('k', '-'),
    "MICRO10": ('r', '-'),
    "CLUBB10MICRO10": ('maroon', '-'),
    "CLUBBMICRO10": ('indigo', '-'),
    "CLUBBMICRO10FTYPE1": ('indigo', ':'),
    "ALL10": ('dimgrey', '-'),
    # "ALL300": ('dimgrey', ':'),
    "ALL300": ('g', '-'),
    "ALL900": ('k', '--'),
    "CTRLFLXAVG": ('k', ':'),
    "CTRLNITERS": ('orange', '-'),
    "CTRLNITERSSMOOTH35": ('orange', '--'),
    "SMOOTH14": ('r', '-'),
    "SMOOTH35": ('r', '-'),
    "ALL300NITERS": ('b', '-'),
    "DYN10": ('y', '-'),
    "CTRLPA": ('k', '-'),
    "CLUBBMICRO10PA": ('indigo', '-'),
    "CLUBBMICRO10ZM10PA": ('saddlebrown', '-'),
    "CLD10PA": ('slateblue', '-'),
    "CLD10FTYPE1PA": ('slateblue', ':'),
    "ALL10PA": ('dimgrey', '-'),
    "ALL10PALT": ('dimgrey', '-.'),
    "ALL10CLD5PA": ('slateblue', '-.'),
}
# Run directories holding each case's CAM history output.
OUTPUT_DIRS = ["/p/lustre2/santos36/ACME/{}/run/".format(case)
               for case in CASE_NAMES]
suffix = ""
# Plain-text log for diagnostics emitted while plotting.
log_file = open("plot_water_budget_box_log{}.txt".format(suffix), 'w')
# CAM h0 history file name pattern: <case>.cam.h0.0001-01-<day>-<sec>.nc
out_file_template = "{}.cam.h0.0001-01-{}-{}.nc"
def get_out_file_name(icase, day, time):
    """Given a case index, day, and time, return CAM header file name."""
    fname = out_file_template.format(CASE_NAMES[icase],
                                     day_str(day), time_str(time))
    return join(OUTPUT_DIRS[icase], fname)
# Read grid metadata (column count, levels, coordinates) from the first
# history file of the first case; all cases share the same grid.
first_file_name = get_out_file_name(0, 1, 0)
first_file = nc4.Dataset(first_file_name, 'r')
ncol = len(first_file.dimensions['ncol'])
nlev = len(first_file.dimensions['lev'])
lat = first_file['lat'][:]
lon = first_file['lon'][:]
lev = first_file['lev'][:]
# Find columns in box over South America.
min_lat = -20.
max_lat = 10.
min_lon = 280.
max_lon = 315.
column_set = set()
for i in range(ncol):
    if min_lon <= lon[i] <= max_lon and min_lat <= lat[i] <= max_lat:
        column_set.add(i)
first_file.close()
ncol_sa = len(column_set)
# ifocus is the single grid column analyzed below; the commented values are
# alternative columns of interest found in earlier runs. -1 triggers an
# automatic search (next section).
# Max diff in CLDLIQ at surface for CLUBBMICRO10
#ifocus = 28970
# Max CLDLIQ at surface for CLUBBMICRO10
#ifocus = 27898
# Max precipitation in CTRL
#ifocus = 29215
# Max precipitation in CLUBBMICRO10 and CLUBBMICRO10PA
ifocus = 29488
# Max precipitation in ALL10
#ifocus = 29227
# Large oscillations at 1800
#ifocus = 29520
#ifocus = -1
# If no column was hard-coded above, search the South America box for the
# column with the largest surface CLDLIQ difference between two cases.
if ifocus == -1:
    # Look at lowest level CLDLIQ, to see where fog difference is most intense
    # between two cases.
    print("Searching for largest fog difference.", file=log_file, flush=True)
    ictrl = 0   # index of the control case
    itest = 2   # index of the test case
    cldliq_maxdiff = 0.
    ifocus = -1
    for day in range(START_DAY, END_DAY+1):
        for it in range(times_per_day):
            ctrl_file_name = get_out_file_name(ictrl, day, it*TIME_STEP)
            ctrl_file = nc4.Dataset(ctrl_file_name, 'r')
            test_file_name = get_out_file_name(itest, day, it*TIME_STEP)
            test_file = nc4.Dataset(test_file_name, 'r')
            for icol in column_set:
                # nlev-1 is the lowest model level (surface).
                cldliq_diff = test_file['CLDLIQ'][0,nlev-1,icol] - \
                    ctrl_file['CLDLIQ'][0,nlev-1,icol]
                if cldliq_diff > cldliq_maxdiff:
                    cldliq_maxdiff = cldliq_diff
                    ifocus = icol
            ctrl_file.close()
            test_file.close()
    # Also check the 0-th output of the day after END_DAY, which closes the
    # analysis period.
    ctrl_file_name = get_out_file_name(ictrl, END_DAY+1, 0)
    ctrl_file = nc4.Dataset(ctrl_file_name, 'r')
    test_file_name = get_out_file_name(itest, END_DAY+1, 0)
    test_file = nc4.Dataset(test_file_name, 'r')
    for icol in column_set:
        cldliq_diff = test_file['CLDLIQ'][0,nlev-1,icol] - \
            ctrl_file['CLDLIQ'][0,nlev-1,icol]
        if cldliq_diff > cldliq_maxdiff:
            cldliq_maxdiff = cldliq_diff
            ifocus = icol
    ctrl_file.close()
    test_file.close()
    assert ifocus != -1, "Cloud liquid max difference not found!"
    #print("Difference maximized at column ", ifocus, " at lat = ",
    #      lat[ifocus], ", lon = ", lon[ifocus], file=log_file, flush=True)
# Variables read directly from the history files. 'ndim' is 1 for surface /
# column-integrated fields and 2 for fields with a vertical dimension;
# 'scale' is an optional unit-conversion factor; 'display' overrides the
# axis label name.
variables = [
    {'name': 'RELHUM', 'units': r'%', 'ndim': 2},
    {'name': 'CLDLIQ', 'units': r'$g/kg$', 'ndim': 2, 'scale': 1000.},
    {'name': 'RAINQM', 'units': r'$g/kg$', 'ndim': 2, 'scale': 1000.},
    {'name': 'LHFLX', 'units': r'$W/m^2$', 'ndim': 1},
    {'name': 'SHFLX', 'units': r'$W/m^2$', 'ndim': 1},
    {'name': 'CMELIQ', 'units': r'$kg/kg/s$', 'ndim': 2},
    {'name': 'PRAO', 'units': r'$kg/kg/s$', 'ndim': 2},
    {'name': 'PRCO', 'units': r'$kg/kg/s$', 'ndim': 2},
    {'name': 'QCSEDTEN', 'units': r'$kg/kg/s$', 'ndim': 2},
    {'name': 'QRSEDTEN', 'units': r'$kg/kg/s$', 'ndim': 2},
    {'name': 'PRECL', 'units': r'$mm/day$', 'ndim': 1, 'scale': 1000.*86400.},
    {'name': 'PRECC', 'units': r'$mm/day$', 'ndim': 1, 'scale': 1000.*86400.},
    {'name': 'EVAPPREC', 'units': r'$kg/kg/s$', 'ndim': 2},
    {'name': 'T', 'units': r'$K$', 'ndim': 2},
    {'name': 'Q', 'units': r'$g/kg$', 'ndim': 2, 'scale': 1000.},
    {'name': 'U', 'units': r'$m/s$', 'ndim': 2},
    {'name': 'V', 'units': r'$m/s$', 'ndim': 2},
    {'name': 'OMEGA', 'units': r'$Pa/s$', 'ndim': 2},
    {'name': 'OMEGA500', 'units': r'$Pa/s$', 'ndim': 1, 'display': r'$\omega_{500}$'},
    {'name': 'U10', 'units': r'$m/s$', 'ndim': 1},
    {'name': 'PSL', 'units': r'$Pa$', 'ndim': 1},
    {'name': 'FSDS', 'units': r'$W/m^2$', 'ndim': 1},
    {'name': 'SWCF', 'units': r'$W/m^2$', 'ndim': 1},
    {'name': 'CLDLOW', 'units': r'fraction', 'ndim': 1},
    {'name': 'CAPE', 'units': r'$J/kg$', 'ndim': 1},
    {'name': 'TAUX', 'units': r'$Pa$', 'ndim': 1},
    {'name': 'TAUY', 'units': r'$Pa$', 'ndim': 1},
    {'name': 'TAUGWX', 'units': r'$Pa$', 'ndim': 1},
    {'name': 'TAUGWY', 'units': r'$Pa$', 'ndim': 1},
]
# Variables computed from the ones above; 'calc' receives the per-case dict
# of already-read (and already-scaled) arrays.
derived_variables = [
    {'name': 'PRECT', 'units': r'$mm/day$', 'ndim': 1,
     'depends': ['PRECL', 'PRECC'],
     'calc': (lambda var_dict: var_dict['PRECL'] + var_dict['PRECC']),
     'display': "Total Precipitation",
     },
    {'name': 'TAU', 'units': r'$Pa$', 'ndim': 1,
     'depends': ['TAUX', 'TAUY'],
     'calc': (lambda var_dict: np.sqrt(var_dict['TAUX']**2 + var_dict['TAUY']**2)),
     },
]
# Check that dependencies are satisfied.
var_names = [var['name'] for var in variables]
for derived in derived_variables:
    for depend in derived['depends']:
        assert depend in var_names
# Extract the time series of every variable at column ifocus, for every case.
ncases = len(CASE_NAMES)
# Number of output times for a standard-timestep case, including the closing
# 0-th output of the day after END_DAY.
ntimes = (END_DAY - START_DAY + 1) * times_per_day + 1
LEVEL = nlev - 1  # lowest model level (surface)
out_vars = {}
for icase in range(ncases):
    case = SHORT_CASE_NAMES[icase]
    print("Processing case ", case)
    # The ALL900/ALL300 cases were run with shorter timesteps, so they have
    # proportionally more output files per day.
    if case == "ALL900":
        case_times_per_day = times_per_day * 2
        case_ntimes = ntimes*2 - 1
        case_time_step = TIME_STEP // 2
    elif case == "ALL300" or case == "ALL300NITERS":
        case_times_per_day = times_per_day * 6
        case_ntimes = ntimes*6 - 5
        case_time_step = TIME_STEP // 6
    else:
        case_times_per_day = times_per_day
        case_ntimes = ntimes
        case_time_step = TIME_STEP
    out_vars[case] = {}
    for var in variables:
        out_vars[case][var['name']] = np.zeros((case_ntimes,))
    ita = 0  # running output-time index
    for day in range(START_DAY, END_DAY+1):
        for it in range(case_times_per_day):
            out_file_name = get_out_file_name(icase, day, it*case_time_step)
            out_file = nc4.Dataset(out_file_name, 'r')
            for var in variables:
                varname = var['name']
                ndim = var['ndim']
                if ndim == 1:
                    out_vars[case][varname][ita] = out_file[varname][0,ifocus]
                elif ndim == 2:
                    out_vars[case][varname][ita] = out_file[varname][0,LEVEL,ifocus]
                else:
                    assert False, \
                        "don't know what to do with ndim={}".format(ndim)
            out_file.close()
            ita += 1
    # Last file is 0-th time of the next day.
    out_file_name = get_out_file_name(icase, END_DAY+1, 0)
    out_file = nc4.Dataset(out_file_name, 'r')
    for var in variables:
        varname = var['name']
        ndim = var['ndim']
        if ndim == 1:
            out_vars[case][varname][ita] = out_file[varname][0,ifocus]
        elif ndim == 2:
            out_vars[case][varname][ita] = out_file[varname][0,LEVEL,ifocus]
        else:
            assert False, \
                "don't know what to do with ndim={}".format(ndim)
    out_file.close()
    # Scale variables
    for var in variables:
        if 'scale' in var:
            out_vars[case][var['name']] *= var['scale']
    # Calculate derived variables
    for derived in derived_variables:
        out_vars[case][derived['name']] = derived['calc'](out_vars[case])
# Plot the time series of every (direct and derived) variable, one figure
# per variable, one line per case.
# Assumes Venezuelan time.
TIME_OFFSET = 4.
# Time axes in hours since local midnight; the high-frequency cases get
# proportionally denser axes over the same span.
times = np.linspace(0., TIME_STEP*(ntimes - 1) / 3600., ntimes) - TIME_OFFSET
times900 = np.linspace(0., TIME_STEP*(ntimes - 1) / 3600., 2*ntimes - 1) - TIME_OFFSET
times300 = np.linspace(0., TIME_STEP*(ntimes - 1) / 3600., 6*ntimes - 5) - TIME_OFFSET
# Variables whose raw time series are also dumped to the log as CSV rows.
print_vars = {"PRECT", "PRECL", "PRECC", "OMEGA500", "CAPE"}
print("time", ",", ",".join(str(time % 24) for time in times), sep="",
      file=log_file, flush=True)
for var in variables + derived_variables:
    name = var['name']
    for icase in range(ncases):
        case = SHORT_CASE_NAMES[icase]
        if case == "ALL900":
            case_times = times900
        elif case == "ALL300" or case == "ALL300NITERS":
            case_times = times300
        else:
            case_times = times
        if name in print_vars:
            print(name, ",", case, ",", ",".join(str(x) for x in out_vars[case][name]),
                  sep="", file=log_file, flush=True)
        plt.plot(case_times, out_vars[case][name], color=STYLES[case][0],
                 linestyle=STYLES[case][1])
    plt.axis('tight')
    plt.xlabel("Time (hr)")
    # Tick labels are hard-coded for 1- and 2-day runs only.
    # BUG FIX: the original tested "START_DAY - END_DAY + 1", which is <= 1
    # for any valid run and 0 for a 2-day run, so the 2-day branch was
    # unreachable. The number of days is END_DAY - START_DAY + 1.
    if END_DAY - START_DAY + 1 == 1:
        plt.xticks(np.linspace(-3., 18., 8),
                   ["2100", "0000", "0300", "0600", "0900", "1200", "1500", "1800"])
    elif END_DAY - START_DAY + 1 == 2:
        plt.xticks(np.linspace(-3., 42., 16),
                   ["2100", "0000", "0300", "0600", "0900", "1200", "1500", "1800",
                    "2100", "0000", "0300", "0600", "0900", "1200", "1500", "1800"])
    plt.grid(True)
    if 'display' in var:
        dname = var['display']
    else:
        dname = name
    plt.ylabel("{} ({})".format(dname, var['units']))
    plt.savefig("{}_time_box{}.png".format(name, suffix))
    plt.close()
# Plot wind issues over SA.
from mpl_toolkits import basemap
bmap = basemap.Basemap(lon_0=180.)
# Seconds-of-day and day at which to inspect the wind field; commented
# values are alternatives used in earlier investigations.
#TIME_CHECK = 57600
#TIME_CHECK = 79200
TIME_CHECK = 0
#TIME_CHECK = 54000
#TIME_CHECK = 12*3600
DAY_CHECK = 2
CASE_CHECK = 0
case = SHORT_CASE_NAMES[CASE_CHECK]
# Match the output frequency of the selected case (see extraction loop above).
if case == "ALL900":
    time_increment = TIME_STEP // 2
elif case == "ALL300" or case == "ALL300NITERS":
    time_increment = TIME_STEP // 6
else:
    time_increment = TIME_STEP
# Lon/lat window for the map plots.
#plot_box = [min_lon, max_lon, min_lat, max_lat]
plot_box = [285., 297., 1., 10.]
# Snapshot 1: surface wind arrows and OMEGA500 at TIME_CHECK.
mid_file_name = get_out_file_name(CASE_CHECK, DAY_CHECK, TIME_CHECK)
mid_file = nc4.Dataset(mid_file_name, 'r')
u1 = mid_file['U'][0,LEVEL,:]
v1 = mid_file['V'][0,LEVEL,:]
#plt.scatter(lon, lat, c=mid_file['PSL'][0,:])
#plt.colorbar()
plt.quiver(lon, lat, mid_file['U'][0,LEVEL,:], mid_file['V'][0,LEVEL,:],
           scale=100., scale_units='height', angles='xy')
bmap.drawcoastlines()
plt.axis(plot_box)
plt.savefig("UV_arrow_box1{}.png".format(suffix))
plt.close()
plt.scatter(lon, lat, c=mid_file['OMEGA500'][0,:])
bmap.drawcoastlines()
plt.axis(plot_box)
plt.colorbar()
plt.savefig("OMEGA500_box1{}.png".format(suffix))
plt.close()
mid_file.close()
# Snapshot 2: one output interval later.
mid_file_name = get_out_file_name(CASE_CHECK, DAY_CHECK, TIME_CHECK + time_increment)
mid_file = nc4.Dataset(mid_file_name, 'r')
u2 = mid_file['U'][0,LEVEL,:]
v2 = mid_file['V'][0,LEVEL,:]
plt.quiver(lon, lat, mid_file['U'][0,LEVEL,:], mid_file['V'][0,LEVEL,:],
           scale=100., scale_units='height', angles='xy')
bmap.drawcoastlines()
plt.axis(plot_box)
plt.savefig("UV_arrow_box2{}.png".format(suffix))
plt.close()
plt.scatter(lon, lat, c=mid_file['OMEGA500'][0,:])
bmap.drawcoastlines()
plt.axis(plot_box)
plt.colorbar()
plt.savefig("OMEGA500_box2{}.png".format(suffix))
plt.close()
mid_file.close()
# Snapshot 3: two output intervals later.
mid_file_name = get_out_file_name(CASE_CHECK, DAY_CHECK, TIME_CHECK + 2*time_increment)
mid_file = nc4.Dataset(mid_file_name, 'r')
u3 = mid_file['U'][0,LEVEL,:]
v3 = mid_file['V'][0,LEVEL,:]
plt.quiver(lon, lat, mid_file['U'][0,LEVEL,:], mid_file['V'][0,LEVEL,:],
           scale=100., scale_units='height', angles='xy')
bmap.drawcoastlines()
plt.axis(plot_box)
plt.savefig("UV_arrow_box3{}.png".format(suffix))
plt.close()
plt.scatter(lon, lat, c=mid_file['OMEGA500'][0,:])
bmap.drawcoastlines()
plt.axis(plot_box)
plt.colorbar()
plt.savefig("OMEGA500_box3{}.png".format(suffix))
plt.close()
mid_file.close()
# Second time-difference of the wind (u1 - 2*u2 + u3): highlights
# oscillatory behavior between consecutive outputs; focus column marked.
plt.scatter(lon[ifocus], lat[ifocus])
plt.quiver(lon, lat, u1 - 2*u2 + u3, v1 - 2*v2 + v3,
           scale=100., scale_units='height', angles='xy')
bmap.drawcoastlines()
plt.axis(plot_box)
plt.savefig("UV_D2_arrow_box{}.png".format(suffix))
plt.close()
log_file.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from semaforo import *
def main(inic):
    """Run the big-bang event loop starting from initial state *inic*."""
    big_bang(inic, frequencia=FREQUENCIA,
             a_cada_tick=proxima_cor,  # Semaforo -> Semaforo (advance the light)
             desenhar=desenha,  # Cor -> Imagem (render current color)
             # NOTE(review): "Gato -> Boolean" looks like a copy-paste
             # leftover; presumably the signature is Semaforo -> Boolean.
             parar_quando=parar,
             modo_debug=False
             )
main(cor)
import os,sys
import json
import traceback
from flask import (Blueprint,request,jsonify)
from glyds.db import get_mongodb, log_error, next_sequence_value
from flask_jwt_extended import (
jwt_required, jwt_refresh_token_required, get_jwt_identity
)
from flask_cors import cross_origin
bp = Blueprint('pdataset', __name__, url_prefix='/pdataset')
@bp.route('/get_one', methods=('GET', 'POST'))
@jwt_required
def get_one():
    """Return a single dataset record matching the JSON query in the request.

    Always responds with HTTP 200; on success the payload is
    {"record": ..., "query": ..., "status": 1}, otherwise whatever the
    lookup/error path produced.
    """
    # JWT identity of the caller; fetched but currently unused.
    current_user = get_jwt_identity()
    res_obj = {}
    try:
        req_obj = request.json
        # NOTE(review): get_record_doc is not imported in the visible part of
        # this module — confirm it is defined/imported elsewhere in the file.
        ret_obj, status = get_record_doc(req_obj)
        # status == 0 signals a lookup failure; pass its payload through.
        if status == 0:
            return jsonify(ret_obj), 200
        res_obj = {"record":ret_obj, "query":req_obj, "status":1}
    except Exception as e:
        res_obj = log_error(traceback.format_exc())
    return jsonify(res_obj), 200
|
# -*- coding: utf-8 -*-
import signal
import sys
import inject
import logging
# sys.path.append('../python')
sys.path.insert(0, '../python')
from model.config import Config
from model.session import Session
import network.websocket
def config_injector(binder):
    """Bind the Config and Session singletons for the `inject` container."""
    binder.bind(Config, Config('server-config.cfg'))
    binder.bind(Session, Session())
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    inject.configure(config_injector)
    config = inject.instance(Config)
    (loop, server, factory) = network.websocket.getLoop()
    # (reactor,port,factory) = network.websocket.getPort()
    def close_sig_handler(signal, frame):
        # Shut the websocket server down cleanly on Ctrl-C.
        server.close()
        loop.close()
        sys.exit()
    signal.signal(signal.SIGINT, close_sig_handler)
    # Log message ("Running action server") kept in Spanish: runtime string.
    logging.debug('Ejecutando servidor de acciones')
    try:
        loop.run_forever()
    except Exception as e:
        logging.exception(e)
|
from functools import partial
class Foo(object):
    """Holds int-conversion callables for a couple of numeric bases."""

    def __init__(self):
        # Default converter: plain base-10 int().
        self.func = int
        # Index 0 parses binary strings, index 1 parses octal strings.
        self.arr_of_funcs = [
            partial(int, base=2),
            partial(int, base=8),
        ]

    def convert(self, num, base):
        """Convert *num* with the converter at index *base* (0=binary, 1=octal)."""
        converter = self.arr_of_funcs[base]
        return converter(num)
|
"""
os.popen.py
"""
import sys
import os
import csv
# List the unique names of running programs via the Windows `tasklist`
# command (CSV output, no header).
infile = os.popen("tasklist /fo csv /nh")
lines = csv.reader(infile)
# First CSV field is the image name; deduplicate and sort.
lines = sorted(set([line[0] for line in lines]))
status = infile.close()
# popen().close() returns None on success, otherwise the exit status.
# (Idiom fix: compare to None with `is not`, not `!=`.)
if status is not None:
    print("status =", status)
    sys.exit(1)
print("Programs Running")
for i, line in enumerate(lines, start = 1):
    print("{:3} {}".format(i, line))
sys.exit(0)
|
# Return several values in the JWT login response.
def jwt_response_payload_handler(token, user=None, request=None):
    """Build the payload returned after a successful JWT login.

    token: the JWT string generated once the login info is verified.
    user: the authenticated user object (must have `id` and `username`).
    request: the request information (unused).
    """
    payload = {
        'token': token,
        'user_id': user.id,
        'user_name': user.username,
    }
    return payload
from django.contrib.auth.backends import ModelBackend
import re
from .models import User
# Multi-credential login: accept either a username or a phone number.
def get_account_by_username(account):
    """Return the User whose phone number or username equals *account*.

    Returns None when no matching user exists.
    """
    try:
        # Mainland-China mobile number: 1, then 3-9, then 9 more digits.
        # Fix: use a raw string so `\d` is a regex escape, not a (deprecated)
        # string escape.
        if re.match(r'^1[3-9]\d{9}$', account):
            user = User.objects.get(phone=account)
        else:
            user = User.objects.get(username=account)
    except User.DoesNotExist:
        return None
    else:
        return user
# Custom user authentication backend.
class UsernameMobileAuthBackend(ModelBackend):
    """Django auth backend accepting either a username or a phone number."""
    def authenticate(self, request, username=None, password=None, **kwargs):
        # *username* may actually be a phone number; the lookup helper
        # decides which field to match.
        user = get_account_by_username(username)
        if user is None:
            return None
        if user.check_password(password) and self.user_can_authenticate(user):
            return user
        # Implicitly returns None when the password check fails.
|
# Copyright 2023 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.event_handler import PublisherEventCallbacks
from rclpy.event_handler import QoSPublisherMatchedInfo
from rclpy.event_handler import QoSSubscriptionMatchedInfo
from rclpy.event_handler import SubscriptionEventCallbacks
from rclpy.executors import SingleThreadedExecutor
from rclpy.node import Node
from rclpy.publisher import Publisher
from rclpy.subscription import Subscription
from rclpy.task import Future
from std_msgs.msg import String
"""
This demo program shows detected matched event.
Matched event occurs when publisher and subscription establishes the connection.
Class MatchedEventDetectNode output connection information of publisher and subscription.
Class MultiSubNode is used to create/destroy subscription to connect/disconnect the publisher of
MatchedEventDetectNode.
Class MultiPubNode is used to created/destroy publisher to connect/disconnect the subscription
of MatchedEventDetectNode.
"""
class MatchedEventDetectNode(Node):
    """Node that logs matched events on one publisher and one subscription.

    A matched event fires when a publisher and a subscription establish or
    tear down a connection. Callers obtain a Future via get_future() and
    spin until the next event completes it.
    """

    def __init__(self, pub_topic_name: str, sub_topic_name: str):
        super().__init__('matched_event_detection_node')
        self.__any_subscription_connected = False  # used for publisher event
        self.__any_publisher_connected = False  # used for subscription event
        pub_event_callback = PublisherEventCallbacks(matched=self.__pub_matched_event_callback)
        self.pub = self.create_publisher(String, pub_topic_name, 10,
                                         event_callbacks=pub_event_callback)
        sub_event_callback = SubscriptionEventCallbacks(matched=self.__sub_matched_event_callback)
        # Subscription callback is a no-op; only the matched event matters.
        self.sub = self.create_subscription(String, sub_topic_name, lambda msg: ...,
                                            10, event_callbacks=sub_event_callback)

    def __pub_matched_event_callback(self, info: QoSPublisherMatchedInfo):
        """Log changes in the number of subscriptions matched to our publisher."""
        if self.__any_subscription_connected:
            if info.current_count == 0:
                self.get_logger().info('Last subscription is disconnected.')
                self.__any_subscription_connected = False
        else:
            if info.current_count != 0:
                self.get_logger().info('First subscription is connected.')
                self.__any_subscription_connected = True
        # NOTE(review): assumes get_future() was called before the event
        # fires; otherwise self.future does not exist yet.
        self.future.set_result(True)

    def __sub_matched_event_callback(self, info: QoSSubscriptionMatchedInfo):
        """Log changes in the number of publishers matched to our subscription."""
        if self.__any_publisher_connected:
            if info.current_count == 0:
                self.get_logger().info('Last publisher is disconnected.')
                self.__any_publisher_connected = False
            else:
                self.get_logger().info('The changed number of connected publisher is '
                                       + str(info.current_count_change) + ' and current '
                                       'number of connected publisher is '
                                       + str(info.current_count))
        else:
            if info.current_count != 0:
                self.get_logger().info('First publisher is connected.')
                self.__any_publisher_connected = True
        self.future.set_result(True)

    def get_future(self):
        """Return a fresh Future completed by the next matched event."""
        self.future = Future()
        return self.future
class MultiSubNode(Node):
    """Helper node that creates/destroys subscriptions on one topic.

    Used to trigger matched events on MatchedEventDetectNode's publisher.
    """

    def __init__(self, topic_name: str):
        super().__init__('multi_sub_node')
        self.__subs = []  # all subscriptions created so far
        self.__topic_name = topic_name

    def create_one_sub(self) -> Subscription:
        """Create and track one no-op subscription on the configured topic."""
        self.get_logger().info('Create a new subscription.')
        sub = self.create_subscription(String, self.__topic_name, lambda msg: ..., 10)
        self.__subs.append(sub)
        return sub

    def destroy_one_sub(self, sub: Subscription):
        """Destroy *sub* if it belongs to this node; otherwise do nothing."""
        if sub in self.__subs:
            self.get_logger().info('Destroy a subscription.')
            self.__subs.remove(sub)
            self.destroy_subscription(sub)
class MultiPubNode(Node):
    """Helper node that creates/destroys publishers on one topic.

    Used to trigger matched events on MatchedEventDetectNode's subscription.
    """

    def __init__(self, topic_name: str):
        super().__init__('multi_pub_node')
        self.__pubs = []  # all publishers created so far
        self.__topic_name = topic_name

    def create_one_pub(self) -> Publisher:
        """Create and track one publisher on the configured topic."""
        self.get_logger().info('Create a new publisher.')
        pub = self.create_publisher(String, self.__topic_name, 10)
        self.__pubs.append(pub)
        return pub

    def destroy_one_pub(self, pub: Publisher):
        """Destroy *pub* if it belongs to this node; otherwise do nothing."""
        if pub in self.__pubs:
            self.get_logger().info('Destroy a publisher.')
            self.__pubs.remove(pub)
            self.destroy_publisher(pub)
def main(args=None):
    """Demonstrate matched events by connecting/disconnecting endpoints.

    Creates subscriptions (then publishers) against MatchedEventDetectNode
    one at a time, spinning until each matched event is observed or the
    wait times out.
    """
    rclpy.init(args=args)
    topic_name_for_detect_pub_matched_event = 'pub_topic_matched_event_detect'
    topic_name_for_detect_sub_matched_event = 'sub_topic_matched_event_detect'
    matched_event_detect_node = MatchedEventDetectNode(
        topic_name_for_detect_pub_matched_event, topic_name_for_detect_sub_matched_event)
    multi_subs_node = MultiSubNode(topic_name_for_detect_pub_matched_event)
    multi_pubs_node = MultiPubNode(topic_name_for_detect_sub_matched_event)
    maximum_wait_time = 10  # 10s
    executor = SingleThreadedExecutor()
    executor.add_node(matched_event_detect_node)
    executor.add_node(multi_subs_node)
    executor.add_node(multi_pubs_node)
    # MatchedEventDetectNode will output:
    #   First subscription is connected.
    sub1 = multi_subs_node.create_one_sub()
    executor.spin_until_future_complete(matched_event_detect_node.get_future(), maximum_wait_time)
    # MatchedEventDetectNode will output:
    #   The changed number of connected subscription is 1 and current number of connected
    #   subscription is 2.
    sub2 = multi_subs_node.create_one_sub()
    executor.spin_until_future_complete(matched_event_detect_node.get_future(), maximum_wait_time)
    # MatchedEventDetectNode will output:
    #   The changed number of connected subscription is -1 and current number of connected
    #   subscription is 1.
    multi_subs_node.destroy_one_sub(sub1)
    executor.spin_until_future_complete(matched_event_detect_node.get_future(), maximum_wait_time)
    # MatchedEventDetectNode will output:
    #   Last subscription is disconnected.
    multi_subs_node.destroy_one_sub(sub2)
    executor.spin_until_future_complete(matched_event_detect_node.get_future(), maximum_wait_time)
    # MatchedEventDetectNode will output:
    #   First publisher is connected.
    pub1 = multi_pubs_node.create_one_pub()
    executor.spin_until_future_complete(matched_event_detect_node.get_future(), maximum_wait_time)
    # MatchedEventDetectNode will output:
    #   The changed number of connected publisher is 1 and current number of connected publisher
    #   is 2.
    pub2 = multi_pubs_node.create_one_pub()
    executor.spin_until_future_complete(matched_event_detect_node.get_future(), maximum_wait_time)
    # MatchedEventDetectNode will output:
    #   The changed number of connected publisher is -1 and current number of connected publisher
    #   is 1.
    multi_pubs_node.destroy_one_pub(pub1)
    executor.spin_until_future_complete(matched_event_detect_node.get_future(), maximum_wait_time)
    # MatchedEventDetectNode will output:
    #   Last publisher is disconnected.
    multi_pubs_node.destroy_one_pub(pub2)
    executor.spin_until_future_complete(matched_event_detect_node.get_future(), maximum_wait_time)
    multi_pubs_node.destroy_node()
    multi_subs_node.destroy_node()
    matched_event_detect_node.destroy_node()
    rclpy.try_shutdown()
if __name__ == '__main__':
    main()
|
#!/usr/bin/python2.7
#-*- coding: utf-8 -*-
from kalman_filter import kalman_filter
from testkf import testkf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import gl
from matplotlib import rc
#import math
# Compare measured pressure data against simulation output, with and
# without Kalman filtering, and save the comparison plot.
plt.rcParams["legend.fontsize"]=30
plt.figure(figsize=(12,9),dpi=200)
plt.gca().yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
rc('font',**{'family':'serif','serif':['Times'],'size':22})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
x1 = []
x2 = []
x = []
y1 = []
y2 = []
y = []
# Experimental reference data; subsampled coarsely except in the 7941-8110
# index window, which is sampled densely (presumably the impact event).
data1 = np.loadtxt('./Test09_Model3_17cm_Pressure.dat',skiprows=1)
x1.extend(data1[0:7940:50,0])
y1.extend(data1[0:7940:50,1])
x1.extend(data1[7941:8110:4,0])
y1.extend(data1[7941:8110:4,1])
x1.extend(data1[8110::50,0])
y1.extend(data1[8110::50,1])
max0 = max(y1)
plot1=plt.plot(x1,y1,'ko',linewidth=3,markersize=9,markeredgewidth=2,markerfacecolor='white',label='Exp. KRISO')
#plot2=plt.plot(x1,y2,'-r',label='P2')
# Simulation output; pressure converted from Pa to kPa.
data2 = np.loadtxt('./output/fusion/procs1.pSensor_value.1')
x3 = data2[::,0]
y3 = data2[::,1]/1000
plot2=plt.plot(x3,y3,'-b',linewidth=2,label='The present work')
# x2.extend(data2[120::,0])
# y2.extend(data2[120::,1]/1000)
# y33 = kalman_filter(y2,1e-6,1e-5,0.0,2)
# y33[0] = 0.0
# plot2=plt.plot(x2,y33,'-r',linewidth=3,label='The present work after KF')
x.extend(data2[140::,0])
y.extend(data2[140::,1]/1000)
#y33 = kalman_filter(y,1e-6,2e-5,0.0,2)
# NOTE(review): y33 = y is an alias, not a copy — the loop below filters the
# list in place, so later samples are filtered from already-filtered values.
# Confirm this is intended (use y[:] for an independent copy).
y33=y
for i in range(1,len(y)):
    y33[i]=testkf(y[i],1e-6,2e-5)
#y33 = kalman_filter(y,1e-6,2e-5,0.0,2)
#print (y33)
plot2=plt.plot(x,y33,'-k',linewidth=3,label='The present work after KF')
#plot2=plt.plot(xx1,yy1,'ob',linewidth=2,label=r'$1.2 \times 1.8$')
#plot2=plt.plot(x1,y2,'-r',label='P2')
plt.xlim(0.00,0.50)
#plt.xlim(1.00,max(x3)-0.028)
#plt.xlim(5.5,7.3)
plt.ylim(0.0,25)
#plt.ylim(0.2,1.8)
plt.xlabel ('t(s)',fontsize=30)
#plt.xlabel ('t/s',fontsize=15)
#plt.ylabel (r'$U$',fontsize=15)
#plt.ylabel (r'$V$',fontsize=15)
plt.ylabel (r'P(kPa)',fontsize=30)
plt.legend(loc='best', numpoints=1)
#plt.legend(numpoints=1)
#plt.show()
#plt.savefig("wedge30-P1.eps")
#plt.savefig("wedge30-P2.eps")
# print(max0,max3)
plt.savefig("kalmanship09-P1-test.eps")
|
from django.db import models
# Create your models here.
class Song(models.Model):
    """A Spotify track and its audio features."""
    # The Spotify track URI doubles as the primary key.
    spotify_uri = models.CharField(max_length=64, primary_key=True)
    name = models.CharField(max_length=64)
    #artists = models.ManyToManyField(related_name="songs")
    # Artists stored as a plain string (see commented-out M2M alternative).
    artists = models.CharField(max_length=64)
    # Audio features as reported by the Spotify API.
    bpm = models.IntegerField()
    danceability = models.FloatField()
    energy = models.FloatField()
    valence = models.FloatField()
    duration = models.IntegerField()
    def __str__(self):
        return f"{self.name} by {self.artists}"
class SpotifyUser(models.Model):
    """A Spotify account and the set of songs associated with it."""
    spotify_id = models.CharField(max_length=64, primary_key=True)
    songs = models.ManyToManyField(Song, blank=True)
|
import socketio
from flask import Flask, render_template
import eventlet.wsgi
import eventlet
import game_engine
from communication_module import communication_module_sio
from test_manillen import test_class
from engine_test import test_comm_module
# Server-wide state for the card-game socket server.
sio = socketio.Server()
app = Flask(__name__)
socket_list = []  # sids of all currently connected clients
#connects sid to player id
player_set = {}
#connects player id to sid
sid_set = {}
# Set in the __main__ block below before the server starts serving.
engine = None
comm_module = None
# When True, clients are auto-filled with canned data from the test database.
test_game = False
test_c = test_class("test_database.txt")
@app.route('/')
def index():
    """Serve the client-side application."""
    return render_template('index.html')
@sio.on('client_connect', namespace='/')
def client_connect(sid):
    """Register a newly connected client and ask it for its player info."""
    socket_list.append(sid)
    # NOTE(review): the explicit testing=False here is ignored by the
    # original ask_client_info body, which reads the global test_game.
    ask_client_info(sid, testing=False)
    print("connected new client, amount of connected clients is: " + str(len(socket_list)))
def ask_client_info(sid, testing=test_game):
    """Ask the client at *sid* for its info, or feed it canned test data.

    testing: when True, try to supply a record from the test database
    instead of prompting the client. Defaults to the module-level test_game
    flag (captured at definition time).
    """
    # Fix: the original ignored the *testing* parameter and read the global
    # test_game directly, so callers passing testing= had no effect.
    if not testing:
        sio.emit('ask_info', room=sid)
    else:
        test_set = test_c.give_test_set()
        if test_set is not None:
            sio.emit('give_info', test_set, room=sid)
        else:
            # Test database exhausted; fall back to asking the client.
            sio.emit('ask_info', room=sid)
@sio.on('player_info_send', namespace='/')
def client_send_info(sid, data):
    """Add the sending client as a player; in test mode also add 3 AI players.

    NOTE(review): several handlers in this module reuse the name
    client_send_info — they still work because socketio registers each at
    decoration time, but the module-level name is repeatedly shadowed.
    """
    print("server: client sent info, adding player")
    data["comm_module"] = comm_module
    # sid is considered player id, is used with other thing generate unique id
    comm_module.send_engine("add_player", sid, data=data)
    if test_game:
        engine_functions_h = engine.get_functions()
        AI_1 = test_comm_module(1, engine_functions=engine_functions_h)
        AI_2 = test_comm_module(2, engine_functions=engine_functions_h)
        AI_3 = test_comm_module(3, engine_functions=engine_functions_h)
        comm_module.send_engine("add_player", 1, data=test_c.give_test_set(comm_module=AI_1))
        comm_module.send_engine("add_player", 2, data=test_c.give_test_set(comm_module=AI_2))
        comm_module.send_engine("add_player", 3, data=test_c.give_test_set(comm_module=AI_3))
@sio.on('viewer_info_send', namespace='/')
def client_send_info(sid, data):
    """Add the sending client as a (non-playing) viewer."""
    print("server: client sent info, adding viewer")
    data["comm_module"] = comm_module
    # sid is considered player id, is used with other thing generate unique id
    comm_module.send_engine("add_viewer", sid, data=data)
@sio.on('reset_game', namespace='/')
def client_send_info(sid):
    """Forward a player's game-reset request to the engine."""
    print("server, players asked to reset")
    # sid is considered player id, is used with other thing generate unique id
    comm_module.send_engine("reset_game", sid)
@sio.on('keep_waiting', namespace='/')
def client_send_info(sid):
    """Forward a player's keep-waiting request to the engine."""
    # Fix: log message was copy-pasted from the reset handler and wrongly
    # said "asked to reset".
    print("server, players asked to keep waiting")
    # sid is considered player id, is used with other thing generate unique id
    comm_module.send_engine("keep_waiting", sid)
@sio.on('answer_card', namespace='/')
def client_answer_card(sid, data):
    """Forward a player's card choice to the engine."""
    comm_module.send_engine("answer_card", sid, data=data)
@sio.on('disconnect', namespace='/')
def disconnect(sid):
    """Remove a disconnected client and notify the engine."""
    comm_module.send_engine("disconnect", sid)
    socket_list.remove(sid)
    print("client left, amount of connected clients is: " + str(len(socket_list)))
@sio.on('answer_troef', namespace='/')
def answer_troef(sid, card):
    """Forward a player's trump ("troef") choice to the engine."""
    comm_module.send_engine("answer_troef", sid, data=card)
# Engine -> client emit helpers; *id* is the target client's sid/room.
def choose_troef(id):
    """Ask the client to choose the trump suit."""
    sio.emit('choose_troef', room=id)
def wait_other_players(id):
    """Tell the client to wait for the other players."""
    sio.emit('wait_other_players', room=id)
def set_game_info(id, data):
    """Push the current game state to the client."""
    sio.emit('set_game_info', data, room=id)
if __name__ == '__main__':
    # Wire the communication module to the game engine, then serve.
    comm_module = communication_module_sio(sio)
    engine = game_engine.engine(test_module=test_c)
    comm_module.add_engine_function_dict(engine.get_functions())
    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)
    # deploy as an eventlet WSGI server
    eventlet.wsgi.server(eventlet.listen(('', 4000)), app)
|
'''
Questions?
Send email to jiz176@pitt.edu
Nice datasets:
Median death age and Obesity
So just rank them separately
Let's do it
'''
import numpy as np
import matplotlib.pyplot as plt
'''
Again, the structure might not be good enough, but just for saving time
And I am not good at plt at all......
'''
class Grapher(object):
    # Class-level counter shared by all instances so every figure created
    # through a Grapher gets a unique matplotlib figure id.
    plt_figure_id = 1
    def __init__(self):
        super(Grapher, self).__init__()
    def create_new_figure(self, figsize):
        """Create a matplotlib figure using the next free figure id."""
        f = plt.figure(self.plt_figure_id, figsize=figsize)
        # Increment on the class (not the instance) so the counter stays global.
        Grapher.plt_figure_id += 1
        return f
class BaseGraph(Grapher):
    """Base class owning one figure and the list of its sub-plots."""
    def __init__(self, figsize):
        self.figure = self.create_new_figure(figsize)
        self.sub_plot_list = []
    def show(self):
        """Attach a legend and display the figure."""
        self.figure.legend(loc = 7, prop = {'size':18})
        self.figure.show()
    def close(self):
        """Close this graph's figure.

        Fix: matplotlib Figure objects have no close() method, so the
        original self.figure.close() raised AttributeError; closing a
        figure must go through pyplot.
        """
        plt.close(self.figure)
import copy
""" NonCircleGraph has x and y axes"""
class NonCircleGraph(BaseGraph):
    """Graph with x and y axes (as opposed to circular charts)."""
    def __init__(self, figsize):
        super(NonCircleGraph, self).__init__(figsize)
        self.data_list_list = [] #Notice here. The structure is [[data for plot 1],[data for plot 2],[data for plot3]]
        self.x_sequence_list_list = [] #Notice here. The structure is [[x sequence for plot 1],[x sequence for plot 2],[x sequence for plot3]
    def create_plot(self):
        """Add a new (full-figure) sub-plot with empty data/x-sequence slots."""
        self.sub_plot_list.append(self.figure.add_subplot(111))
        self.data_list_list.append([])
        self.x_sequence_list_list.append([])
    def set_data(self, data_list, plot_id):
        # Deep-copied so later mutation of the caller's list has no effect.
        self.data_list_list[plot_id] = copy.deepcopy(data_list)
    def set_x_sequence(self, sequence_list, plot_id):
        self.x_sequence_list_list[plot_id] = copy.deepcopy(sequence_list)
    def get_plot_count(self):
        return len(self.sub_plot_list)
    def get_plot(self, plot_id):
        return self.sub_plot_list[plot_id]
    def set_x_label(self, x_label, plot_id):
        self.sub_plot_list[plot_id].set_xlabel(x_label)
    def set_y_label(self, y_label, plot_id):
        self.sub_plot_list[plot_id].set_ylabel(y_label)
"""There is a problem in that one figure can only contain one type of graph, but I am not going to fix that because I don't need to."""
class Histogram(NonCircleGraph):
    """Vertical bar chart built from the stored data/x-sequence lists."""
    def __init__(self, figsize=(20,8)):
        super(Histogram, self).__init__(figsize)
    def draw_plot(self, plot_id, bar_name):
        """Draw the bars for one sub-plot; x defaults to 0..len(data)-1."""
        x_sequence_list = self.x_sequence_list_list[plot_id]
        data_list = self.data_list_list[plot_id]
        if x_sequence_list == []:
            x_sequence_list = range(len(data_list)) #I know range is not list
        if len(x_sequence_list) != len(data_list):
            raise ValueError(f"The length of x sequence list should be the same as that of data list. x_sequence_list: {x_sequence_list}. data_list: {data_list}")
        self.sub_plot_list[plot_id].bar(x_sequence_list, data_list, label=bar_name)
    def draw_plots(self, bar_name):
        """Draw every sub-plot with the same bar label."""
        for i in range(len(self.sub_plot_list)):
            self.draw_plot(i, bar_name)
# NOTE(review): class name misspells "Horizontal"; kept as-is because
# renaming would break existing callers. Body duplicates Histogram except
# for bar() -> barh().
class HistogramHoizonal(NonCircleGraph):
    """Horizontal bar chart built from the stored data/x-sequence lists."""
    def __init__(self, figsize=(18,25)):
        super(HistogramHoizonal, self).__init__(figsize)
    def draw_plot(self, plot_id, bar_name):
        """Draw the horizontal bars for one sub-plot; y defaults to 0..len-1."""
        x_sequence_list = self.x_sequence_list_list[plot_id]
        data_list = self.data_list_list[plot_id]
        if x_sequence_list == []:
            x_sequence_list = range(len(data_list)) #I know range is not list
        if len(x_sequence_list) != len(data_list):
            raise ValueError(f"The length of x sequence list should be the same as that of data list. x_sequence_list: {x_sequence_list}. data_list: {data_list}")
        self.sub_plot_list[plot_id].barh(x_sequence_list, data_list, label=bar_name)
    def draw_plots(self, bar_name):
        """Draw every sub-plot with the same bar label."""
        for i in range(len(self.sub_plot_list)):
            self.draw_plot(i, bar_name)
'''
h = HistogramHoizonal()
h.create_plot()
h.set_data([1,2,1], 0)
h.draw_plots("s")
h.show()
'''
|
'''
Created on Oct 21, 2010
'''
from google.appengine.ext import db
from google.appengine.api import users
class Greeting(db.Model):
    """A guestbook greeting stored in the App Engine datastore."""
    author = db.UserProperty()
    content = db.StringProperty(multiline=True)
    date = db.DateTimeProperty(auto_now_add=True)
    @staticmethod
    def GetGreeting():
        """Return an HTML snippet greeting the current user with a sign-out link.

        NOTE(review): users.get_current_user() returns None for anonymous
        visitors; this would then raise AttributeError — confirm callers
        only invoke this for signed-in users.
        """
        current_user = users.get_current_user()
        greeting = ("%s (<a href=\"%s\">sign out</a>)" %(current_user.nickname(), users.create_logout_url("/")))
        return greeting
# Beginner conditional exercises (chapter 5 style).
# Points by alien color; 'red' falls through to the else branch.
color='red'
if color=='green':
    print("You have got 5 points\n")
elif color=='yellow':
    print("You have got 10 points\n")
else:
    print("You have got 15 points\n")
#5-7
favorite_fruits=['banana','apple','orange']
if 'banana' in favorite_fruits:
    print("You really like bananas!\n")
#5-7-1
# Empty list is falsy, so nothing is made here.
request_toppings=[]
if request_toppings:
    for request_topping in request_toppings:
        if request_topping == 'green paper':
            print("Sorry,We are out of green papers right now.")
        else:
            print("Adding "+request_topping +".")
    print("\n Finished making your pizza!")
#test
avaliable_toppings=['mushroom','olives','green papers','pepperoni','pineapple','extra cheese']
request_toppings=['mushroom','french fries','extra cheese']
for request_topping in request_toppings:
    if request_topping in avaliable_toppings:
        print("Adding "+request_topping+".")
    else:
        print("Sorry,We don't have "+request_topping+".")
print("\nFinished making your pizza!\n")
#5-8
# Greet each user, with a special message for the admin.
users=['admin','root','mike','loser','lily']
for user in users:
    if user=='admin':
        print("Hello "+user+" would you like to see a status report?\n")
    else:
        print("Hello "+user+" thank you for your logging in again!\n")
#5-9
# Greet users if any exist, otherwise report that users are needed.
users=[]
if users:
    # Fix: the original iterated over `users` while calling users.pop()
    # inside the loop, which skips elements; iterate over a copy so the
    # mutation cannot disturb the iteration.
    for user in users[:]:
        if user=='admin':
            users.pop()
            print("Hello "+user+" would you like to see a status report?\n")
        else:
            users.pop()
            print("Hello "+user+" thank you for your logging in again!\n")
else:
    print("We need to find some users\n")
#5-10
# Reject new usernames that collide with existing ones, ignoring case.
current_users=['Mike','John','Lucy','Lily','Jenny']
new_users=['Mike','Lucy','kite','lucy','phton']
for user in new_users:
    if user in current_users or user.upper() in current_users or user.lower() in current_users or user.title() in current_users:
        print("The user "+user+" has existed!You have to input another user")
    else:
        print("The user "+user+" has not used!")
#5-11
# Print ordinal suffixes for 1-9.
print("\n")
numbers=[val for val in range(1,10)]
for number in numbers:
    if number == 1:
        print("1st")
    elif number == 2:
        print("2nd")
    elif number == 3:
        print("3rd")
    else:
        print(str(number)+"th")
print("\n")
|
# Exercise 2.17, author Anders P. Åsbø.
import numpy as np
# solution to a):
# using the same code as in 2.9 to generate the t and y values.
n = 10  # number of time steps.
i = 0  # initial index.
v0 = 4.0  # ball's initial speed [m/s].
g = 1.62  # the Moon's gravitational acceleration [m/s**2].
ts = 2.0*v0/g  # final time value [s].
dt = ts/n  # time-step size [s], chosen so that t == ts when i == n.
T = np.empty(n+1)  # empty array for time coordinates.
Y = np.empty(n+1)  # empty array for y-coordinates.
# loop over the indices:
while i < n + 1:
    T[i] = i*dt  # time since start, stored in the array.
    Y[i] = v0*T[i] - (1.0/2.0)*g*(T[i]**2)  # height, stored in the array.
    i += 1  # increment the index by 1.
ty1 = np.empty((2, n+1))  # creating nested array 1.
ty1[0, :] = T
ty1[1, :] = Y
print("a)" '\n' "t: |y:")  # adding headers.
for i in range(len(ty1[0])):  # printing the table from a nested list with columns.
    print(f"{ty1[0, i]:.2f}|{ty1[1, i]:.2f}")
print("\n---------\n")  # adding a break between the two running examples.
# solution to b):
ty2 = np.empty((n+1, 2))  # creating nested list 2.
ty2[:, 0] = T
ty2[:, 1] = Y
print("b)" '\n' "t: |y:")  # adding headers.
for i in range(len(ty2)):  # printing the table from a nested list with rows.
    print(f"{ty2[i, 0]:.2f}|{ty2[i, 1]:.2f}")
# running examples:
"""
$ python3 ball_table3.py
a)
t: |y:
0.00|0.00
0.49|1.78
0.99|3.16
1.48|4.15
1.98|4.74
2.47|4.94
2.96|4.74
3.46|4.15
3.95|3.16
4.44|1.78
4.94|0.00
---------
b)
t: |y:
0.00|0.00
0.49|1.78
0.99|3.16
1.48|4.15
1.98|4.74
2.47|4.94
2.96|4.74
3.46|4.15
3.95|3.16
4.44|1.78
4.94|0.00
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.