text stringlengths 8 6.05M |
|---|
class SimpleArithmetic:
    """Print the result of basic arithmetic operations on two numbers.

    The methods are static: the original definitions omitted ``self``, so
    calling them on an instance raised TypeError.  ``@staticmethod`` keeps
    class-level calls working and makes instance calls work as well.
    """

    @staticmethod
    def simple_add(x, y):
        print(x + y)

    @staticmethod
    def simple_subtract(x, y):
        print(x - y)

    @staticmethod
    def simple_multiply(x, y):
        print(x * y)

    @staticmethod
    def simple_divide(x, y):
        # true division: yields a float even for integer inputs
        print(x / y)

    @staticmethod
    def floor_divide(x, y):
        # the double-slash operator performs division and then rounds the
        # result down to the next-lowest integer; this provides interesting
        # results when combined with negative operands (e.g. -7 // 2 == -4)
        print(x // y)

    @staticmethod
    def simple_modulus(x, y):
        print(x % y)

    @staticmethod
    def simple_power(x, y):
        print(x ** y)
|
# encoding: utf-8
# @author: newdream_daliu QQ:279129436
# @file: aaa.py.py
# @time: 2021-05-05 18:13
# @desc: smoke-test script that prints a fixed sequence of tags
for tag in ('test01', 'test02', 'test04', 'test05', 'test06'):
    print(tag)
#!/usr/bin/python
import torch


def prepare_data(train_path, max_len=20):
    """Read a whitespace-tokenised corpus and one-hot encode its first sentence.

    Builds a vocabulary over every token in *train_path* (with reserved
    '<UNK>' -> 0 and '<pad>' -> 1 entries), then returns:
      * a float tensor of shape (len(first_sentence), vocab_size) holding the
        one-hot encoding of the first sentence, and
      * the vocabulary size.

    NOTE(review): ``max_len`` is accepted but never used — presumably meant
    for truncation/padding; kept for interface compatibility.
    """
    print('Loading data...')
    # read sents and get vocab; each new word gets the next free index
    sents = []
    vocab = {'<UNK>': 0, '<pad>': 1}
    with open(train_path) as fp:
        for line in fp:
            sent = line.strip().split()
            sents.append(sent)
            for word in sent:
                if word not in vocab:
                    vocab[word] = len(vocab)
    vocab_len = len(vocab)
    # transform each sentence to a (tokens x vocab) one-hot matrix
    one_hots = []
    for sent in sents:
        sent_one_hot = torch.zeros((len(sent), vocab_len), dtype=torch.long)
        # unknown tokens fall back to the '<UNK>' column (index 0);
        # inner index renamed so it no longer shadows any outer loop variable
        for tok_idx, tok in enumerate(sent):
            sent_one_hot[tok_idx][vocab.get(tok, 0)] = 1
        one_hots.append(sent_one_hot)
    # just do one for now; convert to float as the original did via
    # torch.Tensor(...), but without the redundant re-construction
    return one_hots[0].float(), vocab_len
|
import json
import shapely.wkb
import tile_gen.util as u
import tile_gen.vectiles.mvt as mvt
import tile_gen.vectiles.geojson as geojson
from tile_gen.geography import SphericalMercator
from ModestMaps.Core import Coordinate
from StringIO import StringIO
from math import pi
from psycopg2.extras import RealDictCursor
from psycopg2 import connect
from ModestMaps.Core import Point
def get_tolerance(simplify, zoom):
    """Resolve a simplification tolerance for *zoom*.

    *simplify* is either a plain number (returned as-is) or a dict mapping
    zoom thresholds to tolerances; in that case the entry with the largest
    key not exceeding *zoom* wins.
    """
    if not isinstance(simplify, dict):
        return simplify
    threshold = max(k for k in simplify.keys() if k <= zoom)
    return simplify[threshold]
def pad(bounds, padding):
    """Grow an (xmin, ymin, xmax, ymax) box outward by *padding* on every side."""
    xmin, ymin, xmax, ymax = bounds
    return (xmin - padding, ymin - padding, xmax + padding, ymax + padding)
def st_bbox(bounds, srid):
    """Render a PostGIS ST_MakeEnvelope(...) expression for *bounds* in *srid*."""
    xmin, ymin, xmax, ymax = bounds
    return 'ST_MakeEnvelope(%s, %s, %s, %s, %s)' % (xmin, ymin, xmax, ymax, srid)
def st_simplify(geom, tolerance, bounds, srid=3857):
    """Clip *geom* to a padded bbox, then topology-preserving-simplify it.

    The clip box is padded by 10% of the bbox height so simplification near
    tile edges stays stable across neighbouring tiles.
    """
    margin = (bounds[3] - bounds[1]) * 0.1
    clipped = 'ST_Intersection(%s, %s)' % (geom, st_bbox(pad(bounds, margin), srid))
    return 'ST_MakeValid(ST_SimplifyPreserveTopology(%s, %.12f))' % (clipped, tolerance)
def st_scale(geom, bounds, scale):
    """Translate *geom* to the bbox origin and scale it into [0, scale] tile units."""
    sx = scale / (bounds[2] - bounds[0])
    sy = scale / (bounds[3] - bounds[1])
    return ('ST_TransScale(%s, %.12f, %.12f, %.12f, %.12f)'
            % (geom, -bounds[0], -bounds[1], sx, sy))
def build_bbox_query(subquery, bounds, geom='q.__geometry__', srid=3857):
    """Wrap *subquery* so it returns WKB geometry, filtered to *bounds*.

    If the subquery contains the '!bbox!' token the bbox SQL is substituted
    there; otherwise an ST_Intersects WHERE clause is appended.
    """
    query = '''SELECT *, ST_AsBinary(%(geom)s) AS __geometry__
               FROM (%(query)s) AS q''' % {'geom': geom,
                                           'query': subquery}
    bbox = st_bbox(bounds, srid)
    if '!bbox!' in query:
        return query.replace('!bbox!', bbox)
    return query + ' WHERE ST_Intersects(q.__geometry__, %(bbox)s)' % {'bbox': bbox}
def build_query(query, bounds, srid=3857, tolerance=0, is_geo=False, is_clipped=True, scale=4096):
    """Compose the geometry-processing pipeline around a layer query.

    Optionally simplifies (tolerance > 0), clips to the tile bbox,
    reprojects to WGS84 (GeoJSON output) and rescales into tile
    coordinates, then wraps everything with the bbox filter query.
    """
    geom = 'q.__geometry__'
    if tolerance > 0:
        geom = st_simplify(geom, tolerance, bounds, srid)
    if is_clipped:
        geom = 'ST_Intersection(%s, %s)' % (geom, st_bbox(bounds, srid))
    if is_geo:
        geom = 'ST_Transform(%s, 4326)' % geom
    if scale:
        geom = st_scale(geom, bounds, scale)
    return build_bbox_query(query, bounds, geom, srid)
def get_query(layer, coord, bounds, format):
    """Pick the layer's SQL for this zoom level and build the per-format query.

    Returns None when the layer has no query for the zoom level; raises
    KeyError for formats other than 'JSON' / 'MVT' (as before).
    """
    if layer.query_fn:
        query = layer.query_fn(coord.zoom)
    else:
        query = u.xs_get(layer.queries, coord.zoom, layer.queries[-1])
    if not query:
        return None
    srid = layer.srid
    tolerance = get_tolerance(layer.simplify, coord.zoom)
    clip = layer.clip
    # GeoJSON output is reprojected (is_geo=True); MVT stays in tile srid
    per_format = {'JSON': build_query(query, bounds, srid, tolerance, True, clip),
                  'MVT': build_query(query, bounds, srid, tolerance, False, clip)}
    return per_format[format]
def encode(out, name, features, coord, bounds, format):
    """Serialise one layer's features to *out* as MVT or GeoJSON."""
    if format not in ('MVT', 'JSON'):
        raise ValueError(format + ' is not supported')
    if format == 'MVT':
        mvt.encode(out, name, features)
    else:
        geojson.encode(out, features, coord.zoom)
def merge(out, feature_layers, coord, format):
    """Merge multiple feature layers into one tile written to *out*."""
    if format not in ('MVT', 'JSON'):
        raise ValueError(format + ' is not supported')
    if format == 'MVT':
        mvt.merge(out, feature_layers)
    else:
        geojson.merge(out, feature_layers, coord.zoom)
class Provider:
    """Thin PostGIS-backed tile data provider.

    Opens a read-only, autocommitting connection and exposes helpers to run
    bbox-filtered queries and to render tiles as MVT or GeoJSON.
    """
    def __init__(self, dbinfo):
        # read-only + autocommit: this provider only ever queries
        conn = connect(**dbinfo)
        conn.set_session(readonly=True, autocommit=True)
        # RealDictCursor makes every row a column-name -> value dict
        self.db = conn.cursor(cursor_factory=RealDictCursor)
    def query_bounds(self, query, bounds, srid=3857):
        """Run *query* restricted to *bounds* and return all rows."""
        query = build_bbox_query(query, bounds, 'q.__geometry__', srid)
        self.db.execute(query)
        return self.db.fetchall()
    def query_zxy(self, query, z, x, y, srid=3857):
        """Like query_bounds, with bounds derived from tile coordinates z/x/y."""
        return self.query_bounds(query, u.bounds(z, x, y, srid), srid)
    def pr_query(self, query, z, x, y, srid=3857):
        """Print (without executing) the SQL query_zxy would run — debug aid."""
        print(build_bbox_query(query, u.bounds(z, x, y, srid), 'q.__geometry__', srid))
    def explain_analyze_query(self, query, z, x, y, srid=3857):
        """Run EXPLAIN ANALYZE on the tile query and return the plan rows."""
        query = build_bbox_query(query, u.bounds(z, x, y, srid), 'q.__geometry__', srid)
        query = 'EXPLAIN ANALYZE ' + query
        self.db.execute(query)
        return self.db.fetchall()
    def query(self, query, geometry_types, transform_fn, sort_fn):
        """Execute *query* and post-process rows into (wkb, props, id) features.

        Rows must expose __geometry__ (WKB) and __id__.  Features whose
        shape type is not in *geometry_types* (when given) are dropped;
        *transform_fn* may rewrite (shape, props, id); *sort_fn* may
        reorder the final feature list.
        """
        features = []
        self.db.execute(query)
        for row in self.db.fetchall():
            assert '__geometry__' in row, 'Missing __geometry__ in feature result'
            assert '__id__' in row, 'Missing __id__ in feature result'
            wkb = bytes(row.pop('__geometry__'))
            id = row.pop('__id__')
            shape = shapely.wkb.loads(wkb)
            if geometry_types is not None:
                if shape.type not in geometry_types:
                    continue
            # remaining columns become feature properties; SQL NULLs dropped
            props = dict((k, v) for k, v in row.items() if v is not None)
            if transform_fn:
                shape, props, id = transform_fn(shape, props, id)
                # re-serialise: the transform may have altered the geometry
                wkb = shapely.wkb.dumps(shape)
            features.append((wkb, props, id))
        if sort_fn:
            features = sort_fn(features)
        return features
    def get_features(self, layer, coord, bounds, format):
        """Fetch a layer's features for a tile; [] when the layer has no query."""
        query = get_query(layer, coord, bounds, format)
        geometry_types = layer.geometry_types
        transform_fn = layer.transform_fn
        sort_fn = layer.sort_fn
        return ([] if not query
                else self.query(query, geometry_types, transform_fn, sort_fn))
    def get_feature_layer(self, layer, coord, format):
        """Bundle a layer's features with its name, for multi-layer tiles."""
        bounds = u._bounds(coord, layer.srid)
        features = self.get_features(layer, coord, bounds, format)
        return {'name': layer.name, 'features': features}
    def render_tile(self, lols, coord, format):
        """Render one tile for a single layer or a list of layers ('lols').

        NOTE(review): Python 2 era code — relies on the py2 StringIO module
        and on map(); under Python 3 `map` yields an iterator, which `merge`
        must be able to consume.
        """
        buff = StringIO()
        if type(lols) is list:
            get_feature_layer = lambda l : self.get_feature_layer(l, coord, format)
            feature_layers = map(get_feature_layer, lols)
            merge(buff, feature_layers, coord, format)
        else:
            bounds = u._bounds(coord, lols.srid)
            features = self.get_features(lols, coord, bounds, format)
            encode(buff, lols.name, features, coord, bounds, format)
        return buff.getvalue()
|
import csv
import pickle
import string
from nltk.corpus import wordnet as wn
#this script is intended to be run from the python command line
def save_senti_data(senti_path='/home/sysadmin/gmrepo/GMCustSent/rankcars/scripts/SentiWordNet_3.0.0_20130122.txt',
                    out_path='/home/sysadmin/gmrepo/GMCustSent/rankcars/scripts/senti_dict.pickle'):
    """Parse a SentiWordNet TSV dump and pickle a word-polarity lookup table.

    Each SentiWordNet row carries a part of speech (col 0), positive and
    negative polarity scores (cols 2 and 3) and a space-separated list of
    'word#sense' terms (col 4).  The resulting dict maps
    'word.part_of_speech' -> list of (pos_score, neg_score) tuples, one per
    sense, and is pickled to *out_path* so it can be reused later.

    Fixes vs. the original: the Python-2-only ``string.split``/``string.find``
    module functions are replaced with str methods; files are closed via
    ``with``; the hard-coded paths became parameters whose defaults preserve
    the original behaviour.
    """
    senti_dict = {}
    with open(senti_path, 'r') as senti_file:
        for row in csv.reader(senti_file, delimiter='\t'):
            # each row can contain multiple 'word#sense' terms
            for word in row[4].split(' '):
                # strip the '#<sense-number>' suffix, keeping only the word
                cut = word.find('#')
                if cut > 0:
                    word = word[:cut]
                # key for the dictionary is word.part_of_speech
                key = word + '.' + row[0]
                senti_dict.setdefault(key, []).append((float(row[2]), float(row[3])))
    # protocol 2 keeps the pickle readable from Python 2 consumers
    with open(out_path, 'wb') as save_senti_dict:
        pickle.dump(senti_dict, save_senti_dict, protocol=2)
#debugging print statement
#print(senti_dict)
#execute script
save_senti_data()
|
# import blender gamengine modules
from bge import logic
from .settings import *
from . import bgui
import logging
allowDuplicate = True
class Logger:
    """Message logger singleton.

    Keeps a rolling in-memory list of ``entry`` objects and mirrors the
    three most recent messages onto stacked bgui labels with fading alpha.
    """
    def __init__(self, widget):
        # full history of entry objects, newest last
        self.bigList = []
        self.panel = widget
        # init secondary UI elements: three stacked message lines
        self.logger1 = bgui.Label(self.panel, 'logger1', sub_theme='whiteLabelSmall', options=bgui.BGUI_CENTERX|bgui.BGUI_NO_FOCUS)
        self.logger2 = bgui.Label(self.panel, 'logger2', sub_theme='whiteLabelSmall', options=bgui.BGUI_CENTERX|bgui.BGUI_NO_FOCUS)
        self.logger3 = bgui.Label(self.panel, 'logger3', sub_theme='whiteLabelSmall', options=bgui.BGUI_CENTERX|bgui.BGUI_NO_FOCUS)
        # useLogger comes from `from .settings import *`
        self.panel.visible = useLogger
    def new(self, msg, type = "MESSAGE", repeat = True): # type can be "ERROR", "WARNING", or "MESSAGE"
        """Create a new log entry and add it to memory.

        With repeat=False, a message identical to the latest entry is
        silently dropped.
        """
        if self.bigList: # make sure bigList is not empty
            if (not repeat) and (msg == self.bigList[-1].msg):
                pass
            else:
                self._addEntry(msg, type) # add to logger if repeat mode is set to false
        else:
            self._addEntry(msg, type)
        # trim list when it gets too long: cap at 200, keep newest 100
        if len(self.bigList) > 200:
            self.bigList = self.bigList[-100:]
    def _addEntry(self, msg, type):
        # EAFP: IndexError means bigList is still empty
        try:
            # append unless it duplicates the last message and duplicates
            # are globally disabled via the module-level allowDuplicate flag
            if self.bigList[-1].msg != msg or allowDuplicate:
                self.bigList.append(entry(msg, type))
                self._updateDisplay()
        except IndexError:
            self.bigList.append(entry(msg, type))
            self._updateDisplay()
    def _getEntry(self, count = 0):
        """returns a particular entry, 0th is last entry, -1 returns the second last entry, ad infinitum"""
        return (self.bigList[count-1].msg, self.bigList[count-1].color)
    def _updateDisplay(self):
        """refreshes the bgui elements"""
        animationTime = 400
        x = 0
        # IndexError (fewer than 3 entries) leaves the remaining lines untouched
        try:
            self.logger1.position = [x,10]
            text, color = self._getEntry()
            # NOTE: this mutates the entry's colour list in place
            color[3] = 0.9  # newest line: most opaque
            self.logger1.text = text
            self.logger1.color = color
            self.logger2.position = [x,30]
            text, color = self._getEntry(-1)
            color[3] = 0.6
            self.logger2.text = text
            self.logger2.color = color
            self.logger3.position = [x,50]
            text, color = self._getEntry(-2)
            color[3] = 0.3  # oldest shown line: most transparent
            self.logger3.text = text
            self.logger3.color = color
            # slide each label up to its resting position; older lines move slower
            self.logger1.move([x, 25], animationTime/4.0)
            self.logger2.move([x, 45], animationTime/2.0)
            self.logger3.move([x, 65], animationTime)
        except IndexError:
            pass
class entry:
    """A single log entry: message text, type and a display colour (RGBA).

    Creating an entry also forwards the message to the standard ``logging``
    module at a severity matching *msgType*.
    """
    def __init__(self, msg, msgType):
        # store as text so non-string payloads (numbers, objects) are safe
        self.msg = str(msg)
        self.type = msgType
        # BUG FIX: the original logged '"..." + msg', which raised TypeError
        # for non-string messages even though self.msg = str(msg) shows they
        # are expected; log self.msg via lazy %-style args instead.
        if msgType == "ERROR":
            self.color = [0.5,0,0,1]
            logging.error("Logger Message: \t%s", self.msg)
        elif msgType == "WARNING":
            self.color = [0.7,0.4,0,1]
            logging.warning("Logger Message: \t%s", self.msg)
        else:
            self.color = [0,0,0,1]
            logging.debug("Logger Message: \t%s", self.msg)
def init(widget):
    """Create and return the module's Logger singleton bound to *widget*."""
    logger = Logger(widget)
    return logger
|
import robots
AGENT = "test_robotparser"
parser = robots.RobotsParser.from_file("robots.txt")
if parser.errors:
    print("ERRORS:")
    print(parser.errors)
# BUG FIX: this block previously re-checked and re-printed parser.errors;
# the "WARNINGS:" label makes clear it was meant to report parser.warnings.
if parser.warnings:
    print("WARNINGS:")
    print(parser.warnings)
# Spot-check the expected allow/deny decisions for this agent.
assert parser.can_fetch(AGENT, "/tmp")
assert not parser.can_fetch(AGENT, "/tmp/")
assert not parser.can_fetch(AGENT, "/tmp/a.html")
assert not parser.can_fetch(AGENT, "/a%3cd.html")
assert not parser.can_fetch(AGENT, "/a%3Cd.html")
assert not parser.can_fetch(AGENT, "/a/b.html")
assert not parser.can_fetch(AGENT, "/%7Ejoe/index.html")
|
#034: Error Correction in Reads
#http://rosalind.info/problems/corr/
#Given: A collection of up to 1000 reads of equal length (at most 50 bp) in FASTA format. Some of these reads were generated with a single-nucleotide error. For each read s in the dataset, one of the following applies:
titles = ['>Rosalind_52', '>Rosalind_44', '>Rosalind_68', '>Rosalind_28', '>Rosalind_95', '>Rosalind_66', '>Rosalind_33', '>Rosalind_21', '>Rosalind_18']
sequences = ['TCATC', 'TTCAT', 'TCATC', 'TGAAA', 'GAGGA', 'TTTCA', 'ATCAA', 'TTGAT', 'TTTCC']
#If parsing from file:
import bio
#f = open("rosalind_corr.txt", 'r')
#contents = f.read()
#f.close()
#titles, sequences = bio.fastaParse(contents)
#s was correctly sequenced and appears in the dataset at least twice (possibly as a reverse complement);
#s is incorrect, it appears in the dataset exactly once, and its Hamming distance is 1 with respect to exactly one correct read in the dataset (or its reverse complement).
#Return: A list of all corrections in the form "[old read]->[new read]". (Each correction must be a single symbol substitution, and you may return the corrections in any order.)
def getCorrections(sequences):
    """Return (misread, correction) pairs for single-error sequencing reads.

    A read counts as correct when it (or its reverse complement) occurs at
    least twice in *sequences*; every remaining read is matched to the first
    correct read found at Hamming distance exactly 1.
    """
    def occurrences(target, reads):
        # count reads equal to target directly or via reverse complement
        total = 0
        for r in reads:
            if r == target or bio.reverseComplement(r) == target:
                total += 1
        return total

    # collect correct reads together with their reverse complements
    correct = set()
    for read in sequences:
        if occurrences(read, sequences) >= 2:
            correct.add(read)
            correct.add(bio.reverseComplement(read))
    correct_sequences = list(correct)
    # everything not recognised as correct must be a misread
    misread = [r for r in sequences if r not in correct_sequences]
    corrections = []
    for bad in misread:
        for good in correct_sequences:
            if bio.hammingDistance(bad, good) == 1:
                corrections.append((bad, good))
                break
    return corrections
corrections = getCorrections(sequences)
# Parenthesised print works under both Python 2 (as an expression) and
# Python 3 (as a function call); the original bare `print c[0] + ...`
# statement was Python 2 only.
for old, new in corrections:
    print(old + '->' + new)
#If writing to file:
#w = open('output.txt', 'w')
#for c in corrections:
#w.write(c[0] + '->' + c[1])
#w.write('\n')
#w.close()
|
from ffl import app, db, models, espn, nfl, shark
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
import csv
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
@manager.command
def delete_data():
    """Delete every row from all fantasy-football tables.

    The association table and projection tables are cleared before the
    records they reference — presumably to satisfy foreign-key constraints
    (TODO confirm against the schema).
    """
    # the playerPosition association table has no model query API; raw delete
    db.session.execute(models.playerPosition.delete())
    models.EspnProjections.query.delete()
    models.SharkProjections.query.delete()
    models.NflPlayer.query.delete()
    models.Position.query.delete()
    models.NflGame.query.delete()
    models.FieldTeam.query.delete()
    models.FieldPlayer.query.delete()
    db.session.commit()
    print("Deleted all data.")
@manager.command
def shark_proj(period=app.config['SHARK_SEGMENT'],
               scoring=app.config['SHARK_SCORING']):
    """Refresh shark projections; segment and scoring default from app config."""
    shark.update_projections(int(period), int(scoring))
@manager.command
def shark_check():
    """Run the shark module's sanity checks on loaded projection data."""
    shark.check_sanity()
@manager.command
def espn_proj(league_id=app.config['ESPN_LEAGUE_ID']):
    """Refresh ESPN projections for the given league (default from config)."""
    espn.update_projections(int(league_id))
@manager.command
def update_boxscores(year=None, week=None):
    """Reload NFL game boxscores at the narrowest requested scope.

    With no arguments every NflGame row is replaced; with *year* only, that
    season is replaced; with both *year* and *week*, a single week is
    replaced.  Existing rows in scope are deleted before reloading.
    """
    if year is None:
        models.NflGame.query.delete()
        db.session.commit()
        nfl.load_boxscores()
    elif week is None:
        models.NflGame.query.filter_by(season_value=int(year)).delete()
        db.session.commit()
        nfl.load_boxscores_per_year(int(year))
    else:
        models.NflGame.query.filter_by(
            season_value=int(year), week_order=int(week)).delete()
        db.session.commit()
        nfl.load_boxscores_per_year(int(year), int(week))
@manager.command
def update_players():
    """Rebuild FieldPlayer rows from the players CSV, merging duplicates.

    For each CSV row (espn_id, shark_id, name) any existing players that
    match only one of the two ids are deleted, with their projection rows
    parked on a temporary 'dummy' player so they can be re-attached to the
    new merged record.  The dummy is removed at the end.

    NOTE(review): when a CSV row matches no existing player, the dummy may
    still hold projections parked by a previous row, which would then be
    attached to the new player — confirm the CSV only lists merge cases.
    """
    # temporary holder for projections of players being merged
    dummy = models.FieldPlayer(espn_id=-1,
                               shark_id=-1,
                               name="dummy")
    db.session.add(dummy)
    players = models.FieldPlayer.query.all()
    with open(app.config['PLAYERS_FILE']) as f:
        r = csv.reader(f)
        next(r)  # skip the header row
        for row in r:
            # existing records matching the shark id / espn id, if any
            sh = next((p for p in players if p.shark_id == int(row[1])),
                      None)
            es = next((p for p in players if p.espn_id == int(row[0])),
                      None)
            if sh:
                if sh is es:
                    # same record matches both ids: already merged
                    continue
                dummy.shark_projections = sh.shark_projections
                db.session.delete(sh)
            if es:
                dummy.espn_projections = es.espn_projections
                db.session.delete(es)
            db.session.commit()
            # recreate the player with both ids and the parked projections
            p = models.FieldPlayer(espn_id=int(row[0]),
                                   shark_id=int(row[1]),
                                   name=row[2],
                                   shark_projections=dummy.shark_projections,
                                   espn_projections=dummy.espn_projections)
            db.session.add(p)
            db.session.commit()
    db.session.delete(dummy)
    db.session.commit()
@manager.command
def load_data():
    """Wipe the database, then load reference data from the configured CSVs.

    Loads players (espn_id, shark_id, name), teams and positions; the
    schedule and projections importers are kept below as disabled code.
    """
    delete_data()
    # players CSV: espn_id, shark_id, name
    with open(app.config['PLAYERS_FILE']) as f:
        r = csv.reader(f)
        next(r)  # skip header
        for row in r:
            db.session.add(models.FieldPlayer(espn_id=int(row[0]),
                                              shark_id=int(row[1]),
                                              name=row[2]))
        db.session.commit()
    # teams CSV: name, espn_code, espn_id, shark_code
    with open(app.config['TEAMS_FILE']) as f:
        r = csv.reader(f)
        next(r)  # skip header
        for row in r:
            db.session.add(models.FieldTeam(espn_id=int(row[2]),
                                            espn_code=row[1],
                                            name=row[0],
                                            shark_code=row[3]))
        db.session.commit()
    # (disabled) schedule importer, kept for reference
    # BYE_STRING = "BYE"
    # with open(app.config['SCHEDULE_FILE']) as f:
    #     r = csv.reader(f)
    #     r.next()
    #     for row in r:
    #         home = next(x for x in teams if x.espn_code == row[0])
    #         home.bye_week = row.index(BYE_STRING)
    #         for i in xrange(1, len(row)):
    #             away = next((x for x in teams if x.espn_code == row[i]), None)
    #             if away != None: db.session.add(models.NflGame(home, away, i))
    #     db.session.commit()
    # positions CSV: espn_code, name, order
    with open(app.config['POSITIONS_FILE']) as f:
        r = csv.reader(f)
        next(r)  # skip header
        positions = [models.Position(espn_code=row[0],
                                     name=row[1], order=row[2]) for row in r]
        for p in positions:
            db.session.add(p)
        db.session.commit()
    # (disabled) projections importer, kept for reference
    # DEF_STRING = "D"
    # FA_STRING = "FA"
    # with open(app.config['PROJECTIONS_FILE']) as f:
    #     r = csv.reader(f)
    #     r.next()
    #     for row in r:
    #         if len(row) == 0:
    #             break
    #         if row[2] == FA_STRING:
    #             t = None
    #         else:
    #             t = next((x for x in teams if x.fs_name == row[2]), None)
    #         if row[3] == DEF_STRING:
    #             t.projected_defense_points = row[14]
    #         else:
    #             db.session.add(models.NflPlayer(row[1], t, [x for x in positions if x.code ==
    #                            row[3]], row[14]))
    #     db.session.commit()
    print("Loaded all reference data.")
if __name__ == '__main__':
manager.run()
|
"""
"""
class Solution(object):
    def pivotIndexSol(self, nums):
        """Return the smallest index whose left sum equals its right sum.

        Both sums include nums[i] itself, which is equivalent to the usual
        pivot-index definition (sum strictly left == sum strictly right).
        Returns -1 when no such index exists, including for empty input.
        """
        total = sum(nums)
        running = 0  # inclusive prefix sum nums[0..idx]
        for idx, value in enumerate(nums):
            running += value
            # inclusive suffix sum nums[idx..] is total - running + value
            if running == total - running + value:
                return idx
        return -1
if __name__ == '__main__':
    # each case pairs an input list with its expected pivot index
    cases = [
        ([1, 7, 3, 6, 5, 6], 3),
        ([-1, -1, -1, -1, -1, -1], -1),
        ([-1, -1, -1, -1, -1, 0], 2),
        ([-1, -1, -1, -1, 0, -1], 2),
        ([-1, -1, -1, 0, -1, -1], 2),
        ([-1, -1, -1, 0, 1, 1], 0),
    ]
    sol = Solution()
    for nums, expected in cases:
        assert sol.pivotIndexSol(nums) == expected
|
# https://leetcode.com/problems/string-compression/?envType=study-plan-v2&envId=leetcode-75
class Solution:
    def compress(self, chars: list) -> int:
        """Run-length-encode *chars* in place and return the encoded length.

        Each run of a repeated character is replaced by the character
        followed by its count (counts of 1 carry no digit; counts >= 10
        expand to one list slot per decimal digit), per LeetCode 443.
        Only the first ``return`` elements of *chars* are meaningful.

        Fixes vs. the original: the ``List[str]`` annotation referenced
        ``typing.List`` without importing it (NameError at definition
        time), and empty input raised IndexError — it now returns 0.
        """
        if not chars:
            return 0
        pieces = []
        run_char = chars[0]
        run_len = 1
        for ch in chars[1:]:
            if ch == run_char:
                run_len += 1
            else:
                # flush the finished run; single characters get no count
                pieces.append(run_char if run_len == 1 else f"{run_char}{run_len}")
                run_char = ch
                run_len = 1
        pieces.append(run_char if run_len == 1 else f"{run_char}{run_len}")
        encoded = "".join(pieces)
        # write the compressed form back into the input list, as required
        chars[:len(encoded)] = list(encoded)
        return len(encoded)
__author__ = 'joshgenao'
import cv2
import numpy as np
from matplotlib import pyplot as plt
def FlannMatcher(queryImage, image):
    """Match SIFT keypoints between two image files using FLANN and print
    how many 'good' matches survive Lowe's ratio test.

    NOTE(review): written for OpenCV 2.x / Python 2 — ``cv2.SIFT()`` and the
    bare ``print`` statement; modern OpenCV uses ``cv2.SIFT_create()``.
    """
    MIN_MATCH_COUNT = 10  # NOTE(review): defined but never used
    img1 = cv2.imread(queryImage,0) # queryImage, loaded grayscale
    img2 = cv2.imread(image,0) # trainImage, loaded grayscale
    # Initiate SIFT detector
    sift = cv2.SIFT()
    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    # FLANN parameters: KD-tree index over the descriptors
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    search_params = dict(checks=50)
    # Processes the matches using FlannBasedMatcher, 2 nearest neighbours each
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # Lowe's ratio test: keep matches clearly better than their runner-up
    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)
    print len(good)
    pass
if __name__ == "__main__":
FlannMatcher('images/Book.jpg', 'images/BookShift2.jpg') |
from django.db import models
from django.contrib.auth import get_user_model
from django.core.validators import MinValueValidator,MaxValueValidator
import datetime
# Create your models here.
User=get_user_model()
class Doctor(models.Model):
    """A doctor identified by national id (DNI), used as the primary key."""
    # identificador=models.IntegerField(default=2)  (unused, kept from original)
    # DNI must be an 8-digit number
    dni= models.IntegerField(default=10000000,validators=[MinValueValidator(10000000),MaxValueValidator(99999999)],null=False,blank=False,primary_key=True)
    contraseña=models.CharField(max_length=100)  # password; a validator could enforce security requirements
    # Nusuarios=models.IntegerField(default=0)  (unused, kept from original)
    def __str__(self):
        return str(self.dni)
    def num_usuarios(self):
        """Number of patients assigned to this doctor (reverse FK from Paciente)."""
        number=self.paciente_set.all()
        return (len(list(number)))
#####################################################################################################
class Paciente(models.Model):
    """A patient, optionally linked to a doctor (many patients per doctor)."""
    id = models.AutoField(primary_key=True)
    nombre=models.CharField(max_length=50,null=True)    # first name
    apellido=models.CharField(max_length=50,null=True)  # last name
    # 8-digit national id (not the primary key here, unlike Doctor)
    dni = models.IntegerField(default=10000000,validators=[MinValueValidator(10000000),
                              MaxValueValidator(99999999)],null=False,blank=False)
    email = models.EmailField(max_length=100,blank=True)
    contraseña=models.CharField(max_length=100,null=False,blank=False)  # password
    # optional 9-digit phone number
    telefono = models.IntegerField(default=100000000,
                                   validators=[MinValueValidator(100000000),MaxValueValidator(999999999)],
                                   null=True,blank=True)
    # many-to-one: each patient has one doctor, a doctor has N patients;
    # the patient is kept (doctor set to NULL) if the doctor is deleted
    doctor=models.ForeignKey(Doctor,on_delete=models.SET_NULL,null=True)
    def __str__(self):
        return str(self.nombre)+" "+str(self.apellido)+" "+str(self.dni)
#######################################################################################################
class Parmetros_directos_sensados(models.Model):
    """Directly-sensed vital signs for one patient at one point in time."""
    frecuencia_cardiaca=models.FloatField(blank=True)    # heart rate
    saturacion_de_oxigeno=models.FloatField(blank=True)  # oxygen saturation
    Fecha_de_la_medicion=models.DateField(("Date"), default=datetime.date.today)  # measurement date
    Hora_de_la_medicion=models.TimeField()  # measurement time
    # many-to-one: measurements belong to a patient and are deleted with it
    Paciente=models.ForeignKey(Paciente,on_delete=models.CASCADE
                               ,default=None,blank=True)
    def __str__(self):
        return str(self.frecuencia_cardiaca)
###########################################################################################################
class Parametros_Morisky(models.Model):
    """Morisky questionnaire answers for one patient on one date."""
    pregunta_1=models.BooleanField()  # questions 1-4 are yes/no answers
    pregunta_2=models.BooleanField()
    pregunta_3=models.BooleanField()
    pregunta_4=models.BooleanField()
    # question 5 is answered on a 1-5 scale
    pregunta_5= models.IntegerField(default=1,validators=[MinValueValidator(1),
                                    MaxValueValidator(5)])
    Fecha_de_la_medicion=models.DateField(("Date"), default=datetime.date.today)  # measurement date
    # many-to-one: answers belong to a patient and are deleted with it
    Paciente=models.ForeignKey(Paciente,on_delete=models.CASCADE
                               ,default=None,blank=True)
    def __str__(self):
        return str(self.pregunta_1)
###############################################################################################################
class Parametros_Borg(models.Model):
    """Borg scale score (1-10) for one patient at one point in time."""
    puntaje=models.IntegerField(default=1,validators=[MinValueValidator(1), MaxValueValidator(10)])  # score
    Fecha_de_la_medicion=models.DateField(("Date"), default=datetime.date.today)  # measurement date
    Hora_de_la_medicion=models.TimeField()  # measurement time
    # many-to-one: scores belong to a patient and are deleted with it
    Paciente=models.ForeignKey(Paciente,on_delete=models.CASCADE
                               ,default=None,blank=True)
    def __str__(self):
        return str(self.puntaje)
"""
LeetCode - Easy
"""
class Solution:
    def intersect(self, nums1, nums2):
        """Return the multiset intersection of two integer lists.

        Each value appears as many times as it occurs in both inputs (the
        minimum of its two frequencies), ordered by first appearance in
        nums1.
        """
        def tally(values):
            # frequency table preserving first-seen key order
            counts = {}
            for v in values:
                counts[v] = counts.get(v, 0) + 1
            return counts

        counts1 = tally(nums1)
        counts2 = tally(nums2)
        result = []
        for value, seen in counts1.items():
            if value in counts2:
                result.extend([value] * min(seen, counts2[value]))
        return result
if __name__ == '__main__':
    # quick manual check; expected output: [4, 9, 9]
    nums1 = [4, 9, 5, 9, 9]
    nums2 = [9, 4, 9, 8, 4]
    print(Solution().intersect(nums1,nums2))
|
from django.conf.urls import url
from . import views
# URL routes for the app: landing page, auth, dashboard, quote add/remove and
# per-user pages.  NOTE(review): django.conf.urls.url was removed in Django 4;
# newer projects use django.urls.re_path.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^register$', views.register),
    url(r'^login$', views.login),
    url(r'^dashboard$', views.dashboard),
    url(r'^submit$', views.submit),
    url(r'^add/(?P<quoteid>\d+)$', views.add),        # quoteid captured as int-like string
    url(r'^remove/(?P<quoteid>\d+)$', views.remove),
    url(r'^info/(?P<userid>\d+)$', views.info),
    url(r'^user/(?P<userid>\d+)$', views.user),
    url(r'^logout$', views.logout)
]
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from tempfile import mkstemp
from os import close, remove
from os.path import basename, exists, relpath
from json import loads
from tornado.web import HTTPError
from qiita_core.qiita_settings import qiita_config, r_client
from qiita_core.testing import wait_for_prep_information_job
from qiita_core.util import qiita_test_checker
from qiita_db.user import User
from qiita_db.artifact import Artifact
from qiita_db.processing_job import ProcessingJob
from qiita_db.software import Parameters, Command
from qiita_pet.exceptions import QiitaHTTPError
from qiita_pet.test.tornado_test_base import TestHandlerBase
from qiita_pet.handlers.artifact_handlers.base_handlers import (
check_artifact_access, artifact_summary_get_request,
artifact_summary_post_request, artifact_patch_request,
artifact_post_req)
@qiita_test_checker()
class TestBaseHandlersUtils(TestCase):
    def setUp(self):
        """Start each test with no files scheduled for cleanup and full diffs."""
        self._files_to_remove = []
        self.maxDiff = None
    def tearDown(self):
        """Remove any files the test registered in ``_files_to_remove``."""
        for fp in self._files_to_remove:
            if exists(fp):
                remove(fp)
    def test_check_artifact_access(self):
        """check_artifact_access: owners and admins pass, demo users raise,
        shared users pass, and public artifacts are open to everyone."""
        # "Study" artifact
        a = Artifact(1)
        # The user has access
        u = User('test@foo.bar')
        check_artifact_access(u, a)
        # Admin has access to everything
        admin = User('admin@foo.bar')
        check_artifact_access(admin, a)
        # Demo user doesn't have access
        demo_u = User('demo@microbio.me')
        with self.assertRaises(HTTPError):
            check_artifact_access(demo_u, a)
        # "Analysis" artifact: same expectations while private
        a = Artifact(8)
        a.visibility = 'private'
        check_artifact_access(u, a)
        check_artifact_access(admin, a)
        with self.assertRaises(HTTPError):
            check_artifact_access(demo_u, a)
        check_artifact_access(User('shared@foo.bar'), a)
        # once public, even the demo user can access it
        a.visibility = 'public'
        check_artifact_access(demo_u, a)
def _assert_summary_equal(self, obs, exp):
"Utility function for testing the artifact summary get request"
obs_files = obs.pop('files')
exp_files = exp.pop('files')
self.assertCountEqual(obs_files, exp_files)
obs_jobs = obs.pop('processing_jobs')
exp_jobs = obs.pop('processing_jobs')
self.assertCountEqual(obs_jobs, exp_jobs)
self.assertEqual(obs, exp)
    def test_artifact_summary_get_request(self):
        """End-to-end checks of artifact_summary_get_request across states:
        no summary, summary job queued, summary present, restricted access,
        public/sandbox visibility, admin buttons and analysis artifacts."""
        user = User('test@foo.bar')
        # Artifact w/o summary
        obs = artifact_summary_get_request(user, 1)
        exp_files = [
            (1, '1_s_G1_L001_sequences.fastq.gz (raw forward seqs)',
             '2125826711', '58 Bytes'),
            (2, '1_s_G1_L001_sequences_barcodes.fastq.gz (raw barcodes)',
             '2125826711', '58 Bytes')]
        exp = {'name': 'Raw data 1',
               'artifact_id': 1,
               'artifact_type': 'FASTQ',
               'artifact_timestamp': '2012-10-01 09:10',
               'visibility': 'private',
               'editable': True,
               'buttons': ('<button onclick="if (confirm(\'Are you sure you '
                           'want to make public artifact id: 1?\')) { '
                           'set_artifact_visibility(\'public\', 1) }" '
                           'class="btn btn-primary btn-sm">Make public'
                           '</button> <button onclick="if (confirm(\'Are you '
                           'sure you want to revert to sandbox artifact id: '
                           '1?\')) { set_artifact_visibility(\'sandbox\', 1) '
                           '}" class="btn btn-primary btn-sm">Revert to '
                           'sandbox</button>'),
               'processing_info': {},
               'files': exp_files,
               'is_from_analysis': False,
               'summary': None,
               'job': None,
               'errored_summary_jobs': []}
        self.assertEqual(obs, exp)
        # Artifact with summary being generated: only 'job' should change
        job = ProcessingJob.create(
            User('test@foo.bar'),
            Parameters.load(Command(7), values_dict={'input_data': 1})
        )
        job._set_status('queued')
        obs = artifact_summary_get_request(user, 1)
        exp = {'name': 'Raw data 1',
               'artifact_id': 1,
               'artifact_type': 'FASTQ',
               'artifact_timestamp': '2012-10-01 09:10',
               'visibility': 'private',
               'editable': True,
               'buttons': ('<button onclick="if (confirm(\'Are you sure you '
                           'want to make public artifact id: 1?\')) { '
                           'set_artifact_visibility(\'public\', 1) }" '
                           'class="btn btn-primary btn-sm">Make public'
                           '</button> <button onclick="if (confirm(\'Are you '
                           'sure you want to revert to sandbox artifact id: '
                           '1?\')) { set_artifact_visibility(\'sandbox\', 1) '
                           '}" class="btn btn-primary btn-sm">Revert to '
                           'sandbox</button>'),
               'processing_info': {},
               'files': exp_files,
               'is_from_analysis': False,
               'summary': None,
               'job': [job.id, 'queued', None],
               'errored_summary_jobs': []}
        self.assertEqual(obs, exp)
        # Artifact with summary: attach a tiny HTML file as the summary
        fd, fp = mkstemp(suffix=".html")
        close(fd)
        with open(fp, 'w') as f:
            f.write('<b>HTML TEST - not important</b>\n')
        a = Artifact(1)
        a.set_html_summary(fp)
        self._files_to_remove.extend([fp, a.html_summary_fp[1]])
        exp_files.append(
            (a.html_summary_fp[0],
             '%s (html summary)' % basename(a.html_summary_fp[1]),
             '1642196267', '33 Bytes'))
        exp_summary_path = relpath(
            a.html_summary_fp[1], qiita_config.base_data_dir)
        obs = artifact_summary_get_request(user, 1)
        exp = {'name': 'Raw data 1',
               'artifact_id': 1,
               'artifact_type': 'FASTQ',
               'artifact_timestamp': '2012-10-01 09:10',
               'visibility': 'private',
               'editable': True,
               'buttons': ('<button onclick="if (confirm(\'Are you sure you '
                           'want to make public artifact id: 1?\')) { '
                           'set_artifact_visibility(\'public\', 1) }" '
                           'class="btn btn-primary btn-sm">Make public'
                           '</button> <button onclick="if (confirm(\'Are you '
                           'sure you want to revert to sandbox artifact id: '
                           '1?\')) { set_artifact_visibility(\'sandbox\', 1) '
                           '}" class="btn btn-primary btn-sm">Revert to '
                           'sandbox</button>'),
               'processing_info': {},
               'files': exp_files,
               'is_from_analysis': False,
               'summary': exp_summary_path,
               'job': None,
               'errored_summary_jobs': []}
        self.assertEqual(obs, exp)
        # No access
        demo_u = User('demo@microbio.me')
        with self.assertRaises(QiitaHTTPError):
            obs = artifact_summary_get_request(demo_u, 1)
        # A non-owner/share user can't see the files (empty 'files', no buttons)
        a.visibility = 'public'
        obs = artifact_summary_get_request(demo_u, 1)
        exp = {'name': 'Raw data 1',
               'artifact_id': 1,
               'artifact_type': 'FASTQ',
               'artifact_timestamp': '2012-10-01 09:10',
               'visibility': 'public',
               'editable': False,
               'buttons': '',
               'processing_info': {},
               'files': [],
               'is_from_analysis': False,
               'summary': exp_summary_path,
               'job': None,
               'errored_summary_jobs': []}
        self.assertEqual(obs, exp)
        # testing sandbox: owner keeps files but gets no visibility buttons
        a.visibility = 'sandbox'
        obs = artifact_summary_get_request(user, 1)
        exp = {'name': 'Raw data 1',
               'artifact_id': 1,
               'artifact_type': 'FASTQ',
               'artifact_timestamp': '2012-10-01 09:10',
               'visibility': 'sandbox',
               'editable': True,
               'buttons': '',
               'processing_info': {},
               'files': exp_files,
               'is_from_analysis': False,
               'summary': exp_summary_path,
               'job': None,
               'errored_summary_jobs': []}
        self.assertEqual(obs, exp)
        # returning to private
        a.visibility = 'private'
        # admin gets buttons (including EBI/VAMPS submission links)
        obs = artifact_summary_get_request(User('admin@foo.bar'), 2)
        exp_files = [
            (3, '1_seqs.fna (preprocessed fasta)', '', '0 Bytes'),
            (4, '1_seqs.qual (preprocessed fastq)', '', '0 Bytes'),
            (5, '1_seqs.demux (preprocessed demux)', '', '0 Bytes')]
        exp = {'name': 'Demultiplexed 1',
               'artifact_id': 2,
               'artifact_type': 'Demultiplexed',
               'artifact_timestamp': '2012-10-01 10:10',
               'visibility': 'private',
               'editable': True,
               'buttons': ('<button onclick="if (confirm(\'Are you sure you '
                           'want to make public artifact id: 2?\')) { '
                           'set_artifact_visibility(\'public\', 2) }" '
                           'class="btn btn-primary btn-sm">Make public'
                           '</button> <button onclick="if (confirm(\'Are you '
                           'sure you want to revert to sandbox artifact id: '
                           '2?\')) { set_artifact_visibility(\'sandbox\', 2) '
                           '}" class="btn btn-primary btn-sm">Revert to '
                           'sandbox</button> <a class="btn btn-primary '
                           'btn-sm" href="/ebi_submission/2"><span '
                           'class="glyphicon glyphicon-export"></span> '
                           'Submit to EBI</a> <a class="btn btn-primary '
                           'btn-sm" href="/vamps/2"><span class="glyphicon '
                           'glyphicon-export"></span> Submit to VAMPS</a>'),
               'processing_info': {
                   'command_active': True, 'software_deprecated': False,
                   'command': 'Split libraries FASTQ',
                   'processing_parameters': {
                       'max_barcode_errors': '1.5', 'sequence_max_n': '0',
                       'max_bad_run_length': '3', 'phred_offset': 'auto',
                       'rev_comp': 'False', 'phred_quality_threshold': '3',
                       'input_data': '1', 'rev_comp_barcode': 'False',
                       'rev_comp_mapping_barcodes': 'False',
                       'min_per_read_length_fraction': '0.75',
                       'barcode_type': 'golay_12'},
                   'software_version': '1.9.1', 'software': 'QIIME'},
               'files': exp_files,
               'is_from_analysis': False,
               'summary': None,
               'job': None,
               'errored_summary_jobs': []}
        self.assertEqual(obs, exp)
        # analysis artifact
        obs = artifact_summary_get_request(user, 8)
        exp = {'name': 'noname',
               'artifact_id': 8,
               'artifact_type': 'BIOM',
               # this value changes on build so copy from obs
               'artifact_timestamp': obs['artifact_timestamp'],
               'visibility': 'sandbox',
               'editable': True,
               'buttons': '',
               'processing_info': {},
               'files': [(22, 'biom_table.biom (biom)', '1756512010',
                          '1.1 MB')],
               'is_from_analysis': True,
               'summary': None,
               'job': None,
               'errored_summary_jobs': []}
        self.assertEqual(obs, exp)
def test_artifact_summary_post_request(self):
    """POST summary: rejects unauthorized users and reuses a pending job."""
    # A user without access to the artifact is rejected.
    with self.assertRaises(QiitaHTTPError):
        artifact_summary_post_request(User('demo@microbio.me'), 1)
    # When a job already exists for the artifact, the request returns
    # that job instead of creating a new one.
    existing_job = ProcessingJob.create(
        User('test@foo.bar'),
        Parameters.load(Command(7), values_dict={'input_data': 2})
    )
    existing_job._set_status('queued')
    observed = artifact_summary_post_request(User('test@foo.bar'), 2)
    self.assertEqual(observed, {'job': [existing_job.id, 'queued', None]})
def test_artifact_post_request(self):
    """POST artifact: rejects unauthorized users; delete attempt errors."""
    # A user without access to the artifact is rejected.
    with self.assertRaises(QiitaHTTPError):
        artifact_post_req(User('demo@microbio.me'), 1)
    observed = artifact_post_req(User('test@foo.bar'), 2)
    self.assertCountEqual(observed.keys(), ['job'])
    # Block until the background prep-information job finishes.
    wait_for_prep_information_job(1)
    # The delete function was actually called and failed as expected.
    delete_job = ProcessingJob(
        loads(r_client.get('prep_template_1'))['job_id'])
    self.assertEqual(delete_job.status, 'error')
    self.assertIn('Cannot delete artifact 2', delete_job.log.msg)
def test_artifact_patch_request(self):
    """PATCH artifact: renaming, rejected requests, and visibility rules."""
    artifact = Artifact(1)
    test_user = User('test@foo.bar')

    # Renaming through the patch endpoint works, then restore the fixture.
    self.assertEqual(artifact.name, 'Raw data 1')
    artifact_patch_request(test_user, 1, 'replace', '/name/',
                           req_value='NEW_NAME')
    self.assertEqual(artifact.name, 'NEW_NAME')
    artifact.name = 'Raw data 1'

    # Requests that must be rejected; none of them modify the artifact.
    bad_requests = [
        # no access
        (User('demo@microbio.me'), 'replace', '/name/',
         {'req_value': 'NEW_NAME'}),
        # incorrect path parameter
        (test_user, 'replace', '/name/wrong/', {'req_value': 'NEW_NAME'}),
        # missing value
        (test_user, 'replace', '/name/', {}),
        # wrong attribute
        (test_user, 'replace', '/wrong/', {'req_value': 'NEW_NAME'}),
        # wrong operation
        (test_user, 'add', '/name/', {'req_value': 'NEW_NAME'}),
    ]
    for user, operation, path, kwargs in bad_requests:
        with self.assertRaises(QiitaHTTPError):
            artifact_patch_request(user, 1, operation, path, **kwargs)

    # Visibility: a regular user may sandbox a private artifact...
    self.assertEqual(artifact.visibility, 'private')
    artifact_patch_request(test_user, 1, 'replace', '/visibility/',
                           req_value='sandbox')
    self.assertEqual(artifact.visibility, 'sandbox')
    # ...an admin may set it back to private...
    artifact_patch_request(User('admin@foo.bar'), 1, 'replace',
                           '/visibility/', req_value='private')
    self.assertEqual(artifact.visibility, 'private')
    # ...but a regular user may not make it private,
    with self.assertRaises(QiitaHTTPError):
        artifact_patch_request(test_user, 1, 'replace', '/visibility/',
                               req_value='private')
    # and unknown visibility values are rejected.
    with self.assertRaises(QiitaHTTPError):
        artifact_patch_request(test_user, 1, 'replace', '/visibility/',
                               req_value='wrong')
class TestBaseHandlers(TestHandlerBase):
    """HTTP-level tests for the artifact base handlers."""

    def setUp(self):
        super(TestBaseHandlers, self).setUp()
        # Files created during a test; removed in tearDown.
        self._files_to_remove = []

    def tearDown(self):
        super(TestBaseHandlers, self).tearDown()
        for filepath in self._files_to_remove:
            if exists(filepath):
                remove(filepath)

    def test_get_artifact_summary_ajax_handler(self):
        """GET of the AJAX artifact summary succeeds."""
        self.assertEqual(self.get('/artifact/1/summary/').code, 200)

    def test_post_artifact_ajax_handler(self):
        """POST to the artifact endpoint succeeds."""
        self.assertEqual(self.post('/artifact/2/', {}).code, 200)
        wait_for_prep_information_job(1)

    def test_patch_artifact_ajax_handler(self):
        """PATCH renames the artifact through the handler."""
        artifact = Artifact(1)
        self.assertEqual(artifact.name, 'Raw data 1')
        payload = {'op': 'replace', 'path': '/name/', 'value': 'NEW_NAME'}
        self.assertEqual(self.patch('/artifact/1/', data=payload).code, 200)
        self.assertEqual(artifact.name, 'NEW_NAME')
        # Restore the fixture name for other tests.
        artifact.name = 'Raw data 1'

    def test_get_artifact_summary_handler(self):
        """The stored HTML summary is served back verbatim."""
        artifact = Artifact(1)
        # Attach a small HTML summary to the artifact.
        fd, html_fp = mkstemp(suffix=".html")
        close(fd)
        with open(html_fp, 'w') as f:
            f.write('<b>HTML TEST - not important</b>\n')
        artifact = Artifact(1)
        artifact.set_html_summary(html_fp)
        self._files_to_remove.extend([html_fp, artifact.html_summary_fp[1]])
        summary = relpath(artifact.html_summary_fp[1],
                          qiita_config.base_data_dir)
        response = self.get('/artifact/html_summary/%s' % summary)
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body.decode('ascii'),
                         '<b>HTML TEST - not important</b>\n')
# Allow running this test module directly.
if __name__ == '__main__':
    main()
|
import struct
import os
import json
from .protocol import Message, PayloadItem, parse_messages_json_folder
from .protocol import ProtocolPayload, message
from .protocol import DefaultPluginMessagePayload, plugin_message
from .protocol import MessageID
def handle_undefined_message(*args):
    """No-op fallback invoked for messages with no registered handler."""
def handle_undefined_plugin_message(*args):
    """No-op fallback invoked for plugin messages with no registered handler."""
"""
PLUGIN MESSAGES
"""
class GenericPluginMessagePayload(DefaultPluginMessagePayload):
    """Catch-all plugin-message payload: the raw data bytes of the frame."""
    plugin_message_payload = Message([
        # 4096-4: presumably the maximum frame size minus a 4-byte
        # header -- TODO confirm against the plugin protocol spec.
        PayloadItem(name = 'data', dimension = 4096-4, datatype = "B"),
    ])
"""
MANUAL AND VARIABLE LENGTH MESSAGES
"""
@message(MessageID.RESPONSE)
class ResponsePayload(ProtocolPayload):
    """Variable-length RESPONSE message.

    The fixed part holds a response ID and length; the variable part
    appends the response text, sized as the total payload length minus
    the 4 bytes of the two fixed uint16 fields.
    """
    # NOTE: 'repsonseLength' is a long-standing typo kept for backward
    # compatibility -- renaming the field would break every consumer
    # reading that attribute.
    message_description = Message([
        PayloadItem(name = 'responseID', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'repsonseLength', dimension = 1, datatype = 'H'),
    ])

    def get_varsize_item_list(self, payload_length):
        """Return the item list for a payload of ``payload_length`` bytes."""
        return [
            PayloadItem(name = 'responseID', dimension = 1, datatype = 'H'),
            PayloadItem(name = 'repsonseLength', dimension = 1, datatype = 'H'),
            # Remaining bytes after the two 2-byte fixed fields.
            PayloadItem(name = 'responseText', dimension = payload_length-4, datatype = 's'),
        ]

    def get_varsize_arg_from_bytes(self, inBytes):
        """The variable-size argument is simply the total payload length."""
        return len(inBytes)
@message(0x19)
class SYSSTAT_Payload(ProtocolPayload):
    """Variable-length system-status message.

    ``statMode`` is a bit mask: each set bit appends the corresponding
    optional status field, in bit order (see get_varsize_item_list).
    """
    message_description = Message([
        PayloadItem(name = 'statMode', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'sysStat', dimension = 1, datatype = 'I'),
    ])

    @staticmethod
    def get_varsize_item_list(stat_mode):
        # Fixed part, always present.
        _item_list = [
            PayloadItem(name = 'statMode', dimension = 1, datatype = 'I'),
            PayloadItem(name = 'sysStat', dimension = 1, datatype = 'I')
        ]
        # Optional fields, one per stat_mode bit, appended in bit order.
        if(stat_mode & (1 << 0)):
            _item_list += [PayloadItem(name = 'imuStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 1)):
            _item_list += [PayloadItem(name = 'gnssStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 2)):
            _item_list += [PayloadItem(name = 'magStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 3)):
            _item_list += [PayloadItem(name = 'madcStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 4)):
            _item_list += [PayloadItem(name = 'ekfStat', dimension = 2, datatype = 'I')]
        if(stat_mode & (1 << 5)):
            _item_list += [PayloadItem(name = 'ekfGeneralStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 6)):
            _item_list += [PayloadItem(name = 'addStat', dimension = 4, datatype = 'I')]
        if(stat_mode & (1 << 7)):
            _item_list += [PayloadItem(name = 'serviceStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 8)):
            _item_list += [PayloadItem(name = 'remainingAlignTime', dimension = 1, datatype = 'f')]
        return _item_list

    @staticmethod
    def get_varsize_arg_from_bytes(inBytes):
        # The stat-mode mask is the first uint32 of the payload.
        stat_mode = struct.unpack('I', inBytes[:4])[0]
        return stat_mode
@message(0x57)
class MONITOR_Payload(ProtocolPayload):
    """Monitor/log message: one log-level byte followed by the log text."""
    message_description = Message([
        PayloadItem(name='log_level', dimension=1, datatype='B')
    ])

    def get_varsize_item_list(self, len_logmsg):
        # The level byte plus a variable-length message string.
        return [
            PayloadItem(name='log_level', dimension=1, datatype='B'),
            PayloadItem(name='logmsg', dimension=len_logmsg, datatype='s')
        ]

    def get_varsize_arg_from_bytes(self, inBytes):
        # Everything after the single log-level byte is message text.
        return len(inBytes) - 1
@message(0x91)
class CANGATEWAY_Payload(ProtocolPayload):
    """CAN gateway message: a 4-byte device header followed by N CAN frames.

    Each serialized frame is 24 bytes: 8 data + 1 length + 1 extended-id
    flag + 1 remote flag + 1 reserved + 4 message id + 8 timestamp.
    """
    message_description = Message([
        PayloadItem(name='device', dimension=1, datatype='B'),
        PayloadItem(name='reserved', dimension=3, datatype='B'),
    ])

    # Serialized size of one CAN frame in bytes (see docstring).
    _FRAME_SIZE = 24

    def get_varsize_item_list(self, num_frames):
        _can_frame = Message([
            PayloadItem(name='data', dimension=8, datatype='B'),
            PayloadItem(name='length', dimension=1, datatype='B'),
            PayloadItem(name='is_extended_mid', dimension=1, datatype='B'),
            PayloadItem(name='is_remote_frame', dimension=1, datatype='B'),
            PayloadItem(name='reserved', dimension=1, datatype='B'),
            PayloadItem(name='mid', dimension=1, datatype='I'),
            PayloadItem(name='timestamp', dimension=1, datatype='d')
        ])
        return [
            PayloadItem(name='device', dimension=1, datatype='B'),
            PayloadItem(name='reserved', dimension=3, datatype='B'),
            PayloadItem(name='can_frames', dimension=num_frames, datatype=_can_frame)
        ]

    def get_varsize_arg_from_bytes(self, inBytes):
        # Frame count: bytes after the 4-byte header, 24 bytes per frame.
        # Integer floor division replaces the previous float round-trip
        # ``int((len(inBytes) - 4) / 24)``; result is identical for the
        # non-negative lengths seen here.
        return (len(inBytes) - 4) // self._FRAME_SIZE
class IMU_Payload(ProtocolPayload):
    """Common IMU payload layout: 'acc' and 'omg' float triplets."""
    message_description = Message([
        PayloadItem(name = 'acc', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'omg', dimension = 3, datatype = 'f'),
    ])

# Raw, corrected and compensated IMU data share the same layout.
@message(0x00)
class IMURAW_Payload(IMU_Payload):
    pass

@message(0x01)
class IMUCORR_Payload(IMU_Payload):
    pass

@message(0x02)
class IMUCOMP_Payload(IMU_Payload):
    pass
@message(0x03)
class INSSOL_Payload(ProtocolPayload):
    """Full INS solution: inertial data, attitude, velocity and position."""
    message_description = Message([
        PayloadItem(name = 'acc', dimension = 3, datatype = 'f', unit = 'm/s²', description = 'Acceleration'),
        # Fixed copy-paste error: 'omg' is an angular rate (unit rad/s),
        # not a specific force -- cf. POSTPROC, which labels omg
        # 'Angular rate'.
        PayloadItem(name = 'omg', dimension = 3, datatype = 'f', unit = 'rad/s', description = 'Angular rate'),
        PayloadItem(name = 'rpy', dimension = 3, datatype = 'f', unit = 'rad', description = 'Angle'),
        PayloadItem(name = 'vel', dimension = 3, datatype = 'f', unit = 'm/s', description = 'Velocity'),
        PayloadItem(name = 'lon', dimension = 1, datatype = 'd', unit = 'rad', description = 'Longitude'),
        PayloadItem(name = 'lat', dimension = 1, datatype = 'd', unit = 'rad', description = 'Latitude'),
        PayloadItem(name = 'alt', dimension = 1, datatype = 'f', unit = 'm', description = 'Altitude'),
        PayloadItem(name = 'undulation', dimension = 1, datatype = 'h', unit = 'cm', description = 'Undulation'),
        PayloadItem(name = 'DatSel', dimension = 1, datatype = 'H'),
    ])
@message(0x0D)
class INSROTTEST_Payload(ProtocolPayload):
    """INS rotation-test message: NED acceleration triplet."""
    message_description = Message([
        PayloadItem(name = 'accNED', dimension = 3, datatype = 'd'),
    ])

@message(0x20)
class STATFPGA_Payload(ProtocolPayload):
    """FPGA / power / supervisor status message."""
    message_description = Message([
        PayloadItem(name = 'usParID', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'uReserved', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucAction', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'uiPowerStatLower', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiPowerStatUpper', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'usFpgaStatus', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'usSupervisorStatus', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'ucImuStatus', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucTempStatus', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'usRes', dimension = 1, datatype = 'H'),
    ])
@message(0x40)
class POSTPROC_Payload(ProtocolPayload):
    """Post-processing data: raw inertial data, attitude quaternion,
    position/velocity, status words and odometer data."""
    message_description = Message([
        PayloadItem(name = 'acc', dimension = 3, datatype = 'f', unit = 'm/s²', description = 'Specific force'),
        PayloadItem(name = 'omg', dimension = 3, datatype = 'f', unit = 'rad/s', description = 'Angular rate'),
        PayloadItem(name = 'delta_theta', dimension = 12, datatype = 'f'),
        PayloadItem(name = 'delta_v', dimension = 12, datatype = 'f'),
        PayloadItem(name = 'q_nb', dimension = 4, datatype = 'd'),
        PayloadItem(name = 'pos', dimension = 3, datatype = 'd'),
        PayloadItem(name = 'vel', dimension = 3, datatype = 'd', unit = 'm/s', description = 'NED Velocity'),
        PayloadItem(name = 'sysStat', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'ekfStat', dimension = 2, datatype = 'I'),
        PayloadItem(name = 'odoSpeed', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'odoTicks', dimension = 1, datatype = 'i'),
        PayloadItem(name = 'odoInterval', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'odoTrigEvent', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'odoNextEvent', dimension = 1, datatype = 'I'),
    ])
@message(0x47)
class INSSOLECEF_Payload(ProtocolPayload):
    """INS solution in ECEF coordinates."""
    message_description = Message([
        PayloadItem(name = 'acc', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'omg', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'pos_ecef', dimension = 3, datatype = 'd'),
        PayloadItem(name = 'vel_ecef', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'q_nb', dimension = 4, datatype = 'f'),
    ])

@message(0x48)
class EKFSTDDEVECEF_Payload(ProtocolPayload):
    """EKF standard deviations, ECEF variant."""
    message_description = Message([
        PayloadItem(name = 'Pos', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'Vel', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'Rpy', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'BiasAcc', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'BiasOmg', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'MaAcc', dimension = 9, datatype = 'f'),
        PayloadItem(name = 'MaOmg', dimension = 9, datatype = 'f'),
        PayloadItem(name = 'ScfOdo', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'MaOdo', dimension = 2, datatype = 'f'),
    ])

@message(0x49)
class LOADFACTOR_Payload(ProtocolPayload):
    """Load-factor triplet."""
    message_description = Message([
        PayloadItem(name = 'LoadFactor', dimension = 3, datatype = 'f'),
    ])

@message(0x50)
class OMGDOT_Payload(ProtocolPayload):
    """Angular acceleration triplet."""
    message_description = Message([
        PayloadItem(name = 'AngAcc', dimension = 3, datatype = 'f'),
    ])

@message(0x56)
class IMU_FILTERED_Payload(ProtocolPayload):
    """Filtered IMU data."""
    message_description = Message([
        PayloadItem(name = 'Omg', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'Acc', dimension = 3, datatype = 'f'),
    ])
@message(0x63)
class PASSTHROUGH_Payload(ProtocolPayload):
    """Pass-through message: a source port plus an opaque byte blob.

    ``message_description`` declares a fixed 256-byte data field, but
    decoding rebuilds the description so that ``passthroughdata`` spans
    exactly the bytes following the 4-byte (port + reserved) header.
    """
    message_description = Message([
        PayloadItem(name = 'port', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'reserved', dimension = 3, datatype = 'B'),
        PayloadItem(name='passthroughdata', dimension=256, datatype='B')
    ])

    def from_bytes(self, inBytes):
        """Decode, sizing the data field to the actual payload length."""
        item_list = [
            PayloadItem(name = 'port', dimension = 1, datatype = 'B'),
            PayloadItem(name = 'reserved', dimension = 3, datatype = 'B'),
            PayloadItem(name='passthroughdata', dimension=len(inBytes[4:]), datatype='B'),
        ]
        self.message_description = Message(item_list)
        super().from_bytes(inBytes)

    def _get_payload(self, stat_mode):
        # NOTE(review): appears unused within this class and duplicates
        # SYSSTAT_Payload.get_varsize_item_list's optional fields; kept
        # unchanged in case external code calls it -- confirm and remove.
        item_list = []
        if(stat_mode & (1 << 0)):
            item_list += [PayloadItem(name = 'imuStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 1)):
            item_list += [PayloadItem(name = 'gnssStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 2)):
            item_list += [PayloadItem(name = 'magStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 3)):
            item_list += [PayloadItem(name = 'madcStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 4)):
            item_list += [PayloadItem(name = 'ekfStat', dimension = 2, datatype = 'I')]
        if(stat_mode & (1 << 5)):
            item_list += [PayloadItem(name = 'ekfGeneralStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 6)):
            item_list += [PayloadItem(name = 'addStat', dimension = 4, datatype = 'I')]
        if(stat_mode & (1 << 7)):
            item_list += [PayloadItem(name = 'serviceStat', dimension = 1, datatype = 'I')]
        if(stat_mode & (1 << 8)):
            item_list += [PayloadItem(name = 'remainingAlignTime', dimension = 1, datatype = 'f')]
        return item_list
@message(0x65)
class MAGDATA2_Payload(ProtocolPayload):
    """Raw magnetometer sample with BIT-error flag."""
    message_description = Message([
        PayloadItem(name='rawx', dimension=1, datatype='i'),
        PayloadItem(name='rawy', dimension=1, datatype='i'),
        PayloadItem(name='rawz', dimension=1, datatype='i'),
        PayloadItem(name='bit_error', dimension=1, datatype='B'),
        PayloadItem(name='reserved', dimension=3, datatype='B'),
    ])

@message(0x66)
class IPST_Payload(ProtocolPayload):
    """Inertial data plus system/FPGA status and three odometer channels."""
    message_description = Message([
        PayloadItem(name='omg', dimension=3, datatype='f'),
        PayloadItem(name='acc', dimension=3, datatype='f'),
        PayloadItem(name='system_status', dimension=1, datatype='I'),
        PayloadItem(name='fpga_status', dimension=1, datatype='I'),
        PayloadItem(name='odo_0_ticks', dimension=1, datatype='i'),
        PayloadItem(name='odo_0_event', dimension=1, datatype='I'),
        PayloadItem(name='odo_0_event_next', dimension=1, datatype='I'),
        PayloadItem(name='odo_1_ticks', dimension=1, datatype='i'),
        PayloadItem(name='odo_1_event', dimension=1, datatype='I'),
        PayloadItem(name='odo_1_event_next', dimension=1, datatype='I'),
        PayloadItem(name='odo_2_ticks', dimension=1, datatype='i'),
        PayloadItem(name='odo_2_event', dimension=1, datatype='I'),
        PayloadItem(name='odo_2_event_next', dimension=1, datatype='I'),
    ])

@message(0x80)
class ENCODERDAT_Payload(ProtocolPayload):
    """Encoder data for up to MAXENC encoder channels."""
    # Number of encoder channels carried by this message.
    MAXENC = 3
    message_description = Message([
        PayloadItem(name='encoder_pos', dimension=MAXENC, datatype='d'),
        PayloadItem(name='encoder_vel', dimension=MAXENC, datatype='d'),
        PayloadItem(name='encoder_status', dimension=MAXENC, datatype='I'),
        PayloadItem(name='nav_status', dimension=1, datatype='I'),
    ])
@message(0x1a)
class MAGHDG_Payload(ProtocolPayload):
    """Magnetic heading / course-over-ground / deviation."""
    message_description = Message([
        PayloadItem(name = 'MagHdg', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'MagCOG', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'Deviation', dimension = 1, datatype = 'f'),
    ])

@message(0x04)
class INSRPY_Payload(ProtocolPayload):
    """Attitude as roll/pitch/yaw."""
    message_description = Message([
        PayloadItem(name = 'rpy', dimension = 3, datatype = 'f'),
    ])

@message(0x05)
class INSDCM_Payload(ProtocolPayload):
    """Attitude as a 3x3 direction-cosine matrix (row-major, 9 floats)."""
    message_description = Message([
        PayloadItem(name = 'DCM', dimension = 9, datatype = 'f'),
    ])

@message(0x06)
class INSQUAT_Payload(ProtocolPayload):
    """Attitude as a quaternion."""
    message_description = Message([
        PayloadItem(name = 'quat', dimension = 4, datatype = 'f'),
    ])
@message(0x0A)
class INSPOSLLH_Payload(ProtocolPayload):
    """Position as longitude/latitude/altitude."""
    message_description = Message([
        PayloadItem(name = 'lon', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'lat', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'alt', dimension = 1, datatype = 'f'),
    ])

@message(0x0C)
class INSPOSUTM_Payload(ProtocolPayload):
    """Position in UTM coordinates."""
    message_description = Message([
        PayloadItem(name = 'zone', dimension = 1, datatype = 'i'),
        PayloadItem(name = 'easting', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'northing', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'height', dimension = 1, datatype = 'f'),
    ])

@message(0x0B)
class INSPOSECEF_Payload(ProtocolPayload):
    """Position in ECEF coordinates."""
    message_description = Message([
        PayloadItem(name = 'pos', dimension = 3, datatype = 'd'),
    ])
class INSVEL_Payload(ProtocolPayload):
    """Common velocity payload layout: one float triplet."""
    message_description = Message([
        PayloadItem(name = 'vel', dimension = 3, datatype = 'f'),
    ])

# The NED / ECEF / body / ENU velocity messages share the same layout;
# only the message ID differs.
@message(0x07)
class INSVELNED_Payload(INSVEL_Payload):
    pass

@message(0x08)
class INSVELECEF_Payload(INSVEL_Payload):
    pass

@message(0x09)
class INSVELBODY_Payload(INSVEL_Payload):
    pass

@message(0x23)
class INSVELENU_Payload(INSVEL_Payload):
    pass
@message(0x18)
class MAGDATA_Payload(ProtocolPayload):
    """Magnetometer field vector and derived heading/bank/elevation."""
    message_description = Message([
        PayloadItem(name = 'field', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'magHdg', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'magBank', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'magElevation', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'magDeviation', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'status', dimension = 1, datatype = 'I'),
    ])

@message(0x17)
class AIRDATA_Payload(ProtocolPayload):
    """Air data: airspeeds, barometric altitude, pressures and estimates."""
    message_description = Message([
        PayloadItem(name = 'TAS', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'IAS', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'baroAlt', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'baroAltRate', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'Pd', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'Ps', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'OAT', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'estBias', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'estScaleFactor', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'estBiasStdDev', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'estScaleFactorStdDev', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'status', dimension = 1, datatype = 'I'),
    ])
@message(0x0e)
class INSTRACKACC_Payload(ProtocolPayload):
    """Track acceleration triplet."""
    message_description = Message([
        PayloadItem(name = 'AccACV', dimension = 3, datatype = 'f'),
    ])

@message(0x0F)
class EKFSTDDEV_Payload(ProtocolPayload):
    """EKF standard deviations (scale-factor variant)."""
    message_description = Message([
        PayloadItem(name = 'pos', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'vel', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'tilt', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'biasAcc', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'biasOmg', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'scfAcc', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'scfOmg', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'scfOdo', dimension = 1, datatype = 'f'),
    ])

@message(0x28)
class EKFSTDDEV2_Payload(ProtocolPayload):
    """EKF standard deviations (misalignment-matrix variant)."""
    message_description = Message([
        PayloadItem(name = 'pos', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'vel', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'rpy', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'biasAcc', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'biasOmg', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'fMaAcc', dimension = 9, datatype = 'f'),
        PayloadItem(name = 'fMaOmg', dimension = 9, datatype = 'f'),
        PayloadItem(name = 'scfOdo', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'fMaOdo', dimension = 2, datatype = 'f'),
    ])

@message(0x27)
class EKFERROR2_Payload(ProtocolPayload):
    """EKF estimated errors (misalignment-matrix variant)."""
    message_description = Message([
        PayloadItem(name = 'biasAcc', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'biasOmg', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'fMaAcc', dimension = 9, datatype = 'f'),
        PayloadItem(name = 'fMaOmg', dimension = 9, datatype = 'f'),
        PayloadItem(name = 'scfOdo', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'maOdo', dimension = 2, datatype = 'f'),
    ])
@message(0x10)
class EKFERROR_Payload(ProtocolPayload):
    """EKF estimated errors (scale-factor variant)."""
    message_description = Message([
        PayloadItem(name = 'biasAcc', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'biasOmg', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'scfAcc', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'scfOmg', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'scfOdo', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'maOdo', dimension = 2, datatype = 'f'),
    ])

@message(0x11)
class EKFTIGHTLY_Payload(ProtocolPayload):
    """Tightly-coupled EKF satellite usage/outlier statistics per
    observation type (PSR, RR, TDCP) and constellation."""
    message_description = Message([
        PayloadItem(name = 'ucSatsAvailablePSR', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucSatsUsedPSR', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucSatsAvailableRR', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucSatsUsedRR', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucSatsAvailableTDCP', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucSatsUsedTDCP', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucRefSatTDCP', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'usReserved', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'uiUsedSatsPSR_GPS', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiOutlierSatsPSR_GPS', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiUsedSatsPSR_GLONASS', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiOutlierSatsPSR_GLONASS', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiUsedSatsRR_GPS', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiOutlierSatsRR_GPS', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiUsedSatsRR_GLONASS', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiOutlierSatsRR_GLONASS', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiUsedSatsTDCP_GPS', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiOutlierSatsTDCP_GPS', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiUsedSatsTDCP_GLONASS', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'uiOutlierSatsTDCP_GLONASS', dimension = 1, datatype = 'I'),
    ])

@message(0x29)
class EKFPOSCOVAR_Payload(ProtocolPayload):
    """EKF position covariance (9 floats)."""
    message_description = Message([
        PayloadItem(name = 'fPosCovar', dimension = 9, datatype = 'f'),
    ])
@message(0x21)
class POWER_Payload(ProtocolPayload):
    """Power-rail readings (32 floats)."""
    message_description = Message([
        PayloadItem(name = 'power', dimension = 32, datatype = 'f'),
    ])

@message(0x22)
class TEMP_Payload(ProtocolPayload):
    """Temperature readings for the individual boards and sensors."""
    message_description = Message([
        PayloadItem(name = 'temp_power_pcb', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'temp_switcher', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'temp_oem628', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'temp_oem615', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'temp_cpu', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'temp_acc', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'temp_omg', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'temp_other', dimension = 5, datatype = 'f'),
    ])

@message(0x1F)
class HEAVE_Payload(ProtocolPayload):
    """Heave filter state and status."""
    message_description = Message([
        PayloadItem(name = 'StatFiltPos', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'AppliedFreqHz', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'AppliedAmplMeter', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'AppliedSigWaveHeightMeter', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'PZpos', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'ZDpos', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'ZDvel', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'AccZnavDown', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'HeavePosVelDown', dimension = 2, datatype = 'd'),
        PayloadItem(name = 'HeaveAlgoStatus1', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'HeaveAlgoStatus2', dimension = 1, datatype = 'I'),
    ])

@message(0x24)
class CANSTAT_Payload(ProtocolPayload):
    """CAN bus status (controller/transceiver/protocol)."""
    message_description = Message([
        PayloadItem(name = 'uiErrorMask', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'ucControllerStatus', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucTransceiverStatus', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucProtocolStatus', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucProtocolLocation', dimension = 1, datatype = 'B'),
    ])
@message(0x1D)
class ARINC429STAT_Payload(ProtocolPayload):
    """ARINC 429 interface status word."""
    message_description = Message([
        PayloadItem(name = 'uiStatus', dimension = 1, datatype = 'I'),
    ])

@message(0x26)
class TIME_Payload(ProtocolPayload):
    """System/IMU/GNSS timing information."""
    message_description = Message([
        PayloadItem(name = 'sysTime', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'ImuInterval', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'TimeSincePPS', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'PPS_IMUtime', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'PPS_GNSStime', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'GNSSbias', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'GNSSbiasSmoothed', dimension = 1, datatype = 'd'),
    ])

@message(0x12)
class GNSSSOL_Payload(ProtocolPayload):
    """GNSS navigation solution with quality indicators."""
    message_description = Message([
        PayloadItem(name = 'lon', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'lat', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'alt', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'undulation', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'velNED', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'stdDevPos', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'stdDevVel', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'solStatus', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'posType', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'pdop', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'satsUsed', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'solTracked', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'baseID', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'diffAge', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'solAge', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'gnssStatus', dimension = 1, datatype = 'I'),
    ])

@message(0x13)
class INSGNDSPEED_Payload(ProtocolPayload):
    """Ground speed: speed/course over ground and down velocity."""
    message_description = Message([
        PayloadItem(name = 'SOG', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'COG', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'VDown', dimension = 1, datatype = 'f'),
    ])

@message(0x14)
class GNSSTIME_Payload(ProtocolPayload):
    """GNSS time and UTC calendar date."""
    message_description = Message([
        PayloadItem(name = 'utcOffset', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'offset', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'year', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'month', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'day', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'hour', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'minute', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'millisec', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'status', dimension = 1, datatype = 'I'),
    ])
@message(0x15)
class GNSSSOLCUST_Payload(ProtocolPayload):
    """Custom GNSS solution with displacement and extended statistics."""
    message_description = Message([
        PayloadItem(name = 'dLon', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'dLat', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'fAlt', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'fUndulation', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'fStdDev_Pos', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'fVned', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'fStdDev_Vel', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'fDisplacement', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'fStdDev_Displacement', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'usSolStatus', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'fDOP', dimension = 2, datatype = 'f'),
        PayloadItem(name = 'ucSatsPos', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucSatsVel', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucSatsDisplacement', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'usReserved', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'fDiffAge', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'fSolAge', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'uiGnssStatus', dimension = 1, datatype = 'I'),
    ])

@message(0x33)
class GNSSHDG_Payload(ProtocolPayload):
    """Dual-antenna GNSS heading/pitch with quality indicators."""
    message_description = Message([
        PayloadItem(name = 'hdg', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'stdDevHdg', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'pitch', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'stdDevPitch', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'solStat', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'solType', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'res', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'satsUsed', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'satsTracked', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'gnssStatus', dimension = 1, datatype = 'I'),
    ])

@message(0x1B)
class GNSSLEVERARM_Payload(ProtocolPayload):
    """GNSS antenna lever arms and their standard deviations."""
    message_description = Message([
        PayloadItem(name = 'primary', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'stdDevPrimary', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'relative', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'stdDevRelative', dimension = 3, datatype = 'f'),
    ])

@message(0x1C)
class GNSSVOTER_Payload(ProtocolPayload):
    """Internal/external GNSS receiver voting statistics."""
    message_description = Message([
        PayloadItem(name = 'ucSatsUsed_INT', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'ucSatsUsed_EXT', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'usReserved', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'fStdDevHDG_INT', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'fStdDevHDG_EXT', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'fStdDevPOS_INT', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'fStdDevPOS_EXT', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'uiStatus', dimension = 1, datatype = 'I'),
    ])
@message(0x1E)
class GNSSHWMON_Payload(ProtocolPayload):
GnssHwMonitor = Message([PayloadItem(name = 'val' , dimension = 1, datatype = 'f'),
PayloadItem(name = 'status' , dimension = 1, datatype = 'I')])
message_description = Message([PayloadItem(name = 'GnssHwMonitor' , dimension = 16, datatype = GnssHwMonitor)])
@message(0x38)
class GNSSALIGNBSL_Payload(ProtocolPayload):
    """Payload for message 0x38: GNSS alignment baseline in local ENU
    components (double precision) with per-axis standard deviations and
    solution/satellite status fields."""
    message_description = Message([
        PayloadItem(name = 'east', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'north', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'up', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'eastStddev', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'northStddev', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'upStddev', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'solStatus', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'posVelType', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'satsTracked', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'satsUsedInSolution', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'extSolStat', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'reserved', dimension = 1, datatype = 'B'),
    ])
@message(0x16)
class WHEELDATA_Payload(ProtocolPayload):
    """Payload for message 0x16: odometer wheel speed and tick count."""
    message_description = Message([
        PayloadItem(name = 'odoSpeed', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'ticks', dimension = 1, datatype = 'i'),
    ])
@message(0x32)
class WHEELDATADBG_Payload(ProtocolPayload):
    """Payload for message 0x32: odometer data with extra debug fields
    (measurement interval and trigger-event counters)."""
    message_description = Message([
        PayloadItem(name = 'odoSpeed', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'ticks', dimension = 1, datatype = 'i'),
        PayloadItem(name = 'interval', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'trigEvent', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'trigNextEvent', dimension = 1, datatype = 'I'),
    ])
@message(0x34)
class EVENTTIME_Payload(ProtocolPayload):
    """Payload for message 0x34: GPS timestamps of the two event inputs."""
    message_description = Message([
        PayloadItem(name = 'dGpsTime_EVENT_0', dimension = 1, datatype = 'd'),
        PayloadItem(name = 'dGpsTime_EVENT_1', dimension = 1, datatype = 'd'),
    ])
@message(0x35)
class OMGINT_Payload(ProtocolPayload):
    """Payload for message 0x35: integrated angular rate (3 axes) and the
    associated integration time."""
    message_description = Message([
        PayloadItem(name = 'omgINT', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'omgINTtime', dimension = 1, datatype = 'f'),
    ])
@message(0x36)
class ADC24STATUS_Payload(ProtocolPayload):
    """Payload for message 0x36: 24-bit ADC status — four read-register
    index/value pairs."""
    message_description = Message([
        PayloadItem(name = 'uiRRidx', dimension = 4, datatype = 'I'),
        PayloadItem(name = 'uiRRvalue', dimension = 4, datatype = 'I'),
    ])
@message(0x37)
class ADC24DATA_Payload(ProtocolPayload):
    """Payload for message 0x37: raw 24-bit ADC acceleration samples plus
    frame counter, temperature, error status and interval counters."""
    message_description = Message([
        PayloadItem(name = 'acc', dimension = 3, datatype = 'I'),
        PayloadItem(name = 'frameCounter', dimension = 1, datatype = 'H'),
        PayloadItem(name = 'temperature', dimension = 1, datatype = 'h'),
        PayloadItem(name = 'errorStatus', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'intervalCounter', dimension = 3, datatype = 'B'),
    ])
@message(0x42)
class CSACDATA_Payload(ProtocolPayload):
    """Payload for message 0x42: chip-scale atomic clock (CSAC) telemetry —
    status/alarm words, serial number, operating mode and a set of analog
    readings (laser current, heater power, temperature, steering, ...)."""
    message_description = Message([
        PayloadItem(name = 'status', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'alarm', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'serialNum', dimension = 32, datatype = 's'),
        PayloadItem(name = 'mode', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'contrast', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'laserCurrent', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'tcx0', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'heatP', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'sig', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'temperature', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'steer', dimension = 1, datatype = 'i'),
        PayloadItem(name = 'atune', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'phase', dimension = 1, datatype = 'i'),
        PayloadItem(name = 'discOk', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'timeSincePowerOn', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'timeSinceLock', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'dataValid', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'reserved', dimension = 1, datatype = 'B'),
        PayloadItem(name = 'fwStatus', dimension = 1, datatype = 'H'),
    ])
@message(0x46)
class INSMGRS_Payload(ProtocolPayload):
    """Payload for message 0x46: INS position formatted as an MGRS string
    plus an error code."""
    message_description = Message([
        PayloadItem(name = 'Error Code', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'MGRS Position', dimension = 64, datatype = 's'),
    ])
@message(0x25)
class GNSSSATINFO_Payload(ProtocolPayload):
    """Payload for message 0x25: per-satellite information — ECEF position
    and velocity, C/N0 values, error estimates and look angles."""
    message_description = Message([
        PayloadItem(name = 'SvID', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'PositionECEF', dimension = 3, datatype = 'd'),
        PayloadItem(name = 'VelocityECEF', dimension = 3, datatype = 'd'),
        PayloadItem(name = 'CN0', dimension = 3, datatype = 'f'),
        PayloadItem(name = 'ClockError', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'IonoError', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'TropoError', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'Elevation', dimension = 1, datatype = 'f'),
        PayloadItem(name = 'Azimuth', dimension = 1, datatype = 'f'),
    ])
@message(0x45)
class NTRIPSTAT_Payload(ProtocolPayload):
    """Payload for message 0x45: NTRIP correction-stream status, the last
    error message text and an error counter."""
    message_description = Message([
        PayloadItem(name = 'NtripStatus', dimension = 1, datatype = 'I'),
        PayloadItem(name = 'LastErrorMsg', dimension = 256, datatype = 's'),
        PayloadItem(name = 'ErrorCounter', dimension = 1, datatype = 'I'),
    ])
"""
MESSAGES FROM JSON FILES
"""
# Additional message definitions are shipped as JSON files next to this
# module; load them at import time so they register like the classes above.
path_json = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'json-files', 'messages')
parse_messages_json_folder(path_json)
|
from collections import defaultdict
from fs import path
from fs.subfs import SubFS
from fs.copy import copy_dir
from fs.copy import copy_file
from fs.zipfs import WriteZipFS
from fs.tempfs import TempFS
from fs.osfs import OSFS
from fs.errors import NoSysPath
from sqlalchemy import desc
from onegov.core.csv import convert_list_of_dicts_to_csv
from onegov.core.utils import module_path
from onegov.ballot import Vote, Election, ElectionCompound
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from onegov.election_day import ElectionDayApp
class ArchiveGenerator:
    """
    Iterates over all Votes, Election and ElectionCompounds and runs the
    csv export function on each of them.
    This creates a bunch of csv files, which are zipped and the path to
    the zip is returned.
    """

    def __init__(self, app: 'ElectionDayApp'):
        assert app.filestorage is not None
        self.app = app
        self.session = app.session()
        # Permanent directory inside the app's file storage; the final zip
        # is written below it (see archive_parent_dir).
        self.archive_dir: SubFS = app.filestorage.makedir("archive",
                                                          recreate=True)
        # Scratch filesystem: CSVs and docs are staged here, then zipped.
        self.temp_fs = TempFS()
        self.archive_parent_dir = "zip"
        # Entity ids are truncated to this length when used as file names.
        self.MAX_FILENAME_LENGTH = 60

    def generate_csv(self):
        """
        Creates csv files with a directory structure like this:
        archive
        ├── elections
        │   └── 2022
        │       ├── election1.csv
        │       ├── election2.csv
        │       └── ...
        │
        └── votes
            ├── 2021
            │   └── vote1.csv
            └── 2022
                └── vote1.csv
        """
        # NOTE(review): elections and election compounds both land in the
        # "elections" directory — confirm compounds shouldn't get their own.
        names = ["votes", "elections", "elections"]
        entities = [
            self.all_counted_votes_with_results(),
            self.all_counted_election_with_results(),
            self.all_counted_election_compounds_with_results()
        ]
        for entity_name, entity in zip(names, entities):
            grouped_by_year = self.group_by_year(entity)
            for yearly_package in grouped_by_year:
                year = str(yearly_package[0].date.year)
                year_dir = f"{entity_name}/{year}"
                self.temp_fs.makedirs(year_dir, recreate=True)
                for item in yearly_package:
                    # item may be of type Vote, Election or ElectionCompound
                    filename = item.id[: self.MAX_FILENAME_LENGTH] + ".csv"
                    combined_path = path.combine(year_dir, filename)
                    with self.temp_fs.open(combined_path, "w") as f:
                        rows = item.export(sorted(self.app.locales))
                        f.write(convert_list_of_dicts_to_csv(rows))
        # Additionally, create 'flat csv' containing all votes in a single file
        votes = entities[0]
        if votes:
            filename = "all_votes.csv"
            combined_path = path.combine("votes", filename)
            with self.temp_fs.open(combined_path, "w") as f:
                votes_exports = self.get_all_rows_for_votes(votes)
                f.write(convert_list_of_dicts_to_csv(votes_exports))

    def get_all_rows_for_votes(self, votes):
        """Concatenate the export rows of every vote into one flat list."""
        all_votes = []
        for v in votes:
            vote_row = v.export(sorted(self.app.locales))
            all_votes.extend(vote_row)
        return all_votes

    def group_by_year(self, entities):
        """Creates a list of lists, grouped by year.
        :param entities: Iterable of entities
        :type entities: list[Vote] | list[Election] | list[ElectionCompound]
        :returns: A nested list, where each sublist contains all from one year.
        For example:
        Given a list:
        votes = [vote_1, vote_2, vote_3, ...]
        We create a new list:
        groups = [[vote_1, vote_2], [vote_3], ...]
        where vote_1.date.year == vote_2.date.year
        """
        groups = defaultdict(list)
        for entity in entities:
            groups[entity.date.year].append(entity)
        return list(groups.values())

    def zip_dir(self, base_dir: SubFS) -> str | None:
        """Recursively zips a directory (base_dir).
        :param base_dir: is a directory in a temporary file system.
        Contains subdirectories 'votes' and 'elections', as well as various
        other files to include.
        :returns path to the zipfile or None if base_dir doesn't exist
        or is empty.
        """
        # NOTE(review): the zip file is created before the emptiness check,
        # so an empty archive.zip remains behind when None is returned.
        self.archive_dir.makedir(self.archive_parent_dir, recreate=True)
        zip_path = f"{self.archive_parent_dir}/archive.zip"
        self.archive_dir.create(zip_path)
        with self.archive_dir.open(zip_path, mode="wb") as file:
            with WriteZipFS(file) as zip_filesystem:  # type:ignore[arg-type]
                # Only produce a zip when at least one CSV was generated.
                counts = base_dir.glob("**/*.csv").count()
                if counts.files != 0:
                    if len(base_dir.listdir('/')) != 0:
                        for entity in base_dir.listdir('/'):
                            if base_dir.isdir(entity):
                                copy_dir(
                                    src_fs=base_dir,
                                    src_path=entity,
                                    dst_fs=zip_filesystem,
                                    dst_path=entity,
                                )
                            if base_dir.isfile(entity):
                                copy_file(
                                    src_fs=base_dir,
                                    src_path=entity,
                                    dst_fs=zip_filesystem,
                                    dst_path=entity,
                                )
                        return zip_path
        return None

    def all_counted_votes_with_results(self):
        """All votes, newest first, restricted to counted ones with results."""
        all_votes = self.session.query(Vote).order_by(desc(Vote.date)).all()
        closed_votes = self.filter_by_final_results(all_votes)
        return closed_votes

    def all_counted_election_with_results(self):
        """All elections, newest first, restricted to counted ones with results."""
        all_elections = (
            self.session.query(Election).order_by(desc(Election.date)).all()
        )
        final_elections = self.filter_by_final_results(all_elections)
        return final_elections

    def all_counted_election_compounds_with_results(self):
        """All election compounds, newest first, counted and with results."""
        all_election_compounds = (
            self.session.query(ElectionCompound)
            .order_by(desc(ElectionCompound.date))
            .all()
        )
        final_election_compounds = self.filter_by_final_results(
            all_election_compounds
        )
        return final_election_compounds

    def filter_by_final_results(self, all_entities):
        """Keep only entities that are counted and actually have results."""
        finalized = []
        for entity in all_entities:
            if entity.counted and entity.has_results:
                finalized.append(entity)
        return finalized

    @property
    def archive_system_path(self):
        """OS-level path of the generated zip, or None if the storage
        backend has no system path."""
        zip_path = f"{self.archive_parent_dir}/archive.zip"
        # syspath may not be available, depending on the actual filestorage
        try:
            sys_path = self.archive_dir.getsyspath(zip_path)
            return sys_path
        except NoSysPath:
            return None

    def include_docs(self):
        """Copy the open-data API markdown docs into the staging filesystem
        so they ship inside the archive."""
        api = module_path("onegov.election_day", "static/docs/api")
        native_fs = OSFS(api)
        for match in native_fs.glob("**/open_data*.md"):
            copy_file(
                src_fs=native_fs,
                src_path=match.path,
                dst_fs=self.temp_fs,
                dst_path=match.path,
            )

    def generate_archive(self):
        """Stage CSVs and docs, zip them, and return the zip path (or None)."""
        self.generate_csv()
        self.include_docs()
        root = self.temp_fs.opendir('/')
        return self.zip_dir(root)
|
import sys
sys.path.append("../")  # make the project's src package importable when run from this directory
from src import verbalGraphGenerator
import random
from src import logger
import logging
import uuid
from src import utils
from src import constants
import argparse
import codecs
import os
import time
import pdb
from tqdm import tqdm
# Last raw intent string returned by the NLU service; written by
# controller.sendSentence_false and read by controller.record_wrong_intent.
cur_Intents_String = ""
class controller:
    """Drives randomly picked dialogue routines through the intent tree and
    exercises the NLU service (real device or emulator, per
    ``constants.EMULATOR``) one turn at a time, recording accuracy and
    timing statistics."""

    def __init__(self):
        # The intent tree produced by the verbal graph generator; nodes
        # expose getIntent()/getVerbal() and a list of children in .clds.
        self.intentTree = verbalGraphGenerator.generate()

    def pick_path(self):
        """Randomly walk the tree from the root until the END intent (or a
        leaf) and return the list of visited nodes."""
        node = self.intentTree.root
        path = []
        while node.getIntent() != verbalGraphGenerator.end:
            path.append(node)
            # print(node.getIntent())
            # node.showClds()
            # print(node.getIntent())
            if len(node.clds)<=0:
                break
            node_pre = node
            # "WFSB" children are rejected; re-roll from the parent's children.
            node = random.choice(node.clds)
            while node.getIntent()=="WFSB":
                node = random.choice(node_pre.clds)
        print_path = " --> ".join([item.getIntent() for item in path])
        print_verbal = " --> ".join([item.getVerbal() for item in path])
        logging.info("Randomly pick a routine:{print_path}".format(print_path=print_path))
        logging.info("verbal path:{print_verbal}".format(print_verbal = print_verbal))
        return path

    def regualr_routine(self,callId,path):
        """Replay *path* turn by turn until the NLU answer diverges.

        NOTE(review): ``oneAction`` returns a 5-tuple when ``next_node`` is
        given, but its result is bound to the single name ``next_intent``
        here (compare ``do``), and ``right_len`` is never incremented —
        this method looks broken/unfinished; confirm whether it is used.
        ``cur_node``/``next_intentId`` are also undefined when *path* has
        fewer than two nodes.
        """
        path1 = path[:-1]
        path2 = path[1:]
        lastNode = None
        actual_path = []
        arriveEnd = True
        allRight = True
        right_len = 0
        for cur_node, next_node in zip(path1, path2):
            actual_path.append(cur_node)
            next_intent = self.oneAction(cur_node, next_node, callId)
            next_intentId = self.intentTree.intentAbbr2Id[next_intent]
            lastNode = next_node
            if next_intent != next_node.getIntent():
                logging.debug("Wrong!")
                allRight = False
                if next_intentId in cur_node.cldsIntentIdSet:
                    logging.debug("Begain select path by return intent!")
                else:
                    pot_intent = ",".join(item.getIntent() for item in cur_node.clds)
                    logging.debug("Return Intent {ret_intent} not in potential intent {pot_intent}".format(
                        ret_intent=next_intent, pot_intent=pot_intent))
                    arriveEnd = False
                    lastNode = None
                    break
        return arriveEnd,allRight,lastNode,cur_node,next_intentId,actual_path,right_len

    def wrong_routine(self):
        # TODO: unimplemented; new_do calls it with three arguments — confirm.
        pass

    def record_right_intent(self,total_dict,right_dict,node):
        """Increment both the total and the correct counters for *node*'s intent."""
        t_dict = total_dict
        r_dict = right_dict
        name = node.getIntent()
        right_count = r_dict.get(name, 0)+1
        total_count = t_dict.get(name, 0)+1
        t_dict[name] = total_count
        r_dict[name] = right_count
        return t_dict,r_dict

    def record_wrong_intent(self,total_dict,node,sen,next_intent,intent_intent_sen_list):
        """Increment the total counter and append (expected intent, raw NLU
        answer, sentence) to the misclassification list."""
        l = intent_intent_sen_list
        t_dict = total_dict
        name = node.getIntent()
        total_count = t_dict.get(name, 0) + 1
        t_dict[name] = total_count
        # cur_Intents_String holds the raw NLU answer of the last request.
        l.append([name,cur_Intents_String,sen.encode("utf-8")])
        return t_dict,l

    def record_time(self,time_list,t):
        """Append one timing sample to *time_list* (returned for chaining)."""
        time_list.append(t)
        return time_list

    def new_do(self):
        """Experimental variant of ``do`` built on ``regualr_routine``.

        NOTE(review): calls ``wrong_routine`` with three arguments although
        it takes none — this path would raise; confirm before use.
        """
        callId = str(uuid.uuid1())
        logging.info(callId)
        path = self.pick_path()
        init_len = len(path) - 1
        arriveEnd, allRight, lastNode, cur_node, next_intentId, actual_path, right_len = self.regualr_routine(callId,path)
        if not allRight and arriveEnd:
            return self.wrong_routine(actual_path,cur_node,next_intentId)
        print_actual_path = " --> ".join([item.getIntent() for item in actual_path])
        print_init_select_path = " --> ".join([item.getIntent() for item in path])
        return init_len,right_len,print_init_select_path,print_actual_path,arriveEnd,allRight

    def do(self,path):
        """Run one full dialogue along *path*.

        Replays the planned path turn by turn; on a divergent NLU answer it
        tries to follow the tree along the returned intent instead.  Returns
        a dict with path lengths, printable paths, success flags, per-intent
        statistics, timing lists and the misclassified sentences.
        """
        # pdb.set_trace()
        callId = str(uuid.uuid1())
        logging.info(callId)
        path1 = path[:-1]
        path2 = path[1:]
        lastNode =None
        actual_path = []
        arriveEnd = True
        allRight = True
        init_len = len(path)-1
        right_len = 0
        node_right_statistic = dict()
        node_total_statistic = dict()
        time_statistic = list()
        time_text = list()
        time_huashu = list()
        intent_intent_sen_list = list()
        for cur_node,next_node in zip(path1,path2):
            actual_path.append(cur_node)
            next_intent,pick_sen, textTime, huashuTime, totalTime = self.oneAction(cur_node,next_node,callId)
            self.record_time(time_statistic,totalTime)
            self.record_time(time_text,textTime)
            self.record_time(time_huashu,huashuTime)
            next_intentId = self.intentTree.intentAbbr2Id[next_intent]
            lastNode = next_node
            if next_intent!=next_node.getIntent():
                # NLU answered something else than the planned intent.
                logging.debug("Wrong!")
                allRight = False
                node_total_statistic,intent_intent_sen_list = self.record_wrong_intent(node_total_statistic,next_node,pick_sen,next_intent,intent_intent_sen_list) #record
                if next_intentId in cur_node.cldsIntentIdSet:
                    logging.debug("Begain select path by return intent!")
                else:
                    pot_intent = ",".join(item.getIntent() for item in cur_node.clds)
                    logging.debug("Return Intent {ret_intent} not in potential intent {pot_intent}".format(
                        ret_intent=next_intent,pot_intent=pot_intent))
                    arriveEnd = False
                    lastNode = None
                    break
                # Recovery: keep following the tree along whatever intent
                # the NLU returns, until END, a leaf, or a dead end.
                while next_intentId in cur_node.cldsIntentIdSet:
                    cur_node = cur_node.intentId2item[next_intentId]
                    actual_path.append(cur_node)
                    if len(cur_node.clds)<=0:
                        lastNode = cur_node
                    else:
                        next_node = random.choice(cur_node.clds)
                        while next_node.getIntent() == "WFSB":
                            next_node = random.choice(cur_node.clds)
                        logging.info("change next node to ==>"+next_node.getIntent())
                        if next_node.getIntent() == verbalGraphGenerator.end:
                            lastNode = None
                            break
                        next_intent,pick_sen,textTime,huashuTime,totalTime = self.oneAction(cur_node, next_node,callId)
                        self.record_time(time_statistic, totalTime)
                        self.record_time(time_text, textTime)
                        self.record_time(time_huashu, huashuTime)
                        if next_intent == next_node.getIntent(): #record
                            node_total_statistic, node_right_statistic = self.record_right_intent(node_total_statistic,
                                                                                                 node_right_statistic, next_node)
                        else:
                            node_total_statistic,intent_intent_sen_list = self.record_wrong_intent(node_total_statistic,next_node,pick_sen,next_intent,intent_intent_sen_list)
                        next_intentId = self.intentTree.intentAbbr2Id[next_intent]
                        if next_intentId not in cur_node.cldsIntentIdSet:
                            pot_intent = ",".join(item.getIntent() for item in cur_node.clds)
                            logging.debug("Return Intent {ret_intent} not in potential intent {pot_intent}".format(
                                ret_intent=next_intent, pot_intent=pot_intent))
                            arriveEnd = False
                            lastNode = None
                            break
                break
            right_len = right_len + 1
            node_total_statistic, node_right_statistic = self.record_right_intent(node_total_statistic,node_right_statistic,next_node)#record
        if lastNode:
            actual_path.append(lastNode)
            self.oneAction(lastNode,callId=callId)
        print_actual_path = " --> ".join([item.getIntent() for item in actual_path])
        print_init_select_path = " --> ".join([item.getIntent() for item in path])
        logging.info("Initial Intent Path: "+print_init_select_path)
        logging.info("Actual Intent Path: "+print_actual_path)
        if allRight:
            logging.info("Reach the end of the path!")
        if not arriveEnd:
            logging.info("Not arrived to end! ")
        ret_dict=dict()
        ret_dict["init_len"]=init_len
        ret_dict["right_len"]=right_len
        ret_dict["print_init_select_path"]=print_init_select_path
        ret_dict["print_actual_path"]=print_actual_path
        ret_dict["arriveEnd"] = arriveEnd
        ret_dict["allRight"]=allRight
        ret_dict["node_total_statistic"] = node_total_statistic
        ret_dict["node_right_statistic"]=node_right_statistic
        ret_dict["time_statistic"]=time_statistic
        ret_dict["time_text"]=time_text
        ret_dict["time_huashu"]=time_huashu
        ret_dict["intent_intent_sen_list"]=intent_intent_sen_list
        return ret_dict
        # return init_len,right_len,print_init_select_path,print_actual_path,arriveEnd,allRight,node_total_statistic,node_right_statistic,time_statistic,intent_intent_sen_list

    def sendSentence(self,sentence,callId,real=constants.EMULATOR):
        """Dispatch a user sentence to the real device or the emulator."""
        if real:
            return self.sendSentence_true(sentence,callId)
        else:
            return self.sendSentence_false(sentence,callId)

    def sendSentence_false(self,sentence,callId):
        """Emulator path: upload text, return (intent, upload time)."""
        logging.debug("Sending sendSentence:" + sentence)
        global cur_Intents_String
        sentence = "".join(sentence.split())
        origin_ret_intent, textTime = utils.uplaodText(sentence, callId)
        cur_Intents_String = origin_ret_intent
        # The service may return "intent/extra..."; keep the first part only.
        splited_ret_intent = origin_ret_intent.split('/')
        ret_intent = splited_ret_intent[0]
        if ret_intent not in self.intentTree.intentAbbr2Id.keys():
            ret_intent = verbalGraphGenerator.end
            logging.error("NLU return a None Exist Intent! END instead!")
        return ret_intent, textTime

    def sendSentence_true(self,sentence,callId):
        """Real-device path: prompt the operator to speak, poll for the NLU
        result, return (intent, elapsed).

        NOTE(review): returns ``start-end`` (a negative duration) — likely
        meant ``end-start``; also uses Py2-only ``raw_input``.
        """
        callId = "defaultCallId"
        raw_input("Say:{sentence}\n Type <enter>".format(sentence=sentence.encode('utf-8')))
        logging.info("Waiting NLU recognize result:")
        start = time.time()
        with tqdm(total=100) as pbar:
            while not utils.haveRecogResult():
                time.sleep(0.05)
                pbar.update(5)
        end = time.time()
        # if end-start>10:
        #     return verbalGraphGenerator.end,end-start
        intent = utils.getNLUIntent()
        intent= intent.split("#")
        text = intent[-1]
        intent = intent[0]
        print("NLU Intent:"+intent.encode('utf-8'))
        print("NLU text:"+text.encode('utf-8'))
        end = time.time()
        return intent,start-end

    def sendTTS(self,node,callId):
        """Upload the node's prompt (huashu) and return the upload time."""
        # tts = "_".join(node.getPreVerbal())
        tts = node.getVerbal()
        logging.debug("Sending TTS:"+tts)
        if constants.EMULATOR:
            utils.cleanIntentResult()
        textTime=utils.uploadHuaShu(tts,callId)
        return textTime

    def pickSentence(self,node):
        """Pick a user utterance belonging to the node's intent category."""
        catName = node.getIntent()
        logging.debug("In pickSentence")
        ret_sen = utils.select_text(catName)
        return ret_sen

    def oneAction(self,cur_node,next_node=None,callId="1"):
        """Play one turn: send cur_node's prompt, then (if a next node is
        planned) send a matching user sentence and return the NLU result.

        NOTE(review): returns a 5-tuple when ``next_node`` is given but a
        2-tuple otherwise — callers must know which shape to expect.
        """
        huashuTime=self.sendTTS(cur_node,callId)
        sen = ""
        if next_node:
            sen = self.pickSentence(next_node)
            ret_intent,textTime = self.sendSentence(sen,callId)
            totalTime=round((float(huashuTime)+float(textTime)),6)
            return ret_intent,sen,textTime,huashuTime,totalTime
        else:
            ret_intent = verbalGraphGenerator.end
            return ret_intent,sen

    def gen_paths(self,num):
        """Pre-generate *num* random routines (lists of nodes)."""
        pikced_paths = []
        for _ in range(num):
            pikced_paths.append(self.pick_path())
        return pikced_paths
def main(iterNum):
    """Run *iterNum* random dialogue routines, writing a per-run log plus a
    summary (accuracy, timing, per-intent precision) to a history file and
    the misclassified sentences to a companion file."""
    clt = controller()
    endCount = 0
    rightCount = 0
    total_right_length = 0
    total_init_length = 0
    save_file_Name = constants.HIST_DIR + os.sep +utils.getTimeStamp()
    save_sen_Name = save_file_Name+"_wrong_intent_sen"
    logging.info("Saving summary in===>"+save_file_Name)
    max_time_consuming = 0
    min_time_consuming = 1000000
    average_time_consuming = 0
    step_statistic_dict=dict()
    node_total_statistic_dict=dict()
    node_right_statistic_dict=dict()
    # NOTE(review): the two verbal_* dicts below are never written or read.
    verbal_total_statistic_dict = dict()
    verbal_right_statistic_dict = dict()
    node_right_prob_dict = dict()
    intent_intent_sen_list = list()
    tmp_sum_time = 0
    tmp_avg_time = 0
    tmp_max_time = 0
    tmp_min_time = 0
    picked_paths = clt.gen_paths(iterNum)
    with codecs.open(save_file_Name,"w","utf-8") as f:
        for i,path in enumerate(picked_paths):
            if constants.EMULATOR:
                utils.resetNlu()
            f.write("#========{num}=========#\n".format(num = str(i)))
            logging.info("#========{num}=========#\n".format(num = str(i)))
            # init_len,right_len,s_path,a_path,end,right,node_total_statistic,node_right_statistic,time_statistic,tmp_intent_intent_sen_list = clt.do(path)
            ret_dict = clt.do(path)
            init_len = ret_dict["init_len"]
            right_len = ret_dict["right_len"]
            s_path = ret_dict["print_init_select_path"]
            a_path = ret_dict["print_actual_path"]
            end = ret_dict["arriveEnd"]
            right = ret_dict["allRight"]
            node_total_statistic = ret_dict["node_total_statistic"]
            node_right_statistic = ret_dict["node_right_statistic"]
            time_statistic = ret_dict["time_statistic"]
            time_huashu = ret_dict["time_huashu"]
            time_text = ret_dict["time_text"]
            tmp_intent_intent_sen_list = ret_dict["intent_intent_sen_list"]
            intent_intent_sen_list = intent_intent_sen_list+tmp_intent_intent_sen_list
            total_init_length = total_init_length+init_len
            total_right_length = total_right_length+right_len
            f.write("initPath:{path}\n".format(path = s_path))
            f.write("actualPath:{path}\n".format(path = a_path))
            f.write("init length {len1},right length {len2},rate:{rate}\n".format(
                len1=init_len,len2=right_len,rate=float(right_len)/init_len))
            # Histogram of how many correct steps each run achieved.
            step_statistic_dict[right_len] = step_statistic_dict.get(right_len,0)+1
            max_time_consuming = max(max_time_consuming,max(time_statistic))
            min_time_consuming = min(min_time_consuming,min(time_statistic))
            tmp_sum_time = sum(time_statistic)
            tmp_avg_time = tmp_sum_time/len(time_statistic)
            tmp_max_time = max(time_statistic)
            tmp_min_time = min(time_statistic)
            f.write("Time consuming:\n")
            f.write("upload_huashu : {time}\n".format(time=time_huashu))
            f.write("upload_text : {time}\n".format(time=time_text))
            f.write("upload_time : {time}\n".format(time=time_statistic))
            f.write("max : {time}\n".format(time=tmp_max_time))
            f.write("min :{time}\n".format(time=tmp_min_time))
            f.write("avg :{time}\n".format(time=tmp_avg_time))
            f.write("Total :{time}\n".format(time=tmp_sum_time))
            average_time_consuming = average_time_consuming+sum(time_statistic)
            # NOTE(review): these aggregates add 1 per intent seen in a run,
            # not the per-run counts in v — confirm that is intended.
            for k,v in node_right_statistic.items():
                node_right_statistic_dict[k] = node_right_statistic_dict.get(k,0)+1
            for k,v in node_total_statistic.items():
                node_total_statistic_dict[k] = node_total_statistic_dict.get(k,0)+1
            if right:
                rightCount = rightCount+1
                f.write("Right?:True\n")
            else:
                f.write("Right?:False\n")
            if end:
                endCount = endCount+1
                f.write("Finish?:True\n")
            else:
                f.write("Finish?:False\n\n")
        f.write("SUMMARIZE:\n")
        f.write("\titerat {iterNum} nums\n".format(iterNum = iterNum))
        f.write("\tComplete right:{rightCount}, prob:{prob}\n".format(rightCount=rightCount,prob = float(rightCount)/iterNum))
        f.write("\tFinish :{endCount}, prob:{prob}\n".format(endCount = endCount,prob = float(endCount)/iterNum))
        f.write("\tright/init length rate:{rate}\n".format(rate = float(total_right_length)/total_init_length))
        total_intent = sum([v for k,v in node_total_statistic_dict.items()])
        f.write("\tTIME CONSUMING:\n")
        f.write("\t\tmax : {time}\n".format(time=max_time_consuming))
        f.write("\t\tmin :{time}\n".format(time=min_time_consuming))
        f.write("\t\tavg :{time}\n".format(time=average_time_consuming/total_intent))
        f.write("\t\tTotal :{time}\n".format(time=average_time_consuming))
        # Per-intent precision = right count / total count.
        for k in node_total_statistic_dict.keys():
            if k not in node_right_statistic_dict.keys():
                node_right_prob_dict[k] = 0
            else:
                node_right_prob_dict[k] = float(node_right_statistic_dict[k])/node_total_statistic_dict[k]
        step_statistic_list = sorted(step_statistic_dict.items(), key=lambda x: x[1])
        # node_total_statistic_list = sorted(node_total_statistic_dict.items(), key=lambda x: x[1])
        # node_right_statistic_list = sorted(node_total_statistic_dict.items(), key=lambda x: x[1])
        node_right_prob_list = sorted(node_right_prob_dict.items(), key=lambda x: x[1])
        f.write("\tSTEP-NUM:\n")
        for item in step_statistic_list:
            f.write("\t\t{step} - {num}\n".format(step=item[0],num=item[1]))
        f.write("\tINTENT - PROB , (right/total):\n")
        sum_prob = 0
        for index,item in enumerate(node_right_prob_list):
            f.write("\t\t{node} - {prob} , ({right}/{total})\n".format(node=item[0],
                                                                       prob=item[1],
                                                                       right=node_right_statistic_dict.get(item[0],0),
                                                                       total=node_total_statistic_dict[item[0]]))
            sum_prob = sum_prob+float(item[1])
        avg_prob = sum_prob/(index+1)
        f.write("avg precision: "+str(avg_prob)+"\n")
    with open(save_sen_Name,'w') as f:
        for item in intent_intent_sen_list:
            f.write("{intent1} - {intent2}\n".format(intent1=item[0],intent2=item[1]))
            f.write("\t{sen}".format(sen=item[2]))
if __name__ == "__main__":
    # Wire up logging and CLI options, seed the RNG if requested, then run.
    logger.setup_logging()
    parser = argparse.ArgumentParser()
    parser.add_argument("--iterNum",'-i',default=10,help = "iteration num!")
    parser.add_argument("--randomSeed",'-rs',default=None,help = "random seed!")
    args = parser.parse_args()
    constants.RANDOM_SEED = args.randomSeed
    if constants.RANDOM_SEED:
        random.seed(constants.RANDOM_SEED)
    main(int(args.iterNum))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 15 15:55:01 2017
@author: brian Cechmanek
Implementation of a classifier Perceptron from scratch. with extensive/excessive documentation
We'll later use it for exploration of the Iris data set.
"""
import numpy as np # this is to drastically speed up linear computations
class Perceptron(object):
    """Perceptron binary classifier (Rosenblatt learning rule).

    Parameters
    ----------
    eta: (float) the learning rate. range: 0.0-1.0
    n_iters: (int) number of passes (epochs) over the training dataset

    Attributes
    ----------
    w_ : (1d-array numpy) weights after fitting; w_[0] is the bias term.
    errors_: (list) total number of misclassifications in each epoch.
    """

    def __init__(self, eta=0.01, n_iters=10):  # arbitrarily small starting parameters
        """Store the hyper-parameters. 0.01 is a 'slow' learning rate, and
        10 iterations should be very quick on small to medium-sized data
        sets. These parameters often require manual optimization.
        """
        self.eta = eta
        self.n_iters = n_iters

    def fit(self, X, y):
        """Fit the training data to the Perceptron.

        Parameters
        ----------
        X: (array-like) defined by n_samples X n_features.
        y: (array-like) the corresponding target value (+1/-1) of each X sample.

        Returns
        -------
        self: object
        """
        # one weight per feature plus the bias at index 0
        self.w_ = np.zeros(1 + X.shape[1])
        # misclassification count per epoch, for convergence inspection
        self.errors_ = []
        # BUG FIX: the original looped over ``self._iters``, an attribute
        # that is never set (``__init__`` stores ``n_iters``), so fit()
        # raised AttributeError on every call.
        for _ in range(self.n_iters):
            errors = 0
            for xi, target in zip(X, y):  # xi is the ith sample of X
                # Perceptron rule: scale the (target - prediction) error by
                # the learning rate; zero when the sample is classified right.
                update = self.eta * (target - self.predict(xi))
                self.w_[1:] += update * xi  # update each feature weight
                self.w_[0] += update        # update the bias weight
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    def net_input(self, X):
        """Calculate the weighted sum value via dot product of the feature
        weights plus the bias term."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Binary step-function output: 1 where the net input is >= 0,
        else -1."""
        return np.where(self.net_input(X) >= 0.0, 1, -1)
|
from django.forms import ModelForm
from .models import Post, Comment, Like
from datetime import datetime
class PostForm(ModelForm):
    """ModelForm for Post that decorates every widget with the Bootstrap
    classes the templates expect and restricts the image inputs to the
    accepted MIME types."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # All regular inputs share the same Bootstrap class.
        text_like = ('author', 'title', 'slug', 'theme', 'category',
                     'image', 'description', 'image_front')
        for field_name in text_like:
            self.fields[field_name].widget.attrs.update({'class': 'form-control'})
        # The checkbox uses the check-input variant instead.
        self.fields['is_activate'].widget.attrs.update({'class': 'form-check-input'})
        # Limit the file pickers to the supported image formats.
        self.fields['image'].widget.attrs.update(accept='image/png,image/jpeg, image/jpg')
        self.fields['image_front'].widget.attrs.update(accept='image/png,image/jpeg')

    class Meta:
        model = Post
        fields = ['author', 'title', 'slug', 'theme', 'category', 'description', 'image', 'image_front', 'is_activate', 'activate_date', 'due_date']
class CommentForm(ModelForm):
    """Comment form that pre-fills the user from the request and the post
    from the view's object, keeping both fields visually hidden."""

    def __init__(self, *args, **kwargs):
        # 'request' and 'object' are injected by the calling view; they are
        # popped before ModelForm sees the kwargs.
        self.request = kwargs.pop('request', None)
        self.object = kwargs.pop('object', None)
        super().__init__(*args, **kwargs)
        # NOTE(review): assumes 'request' was actually passed — self.request
        # is None otherwise and this raises AttributeError; confirm callers.
        self.initial['user'] = self.request.user
        self.initial['post'] = self.object
        self.fields['user'].widget.attrs.update({'class': 'visually-hidden'})
        self.fields['post'].widget.attrs.update({'class': 'visually-hidden'})

    class Meta:
        model = Comment
        fields = ['user', 'post', 'content']
class LikeForm(ModelForm):
    """Like form; currently a plain ModelForm — the request/object pre-fill
    (mirroring CommentForm) is left commented out."""

    def __init__(self, *args, **kwargs):
        #self.request = kwargs.pop('request', None)
        #self.object = kwargs.pop('object', None)
        super().__init__(*args, **kwargs)
        #self.initial['user'] = self.request.user
        #self.initial['post'] = self.object
        #self.fields['user'].widget.attrs.update({'class': 'visually-hidden'})
        #self.fields['post'].widget.attrs.update({'class': 'visually-hidden'})

    class Meta:
        model = Like
        fields = ['user', 'post', 'like']
import csv as csv
import numpy as np

# Load the training set into a numpy array of strings; the first row is the
# header and is kept separately.
# FIX: the file handle was previously never closed and the Py2-only
# ``reader.next()`` was used — ``next()`` works on both Python 2 and 3.
# NOTE(review): binary mode ('rb') is the Python-2 csv convention; on
# Python 3 use open('train.csv', 'r', newline='') instead.
with open('train.csv', 'rb') as train_file:
    csv_file = csv.reader(train_file)
    header = next(csv_file)
    data = np.array([line for line in csv_file])
|
#Copyright (C) Practica Ana Sollars & Co.
#Permission is granted to copy, distribute and/or modify this document
#under the terms of the GNU Free Documentation License, Version 1.3
#or any later version published by the Free Software Foundation;
#with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
#A copy of the license is included in the section entitled "GNU
#Free Documentation License"
#<Authors: Ana Mª Sollars Castellanos>
#lista de compañias
#compañia--> lista de drugs
import http.server
import json
import http.client
class OpenFDAClient():
    """Thin HTTPS client for the openFDA drug adverse-event endpoint."""

    OPENFDA_API_URL = "api.fda.gov"
    OPENFDA_API_EVENT = "/drug/event.json"

    def _get(self, query):
        """GET the event endpoint with *query* appended and return the body
        decoded as a UTF-8 string.

        FIX: the three public methods previously duplicated this code and
        never closed the connection; it is now closed in all cases.
        """
        conn = http.client.HTTPSConnection(self.OPENFDA_API_URL)
        try:
            conn.request("GET", self.OPENFDA_API_EVENT + query)
            r1 = conn.getresponse()
            data1 = r1.read()
        finally:
            conn.close()
        return data1.decode("utf8")  # bytes -> str

    def get_event(self, limit):
        """Return the raw JSON for the latest *limit* events."""
        return self._get("?limit=" + limit)

    def get_search(self, drug):
        """Return up to 10 events whose medicinal product matches *drug*."""
        return self._get('?search=patient.drug.medicinalproduct=' + drug + '&limit=10')

    def get_company_drug(self, comp):
        """Return up to 10 events filed under company number *comp*."""
        return self._get('?search=companynumb=' + comp + '&limit=10')
class OpenFDAParser():
    """Pulls fields of interest out of openFDA adverse-event JSON payloads."""

    def get_medicinalproduct(self, limit):
        """Return the first medicinal product of each of the first `limit` events.

        `limit` is the (string) limit that was sent to the API. The original
        contained a dead first loop that computed values it never used.
        """
        client = OpenFDAClient()
        results = json.loads(client.get_event(limit))["results"]
        med_list = []
        for item in results[:int(limit)]:
            med_list.append(item["patient"]["drug"][0]["medicinalproduct"])
        return med_list

    def get_company_list(self, limit):
        """Return the companynumb field of every fetched event."""
        client = OpenFDAClient()
        results = json.loads(client.get_event(limit))["results"]
        return [item["companynumb"] for item in results]

    def parser_get_search(self, event):
        """Return the companynumb of every result in the event JSON string."""
        results = json.loads(event)["results"]
        return [item["companynumb"] for item in results]

    def get_gender_list(self, limit):
        """Return 'Male'/'Female' labels for the first `limit` events.

        openFDA encodes patientsex as "0" = unknown, "1" = male, "2" = female;
        the original mapping had male and female swapped. Unknown codes are
        skipped, as before.
        """
        client = OpenFDAClient()
        results = json.loads(client.get_event(limit))["results"]
        genders = []
        for item in results:
            sex_code = item["patient"]["patientsex"]
            if sex_code == "1":
                genders.append("Male")
            elif sex_code == "2":
                genders.append("Female")
        return genders

    def parser_get_company_drug(self, event):
        """Return the first medicinal product of each result in the event JSON string."""
        results = json.loads(event)["results"]
        drug_list = []
        for item in results:
            drug_list.append(item["patient"]["drug"][0]["medicinalproduct"])
        return drug_list
class OpenFDAHTML():
    """Renders every HTML page served by the OpenFDA demo application.

    Fixes vs the original: the 404 page nested <body> tags inside <head>
    (now well-formed), and the local name `list` shadowed the builtin.
    """

    def get_main_page(self):
        """Return the landing page containing the query forms."""
        html = """
        <html>
        <head>
        <link rel="shortcut icon" href="https://b64459531885200b3efb-5206a7b3a50a3f5974248375cd863061.ssl.cf1.rackcdn.com/favicon-new.ico">
        <title>OpenFDA Cool App</title>
        <DIV ALIGN=center>
        <IMG SRC="https://pbs.twimg.com/profile_images/701113332183371776/57JHEzt7.jpg" width="400" height="200" alt="correo">
        </DIV>
        <style type= "text/css">
        .button{
        text-decoration: none;
        padding: 3px;
        padding-left: 10px;
        padding-right: 10px;
        font-family: Helvetica Neue;
        font-weight: 300;
        font-size: 15px;
        font-style: bold;
        color: blue;
        background-color: #99CCFF;
        border-radius: 15px;
        border: 3px double blue;
        }
        .boton_1:hover{
        opacity: 0.6;
        text-decoration: none;
        }
        </style>
        </head>
        <body>
        <DIV ALIGN=center>
        <h1>
        <FONT FACE="arial" SIZE=8 COLOR=><u>OpenFDA Client</u></FONT>
        </h1>
        <form method="get" action="listDrugs">
        <input class="button" type="submit" value="Drug List: Send to OpenFDA">
        </input>
        limit:<input type="text" name="limit">
        </input>
        </form>
        <form method="get" action="listCompanies">
        <input class="button" type="submit" value="Company List: Send to OpenFDA">
        </input>
        limit:<input type="text" name="limit">
        </input>
        </form>
        <form method="get" action="searchDrug">
        <input type="text" name="drug">
        <input class="button" type="submit" value="Drug Search: Send to OpenFDA">
        </input>
        </form>
        <form method="get" action="searchCompany">
        <input type="text" name="company">
        <input class="button" type="submit" value="Company Search: Send to OpenFDA">
        </input>
        </form>
        <form method="get" action="listGender">
        <input type="text" name="limit">
        <input class="button" type="submit" value="Gender">
        </input>
        </form>
        </DIV>
        </body>
        </html>
        """
        return html

    def get_drug(self, drug):
        """Render the company numbers reporting events for `drug`."""
        client = OpenFDAClient()
        parser = OpenFDAParser()
        event = client.get_search(drug)
        items = parser.parser_get_search(event)
        return self.write_html(items)

    def get_second_page(self, limit):
        """Render the medicinal products of the first `limit` events."""
        parser = OpenFDAParser()
        items = parser.get_medicinalproduct(limit)
        return self.write_html(items)

    def get_third_page(self, limit):
        """Render the company numbers of the first `limit` events."""
        parser = OpenFDAParser()
        items = parser.get_company_list(limit)
        return self.write_html(items)

    def get_company_html(self, comp):
        """Render the drugs reported by company number `comp`."""
        client = OpenFDAClient()
        parser = OpenFDAParser()
        event = client.get_company_drug(comp)
        items = parser.parser_get_company_drug(event)
        return self.write_html(items)

    def get_patient_sex(self, limit):
        """Render the patient gender of the first `limit` events."""
        parser = OpenFDAParser()
        items = parser.get_gender_list(limit)
        return self.write_html(items)

    def get_error_page(self):
        """Return a well-formed 404 page with the same visible content."""
        return """
        <html>
        <head>
        <title>Error 404</title>
        </head>
        <body>
        <h1>Error 404</h1>
        Page not found
        </body>
        </html>
        """

    def write_html(self, items):
        """Wrap each element of `items` in an <li> inside a minimal page."""
        html = """
        <html>
        <head></head>
        <body>
        <ol>
        """
        for element in items:
            html += "<li>" + element + "</li>"
        html += """
        </ol>
        </body>
        </html>
        """
        return html
# HTTPRequestHandler class
class testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
    """ class that manages the HTTP request from web clients """

    def execute(self,html):
        # Common tail of every HTML response: declare the content type, close
        # the header section and send the page body encoded as UTF-8.
        self.send_header('Content-type','text/html')
        self.end_headers()
        self.wfile.write(bytes(html, "utf8"))

    # GET
    def do_GET(self):
        # Route each request path to the page that renders it.
        # NOTE(review): query values are extracted with split("=")[-1], so a
        # value containing '=' would be truncated to its last segment.
        # Write content as utf-8 data
        if self.path == "/":
            # Get main page
            HTML = OpenFDAHTML()
            self.send_response(200)
            html = HTML.get_main_page()
            self.execute(html)
        elif "/listDrugs?limit=" in self.path:
            # Drug list, limited by ?limit=N
            HTML = OpenFDAHTML()
            self.send_response(200)
            web= self.path.split("=")
            limit= web[-1]
            html= HTML.get_second_page(limit)
            self.execute(html)
        elif "/listCompanies?limit=" in self.path:
            # Company list, limited by ?limit=N
            HTML = OpenFDAHTML()
            self.send_response(200)
            web= self.path.split("=")
            limit= web[-1]
            html= HTML.get_third_page(limit)
            self.execute(html)
        elif '/searchDrug?drug=' in self.path:
            # Companies reporting events for the given drug
            HTML = OpenFDAHTML()
            self.send_response(200)
            web= self.path.split("=")
            drug= web[-1]
            html= HTML.get_drug(drug)
            self.execute(html)
        elif '/searchCompany?company=' in self.path:
            # Drugs reported by the given company number
            HTML = OpenFDAHTML()
            self.send_response(200)
            web= self.path.split("=")
            comp= web[-1]
            html= HTML.get_company_html(comp)
            self.execute(html)
        elif "/listGender?limit=" in self.path:
            # Patient gender of the first N events
            HTML = OpenFDAHTML()
            self.send_response(200)
            web= self.path.split("=")
            limit= web[-1]
            html= HTML.get_patient_sex(limit)
            self.execute(html)
        elif "/secret" in self.path:
            # Demo endpoint: answer 401 and request HTTP Basic credentials.
            self.send_response(401)
            self.send_header('WWW-Authenticate','Basic realm="User Visible Realm"')
            self.end_headers()
        elif "/redirect" in self.path:
            # Demo endpoint: 302 redirect back to the app root.
            self.send_response(302)
            self.send_header('Location', 'http://localhost:8000/')
            self.end_headers()
        else:
            # Unknown path: render the 404 page.
            HTML = OpenFDAHTML()
            self.send_response(404)
            html= HTML.get_error_page()
            self.execute(html)
        return
|
#!/usr/bin/env python
"""
Provides functionality to crawl and extract news articles from a single WARC file from commoncrawl.org. Filter criteria, such as publish date
and host list, can be defined. Currently, the WARC file will be downloaded to the path WORKINGDIR/cc_download_warc, if
not otherwise specified.
"""
import logging
import os
import sys
import time
from ago import human
import boto3
import botocore
from dateutil import parser
from hurry.filesize import size
from scrapy.utils.log import configure_logging
from six.moves import urllib
from warcio.archiveiterator import ArchiveIterator
from .. import NewsPlease, EmptyResponseError
from . import commoncrawl_crawler
__author__ = "Felix Hamborg"
__copyright__ = "Copyright 2017"
__credits__ = ["Sebastian Nagel"]
class CommonCrawlExtractor:
    """Downloads one WARC file from commoncrawl.org and extracts news articles
    from it, applying host and publish-date filters before invoking the
    user-supplied callbacks.

    Fixes vs the original:
    * ``__get_remote_index`` called ``commoncrawl_crawler.__get_remote_index()``,
      which private-name mangling rewrites (inside this class) to the
      nonexistent ``_CommonCrawlExtractor__get_remote_index`` attribute.
    * ``__process_warc_gz_file`` referenced ``article`` before assignment when
      ``filter_record`` raised, producing a NameError instead of a discard.
    """

    # remote path of the warc file to download
    __warc_path = None
    # download dir for warc files
    __local_download_dir_warc = './cc_download_warc/'
    # hosts (if None or empty list, any host is OK)
    __filter_valid_hosts = []  # example: ['elrancaguino.cl']
    # start date (if None, any date is OK as start date), as datetime
    __filter_start_date = None
    # end date (if None, any date is OK as end date)
    __filter_end_date = None
    # if strict and we could not detect the date of an article, discard it
    __filter_strict_date = True
    # if True, reuse an already-downloaded file. Note there is no check that
    # the file was downloaded completely or is valid!
    __reuse_previously_downloaded_files = True
    # continue after error
    __continue_after_error = False
    # ignore unicode errors
    __ignore_unicode_errors = False
    # fetch images
    __fetch_images = False
    # log level
    __log_level = logging.INFO
    __delete_warc_after_extraction = True
    __log_pathname_fully_extracted_warcs = None

    # commoncrawl.org
    __cc_base_url = 'https://data.commoncrawl.org/'
    __cc_bucket = 'commoncrawl'
    __cc_news_crawl_names = None

    # event handler called when an article passed all filter criteria
    __callback_on_article_extracted = None
    # event handler called when a warc file is fully processed
    __callback_on_warc_completed = None
    # if the download progress is shown
    __show_download_progress = False

    # logging
    logging.basicConfig(level=__log_level)
    __logger = logging.getLogger(__name__)

    def __setup(self):
        """Create the download directory and quiet noisy third-party loggers."""
        os.makedirs(self.__local_download_dir_warc, exist_ok=True)
        # make loggers quiet
        configure_logging({"LOG_LEVEL": "ERROR"})
        for noisy in ('requests', 'readability', 'PIL', 'newspaper', 'newsplease', 'urllib3'):
            logging.getLogger(noisy).setLevel(logging.CRITICAL)
        boto3.set_stream_logger('botocore', self.__log_level)
        boto3.set_stream_logger('boto3', self.__log_level)
        boto3.set_stream_logger('s3transfer', self.__log_level)
        # set own logger
        logging.basicConfig(level=self.__log_level)
        self.__logger = logging.getLogger(__name__)
        self.__logger.setLevel(self.__log_level)

    def __register_fully_extracted_warc_file(self, warc_path):
        """Append `warc_path` to the log of fully extracted WARCs, if configured."""
        if self.__log_pathname_fully_extracted_warcs is not None:
            with open(self.__log_pathname_fully_extracted_warcs, 'a') as log_file:
                log_file.write(warc_path + '\n')

    def filter_record(self, warc_record, article=None):
        """Return (passed, article) after applying the host and date filters.

        `article` is extracted lazily (only when date filtering requires it)
        and may be returned as None.
        """
        # filter by host
        if self.__filter_valid_hosts:
            url = warc_record.rec_headers.get_header('WARC-Target-URI')
            # Very simple check: is one of the required host names contained in
            # the transaction URL? Better would be to extract the host name and
            # compare for equality, because e.g. g.co?forward_url=facebook.com
            # currently passes a filter for facebook.com.
            for valid_host in self.__filter_valid_hosts:
                if valid_host in url:
                    break
            else:
                return False, article
        # filter by date
        if self.__filter_start_date or self.__filter_end_date:
            if not article:
                article = self._from_warc(warc_record)
            publishing_date = self.__get_publishing_date(warc_record, article)
            if not publishing_date:
                if self.__filter_strict_date:
                    return False, article
            else:  # here we for sure have a date
                # is the article published too early or too late?
                if self.__filter_start_date and publishing_date < self.__filter_start_date:
                    return False, article
                if self.__filter_end_date and publishing_date > self.__filter_end_date:
                    return False, article
        return True, article

    def __get_publishing_date(self, warc_record, article):
        """Return the article's publish date as a datetime, or None if unknown."""
        if hasattr(article, 'date_publish'):
            return parser.parse(article.date_publish) if isinstance(article.date_publish, str) else article.date_publish
        return None

    def __get_remote_index(self):
        """Return the news-crawl WARC name listing from commoncrawl.org.

        BUG FIX: look up the module-private helper by its literal name; a
        direct ``commoncrawl_crawler.__get_remote_index`` attribute reference
        would be name-mangled inside this class and fail with AttributeError.
        """
        return getattr(commoncrawl_crawler, '__get_remote_index')()

    def __on_download_progress_update(self, blocknum, blocksize, totalsize):
        """urlretrieve reporthook: print download progress when enabled."""
        if not self.__show_download_progress:
            return
        readsofar = blocknum * blocksize
        if totalsize > 0:
            sys.stdout.write("\r%s / %s" % (size(readsofar), size(totalsize)))
            if readsofar >= totalsize:  # near the end
                sys.stderr.write("\r")
        else:  # total size is unknown
            sys.stdout.write("\rread %s" % (size(readsofar)))

    def __download(self, path):
        """Download `path` into the local WARC dir and return the local file path.

        Uses S3 when a verified client is available, plain HTTP otherwise, and
        reuses an existing file when configured to.
        """
        local_filename = urllib.parse.quote_plus(path)
        local_filepath = os.path.join(self.__local_download_dir_warc, local_filename)
        if os.path.isfile(local_filepath) and self.__reuse_previously_downloaded_files:
            self.__logger.info("found local file %s, not downloading again due to configuration", local_filepath)
            return local_filepath
        # remove a stale partial file, if any
        try:
            os.remove(local_filepath)
        except OSError:
            pass
        # download
        if self.__s3_client:
            with open(local_filepath, 'wb') as file_obj:
                self.__s3_client.download_fileobj(self.__cc_bucket, path, file_obj)
            return local_filepath
        url = self.__cc_base_url + path
        self.__logger.info('downloading %s (local: %s)', url, local_filepath)
        urllib.request.urlretrieve(url, local_filepath, reporthook=self.__on_download_progress_update)
        self.__logger.info('download completed, local file: %s', local_filepath)
        return local_filepath

    def _from_warc(self, record):
        """Turn a WARC response record into a NewsPlease article object."""
        return NewsPlease.from_warc(record, decode_errors="replace" if self.__ignore_unicode_errors else "strict",
                                    fetch_images=self.__fetch_images)

    def __process_warc_gz_file(self, path_name):
        """Iterate all transactions of one WARC file, extract and filter
        articles, invoke the callbacks, and finally clean up the file."""
        counter_article_total = 0
        counter_article_passed = 0
        counter_article_discarded = 0
        counter_article_error = 0
        start_time = time.time()
        with open(path_name, 'rb') as stream:
            for record in ArchiveIterator(stream):
                try:
                    if record.rec_type != 'response':
                        continue
                    counter_article_total += 1
                    article = None  # BUG FIX: was unbound when filter_record raised
                    # if the article passes the filter tests, notify the user
                    try:
                        filter_pass, article = self.filter_record(record)
                    except (UnicodeDecodeError, EmptyResponseError):
                        filter_pass = False
                    if filter_pass and not article:
                        try:
                            article = self._from_warc(record)
                        except (UnicodeDecodeError, EmptyResponseError):
                            filter_pass = False
                    if filter_pass:
                        counter_article_passed += 1
                        self.__logger.info('article pass (%s; %s; %s)', article.source_domain,
                                           article.date_publish, article.title)
                        self.__callback_on_article_extracted(article)
                    else:
                        counter_article_discarded += 1
                        if article:
                            self.__logger.info('article discard (%s; %s; %s)', article.source_domain,
                                               article.date_publish, article.title)
                        else:
                            self.__logger.info('article discard (%s)',
                                               record.rec_headers.get_header('WARC-Target-URI'))
                    if counter_article_total % 10 == 0:
                        elapsed_secs = time.time() - start_time
                        secs_per_article = elapsed_secs / counter_article_total
                        self.__logger.info('statistics')
                        self.__logger.info('pass = %i, discard = %i, error = %i, total = %i',
                                           counter_article_passed, counter_article_discarded,
                                           counter_article_error, counter_article_total)
                        self.__logger.info('extraction from current WARC file started %s; %f s/article',
                                           human(start_time), secs_per_article)
                except:  # noqa: E722 -- deliberate: behavior controlled by continue_after_error
                    if not self.__continue_after_error:
                        raise
                    self.__logger.error('Unexpected error: %s (%s)', *sys.exc_info()[0:2])
                    self.__logger.error(sys.exc_info()[2], exc_info=True)
                    counter_article_error += 1
        # cleanup
        if self.__delete_warc_after_extraction:
            os.remove(path_name)
        self.__register_fully_extracted_warc_file(self.__warc_path)
        self.__callback_on_warc_completed(self.__warc_path, counter_article_passed, counter_article_discarded,
                                          counter_article_error, counter_article_total)

    def __run(self):
        """Download the configured WARC file, then extract articles from it."""
        self.__setup()
        local_path_name = self.__download(self.__warc_path)
        self.__process_warc_gz_file(local_path_name)

    def extract_from_commoncrawl(self, warc_path, callback_on_article_extracted,
                                 callback_on_warc_completed=None,
                                 valid_hosts=None,
                                 start_date=None, end_date=None,
                                 strict_date=True, reuse_previously_downloaded_files=True, local_download_dir_warc=None,
                                 continue_after_error=True, ignore_unicode_errors=False,
                                 show_download_progress=False, log_level=logging.ERROR, delete_warc_after_extraction=True,
                                 log_pathname_fully_extracted_warcs=None, fetch_images=False):
        """Crawl and extract articles from one commoncrawl.org news WARC file.

        `callback_on_article_extracted(article)` is invoked for every article
        that passes the host/date filters; `callback_on_warc_completed(...)`
        once the file is fully processed. Remaining parameters configure the
        filters, download reuse, error handling, logging and cleanup.
        """
        self.__warc_path = warc_path
        self.__filter_valid_hosts = valid_hosts
        self.__filter_start_date = start_date
        self.__filter_end_date = end_date
        self.__filter_strict_date = strict_date
        if local_download_dir_warc:
            self.__local_download_dir_warc = local_download_dir_warc
        self.__reuse_previously_downloaded_files = reuse_previously_downloaded_files
        self.__continue_after_error = continue_after_error
        self.__ignore_unicode_errors = ignore_unicode_errors
        self.__fetch_images = fetch_images
        self.__callback_on_article_extracted = callback_on_article_extracted
        self.__callback_on_warc_completed = callback_on_warc_completed
        self.__show_download_progress = show_download_progress
        self.__log_level = log_level
        self.__delete_warc_after_extraction = delete_warc_after_extraction
        self.__log_pathname_fully_extracted_warcs = log_pathname_fully_extracted_warcs
        self.__s3_client = None
        try:
            s3_client = boto3.client('s3')
            # Verify access to the commoncrawl bucket before preferring S3
            s3_client.head_bucket(Bucket=self.__cc_bucket)
            self.__s3_client = s3_client
        except (botocore.exceptions.ClientError, botocore.exceptions.NoCredentialsError):
            self.__logger.info('Failed to read %s bucket, using monthly WARC file listings', self.__cc_bucket)
        self.__run()
|
# import sys
# input = sys.stdin.readline
#A1 ~ Aiまでの和 O(logN)
#A1 ~ Aiまでの和 O(logN)
# def BIT_query(BIT,idx):
# res_sum = 0
# if idx == 0:
# return 0
# while idx > 0:
# res_sum += BIT[idx]
# idx -= idx&(-idx)
# return res_sum
# #Ai += x O(logN)
# def BIT_update(BIT,idx,x,n):
# while idx <= n:
# BIT[idx] += x
# idx += idx&(-idx)
# return
def main():
    """Offline range distinct-count queries with a Fenwick tree (BIT).

    Reads N, Q, the sequence C and Q (l, r) query lines from stdin, then
    prints -- in input order -- the number of distinct values in C[l..r].
    Queries are swept by increasing right endpoint; the BIT holds a 1 only
    at the rightmost occurrence seen so far of each value, so a prefix-sum
    difference yields the distinct count. O((N + Q) log N).
    """
    N, Q = map(int, input().split())
    C = list(map(int, input().split()))
    LRI = []
    for i in range(Q):
        l, r = map(int, input().split())
        LRI.append((r, l, i))
    LRI.sort(key=lambda x: x[0])   # sweep by increasing right endpoint
    lastAppend = [-1] * (N + 1)    # rightmost position seen so far per value
    BIT = [0] * (N + 1)

    def BIT_query(idx):
        """Prefix sum A1..A[idx] in O(log N)."""
        res_sum = 0
        while idx > 0:
            res_sum += BIT[idx]
            idx -= idx & (-idx)
        return res_sum

    def BIT_update(idx, x):
        """A[idx] += x in O(log N)."""
        while idx <= N:
            BIT[idx] += x
            idx += idx & (-idx)

    ANS = [0] * Q
    now = 1
    for r, l, i in LRI:
        while now <= r:
            c = C[now - 1]
            # Mark the new rightmost occurrence of c and clear the previous
            # one. (The original duplicated BIT_update(now, 1) in both
            # branches of an if/else.)
            BIT_update(now, 1)
            if lastAppend[c] != -1:
                BIT_update(lastAppend[c], -1)
            lastAppend[c] = now
            now += 1
        ANS[i] = BIT_query(r) - BIT_query(l - 1)
    for ans in ANS:
        print(ans)

if __name__ == '__main__':
    main()
|
import random

# Random test-case generator, ported from Python 2 print statements to
# Python 3 print() calls. Emits: n, a line of n digits in [0, 9], n again,
# then n ordered pairs (x <= y) with both endpoints in [1, n].
n = 50
print(n)
xs = [random.randrange(10) for _ in range(n)]
print(' '.join(map(str, xs)))
print(n)
for _ in range(n):
    x = random.randrange(1, n + 1)
    y = random.randrange(1, n + 1)
    print(min(x, y), max(x, y))
|
# 8-queens Problem
def make_board():
    """Return an empty 8x8 board: a dict keyed by (col, row), all squares 0."""
    return {(col, row): 0 for col in range(8) for row in range(8)}
#print(d)
def show_board(d):
    """Print the board one rank per line (row 0 first), squares left to right."""
    for rank in range(8):
        print(*(d[(file_, rank)] for file_ in range(8)))
def test_placement(a, p) :
    """Debug helper: print row/diagonal conflict diagnostics and return True.

    NOTE(review): `a` is indexed as a[0] and `p` as p[1] then p[0], so this
    expects a = [queen_tuple, ...] and p = a two-element sequence of
    placements; it prints a safety verdict for each and always returns True.
    """
    # Borrowed straight from Prof. Raphan's slides
    dif= tuple (x-y for x, y in zip (a[0], p[1]))
    print(dif)
    y, x = dif #The column and row components of the diff tuple are extracted
    print (x)
    print (y)
    #This checks if placement of queen is safe
    if abs(x)==abs(y) or x==0:
        print ("the placement is unsafe")
    else:
        print ("safe placement")
    # Repeat the same check against the other candidate placement p[0].
    dif= tuple (x-y for x, y in zip (a[0], p[0]))
    print(dif)
    y, x = dif #The column and row components of the diff tuple are extracted
    print (x)
    print (y)
    #This checks if placement of queen is safe
    if abs(x)==abs(y) or x==0:
        print ("the placement is unsafe")
    else:
        print ("safe placement")
    return True
def safe_placement(a, p):
    """Return True when a queen at p is not attacked by the queen at a.

    Both arguments are (col, row) tuples. Same-column conflicts are
    impossible by construction (one queen per column), so only the shared
    row and the two diagonals are tested.
    """
    dcol = a[0] - p[0]
    drow = a[1] - p[1]
    # Same row (drow == 0) or shared diagonal (|drow| == |dcol|) => attacked.
    return not (drow == 0 or abs(drow) == abs(dcol))
def placin_agent() :
    # Placeholder agent hook -- currently only announces itself.
    print("Hey o")
# --- Driver: iterative backtracking search for one 8-queens solution. ---
# NOTE(review): `exit` shadows the builtin, and it is set True right after the
# inner while, so the outer loop body runs exactly once.
#def main() :
board = make_board()
show_board(board)
board[(0,0)] = 1
a = [(0,0)] # a will hold list of placed queens
print("\nFirst placement is set above but it may be changed")
print("to any position in column 1 to generate a different solution.")
show_board(board)
print()
didnt_work = []
back_track = []
# We can assume one queen per column as part of problem design
col = 1
row = 0
exit = False
while not exit :
    print(len(a))
    while len(a) != 8 :
        safe = True # will hold results of tests for all placed queens
        # Test this potential placement against all previously placed queens
        for q in a :
            if not safe_placement(q, (col,row)) :
                safe = False
        # if safe against all queens already placed
        if safe :
            # place queen, show board, increment search position to next column
            a.append((col,row))
            board[(col,row)] = 1
            show_board(board)
            print(" Queen is placed! ", (col,row), "\n")
            col = col + 1
            row = 0
        # if search reaches last row in column without finding safe placement
        elif row == 7 :
            print ("Drat! no safe position in col ", col, " must backtrack")
            didnt_work = a[:] # save didnt_work up to impossible row
            # HOW DO WE KNOW IF WE'VE BACKTRACKED YET
            #print ("Must devise test against previous backtracking.")
            # If no backtracking has occurred yet
            if back_track == [] :
                #print("This behavior is for first backtrack only. (Or it happens once.)")
                back_track.append(col-1) # record column that failed in back_track list
                print (" Didnt_work is ", didnt_work)
                print (" Back_track is ", back_track)
                x, y = a.pop() # remove last successful queen placement (which was in col -1 )
                # and get its position in x, y
                board[(x,y)] = 0 # update board
                # IF spaces remain to test in previous column
                if y < 7 :
                    print("Trying remaining rows in last column.")
                    col = x
                    row = y + 1 # begin testing again at next row from now rejected safe queen placement
                # previous column has been completely tested
                print("Must devise logic for backtracking when last column is row 7 first try.")
                print(" ... or do I?")
            # Else case where backtracking must have occurred already
            else :
                print(" This behavior is used for every backtrack after first.")
                print(" It looks at column of last saved back_track, and backtracks to the one before.")
                print(" This time back_track is ", back_track[-1])
                x, y = a.pop() # remove last successful queen placement
                board[(x,y)] = 0 # update board
                # Make certain to backtrack to before last back_track
                # or before last column if we had completely tested it
                while x >= back_track[-1] or y == 7 :
                    x, y = a.pop() # remove one more successful queen placement (which was in row - )
                    board[(x,y)] = 0 # update board
                # IF spaces remain to test in column
                # if y < 7 : (DONT NEED THIS IT MUST NOT BE 7)
                col = x
                row = y + 1 # begin testing again at next row from rejected safe queen placement
                print(" So, let's begin again at ", col, row)
                print("\n")
        # increment row after safe test has failed and not yet row == 7
        else : row = row + 1
    exit = True
    # if back_track instruction exists
    # (which means made it to row 7 with no good placement)
    # if back_track :
    #     col = back_track.pop()
    #     x,y = didnt_work.pop()
    #     row = y+1
    # else :
    #     row = row + 1
    #     col = col + 1
print("One solution is ", a)
print("\nahem.")
#main()
# a = [k for k,v in board.items() if v == 1]
# print("List 'a' of queens is", a)
|
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
# Display names for every classifier this module can construct.
models_list = ['CatBoostClassifier', 'LGBMClassifier', "Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
               "Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
               "Naive Bayes", "QDA"]

# Maps each display name to a ready-to-fit estimator instance.
# NOTE(review): these are shared module-level singletons -- fitting one
# mutates the shared object for every consumer of this mapping.
MODELS_STR_TO_OBJECT = {'CatBoostClassifier': CatBoostClassifier(verbose=False),
                        'LGBMClassifier': LGBMClassifier(verbose=-1),
                        'Nearest Neighbors': KNeighborsClassifier(3),
                        'Linear SVM': SVC(kernel="linear", C=0.025),
                        'RBF SVM': SVC(gamma=2, C=1),
                        'Gaussian Process': GaussianProcessClassifier(1.0 * RBF(1.0)),
                        'Decision Tree': DecisionTreeClassifier(max_depth=5),
                        'Random Forest': RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
                        'Neural Net': MLPClassifier(alpha=1, max_iter=1000),
                        'AdaBoost': AdaBoostClassifier(),
                        'Naive Bayes': GaussianNB(),
                        'QDA': QuadraticDiscriminantAnalysis()}
|
import numpy as np
import cv2
cap = cv2.VideoCapture('video.mp4')
# MOG2 background subtractor: segments moving foreground from the static scene.
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    if not ret:
        # End of stream or read failure: frame is None here, and the original
        # crashed inside fgbg.apply/imshow. Stop cleanly instead.
        break
    fgmask = fgbg.apply(frame)
    cv2.imshow('fgmask', fgmask)
    cv2.imshow('orjinal', frame)
    # ~25 ms per frame keeps playback near real time; ESC (27) quits early.
    k = cv2.waitKey(25) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
from django.db import models
from django_extmodels.manager import ExtManager
from django_extmodels.options import ExtOptions
from django_extmodels.settings import ext_settings
# Configurable names: the inner options class declared on models (e.g.
# `class ExtMeta`) and the attribute the collected options are stored under.
META_CLASS_NAME = ext_settings['META_NAME']
META_ATTR_NAME = ext_settings['META_ATTR']
class ExtModelBase(models.base.ModelBase):
    """Model metaclass that attaches an ExtOptions instance to every concrete
    subclass, inheriting options from the most recent parent that has them and
    merging in the subclass's own `class ExtMeta` declaration, if any."""

    def __new__(cls, name, bases, attrs):
        super_new = super().__new__
        # If this isn't a subclass of Model, don't bother with initilization
        parents = [b for b in bases if isinstance(b, ExtModelBase)]
        if not parents:
            return super_new(cls, name, bases, attrs)
        # Preserve ExtOptions inherited by most recent parent
        base_meta = None
        for base in bases:
            base_meta = getattr(base, META_ATTR_NAME, None)
            if base_meta: break # noqa
        # Contribute to attrs before super constructor
        attrs[META_ATTR_NAME] = ExtOptions(base_meta)
        # If new_class declared `class ExtMeta` this will override the parent options
        meta = attrs.get(META_CLASS_NAME, None)
        if meta:
            attrs.get(META_ATTR_NAME)._merge_options(meta)
        return super_new(cls, name, bases, attrs)
class ExtModel(models.Model, metaclass=ExtModelBase):
    """Abstract base model wired to ExtModelBase: subclasses get the extended
    manager plus ExtMeta option collection from the metaclass."""
    objects = ExtManager()

    class Meta:
        abstract = True
|
from collections.abc import Callable
from inspect import FullArgSpec
from typing import Any, TypeVar
from typing_extensions import ParamSpec
# Type-stub declarations: every body is `...` by design.
_T = TypeVar('_T')
_P = ParamSpec('_P')

def arginfo(callable: Callable[..., Any]) -> FullArgSpec: ...
def is_cached(callable: Callable[..., Any]) -> bool: ...
def get_callable_info(callable: Callable[_P, _T]) -> tuple[Callable[_P, _T], object, bool]: ...
def fake_empty_init() -> None: ...

class Dummy: ...

# NOTE(review): presumably object.__init__'s slot wrapper, used as a sentinel
# for classes without their own __init__ -- confirm in the implementation.
WRAPPER_DESCRIPTOR: object

def get_class_init(class_: type[Any]) -> Callable[..., _T]: ...
|
import os, sys, time

# force run on CPU?
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Locate the caffe checkout relative to this script and use its python bindings.
caffe_root = os.path.dirname(os.path.abspath(__file__))+'/../../'
sys.path.insert(0, caffe_root+'python')
#os.environ['GLOG_minloglevel'] = '2'
import numpy as np
np.set_printoptions(linewidth=200)
import cv2
import caffe

# Build a one-image LMDB (examples/images/cat.jpg, label 0) on first run so
# the test prototxt has data to read.
if not os.path.isdir(caffe_root+'examples/images/CatLMDB'):
    import subprocess
    with open(caffe_root+'examples/images/cat.txt','w') as listfile:
        listfile.write('cat.jpg 0')
    subprocess.check_output([caffe_root+'build/tools/convert_imageset',
                             '--encoded=1',
                             '--encode_type=png',
                             caffe_root+'examples/images/',
                             caffe_root+'examples/images/cat.txt',
                             caffe_root+'examples/images/CatLMDB'])

caffe.set_mode_gpu()  # NOTE(review): despite the CPU comment above, this selects GPU mode
nnet = caffe.Net(caffe_root+'examples/BILATERAL_TEST/Test.prototxt', caffe.TEST)
def displayable(caffeimage, idx=0):
    """Convert element `idx` of a caffe blob (N, C, H, W) into an HxWxC uint8
    array suitable for cv2.imshow, rounding float pixel values."""
    hwc = np.transpose(caffeimage[idx, :, :, :], (1, 2, 0))
    return np.round(hwc).astype(np.uint8)
# Run forward passes repeatedly, timing each one and displaying the input RGB
# batch next to the bilateral-filtered output for every mini-batch element.
for ii in range(10000):
    beftime = time.time()
    nnet.forward()
    afttime = time.time()
    caffeim = nnet.blobs['data_rgb'].data
    filt_bilat = nnet.blobs['filt_bilat'].data
    print("forward time: "+str(afttime - beftime)+" seconds")
    for mbidx in range(caffeim.shape[0]):
        cv2.imshow(str(mbidx)+'caffeim', displayable(caffeim,mbidx))
        cv2.imshow(str(mbidx)+'filt_bilat', displayable(filt_bilat,mbidx))
        cv2.waitKey(0)  # blocks until a key is pressed before the next pass
|
import numpy as np
import adios2 as ad
import io
from PIL import Image
from ast import literal_eval as make_tuple
#Given a writable opened adios file (returned by adios2.open), image data of the form used by PIL (or corresponding numpy array), and variable name, write the image to the adios file.
def write_image_hl(ad_file, image, var_name, end_step=False):
    """Write `image` (RGB, HxWx3) under `var_name` as an uncompressed plxr image.

    Two variables are written: `<var_name>/__plxr_schema_type` (schema marker)
    and `<var_name>/__plxr_data` (the raw pixel array).
    """
    image = np.array(image)
    # Validate the rank before indexing shape[2]: the original indexed first,
    # so grayscale/2-D input raised IndexError instead of the intended TypeError.
    if image.ndim != 3 or image.shape[2] != 3:
        raise TypeError("Expecting RGB Data, size of third dimension must be 3")  # todo: deal with other formats
    ad_file.write("%s/__plxr_schema_type" % var_name, "__plxr:image-rgb-8")
    ad_file.write("%s/__plxr_data" % var_name, image, image.shape, (0, 0, 0), image.shape, end_step=end_step)
def write_png_image_hl(ad_file, image, var_name, end_step=False):
    """PNG-compress an RGB image and write the byte stream to `ad_file`
    under `var_name`, tagged with the plxr PNG schema marker."""
    arr = np.array(image)
    if not arr.shape[2] == 3:
        raise TypeError("Expecting RGB Data, size of third dimension must be 3")  # todo: deal with other formats
    pil_image = Image.fromarray(arr)
    # Encode to PNG in an in-memory buffer.
    buf = io.BytesIO()
    pil_image.save(buf, format="PNG")
    payload = np.frombuffer(buf.getvalue(), dtype=np.dtype('b'))
    # Write the schema marker, then the 1-D PNG byte array.
    ad_file.write("%s/__plxr_schema_type" % var_name, "__plxr:image-png")
    ad_file.write("%s/__plxr_data" % var_name, payload, payload.shape, (0,), payload.shape, end_step=end_step)
def write_image_from_matplotlib_hl(ad_file, fig, var_name, end_step=False):
    """Rasterize matplotlib figure `fig` and store it as an uncompressed
    plxr RGB image under `var_name`."""
    fig.canvas.draw()
    # np.fromstring(..., sep='') is deprecated for binary data; frombuffer is
    # the supported equivalent. The canvas reports (width, height), so reverse
    # it to get numpy's (rows, cols) ordering.
    img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    # BUG FIX: the original called the undefined name `write_image`; the
    # matching helper with this signature is write_image_hl.
    write_image_hl(ad_file, img, var_name, end_step=end_step)
def write_png_image_from_matplotlib_hl(ad_file, fig, var_name, end_step=False):
    """Rasterize matplotlib figure `fig` and store it as a PNG-compressed
    plxr image under `var_name`."""
    fig.canvas.draw()
    # np.fromstring(..., sep='') is deprecated for binary data; frombuffer is
    # the supported equivalent. The canvas reports (width, height), so reverse
    # it to get numpy's (rows, cols) ordering.
    img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    write_png_image_hl(ad_file, img, var_name, end_step=end_step)
def get_available_image_steps_hl(ad_file, var_name):
    """Return how many steps of the plxr image `var_name` exist in `ad_file`."""
    metadata = ad_file.available_variables()
    count = metadata['%s/__plxr_data' % var_name]['AvailableStepsCount']
    return int(count)
def read_image_hl (ad_step, var_name):
    """Read the plxr image stored under `var_name` at this adios step and
    return it as a PIL Image, or None for an unsupported schema type."""
    #Check image type
    ad_vars = ad_step.available_variables()
    schema_type = ad_step.read_string(var_name + '/__plxr_schema_type')[0]
    if "__plxr:image-rgb-8" in schema_type:
        # Raw RGB pixels: read the full 3-D array and wrap it in an Image.
        # Should allow steps to contain images of different sizes (as for image-png)
        # for now, assume steps are the same size
        shape = make_tuple(ad_vars['%s/__plxr_data'%var_name]['Shape'])
        #img_data = ad_step.read("%s/__plxr_data"%var_name, start=(0,0,0), count=shape, step_start=step, step_count=1)[0] #Returns a list of one step
        img_data = ad_step.read("%s/__plxr_data"%var_name, start=(0,0,0), count=shape)[0] #Returns a list of one step
        return Image.fromarray(img_data)
    elif "__plxr:image-png" in schema_type:
        # PNG byte stream: read the 1-D byte array and let PIL decode it.
        shapes = []
        #Loop through metadata and capture shape data
        #Can improve this later by capturing these once rather than at every read
        ad_vars = ad_step.available_variables()
        shape = [int(ad_vars['%s/__plxr_data'%var_name]['Shape'])]
        print ("Shape is {}".format(shape))
        #print ("Reading step {}, shapes[step] is {}".format(step, shapes[step]))
        img_data = ad_step.read("%s/__plxr_data"%var_name, start=[0], count=shape)
        #img_data = ad_step.read("%s/__plxr_data"%var_name, start=[0], count=shape)[0] #Returns a list of one step
        #print (img_data)
        buf = io.BytesIO(img_data)
        return Image.open(buf)
    else:
        print ("Unsupported schema type in read_image")
        print (schema_type)
        print ("__plxr:image-png")
        return None
def get_image_names_hl (ad_file):
    """List the variable prefixes in *ad_file* whose schema tag marks an image."""
    found = []
    for key in ad_file.available_variables().keys():
        if not key.split('/')[-1].startswith('__plxr_schema_type'):
            continue
        # "Value" is no longer provided for strings, so read the tag directly.
        if 'image' in ad_file.read_string(key, step_start=0, step_count=1)[0]:
            found.append(key[0:key.rfind('/')])
    return found
def get_raw_var_names_hl (ad_file):
    """List variables that are not plexr metadata (leaf name not '__plxr*')."""
    return [key for key in ad_file.available_variables().keys()
            if not key.split('/')[-1].startswith('__plxr')]
def write_image_ll (io, engine, image, var_name):
    """Low-level raw-image writer -- not implemented yet.

    NOTE(review): the 'io' parameter shadows the stdlib io module used
    elsewhere in this file.
    """
    pass
def write_png_image_ll (ad_io, ad_engine, image, var_name):
    """PNG-encode *image* and publish it via the low-level ADIOS2 API.

    Defines two variables under *var_name*: a schema tag and the PNG byte
    stream.  Put() is deferred; the caller is responsible for EndStep/flush.
    """
    image = np.array(image)
    if not image.shape[2] == 3:
        raise TypeError("Expecting RGB Data, size of third dimension must be 3") #todo: deal with other formats
    image = Image.fromarray(image)
    # Write image to IO buffer as png
    buf = io.BytesIO()
    image.save(buf, format="PNG")
    contents = np.frombuffer(buf.getvalue(), dtype=np.dtype('b'))
    #print (contents)
    schema_type_str = "__plxr:image-png"
    start = (0,)
    # Write buffered data to adios
    var_schema_type = ad_io.DefineVariable("%s/__plxr_schema_type"%var_name)
    #, schema_type_str.shape, (0,), schema_type_str.shape, ad.ConstantDims)
    var_data = ad_io.DefineVariable("%s/__plxr_data"%var_name, contents, contents.shape, start, contents.shape, ad.ConstantDims)
    ad_engine.Put(var_schema_type, schema_type_str)
    ad_engine.Put(var_data, contents)
    #ad_file.write("%s/__plxr_schema_type"%var_name, "__plxr:image-png")
    #ad_file.write("%s/__plxr_data"%var_name, contents, contents.shape, (0,), contents.shape, end_step=end_step)
def write_image_from_matplotlib_ll (io, engine, fig, var_name):
    """Low-level matplotlib raw-image writer -- not implemented yet."""
    pass
def write_png_image_from_matplotlib_ll (io, engine, fig, var_name):
    """Low-level matplotlib PNG writer -- not implemented yet."""
    pass
def get_available_image_steps_ll (io, engine, var_name):
    """Low-level step-count query -- not implemented yet."""
    pass
def read_image_ll (ad_io, ad_engine, var_name):
    """Read an image via the low-level ADIOS2 API (IO + Engine pair).

    Returns a PIL Image for PNG-schema variables; None for unknown schemas.
    """
    # Check image type
    var_type = ad_io.InquireVariable("{}/__plxr_schema_type".format(var_name) )
    #print (dir(var_type))
    print (ad_io.AvailableVariables() )
    schema_type = ' '
    ad_engine.Get(var_type, schema_type)
    ad_engine.PerformGets()
    print ("schema_type is ({})".format(schema_type) )
    # ad_vars = ad_step.available_variables()
    # schema_type = ad_step.read_string(var_name + '/__plxr_schema_type')[0]
    if "__plxr:image-rgb-8" in schema_type:
        # Should allow steps to contain images of different sizes (as for image-png)
        # for now, assume steps are the same size
        # BUG(review): ad_vars and ad_step are NOT defined in this function --
        # this branch would raise NameError; it looks copied from read_image_hl
        # and still needs porting to the low-level API.
        shape = make_tuple(ad_vars['%s/__plxr_data'%var_name]['Shape'])
        #img_data = ad_step.read("%s/__plxr_data"%var_name, start=(0,0,0), count=shape, step_start=step, step_count=1)[0] #Returns a list of one step
        img_data = ad_step.read("%s/__plxr_data"%var_name, start=(0,0,0), count=shape)[0] #Returns a list of one step
        return Image.fromarray(img_data)
    elif "__plxr:image-png" in schema_type:
        var_data = ad_io.InquireVariable("%s/__plxr_data"%var_name)
        #print(dir(var_data))
        data_size = var_data.Shape()
        data = np.zeros(data_size, dtype=np.int8)
        print (data)
        ad_engine.Get(var_data, data)
        ad_engine.PerformGets()
        buf = io.BytesIO(data)
        return Image.open(buf)
    # shapes = []
    # #Loop through metadata and capture shape data
    # #Can improve this later by capturing these once rather than at every read
    # ad_vars = ad_step.available_variables()
    # shape = [int(ad_vars['%s/__plxr_data'%var_name]['Shape'])]
    #
    # print ("Shape is {}".format(shape))
    #
    # #print ("Reading step {}, shapes[step] is {}".format(step, shapes[step]))
    # img_data = ad_step.read("%s/__plxr_data"%var_name, start=[0], count=shape)
    # #img_data = ad_step.read("%s/__plxr_data"%var_name, start=[0], count=shape)[0] #Returns a list of one step
    # #print (img_data)
    # buf = io.BytesIO(img_data)
    # return Image.open(buf)
    else:
        print ("Unsupported schema type in read_image")
        print (schema_type)
        print ("__plxr:image-png")
        return None
# Reference snippet (SetSelection/Get usage) kept for future porting:
# var_inTemperature = ioRead.InquireVariable("temperature2D")
# if(var_inTemperature is False):
#     raise ValueError('var_inTemperature is False')
#
# assert var_inTemperature is not None
# readOffset = [2, 2]
# readSize = [4, 4]
#
# var_inTemperature.SetSelection([readOffset, readSize])
# inTemperatures = np.zeros(readSize, dtype=np.int)
# ibpStream.Get(var_inTemperature, inTemperatures, adios2.Mode.Sync)
def get_image_names_ll (io, engine):
    """Low-level image-name listing -- not implemented yet."""
    pass
def get_raw_var_names_ll (io, engine):
    """Low-level raw-variable listing -- not implemented yet."""
    pass
|
from question1 import clearConsole,bcolors
from stackclass import stack
from stackclasswithLL import stack as LLstack
isStackWithLL= False ## True : Implementation with linked list stack | False : Implementation with array stack
def run(isstackWithLL):
    """Interactive driver: read an infix expression, show its parenthesised
    form, and cross-check evaluation of the postfix vs. parenthesised forms.

    Parameters: isstackWithLL -- True selects the linked-list stack backend.
    """
    global isStackWithLL
    isStackWithLL = isstackWithLL
    clearConsole()
    print((bcolors.RED + " algebra equation parantez adder " + bcolors.ENDC).center(110,"~"))
    print("\n")
    print("enter algebra equation:".center(100," "))
    eq = input("".center(40," "))
    eqPasvandi = mianvandiToPasvandi(eq) ## ex: 25*65 => 25 65 *
    eqParantesi = pasvandiToparantes(eqPasvandi) ## ex 25 65 * => (25*65)
    print((bcolors.YELLOW+"↓ parantes form ↓"+ bcolors.ENDC).center(100," "))
    print((bcolors.YELLOW + str(eqParantesi)+ bcolors.ENDC).center(100," "))
    try:
        ans1 = calparantez(eqParantesi) ## evaluate the parenthesised equation
        ans2 = calcPasvandi(eqPasvandi) ## evaluate the postfix equation
    except Exception:
        # Was a bare "except:", which also swallowed KeyboardInterrupt/SystemExit.
        ans1 = None
        ans2 = None
    if ans1 is not None and ans1==ans2 :
        print("\n")
        print((bcolors.CYAN + f"ans with parantes:{ans1} | ans with olaviat:{ans2} =>" + bcolors.ENDC+bcolors.GREEN + "True" + bcolors.ENDC).center(120," "))
        #print((bcolors.GREEN + "True" + bcolors.ENDC).center(100," "))
    if ans1 is None or ans2 is None:
        print((bcolors.RED + "cant calculate answer to check corrcty of operation but it is True :)" + bcolors.ENDC).center(100," "))
    print("press enter to continue ...".center(100," "))
    input("".center(50," "))
def calcPasvandi(eq): ## evaluate a postfix ("pasvandi") equation
    """Evaluate a postfix (RPN) expression string.

    Numbers are space-separated and may have several digits; operators pop
    two operands, apply cal(), and push the result back as a string.
    """
    global isStackWithLL
    funcs = [["/","*"],["+","-"]]
    if isStackWithLL==False: ## implementation with array stack
        mystack = stack()
    else: ## implementation with linked-list stack
        mystack = LLstack()
    neweq = ""
    reset =True ## used to support numbers with more than one digit
    for i,char in enumerate(eq):
        if char == funcs[0][1] or char == funcs[0][0] or char == funcs[1][1] or char == funcs[1][0] : ## if input is " + , - , * , / "
            num2 = float(mystack.pop())
            num1 = float(mystack.pop())
            ans = cal(char,num1,num2)
            mystack.push(str(ans))
        else:
            if reset == True: ## push the first digit of a new number
                mystack.push(char)
                reset = False
            else:
                if eq[i]==" ": ## numbers are separated by spaces
                    reset = True
                else: ## append further digits to the number on top of the stack
                    temp2 = mystack.pop()
                    temp2 = temp2 + str(char)
                    mystack.push(temp2)
    while mystack.topIndex>-1: ## pop the final answer from the stack
        neweq = mystack.pop() + neweq
    return neweq
def calparantez(eq): ## evaluate a fully parenthesised equation
    """Repeatedly reduce the innermost "(a op b)" group until no '(' remains.

    Returns the final value as a string (the shrinking expression itself).
    """
    while eq.find("(") != -1:
        funcs = [["/","*"],["+","-"]]
        func=0
        befpar = 0
        aftpar = len(eq)-1
        for i in range(len(eq)-1,-1,-1): ## find the innermost "(" and its matching ")"
            if eq[i]=="(":
                befpar = i
                for j in range(i,len(eq)):
                    if eq[j] == ")":
                        aftpar = j
                        break
                break
        for i in range(befpar+1,aftpar):
            if eq[i] == funcs[0][1] or eq[i] == funcs[0][0] or eq[i] == funcs[1][1] or eq[i] == funcs[1][0] : ## find " + , - , * , / " between the parentheses
                char = i
                break
        # Replace "(a op b)" with its computed value and keep reducing.
        ans = cal(eq[char],eq[befpar+1:char],eq[char+1:aftpar])
        eq = eq[0:befpar]+ str(ans) + eq[aftpar+1:]
    return eq
def cal(func,num1,num2):
    """Apply the binary operator *func* to num1 and num2 (as floats).

    '*', '/' and '-' are handled explicitly; any other symbol falls back to
    addition, matching the original behaviour.
    """
    a = float(num1)
    b = float(num2)
    if func=="*":
        return a * b
    if func=="/":
        return a / b
    if func=="-":
        return a - b
    # anything else (including '+') is treated as addition
    return a + b
def pasvandiToparantes(eq):
    """Convert a postfix expression to its fully parenthesised infix form.

    Example: "25 65 *" -> "(25*65)".  Operands are collected digit by digit
    (space-separated); each operator wraps the top two stack entries.
    """
    global isStackWithLL
    funcs = [["/","*"],["+","-"]]
    if isStackWithLL==False: ## implementation with array stack
        mystack = stack()
    else: ## implementation with linked-list stack
        mystack = LLstack()
    neweq = ""
    reset =True ## used to support numbers with more than one digit
    for i,char in enumerate(eq):
        if char == funcs[0][1] or char == funcs[0][0] or char == funcs[1][1] or char == funcs[1][0] :
            # operator: combine the top two operands as "(left op right)"
            temp = mystack.pop() + ")"
            temp = char + temp
            temp = "(" + mystack.pop() + temp
            mystack.push(temp)
        else:
            if reset == True: ## push the first digit of a new number
                mystack.push(char)
                reset = False
            else:
                if eq[i]==" ": ## numbers are separated by spaces
                    reset = True
                else: ## append further digits to the number on top of the stack
                    temp2 = mystack.pop()
                    temp2 = temp2 + str(char)
                    mystack.push(temp2)
    while mystack.topIndex>-1: ## pop the final expression from the stack
        neweq = mystack.pop() + neweq
    return neweq
def mianvandiToPasvandi(eq):
    """Convert an infix ("mianvandi") expression to postfix ("pasvandi").

    Classic shunting-yard with two precedence levels: '*'/'/' above '+'/'-'.
    Numbers are emitted immediately and terminated with a space.
    """
    global isStackWithLL
    funcs = [["/","*"],["+","-"]]
    if isStackWithLL==False: ## implementation with array stack
        mystack = stack()
    else: ## implementation with linked-list stack
        mystack = LLstack()
    neweq = ""
    for i,char in enumerate(eq):
        if char == funcs[0][1] or char == funcs[0][0]: # '*' , '/'
            beforefunc = mystack.peek()
            if beforefunc == False: ## stack is empty
                mystack.push(char)
            elif beforefunc == funcs[1][1] or beforefunc == funcs[1][0]: # top is "-" or "+": lower precedence, just push
                mystack.push(char)
            else:
                # pop equal-precedence '*'/'/' operators before pushing
                while mystack.topIndex > -1 and (mystack.peek()==funcs[0][1] or mystack.peek()==funcs[0][0]):
                    neweq = neweq + str(mystack.pop())
                mystack.push(char)
        elif char == funcs[1][1] or char == funcs[1][0]:# '+' , '-'
            while mystack.topIndex > -1 :## drain the whole stack (lowest precedence)
                neweq = neweq + str(mystack.pop())
            mystack.push(char)
        else: # digit of a number
            neweq = neweq + char
            # terminate the number with a space when the next char is an
            # operator or the input ends
            if (i+1<len(eq) and ( eq[i+1]== funcs[0][1] or eq[i+1]== funcs[0][0] or eq[i+1]== funcs[1][1] or eq[i+1]== funcs[1][0])) or i+1==len(eq):
                neweq = neweq + " "
    while(mystack.topIndex!=-1): ## flush remaining operators
        neweq = neweq + str(mystack.pop())
    return neweq
import os
from configparser import ConfigParser
""" This file simply loads and store all information needed for the bot. """
# Reading Config File (credentials.ini lives next to this module)
config = ConfigParser()
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
CONFIG_PATH = os.path.join(PROJECT_ROOT, 'credentials.ini')
config.read(CONFIG_PATH)
# BOT'S INFO ========================================
OWNER_ID = config.get('Admin', 'OWNER_ID')
CLIENT_ID = config.get('Discord Bot Credentials', 'CLIENT_ID')
CLIENT_TOKEN = config.get('Discord Bot Credentials', 'CLIENT_TOKEN')
CLIENT_SECRET = config.get('Discord Bot Credentials', 'CLIENT_SECRET')
CLIENT_PREFIX = config.get('Discord Bot Credentials', 'CLIENT_PREFIX')
CLIENT_USERNAME = config.get('Discord Bot Credentials', 'CLIENT_USERNAME')
# ===================================================
# GOOGLE'S YOUTUBE API CREDENTIALS / CONFIGS ========
YOUTUBE_API_KEY = config.get('Youtube API Credentials', 'YOUTUBE_API_KEY')
YOUTUBE_CLIENT_ID = config.get('Youtube API Credentials', 'YOUTUBE_CLIENT_ID')
YOUTUBE_CLIENT_SECRET = config.get('Youtube API Credentials', 'YOUTUBE_CLIENT_SECRET')
YOUTUBE_DEFAULT_SEARCH_RETURNS = config.getint('Youtube Search Config', 'YOUTUBE_DEFAULT_SEARCH_RETURNS')
YOUTUBE_MAX_SEARCH_RETURNS = config.getint('Youtube Search Config', 'YOUTUBE_MAX_SEARCH_RETURNS')
# ===================================================
# TWITCH's NEW API CREDENTIALS ======================
TWITCH_CLIENT_ID = config.get('Twitch API Credentials', 'TWITCH_CLIENT_ID')
TWITCH_CLIENT_SECRET = config.get('Twitch API Credentials', 'TWITCH_CLIENT_SECRET')
# ===================================================
# YOUTUBE_DL CONFIGS ================================
# NOTE(review): 'nocheckcertificate' is the string 'True' rather than a bool,
# and 'prefer-ffmpeg' uses a dash where youtube-dl options usually use
# underscores -- confirm both against the youtube_dl option names.
YOUTUBE_DL_CONFIGS = {
    'nocheckcertificate': 'True',
    'default_search': 'auto',
    'quiet': True,
    'format': 'bestaudio/best',
    'noplaylist': True,
    'prefer-ffmpeg': True
}
MUSIC_VOLUME_DEFAULT = config.getfloat('Music Config', 'MUSIC_VOLUME_DEFAULT')
# ===================================================
# SOUNDCLOUD CREDENTIALS / CONFIGS ==================
# SoundCloud stop giving out API Keys, so empty for now
# ===================================================
# SMUG IMAGES INITIALIZATION ========================
SMUG_LIST = []
for path, subdirs, files in os.walk(r'images/smug'):
for filename in files:
SMUG_LIST.append(filename)
SMUG_LIST_SIZE = len(SMUG_LIST)
# ===================================================
# LINK IMAGES INITIALIZATION ========================
LINK_LIST = []
for f_path, f_sub_dirs, f_files in os.walk(r'images/link'):
for f_filename in f_files:
LINK_LIST.append(f_filename)
LINK_LIST_SIZE = len(LINK_LIST)
# RIP IMAGES INITIALIZATION =========================
RIP_LIST = []
for r_path, r_sub_dirs, r_files in os.walk(r'images/rip'):
for r_filename in r_files:
RIP_LIST.append(r_filename)
RIP_LIST_SIZE = len(RIP_LIST)
|
import sys
import os
# Python 2 script (uses "print >>file" syntax): strips \newcommand macros from
# a LaTeX file via de-macro, then converts the result to WordPress-ready HTML.
TMP = "/tmp/demacro"
os.system("mkdir -p %s" %TMP)
arg = sys.argv[1]
# Accept "foo.tex", "foo." or "foo" and recover the bare root name.
if ".tex" in arg:
    file_root_name = arg[0:-4]
elif arg[-1] == ".":
    file_root_name = arg[0:-1]
else:
    file_root_name = arg
current_dir = os.getcwd()
preamble = ""
doc_started = False
with open("%s.tex" %file_root_name) as f, open("%s/source.tex" %TMP, "w") as g:
    print >>g, f.readline().strip() # print \documentclass
    for line in f:
        if line.strip() == r'\begin{document}':
            doc_started = True
            # Inject the private style file plus colour helper macros right
            # before \begin{document}.
            print >>g, r'\usepackage{demacro-private}'
            print >>g, r'\def\blue{\bgroup\color{blue}}'
            print >>g, r'\def\endblue{\egroup}'
            print >>g, r'\def\red{\bgroup\color{red}}'
            print >>g, r'\def\endred{\egroup}'
            print >>g, r'\def\green{\bgroup\color{green}}'
            print >>g, r'\def\endgreen{\egroup}'
            print >>g, r''
            print >>g, line.strip() # begin document
        elif not doc_started:
            # Collect the whole preamble; \newcommand lines are kept out of the
            # copied source and appended to the private .sty below instead.
            preamble += line
            if not r"newcommand" in line:
                print >>g, line.strip()
        elif doc_started:
            print >>g, line.strip()
os.system("cp -f *.tex " + TMP)
os.system("cp -f ~/dotfiles/py-scripts/demacro-private.sty " + TMP)
with open("%s/demacro-private.sty" %TMP, "a") as f:
    print >>f, "\n" + preamble
os.chdir(TMP)
os.system("de-macro source.tex")
os.system("python2 ~/dotfiles/py-scripts/latex2wp.py source-clean.tex")
os.chdir(current_dir)
os.system("cp -f %s/source-clean.html %s.html" %(TMP, file_root_name))
os.system("cp -f %s/source-clean.tex %s.clean" %(TMP, file_root_name))
|
'''
server.py
---------
v-1.0.0
It is a basic web server built to be used for development and debugging,
it is not intended for production use!
'''
import socket
# Handling the request so that we can serve the response
def handle_request(request):
    """Handles HTTP requests.

    Placeholder: parsing/validation of the raw request text is not
    implemented yet; always returns an empty string.
    """
    # I will add the code later on ...
    return ""
# Server host and port number
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 8080
# sock_stream is used because this is a TCP connection
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# binding the server socket with host and port
server_socket.bind((SERVER_HOST, SERVER_PORT))
# Starts listening to the serer only accepts 1 request per time
server_meta_info = (
    "* WARNING: Do not use this server on production level.\n"
    "* Debugger: ... (I will add it later on)\n"
    "* Running on http://0.0.0.0:8080/ (press CTRL+C to quit)\n"
)
print(server_meta_info)
server_socket.listen(1)
while True:
    # Wait for client connections
    client_connection, client_address = server_socket.accept()
    # Get the client's request
    request = client_connection.recv(1024).decode()
    print("---\n\nREQUEST:\n\n" + request)
    # Get the headers
    # NOTE(review): request.split()[1] raises IndexError on an empty request.
    filename = request.split()[1]
    if (filename == '/'):
        filename = "index.html"
    # Get the content of index.html
    # NOTE(review): fin leaks if read() raises; a with-block would be safer,
    # and a missing file crashes the loop with FileNotFoundError.
    fin = open('./' + filename)
    content = fin.read()
    fin.close()
    # Send HTTP response
    # NOTE(review): HTTP technically requires CRLF ("\r\n\r\n") separators.
    response = 'HTTP/1.0 200 OK\n\n' + content
    client_connection.send(response.encode())
    client_connection.close()
# Close socket (? i am unsure about this)
# server_socket.close()
|
liste = [5, 8, 7, 3, 2, 4, 6, 1]
# In-place insertion sort: grow a sorted prefix, shifting larger elements
# right to open a slot for the current value.
for idx in range(1, len(liste)):
    current = liste[idx]
    pos = idx
    while pos > 0 and liste[pos - 1] > current:
        liste[pos] = liste[pos - 1]
        pos -= 1
    liste[pos] = current
print(liste)
from django.apps import AppConfig
class LugdunumConfig(AppConfig):
    """Django application configuration for the Lugdunum app."""
    name = 'Lugdunum'
|
class Solution(object):
    """In-place Game of Life (LeetCode 289) using a 2-bit state encoding."""

    def gameOfLife(self, board):
        """Advance *board* one generation in place.

        During the sweep, cells are temporarily encoded so the original state
        stays recoverable: 2 = dead -> live, 3 = live -> dead.  A second pass
        collapses the encoding back to 0/1.
        """
        if not board or not board[0]:
            return
        rows, cols = len(board), len(board[0])
        for r in range(rows):
            for c in range(cols):
                live = self.findLiveNeighbor(board, r, c)
                if board[r][c] in (0, 2):
                    if live == 3:
                        board[r][c] = 2  # dead cell becomes alive
                elif live < 2 or live > 3:
                    board[r][c] = 3      # live cell dies
        for r in range(rows):
            for c in range(cols):
                if board[r][c] == 2:
                    board[r][c] = 1
                elif board[r][c] == 3:
                    board[r][c] = 0

    def findLiveNeighbor(self, board, i, j):
        """Count the 8 neighbours whose ORIGINAL state was live (odd encoding)."""
        total = 0
        for dr in (-1, 0, 1):
            for dc in (-1, 0, 1):
                if dr == 0 and dc == 0:
                    continue
                a, b = i + dr, j + dc
                if 0 <= a < len(board) and 0 <= b < len(board[0]) and board[a][b] % 2 == 1:
                    total += 1
        return total
from itertools import izip
from copy import deepcopy
import socket
import re
def is_bool(obj):
    """Return True exactly when *obj* is a bool.

    The previous check ``isinstance(obj, bool) and not isinstance(obj, int)``
    always returned False, because bool is a subclass of int in Python.
    """
    return isinstance(obj, bool)
def dict_to_dictlist(d):
    """Explode *d* into a list of single-entry dicts, one per key."""
    result = []
    for key, value in d.items():
        result.append({key: value})
    return result
def dictlist_to_dict(l):
    """Collapse a dictlist (list of one-key dicts) into a single dict.

    Raises ValueError when any entry has more or fewer than one key.
    """
    merged = {}
    for entry in l:
        if len(entry) != 1:
            raise ValueError("Not a dictlist!")
        merged.update(entry)
    return merged
def consistent_uuid(key):
    """Derive a stable, UUID-shaped string from a salt-managed hash of *key*.

    grains.get_or_set_hash persists the hash on the minion, so repeated calls
    return the same value.  NOTE: __salt__ is injected by the Salt loader.
    """
    h = str(__salt__['grains.get_or_set_hash'](key, length=32, chars='abcdef0123456789'))
    # Re-shape the 32 hex chars into the 8-4-4-4-12 UUID layout.
    return '-'.join((h[0:8], h[8:12], h[12:16], h[16:20], h[20:32]))
NET_REMAP = {'ip': 'ip_address'}

def remap(k):
    """Translate a net-config key via NET_REMAP; unknown keys pass through."""
    return NET_REMAP.get(k, k)
# Keys understood by the net-string builder (mknet).
NET_PARAMS = ['name', 'bridge', 'gw', 'ip', 'type', 'ip6', 'hwaddr', 'tag', 'model']
# These stay in the filtered dictlist even though they are NET_PARAMS.
KEEP_ANYWAY = ['name', 'ip']
def filter_netparams(param_dictlist):
    """Keep entries NOT consumed by mknet (plus name/ip), remapping keys.

    NOTE(review): the condition drops keys that ARE in NET_PARAMS unless they
    are in KEEP_ANYWAY -- i.e. it strips the interface-string parameters out
    of the dictlist; confirm this inversion is intended.
    """
    return [{remap(k): v} for d in param_dictlist for k, v in d.items() if k not in NET_PARAMS or k in KEEP_ANYWAY]
def mknet(name='eth0', bridge='vmbr0', gw=None, ip=None, type='veth', model='', hwaddr='', **kwargs):
    """Build a Proxmox net-device string like 'name=eth0,bridge=vmbr0,ip=...'.

    A bare IP without a netmask gets '/24' appended.  Only keys listed in
    NET_PARAMS are emitted; falsy values are skipped.  For VMs, *model* is
    prefixed (optionally as 'model=hwaddr').
    """
    if ip and '/' not in ip:
        ip += '/24'
    if gw:
        kwargs['gw'] = gw
    if ip:
        kwargs['ip'] = ip
    if hwaddr:
        kwargs['hwaddr'] = hwaddr
    kwargs.update({
        'name': name,
        'bridge': bridge,
        'type': type,
    })
    # Prefix the model if it's present, for VMs
    return ','.join(([model + (('=' + hwaddr) if hwaddr else '')] if model else []) + ['='.join((k,str(v))) for k, v in kwargs.items() if v and k in NET_PARAMS])
def is_list(obj):
    """True when obj is a list (or list subclass)."""
    return isinstance(obj, list)
def is_dict(obj):
    """True when obj is a dict (or dict subclass)."""
    return isinstance(obj, dict)
def is_ip(obj):
    """Truthy when obj is a str shaped like a dotted-quad IPv4 address.

    Returns the regex match object (or False/None) rather than a strict bool.
    """
    return is_str(obj) and re.match('^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$', obj)
def is_str(obj):
    """True for str instances.

    NOTE(review): this file is Python 2 (izip/iteritems elsewhere), so
    unicode strings are NOT matched here -- confirm that is intended.
    """
    return isinstance(obj, str)
def bool_lc(obj):
    """Lower-case boolean string for config/templating: truthy -> "true"."""
    if obj:
        return "true"
    return "false"
def is_int(obj):
    """True for ints (note: bools count as ints in Python)."""
    return isinstance(obj, int)
def grouped(iterable, n):
    """Yield non-overlapping n-tuples from *iterable* (Python 2 izip idiom)."""
    return izip(*[iter(iterable)]*n)
def pairwise(l):
    """Yield consecutive non-overlapping pairs: (l[0], l[1]), (l[2], l[3]), ..."""
    return grouped(l, 2)
def exclude_keys(dic, *keys):
    """Return a shallow copy of *dic* without the given keys (py2 iteritems)."""
    return {k: v for k, v in dic.iteritems() if k not in keys}
def copy(dic):
    """Deep-copy helper.  NOTE: shadows the stdlib 'copy' module name."""
    return deepcopy(dic)
def is_listdict(d):
    """True when *d* is a list whose entries are all single-key dicts."""
    if not isinstance(d, list):
        return False
    return all(isinstance(entry, dict) and len(entry) == 1 for entry in d)
def resolve(hostname):
    """Resolve *hostname* to an IPv4 address string (blocking DNS lookup)."""
    return socket.gethostbyname(hostname)
def merged(base, top):
    """Return a deep copy of *base* with *top* merged into it.

    NOTE(review): merge() returns a NEW list in the listdict case instead of
    mutating in place, and that return value is discarded here -- so merging
    two dictlists would return just the copy of base; confirm intent.
    """
    res = copy(base)
    merge(res, top)
    return res
def merged_pillars(base, top, base_default=None):
    """Fetch two pillar trees and merge *top* over *base*.

    base_default=None avoids a mutable default argument; __salt__ is injected
    by the Salt loader.
    """
    if base_default is None:
        base_default = {}
    return merged(__salt__['pillar.get'](base, base_default), __salt__['pillar.get'](top, {}))
def merge_listdict(a, b):
    """Merge dictlist *b* into dictlist *a*, returning a NEW dictlist.

    Both lists are flattened into plain dicts (later entries win), merged,
    then rebuilt in the one-key-per-entry list form.
    """
    a_dict = {}
    b_dict = {}
    for elm in a:
        a_dict.update(elm)
    for elm in b:
        b_dict.update(elm)
    res_dict = merge(a_dict, b_dict)
    return [{k: v} for k, v in res_dict.items()]
def merge(a, b, path=None):
    """Recursively merge *b* into *a*; b's leaves win on conflict.

    Two dictlists are merged into a NEW list (a is not mutated); otherwise *a*
    is mutated in place and returned.  *path* tracks the key path of the
    recursion (currently informational only).
    """
    if path is None: path = []
    if is_listdict(a) and is_listdict(b):
        return merge_listdict(a, b)
    else:
        for key in b:
            if key in a:
                if isinstance(a[key], dict) and isinstance(b[key], dict):
                    merge(a[key], b[key], path + [str(key)])
                elif a[key] == b[key]:
                    pass # same leaf value
                else:
                    # conflicting leaves: b overrides a
                    a[key] = b[key]
            else:
                a[key] = b[key]
    return a
|
import xlwt
import xlrd
import names
from random import randint
from firesdk.user_classes.users import XLSUser
def generate_users(number_of_users):
    """Generate *number_of_users* random (first, last) name pairs.

    Returns a two-element list: [last_names, first_names].
    """
    lasts, firsts = [], []
    count = 0
    while count < number_of_users:
        firsts.append(names.get_first_name())
        lasts.append(names.get_last_name())
        count += 1
    return [lasts, firsts]
def users_to_xls(last_names, first_names):
    """Write the generated users into sample_users.xls, one row per user."""
    wb = xlwt.Workbook()
    ws = wb.add_sheet('Users')
    row = 0
    for last, first in zip(last_names, first_names):
        departments = random_departments()
        departments_str = ','.join(departments)
        ws.write(row, 0, last) # last name
        ws.write(row, 1, first) # first name
        # NOTE(review): the row number is appended AFTER the domain, producing
        # addresses like 'fakeEmail@gmail.com0' -- confirm this is intended.
        ws.write(row, 2, 'fakeEmail@gmail.com' + str(row)) # email
        ws.write(row, 3, 'Store Standards Associate' + str(randint(0, 2))) # position
        ws.write(row, 4, departments_str) # comma-separated department list
        ws.write(row, 5, randint(0, 1)) # is part time - 0:False, 1:True
        ws.write(row, 6, 0) # account_type - 0:Basic, 1: Manager, 2: Master
        row += 1
    wb.save('sample_users.xls')
def random_departments():
    """Pick between one and three distinct departments at random.

    Duplicates from repeated draws collapse via the set, so fewer than the
    drawn count may be returned.
    """
    choices = ['Store Standards', 'GHS', 'Customer Service']
    picked = set()
    for _ in range(randint(1, 3)):
        picked.add(choices[randint(0, 2)])
    return list(picked)
def create_data():
    """Generate 100 fake users and dump them to sample_users.xls."""
    user_count = 100
    name_list = generate_users(user_count)
    last_name_list = name_list[0]
    first_name_list = name_list[1]
    users_to_xls(last_name_list, first_name_list)
###############################################################
def parse_json_from_excel(data):
    """Convert a users mapping (email -> field dict) into XLSUser objects.

    Assumes each value carries lastName/firstName/position/departments/isPt/
    accountType keys -- TODO confirm against the producer of *data*.
    """
    users = []
    for user in data:
        last_name = data[user]['lastName']
        first_name = data[user]['firstName']
        email = user  # the mapping key itself is the email address
        position = data[user]['position']
        departments = data[user]['departments']
        is_pt = data[user]['isPt']
        account_type = data[user]['accountType']
        current_user = XLSUser(last_name, first_name, email, position, departments, is_pt, account_type)
        users.append(current_user)
    return users
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 5 21:30:56 2016
@author: sautt
"""
import matplotlib.pyplot as plt
import numpy as np
# Load measured load/displacement curves (absolute values) from text files,
# then overlay the analytical solution of the shallow-truss problem.
load = []
disp = []
with open("load.txt", "r") as fol:
    for line in fol:
        load.append(abs(float(line)))
fol.close()  # NOTE(review): redundant -- the with-block already closed the file
with open("displacement.txt", "r") as fod:
    for line in fod:
        disp.append(abs(float(line)))
fod.close()  # NOTE(review): redundant -- the with-block already closed the file
#custom_edit
E = 210000000000  # presumably Young's modulus in Pa (steel) -- confirm units
A =0.01  # presumably cross-section area -- confirm units
EA =E*A
x =2
y =1
L =np.sqrt(x**2+y**2)  # member length from the (x, y) geometry
L3 = L*L*L
v =0.00   # displacement sweep start
dv =0.01  # displacement increment
loadAN =[]
dispAN =[]
# Analytical load-displacement relation sampled at 220 points (v up to 2.2).
for i in range(220):
    loadAN.append((EA/(2*L3))*(v*v -2*y*v)*(v-y))
    dispAN.append(v)
    v = v + dv
plt.plot(disp,load,color = '#FFA500', linewidth=3.0, label = 'TRUSS3D2N',antialiased=True)
plt.plot(dispAN,loadAN,'--', color = '#696969', label = 'analytical',antialiased=True)
plt.legend()
plt.xlabel('displacement [m]')
plt.ylabel('load [N]')
plt.title('load-displacement')
plt.grid()
#plt.savefig('disp_v_truss.pdf')
plt.show()
|
import consus
# Minimal smoke test for the consus client: one transactional put + commit.
c = consus.Client()
t = c.begin_transaction()
# NOTE(review): assert is stripped under `python -O`; an explicit check would
# be more robust if this is meant as a real verification.
assert t.put('the table', 'the key', 'the value')
t.commit()
|
import base64
import io
import csv
import json
import os
import shutil
import glob
import cv2
import numpy as np
from datetime import datetime
from PIL import Image
from flask import render_template, url_for, request, redirect, Blueprint, session
from libs.utils import (
app_dir,
touch,
)
from collections import defaultdict
tool_name = 'tod_annotation_progress'
local_bp = Blueprint(tool_name, __name__)
# Annotation output folders produced by the damage_type_annotation tool.
input_dir = os.path.join(app_dir, "output", 'damage_type_annotation')
@local_bp.route('/', methods=['GET'])
def main():
    """Count .lock files per user under input_dir and render a ranked table."""
    total_stats = defaultdict(int)
    for fol in glob.glob(os.path.join(input_dir, "*")):
        for f in glob.glob("{}/*.lock".format(fol)):
            # Lock files are named <user>.lock.  NOTE(review): splitting on
            # "/" assumes a POSIX path separator; os.path.basename is portable.
            user = f.split("/")[-1].replace('.lock', '')
            total_stats[user] += 1
    # Sort users by annotation count, highest first.
    output_list = sorted(
        [
            {"name": k, "num": v} for k, v in total_stats.items()
        ],
        key=lambda x: x["num"],
        reverse=True
    )
    return render_template(
        'tod_annotation_progress.html',
        annotation_stats=output_list
    )
#!/home/zain101/Documents/Django_Stuff/polls/polls_venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'PyJWT==1.4.0','console_scripts','jwt'
# Auto-generated setuptools console-script wrapper: dispatches to PyJWT's
# 'jwt' entry point.  Do not edit by hand; regenerate via pip/setuptools.
__requires__ = 'PyJWT==1.4.0'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.exit(
        load_entry_point('PyJWT==1.4.0', 'console_scripts', 'jwt')()
    )
|
from onegov.core import Framework
from onegov.quill import QuillField
from pytest import fixture
from pytest_localserver.http import WSGIServer
from tests.shared.utils import create_app
from wtforms import Form
from onegov.quill import QuillApp
@fixture(scope='function')
def quill_app(request):
    """Build a throwaway app exposing two QuillFields on the root form.

    Field x allows strong/ol tags, field y only h3 -- test_init asserts the
    corresponding toolbar buttons.
    """
    class QuillTestApp(Framework, QuillApp):
        pass

    @QuillTestApp.path(path='')
    class Root:
        pass

    class QuillForm(Form):
        x = QuillField(tags=['strong', 'ol'])
        y = QuillField(tags=['h3'])

    @QuillTestApp.form(model=Root, form=QuillForm)
    def handle_form(self, request, form):
        request.include('quill')
        # The 'loaded' flag lets tests wait for full page initialisation.
        return f"""
            <!doctype html>
            <html>
                <body>
                    <form>
                        {form.x()}
                        {form.y()}
                    </form>
                    <script>
                        window.addEventListener("load", function() {{
                            loaded = true;
                        }});
                    </script>
                </body>
            </html>
        """
        # Removed two unreachable statements that followed this return
        # (result = str(form.x()) + str(form.y()); return result).

    app = create_app(QuillTestApp, request, use_maildir=False)
    yield app
    app.session_manager.dispose()
@fixture(scope='function')
def wsgi_server(request, quill_app):
    """Serve quill_app over a real WSGI server for browser-driven tests."""
    quill_app.print_exceptions = True
    server = WSGIServer(application=quill_app)
    server.start()
    yield server
    server.stop()
@fixture(scope='function')
def browser(request, browser, wsgi_server):
    """Point the shared browser fixture at the test WSGI server."""
    browser.baseurl = wsgi_server.url
    browser.wsgi_server = wsgi_server
    yield browser
def test_init(browser):
    """Both Quill editors load, each showing only its configured toolbar buttons."""
    # FIXME: Getting rid of this error might require updating
    # to a newer version of quill
    browser.visit('/', expected_errors=[{
        'level': 'WARNING', 'rgxp': 'Consider using MutationObserver'
    }])
    assert 'quill.bundle.js' in browser.html
    browser.wait_for_js_variable('loaded')
    toolbars = browser.find_by_css('.ql-toolbar')
    assert len(toolbars) == 2
    # field x: strong + ol only
    assert toolbars[0].find_by_css('button.ql-bold')
    assert toolbars[0].find_by_css('button.ql-list')
    assert not toolbars[0].find_by_css('button.ql-header')
    # field y: h3 only
    assert not toolbars[1].find_by_css('button.ql-bold')
    assert not toolbars[1].find_by_css('button.ql-list')
    assert toolbars[1].find_by_css('button.ql-header')
|
from django.conf.urls import url
from . import views
urlpatterns = [
    # NOTE(review): patterns are prefix-anchored only (no trailing '$'),
    # so e.g. '/login/anything' also matches -- confirm that is intended.
    url(r'^login/', views.user_login),
    url(r'^logout/', views.logout),
    url(r'^register/', views.register),
    url(r'^order/', views.order),
    url(r'^order1/', views.order1),
]
] |
# Generated by Django 2.2.11 on 2020-06-07 20:34
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: set human-readable verbose names on the pokryvala model."""

    dependencies = [
        ('pokryvala', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='pokryvala',
            options={'verbose_name': 'Покрывала', 'verbose_name_plural': 'Покрывала'},
        ),
    ]
|
"""狄克斯特拉算法的Python实现
这里用了换钢琴的例子
"""
# 首先构建这个图
def generate_graph():
    """Build the piano-trading graph as adjacency dicts (edge weight = cost).

    score -> poster(0)/disc(5); disc -> guitar(15)/drum(20);
    poster -> guitar(30)/drum(35); guitar -> piano(20); drum -> piano(10).
    """
    return {
        'score': {'poster': 0, 'disc': 5},
        'disc': {'guitar': 15, 'drum': 20},
        'poster': {'guitar': 30, 'drum': 35},
        'guitar': {'piano': 20},
        'drum': {'piano': 10},
        'piano': {},
    }
def dijkstars(node, costs, parents):
    """Relax every edge reachable from *node*, recording cheapest costs/parents.

    Operates on the module-level ``graph``.  NOTE(review): this is exhaustive
    recursive edge relaxation, not classic Dijkstra with a priority queue --
    it revisits nodes and can blow up on dense/cyclic graphs; fine for the
    small DAG built by generate_graph().
    """
    for sub_node in graph[node]:
        cost = costs[node] + graph[node][sub_node]
        if sub_node not in costs or cost < costs[sub_node]:
            costs[sub_node] = cost
            parents[sub_node] = node
    # Recurse into every neighbour (rebinding 'node' is safe: graph[node] was
    # already evaluated when the loop started).
    for node in graph[node]:
        dijkstars(node, costs, parents)
def get_path(parents, end, path=''):
    """Walk the parents map backwards from *end*, building "root-->...-->end".

    Returns *path* unchanged (default '') when *end* has no recorded parent.
    """
    if end not in parents:
        return path
    prefix = parents[end] + "-->"
    # On the first step the endpoint itself is appended; afterwards only
    # parents are prepended.
    path = prefix + (end + path if path == '' else path)
    return get_path(parents, parents[end], path)
if __name__ == '__main__':
    infinity = float("inf") # sentinel for "not yet reachable"
    graph = generate_graph()
    parents = {} # predecessor of each node on the cheapest known path
    # initialise costs: 0 for the start node 'score', infinity elsewhere
    costs = {k: infinity if k != 'score' else 0 for k in graph.keys()}
    dijkstars('score', costs, parents)
    print(parents)
    path = get_path(parents, 'piano')
    print(path)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# informe_final.py
#%% ejercicio 7.7
import fileparse
def leer_camion(nombre_archivo):
    '''Parse the truck CSV into a list of dicts with nombre/cajones/precio.

    NOTE(review): the previous docstring claimed this computed a total price;
    it only parses and type-converts the rows.
    '''
    with open(nombre_archivo) as f:
        camion = fileparse.parse_csv(f, select = ['nombre', 'cajones', 'precio'], types = [str, int, float], has_headers = True)
    return camion
def leer_precios(nombre_archivo):
    '''Parse a header-less prices CSV into (name, price) pairs.'''
    with open(nombre_archivo) as f:
        precios = fileparse.parse_csv(f, types = [str, float], has_headers = False)
    return precios
def hacer_informe(camion, precios):
    '''Build report rows (name, boxes, buy price, price change) per lot.

    *camion* is a list of dicts with nombre/cajones/precio; *precios* maps
    name -> current price.
    '''
    return [
        (lote['nombre'], lote['cajones'], lote['precio'],
         precios[lote['nombre']] - lote['precio'])
        for lote in camion
    ]
def imprimir_informe(informe):
    '''Pretty-print the report rows as a fixed-width table.'''
    print(' Nombre Cajones Precio Cambio')
    print('---------- ---------- ---------- ----------')
    for nombre, cajones, precio, cambio in informe:
        # format the price with a dollar sign before right-aligning it
        precio = f'${precio}'
        print(f'{nombre:>10s} {cajones:>10d} {precio:>10s} {cambio:>10.2f}')
def informe_camion(nombre_archivo_camion, nombre_archivo_precios):
    '''Read truck and price files, build the report, and print it.'''
    camion = leer_camion(nombre_archivo_camion)
    lista_precios = leer_precios(nombre_archivo_precios)
    precios = dict(lista_precios)
    informe = hacer_informe(camion, precios)
    imprimir_informe(informe)
#%%
def f_principal(argumentos):
    '''CLI entry: argumentos[1] = truck file, argumentos[2] = prices file.'''
    informe_camion(argumentos[1], argumentos[2])
if __name__ == '__main__':
    import sys
    f_principal(sys.argv)
|
# que https://leetcode.com/problems/find-all-numbers-disappeared-in-an-array/solution/
# solution 1
def findDisappearedNumbers(arr):
    """Return the numbers from 1..len(arr) that do not appear in *arr*.

    Uses a set for O(1) membership, giving O(n) overall; the previous version
    did list.remove/`in` on a list inside the loop, which was O(n^2).
    """
    seen = set(arr)
    return [i for i in range(1, len(arr) + 1) if i not in seen]


print(findDisappearedNumbers([4, 3, 2, 7, 8, 2, 3, 1]))
# solution 2
def findDisappearedNumbers(arr):
    """Set-difference variant: drop every seen value from the full 1..n set."""
    expected = set(range(1, len(arr) + 1))
    for value in set(arr):
        if value in expected:
            expected.remove(value)
    return list(expected)


print(findDisappearedNumbers([4, 3, 2, 7, 8, 2, 3, 1]))
|
#!/usr/local/bin/python3
#-*-coding:utf-8 -*-
############### FONCTION ###############
#le but de ce script est d'effectuer des opérations sur des courbes; elle permet de soustraire
#à un .dat, un autre .dat pour s'affranchir des certain phénomènes parasites de fond
############### VERSION 1 ###############
#version 1.0 (2016/02/05): création du script
############### AMELIORATIONS ###############
#intégration de ce code dans quick_plot en indiquant quelle courbe opère sur toutes les autres
# on reprend la même base que quick_plot, mais on va interpoler les données pour manipuler plus simplement
from numpy import *
from scipy import *
from scipy.interpolate import interp1d
from pylab import *
import matplotlib.pyplot as plt
import glob, inspect, os
# NE MODIFIER QUE LES VARIABLES ENTRES LES BARRES #
#######################################################
################## PEUT ETRE MODIFIE ##################
##################VVVVVVVVVVVVVVVVVVV##################
extension='.dat' #extension des fichiers à considérer pour tracer les courbes (inclure le .)
print_graph='non' #'oui'/'non' pour générer ou non une copie .png
delimiteur='' #rien par default
inter='linear' #type de l'interpolation sur les données ('linear' ou 'cubic'). 'cubic' est beaucoup plus long
tracer=2 #tracer la courbe indiquée
background=1 #courbe qui va agir sur tracer
action='-' #quelle action 'background' opère sur les 'tracer' (+,-,/,*)
#informations relatives aux fichiers de données
data_set_up=5 #nombre de premières lignes ignorées dans le fichier de données .dat
data_set_low=0 #nombre de dernières lignes ignorées dans le fichier de données .dat
data_x_axis=1 #colonne de données correspondant aux abscisses
data_y_axis=4 #colonne de données correspondant aux ordonnées
col_freq=6 #colonne où est indiquée la fréquence des mesures
col_amp=8 #colonne où est indiquée l'amplitude des mesures
unit_amp='V' #unité de l'amplitude du signal en entrée
#informations relatives aux tracés
titref="default title: x-axis --> column " +str(data_x_axis) + " y-axis --> column " +str(data_y_axis) + " of " + extension + " files in folder"
nom_axe_x='Temperature (K)'
nom_axe_y='Magnitude (V)'
grille='oui'
#informations relative au traitement des données
liste_offset=['non',(3,2),(0,1)] #si offset[0]='oui', 1er chiffre du tuple --> offset X, 2e --> offset Y. 1er tuple pour 'tracer' et 2e tuple pour 'background'
# le nombre de tuples ne peux pas être supérieur on nombre de .dat dans le dossier
auto_offset=('non', 0) #pour mettre un offset à toutes les courbes de telle sorte qu'elles commencent toutes
# au point d'ordonnée indiqué. Cette option est maîtresse sur liste_offset (y seulement).
###/ \#################################################/ \### ||
##/ | \############### NE PAS MODIFIER ###############/ | \## ||
#/__°__\#############VVVVVVVVVVVVVVVVVVV#############/__°__\# _||_
# \ /
# \/
script_dir=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))+'/' # to get the path to the directory containing the script and the data
liste_fichiers=glob.glob(script_dir+'*'+extension) #list of all .dat files in script_dir
nombre_fichiers=len(liste_fichiers) #number of files in directory
######## AFFICHAGE INFO ########
print('--> plot '+str(tracer)+' '+str(action)+' '+str(background)+' on column 4')
######## DEFINITION FONCTIONS TRAITEMENT ########
# fonction importation des données des axes à tracer:
def import_axe_data(nom_fichier, delimiteur, data_set_up, data_set_low, data_x_axis, data_y_axis):
    """Load the two plotted columns (x, y) of one data file as a 2D array.

    Skips `data_set_up` header lines and `data_set_low` footer lines and keeps
    only the columns `data_x_axis` and `data_y_axis` (0-based here).
    """
    print("---> fichier", extension, "en cours de traitement :", nom_fichier)
    return genfromtxt(nom_fichier,
                      delimiter=delimiteur,
                      skip_header=data_set_up,
                      skip_footer=data_set_low,
                      usecols=(data_x_axis, data_y_axis))
# fonction importation des données relatives aux conditions expérimentales:
def import_exp_data(nom_fichier, delimiteur, data_set_up, data_set_low, col_freq, col_amp):
    """Return a (2, 1) array holding the first frequency (row 0) and first
    amplitude (row 1) read from one data file.

    The two columns are read by two separate genfromtxt passes because they
    may have different lengths; col_freq/col_amp are 1-based column numbers.
    """
    exp_data = zeros((2, 1))
    for row, col in ((0, col_freq), (1, col_amp)):
        exp_data[row] = genfromtxt(nom_fichier, delimiter=delimiteur,
                                   skip_header=data_set_up,
                                   skip_footer=data_set_low,
                                   usecols=(col - 1))[0]
    return exp_data
######## FILL THE DATA MATRICES ########
donnees_tracer=import_axe_data(liste_fichiers[tracer-1], delimiteur, data_set_up, data_set_low, data_x_axis-1, data_y_axis-1)
donnees_background=import_axe_data(liste_fichiers[background-1], delimiteur, data_set_up, data_set_low, data_x_axis-1, data_y_axis-1)
######## X PLOTTING RANGE ########
#intersection of the X ranges of 'background' and 'tracer'
intervalle_x=array([max(min(donnees_tracer[:,0]),min(donnees_background[:,0])),min(max(donnees_tracer[:,0]),max(donnees_background[:,0]))])
#array holding every x point fed to the interpolated functions (0.005 step)
x=linspace(min(intervalle_x), max(intervalle_x), num=int((max(intervalle_x)-min(intervalle_x))/0.005), endpoint=True)
######## INTERPOLATION ########
#interpolation of 'background'
fonction_background=interp1d(donnees_background[:,0], donnees_background[:,1], kind=inter)
#interpolation of 'tracer'
fonction_tracer=interp1d(donnees_tracer[:,0], donnees_tracer[:,1], kind=inter)
######## OFFSET MATRIX ########
#build and initialise the offset matrix (stored as strings so it prints nicely)
offset=array(zeros((3,3)),str) # stores the offsets of 'background' and 'tracer'
offset[0,0]='id courbe' # title of column 1
offset[0,1]='offset X' # title of column 2
offset[0,2]='offset Y' # title of column 3
offset[1,0]=str(tracer)+' (modified)'
offset[2,0]=str(background)+' (background)'
if 'oui' in liste_offset or 'Oui' in liste_offset or 'OUI' in liste_offset or 'O' in liste_offset or 'o' in liste_offset:
    #fill the X offsets
    offset[1,1]=liste_offset[1][0]
    offset[2,1]=liste_offset[2][0]
    #auto_offset[0]='oui'
    if 'oui' in auto_offset or 'Oui' in auto_offset or 'OUI' in auto_offset or 'O' in auto_offset or 'o' in auto_offset:
        offset[1,2]=auto_offset[1]-donnees_tracer[0,1]
        offset[2,2]=auto_offset[1]-donnees_background[0,1]
        #one could also replace donnees_tracer[0,1] (or donnees_background[0,1]) by the Y value at Xmax; in that case use:
        #float(donnees_tracer[where(donnees_tracer==max(donnees_tracer[:,0]))[0],1])
        #donnees_[where(donnees_==max(donnees_[:,0]))[0],1] --> gives the ordinate at the maximum abscissa (where is a numpy function)
    #case where auto_offset[0]!='oui'
    else:
        # only the y offsets change in the offset matrix
        offset[1,2]= liste_offset[1][1]
        offset[2,2]= liste_offset[2][1]
elif 'oui' in auto_offset or 'Oui' in auto_offset or 'OUI' in auto_offset or 'O' in auto_offset or 'o' in auto_offset:
    offset[1,2]=auto_offset[1]-donnees_tracer[0,1]
    offset[2,2]=auto_offset[1]-donnees_background[0,1]
######## DATA AND PLOTTING ########
#experimental data
donnees_exp_tracer=import_exp_data(liste_fichiers[tracer-1], delimiteur, data_set_up, data_set_low, col_freq, col_amp)
donnees_exp_background=import_exp_data(liste_fichiers[background-1], delimiteur, data_set_up, data_set_low, col_freq, col_amp)
#file name without the (too long) absolute path
decoupage_tracer=liste_fichiers[tracer-1].split('/') # decoupage is the list of path elements leading to the .dat (each part between / is a list element)
decoupage_background=liste_fichiers[background-1].split('/')
#curve labels for 'tracer' and 'background'
if float(offset[1,1])!=0 or float(offset[1,2])!=0:
    label_tracer='graph '+str(1)+'(col '+str(data_y_axis)+'): '+decoupage_tracer[len(decoupage_tracer)-1]+'\n ---> f='+str(donnees_exp_tracer[0,0])+'Hz | Vo='+str(donnees_exp_tracer[1,0])+unit_amp+' | offset X='+offset[1,1]+' | offset Y='+offset[1,2]
else:
    label_tracer='graph '+str(1)+'(col '+str(data_y_axis)+'): '+decoupage_tracer[len(decoupage_tracer)-1]+'\n ---> f='+str(donnees_exp_tracer[0,0])+'Hz | Vo='+str(donnees_exp_tracer[1,0])+unit_amp
if float(offset[2,1])!=0 or float(offset[2,2])!=0:
    label_background='graph '+str(1)+'(col '+str(data_y_axis)+'): '+decoupage_background[len(decoupage_background)-1]+'\n ---> f='+str(donnees_exp_background[0,0])+'Hz | Vo='+str(donnees_exp_background[1,0])+unit_amp+' | offset X='+offset[2,1]+' | offset Y='+offset[2,2]
else:
    label_background='graph '+str(1)+'(col '+str(data_y_axis)+'): '+decoupage_background[len(decoupage_background)-1]+'\n ---> f='+str(donnees_exp_background[0,0])+'Hz | Vo='+str(donnees_exp_background[1,0])+unit_amp
#data used to draw 'tracer'
# NOTE(review): no copy is made -- donnees_tracer_off aliases donnees_tracer,
# so the += below also shifts the original array (same for background); verify
# this is intended before reusing the raw arrays afterwards.
donnees_tracer_off=donnees_tracer
donnees_tracer_off[:,0]+=float(offset[1,1])
donnees_tracer_off[:,1]+=float(offset[1,2])
#data used to draw 'background'
donnees_background_off=donnees_background
donnees_background_off[:,0]+=float(offset[2,1])
donnees_background_off[:,1]+=float(offset[2,2])
#'tracer' data combined with 'background' through 'action', restricted to the common x interval and offset to Y=0 at Xmax
if action=='+':
    offset_add=fonction_tracer(max(x))+fonction_background(max(x))
    y=fonction_tracer(x)+fonction_background(x)-offset_add
    label_modifie='graph '+str(3)+'(col '+str(data_y_axis)+'): '+' graph1 + graph2'
elif action=='-':
    offset_add=fonction_tracer(max(x))-fonction_background(max(x))
    y=fonction_tracer(x)-fonction_background(x)-offset_add
    label_modifie='graph '+str(3)+'(col '+str(data_y_axis)+'): '+' graph1 - graph2'
elif action=='/':
    offset_add=fonction_tracer(max(x))/fonction_background(max(x))
    y=fonction_tracer(x)/fonction_background(x)-offset_add
    label_modifie='graph '+str(3)+'(col '+str(data_y_axis)+'): '+' graph1 / graph2'
elif action=='*':
    offset_add=fonction_tracer(max(x))*fonction_background(max(x))
    y=fonction_tracer(x)*fonction_background(x)-offset_add
    label_modifie='graph '+str(3)+'(col '+str(data_y_axis)+'): '+' graph1 * graph2'
else:
    # unknown action: fall back to plotting 'tracer' alone
    offset_add=fonction_tracer(max(x))
    y=fonction_tracer(x)-offset_add
    label_modifie='graph '+str(3)+'(col '+str(data_y_axis)+'): '+' graph1'
plt.plot(donnees_tracer_off[:,0],donnees_tracer_off[:,1],label=label_tracer) #draw the 'tracer' curve
print("\t ----traitement accompli sans erreur (courbe", tracer, "|colonne", data_y_axis, "tracée)----")
plt.plot(donnees_background_off[:,0],donnees_background_off[:,1],label=label_background) #draw the 'background' curve
print("\t ----traitement accompli sans erreur (courbe", background, "|colonne", data_y_axis, "tracée)----")
plt.plot(x,y,label=label_modifie) #draw the 'tracer' 'action' 'background' curve
print("\t ----traitement accompli sans erreur (courbe ", tracer, action, background, "|colonne", data_y_axis, "tracée)----")
######## AXIS SCALE MANAGEMENT ########
#2 storage arrays for the scales (one for x, one for y) with min in col 0 and max in col 1
xscale=array([min(min(donnees_tracer_off[:,0]),min(donnees_background_off[:,0])),max(max(donnees_tracer_off[:,0]),max(donnees_background_off[:,0]))])
yscale=array([min(min(donnees_tracer_off[:,1]),min(donnees_background_off[:,1]),min(y)),max(max(donnees_tracer_off[:,1]),max(donnees_background_off[:,1]),max(y))])
# debug output of the raw extrema used above
print('min(donnees_tracer_off[:,0]),min(donnees_background_off[:,0])')
print(min(donnees_tracer_off[:,0]),min(donnees_background_off[:,0]))
print('max(donnees_tracer_off[:,0]),max(donnees_background_off[:,0])')
print(max(donnees_tracer_off[:,0]),max(donnees_background_off[:,0]))
print('min(donnees_tracer_off[:,1]),min(donnees_background_off[:,1]),min(y)')
print(min(donnees_tracer_off[:,1]),min(donnees_background_off[:,1]),min(y))
print('max(donnees_tracer_off[:,1]),max(donnees_background_off[:,1]),max(y)')
print(max(donnees_tracer_off[:,1]),max(donnees_background_off[:,1]),max(y))
#display the offset matrix in the terminal so the user can check it
print('------------------------------------')
if 'oui' in liste_offset or 'Oui' in liste_offset or 'OUI' in liste_offset or 'O' in liste_offset or 'o' in liste_offset:
    if 'oui' in auto_offset or 'Oui' in auto_offset or 'OUI' in auto_offset or 'O' in auto_offset or 'o' in auto_offset:
        print('---> le mode liste des offset est ACTIF et les offset sont:\n', liste_offset)
        print('\n---> le mode auto offset est ACTIF et par conséquent les offset sur Y precedents ne sont par considérés.')
        print('\n---> matrice offset:\n', offset)
        titre = titref+'\n offset: ON | auto_offset: ON'
    else:
        print('---> le mode liste des offset est ACTIF et les offsets sont:\n', liste_offset)
        print('\n---> le mode auto offset est INACTIF.')
        print('\n---> matrice offset:\n', offset)
        titre = titref+'\n offset: ON | auto_offset: OFF'
else:
    if 'oui' in auto_offset or 'Oui' in auto_offset or 'OUI' in auto_offset or 'O' in auto_offset or 'o' in auto_offset:
        print('---> le mode liste des offset est INACTIF. Le mode auto offset est ACTIF.')
        print('\n---> matrice offset:\n', offset)
        titre = titref+'\n offset: OFF | auto_offset: ON'
    else:
        print('---> le mode liste des offset est INACTIF. Le mode auto offset est INACTIF.')
        print('\n---> matrice offset:\n', offset)
        titre = titref+'\n offset: OFF | auto_offset: OFF'
print('------------------------------------')
#retrieve the min and max for x
xscale_low=min(xscale)
xscale_up=max(xscale)
#retrieve the min and max for y
yscale_low=min(yscale)
yscale_up=max(yscale)
print('xscale_low,xscale_up')
print(xscale_low,xscale_up)
print('yscale_low,yscale_up')
print(yscale_low,yscale_up)
# 1% margin on each side of the plot
xdelta=(xscale_up-xscale_low)/100
ydelta=(yscale_up-yscale_low)/100
plt.legend(fontsize=8, loc='best')
plt.title(titre)
plt.axis([xscale_low-xdelta, xscale_up+xdelta, yscale_low-ydelta, yscale_up+ydelta])
plt.xlabel(nom_axe_x)
plt.ylabel(nom_axe_y)
if grille=='oui' or grille=='OUI' or grille == 'Oui' or grille=='o' or grille=='O':
    plt.grid(True)
else:
    plt.grid(False)
if print_graph=='oui' or print_graph=='OUI' or print_graph == 'Oui' or print_graph=='o' or print_graph=='O':
    #create one .png per combination of plotted curves and represented axes
    # NOTE(review): tracer/background/data_y_axis are ints, so this string
    # concatenation raises TypeError whenever print_graph is enabled; the
    # values need str() wrapping.
    titreplt="courbe"+tracer+" et "+background+ " et courbe"+tracer+action+background+ "| colonne"+data_y_axis
    plt.savefig(script_dir+titreplt)
plt.show()
from rest_framework import serializers
from accounts.serializers import UserSerializer
class SubmissionSerializer(serializers.Serializer):
    """Plain (non-model) serializer exposing a submission's grade, repo URL
    and the ids of its owning user and activity."""
    id = serializers.IntegerField()
    grade = serializers.IntegerField()
    repo = serializers.CharField()
    user_id = serializers.IntegerField()
    activity_id = serializers.IntegerField()
class ActivitySerializer(serializers.Serializer):
    """Plain serializer for an activity, nesting all of its submissions
    via SubmissionSerializer (expects a `submission_set` relation)."""
    id = serializers.IntegerField()
    title = serializers.CharField()
    points = serializers.IntegerField()
    submission_set = SubmissionSerializer(many=True)
'''
Merge all files in the folder data/<match name>/json/
into a single json file in data/<match name>/,
then filter the events and create two files, one for the home team
and one for the away team, with the separated events.
'''
import os
import json
# Accumulators: all events, plus per-team splits and their counters.
events = []
events_home_team = []
events_away_team = []
count = 0
count_home = 0
count_away = 0
# match.json names the two teams; their ids form the match folder name.
file_json = 'static/game/match/match.json'
data = json.load(open(file_json))
match_code = data["home_team"]["team_id"] + " " + data["away_team"]["team_id"]
home_team = data["home_team"]["team_id"]
away_team = data["away_team"]["team_id"]
name_folder = "./Data/" + match_code + "/json"
#read every single event from all event files and save them into an array
for file in os.listdir(name_folder):
    if file.endswith('.json'):
        with open(name_folder+"/"+file,'r') as fi:
            # NOTE(review): `dict` shadows the builtin of the same name
            # inside this loop; harmless here but worth renaming.
            dict = json.load(fi)
            print("number of events in " + file + " : " + str(len(dict)))
            for event in dict:
                if event != {}:
                    # assign a global sequential id across all files
                    event["event_id"] =count
                    count += 1
                    events.append(event)
                    if event["team_id"] == home_team :
                        count_home += 1
                        events_home_team.append(event)
                    else:
                        # anything not tagged with the home team id counts as away
                        events_away_team.append(event)
                        count_away += 1
    else:
        print("sorry, this file isn't json")
print("Number of events in Complete File : " + str(count))
print("Number of events " + home_team + " File : " + str(count_home))
print("Number of events " + away_team + " File : " + str(count_away))
# write the array with all events in a json file in Data\<match name>
name_file_json = "./Data/"+match_code+"/"+match_code+".json"
with open(name_file_json, "w") as outfile:
    json.dump(events, outfile)
# write the array with events of the home team in a json file in Data\<match name>
name_home_file_json = "./Data/"+match_code+"/"+home_team+".json"
with open(name_home_file_json, "w") as outfile:
    json.dump(events_home_team, outfile)
# write the array with events of the away team in a json file in Data\<match name>
name_away_file_json = "./Data/"+match_code+"/"+away_team+".json"
with open(name_away_file_json, "w") as outfile:
    json.dump(events_away_team, outfile)
|
# -*- coding:utf-8 -*-
"""
求出1~13的整数中1出现的次数,并算出100~1300的整数中1
出现的次数?为此他特别数了一下1~13中包含1的数字有1、
10、11、12、13因此共出现6次,但是对于后面问题他就没
辙了。ACMer希望你们帮帮他,并把问题更加普遍化,可以很
快的求出任意非负整数区间中1出现的次数(从1 到 n 中1
出现的次数)。
"""
class Solution:
    def NumberOf1Between1AndN_Solution(self, n):
        """Count how many times the digit 1 appears in the decimal
        representations of the integers 1..n (inclusive)."""
        return sum(str(value).count('1') for value in range(1, n + 1))
# Quick self-check: 1..13 contains 1, 10, 11, 12, 13 -> six occurrences of '1'.
s = Solution()
print(s.NumberOf1Between1AndN_Solution(13))
import copy
import base64
import random
import string
import xml.etree.ElementTree as ET
import os
import json
from uuid import uuid4
import zipfile, os
from collections import OrderedDict
import shutil
def get_zip_file(input_path, result):
    """Depth-first walk of *input_path*, appending every file path to *result*.

    :param input_path: directory to traverse (no trailing slash)
    :param result: list collecting 'dir/file' paths, mutated in place
    :return: None (results are accumulated in *result*)
    """
    for entry in os.listdir(input_path):
        full_path = input_path + '/' + entry
        if os.path.isdir(full_path):
            get_zip_file(full_path, result)
        else:
            result.append(full_path)
def zip_file_path(input_path, output_path, output_name):
    """Compress the whole *input_path* tree into output_path/output_name.

    :param input_path: folder to compress
    :param output_path: folder that receives the archive
    :param output_name: archive file name
    :return: 'output_path/output_name' path of the created archive
    """
    archive = zipfile.ZipFile(os.path.join(output_path, output_name), 'w', zipfile.ZIP_DEFLATED)
    members = []
    get_zip_file(input_path, members)
    for member in members:
        archive.write(member)
    # close() must be called for the compressed data to be fully flushed
    archive.close()
    return output_path + r"/" + output_name
class FPSParser(object):
    """Parser for FPS (Free Problem Set) XML files, versions 1.1 and 1.2.

    Each <item> element is converted into a plain problem dict (title,
    limits, samples, test cases, solutions, ...).
    """
    def __init__(self, fps_path):
        # path of the FPS XML file to parse
        self.fps_path = fps_path
    @property
    def _root(self):
        """Parse the file and return its XML root after validating the version.

        NOTE(review): the file is re-parsed on every property access.
        """
        root = ET.ElementTree(file=self.fps_path).getroot()
        version = root.attrib.get("version", "No Version")
        if version not in ["1.1", "1.2"]:
            raise ValueError("Unsupported version '" + version + "'")
        return root
    def parse(self):
        """Return a list of problem dicts, one per <item> element."""
        ret = []
        for node in self._root:
            if node.tag == "item":
                ret.append(self._parse_one_problem(node))
        return ret
    def _parse_one_problem(self, node):
        """Convert one <item> element into a problem dict.

        sample_input/test_input must strictly alternate with their matching
        *_output tags; the two *_start flags enforce that ordering.
        """
        sample_start = True
        test_case_start = True
        # default skeleton; fields are overwritten as tags are encountered
        problem = {
            "title": "No Title",
            "description": "No Description",
            "input": "No Input Description",
            "output": "No Output Description",
            "memory_limit": {"unit": None, "value": None},
            "time_limit": {"unit": None, "value": None},
            "samples": [],
            "images": [],
            "append": [],
            "template": [],
            "prepend": [],
            "test_cases": [],
            "hint": None,
            "source": None,
            "spj": None,
            "solution": []
        }
        for item in node:
            tag = item.tag
            if tag in ["title", "description", "input", "output", "hint", "source"]:
                # empty input/output sections get a placeholder text
                if(tag=='input' or tag=='output'):
                    if(not item.text):
                        item.text = "No description"
                problem[item.tag] = item.text
            elif tag == "time_limit":
                unit = item.attrib.get("unit", "s")
                if unit not in ["s", "ms"]:
                    raise ValueError("Invalid time limit unit")
                problem["time_limit"]["unit"] = item.attrib.get("unit", "s")
                value = int(item.text)
                if value <= 0:
                    raise ValueError("Invalid time limit value")
                problem["time_limit"]["value"] = value
            elif tag == "memory_limit":
                unit = item.attrib.get("unit", "MB")
                if unit not in ["MB", "KB", "mb", "kb"]:
                    raise ValueError("Invalid memory limit unit")
                problem["memory_limit"]["unit"] = unit.upper()
                value = int(item.text)
                if value <= 0:
                    raise ValueError("Invalid memory limit value")
                problem["memory_limit"]["value"] = value
            elif tag in ["template", "append", "prepend", "solution"]:
                lang = item.attrib.get("language")
                if not lang:
                    raise ValueError("Invalid " + tag + ", language name is missed")
                # remap FPS 'Python' solutions to the judge's 'Python2' name
                if tag == 'solution':
                    if lang not in ['Pascal', 'C#']:
                        if lang == 'Python':
                            lang = 'Python2'
                problem[tag].append({"language": lang, "code": item.text})
            elif tag == 'spj':
                lang = item.attrib.get("language")
                if not lang:
                    raise ValueError("Invalid spj, language name if missed")
                problem["spj"] = {"language": lang, "code": item.text}
            elif tag == "img":
                # each <img> carries a src child and an optional base64 payload
                problem["images"].append({"src": None, "blob": None})
                for child in item:
                    if child.tag == "src":
                        problem["images"][-1]["src"] = child.text
                    elif child.tag == "base64":
                        problem["images"][-1]["blob"] = base64.b64decode(child.text)
            elif tag == "sample_input":
                if not sample_start:
                    raise ValueError("Invalid xml, error 'sample_input' tag order")
                # empty sample input becomes a single space placeholder
                problem["samples"].append({"input": " "if not item.text else item.text, "output": None})
                sample_start = False
            elif tag == "sample_output":
                if sample_start:
                    raise ValueError("Invalid xml, error 'sample_output' tag order")
                problem["samples"][-1]["output"] = item.text
                sample_start = True
            elif tag == "test_input":
                if not test_case_start:
                    raise ValueError("Invalid xml, error 'test_input' tag order")
                problem["test_cases"].append({"input": item.text, "output": None})
                test_case_start = False
            elif tag == "test_output":
                if test_case_start:
                    raise ValueError("Invalid xml, error 'test_output' tag order")
                problem["test_cases"][-1]["output"] = item.text
                test_case_start = True
        return problem
class QDUOJ_OBJ:
    """Builds a QDUOJ-importable problem bundle (problem.json + testcase/
    directory) from a problem dict produced by FPSParser.
    """
    # NOTE(review): class-level attribute kept for compatibility; every
    # instance shadows it with its own OrderedDict in __init__.
    data = {}
    def __init__(self, problem: dict, save_path="qduoj_data", tag=None):
        """
        :param problem: parsed problem dict (FPSParser output plus 'display_id')
        :param save_path: directory receiving zipped bundles
        :param tag: list of tag strings; defaults to ["算法"]
        """
        # bug fix: a mutable list default argument is shared across calls;
        # use None as sentinel and build the default per-instance instead
        if tag is None:
            tag = ["算法"]
        self._problem = problem
        self.save_path = save_path
        # NOTE(review): parsed time/memory limits are ignored here and
        # replaced by fixed 1000 ms / 256 MB values -- confirm intended.
        self.data = OrderedDict([
            ('display_id', self._problem['display_id']),
            ('title', self._problem['title']),
            ('description', {"format": "html", "value": self._problem['description']}),
            ('tags', tag),
            ('input_description', {
                "format": "html",
                "value": self._problem['input']
            }),
            ('output_description', {
                "format": "html",
                "value": self._problem['output']
            }),
            ('test_case_score', None),
            ('hint', {
                "format": "html",
                "value": ""
            }),
            ('time_limit', 1000),
            ('memory_limit', 256),
            ('samples', self._problem['samples']),
            ('template', {}),
            ('spj', None),
            ('rule_type', "ACM"),
            ('source', "SU Online Judgement http://127.0.0.1"),
            ('answers', self._problem['solution'] if "solution" in self._problem else [])
        ])
    def save_test_case(self, problem, base_dir, input_preprocessor=None, output_preprocessor=None):
        """Write each test case as <n>.in / <n>.out (1-based) under *base_dir*.

        Optional preprocessors may transform the raw input/output text.
        """
        for index, item in enumerate(problem["test_cases"]):
            with open(os.path.join(base_dir, str(index + 1) + ".in"), "w", encoding="utf-8") as f:
                if input_preprocessor:
                    input_content = input_preprocessor(item["input"])
                else:
                    input_content = item["input"]
                f.write(input_content)
            with open(os.path.join(base_dir, str(index + 1) + ".out"), "w", encoding="utf-8") as f:
                if output_preprocessor:
                    output_content = output_preprocessor(item["output"])
                else:
                    output_content = item["output"]
                f.write(output_content)
    def save_flat_file(self, target_dir):
        """Write problem.json and the testcase/ directory under *target_dir*,
        creating every path component that does not exist yet."""
        PATH = ""
        for path in os.path.split(target_dir):
            PATH = os.path.join(PATH, path)
            if not os.path.exists(PATH):
                os.mkdir(PATH)
        problem_json = json.dumps(self.data, indent=4)
        with open(os.path.join(target_dir, 'problem.json'), mode='w', encoding='utf-8') as f:
            f.write(problem_json)
        test_case_dir = os.path.join(target_dir, 'testcase')
        if not os.path.exists(test_case_dir):
            os.mkdir(test_case_dir)
        self.save_test_case(self._problem, test_case_dir)
        print("Saved :" + str(target_dir))
    def save_zipfile(self):
        """Write the bundle to a temp dir, zip it into save_path, clean up."""
        tmp_dir = "1"
        self.save_flat_file(tmp_dir)
        # bug fix: the old code printed the global INT_ID, which is never
        # defined anywhere and raised NameError; report the archive name instead
        zip_name = str(uuid4())[:8] + '.zip'
        print("Zipped :" + zip_name)
        # shutil.make_archive(os.path.join(self.save_path, str(uuid4())[:8]), 'zip', target_dir)
        zip_file_path(tmp_dir, self.save_path, zip_name)
        shutil.rmtree(tmp_dir)  # recursively delete the temp folder
def get_pids_from_fname(fname: str):
    """Extract problem ids from a file name shaped like 'x-y-1,2,3.xml'.

    The third '-'-separated field (extension stripped) is split on commas.
    """
    stem = fname.split('-')[2].split('.')[0]
    return stem.split(',')
if (__name__ == "__main__"):
    # Convert every FPS XML file under fps_data/<tag1,tag2,...>/ into QDUOJ
    # flat-file bundles, 10 problems per output directory.
    SKIP = 0
    skip = 0
    FPS_DATA = 'fps_data'
    for tag_str in os.listdir(FPS_DATA):
        # folder name doubles as a comma-separated tag list
        QDUOJ_FILE_DIR = tag_str
        tags = tag_str.split(',')
        fatherdir = os.path.join(FPS_DATA, tag_str)
        INDEX = 0
        for fname in os.listdir(fatherdir):
            # file name encodes the problem ids it contains, in parse order
            pids = get_pids_from_fname(fname)
            print(tags, pids)
            parser = FPSParser(os.path.join(fatherdir, fname))
            problems = parser.parse()
            for i,p in enumerate(problems):
                if(len(p['test_cases'])==0):
                    print("Warning: %s have no test cases!" % str(pids[i]))
                    continue
                # if(not p['samples'][0]['input']):
                #     print("Warning :Sample input Empty! %s" % str(pids[i]))
                p['display_id'] = str(pids[i])
                print("Get problem:" + str(p['display_id']))
                qobj = QDUOJ_OBJ(p, tag=tags)
                qobj.save_flat_file(os.path.join(QDUOJ_FILE_DIR, str(INDEX+1)))
                INDEX+=1
                # start a fresh "<dir>_" directory after every 10 problems
                if(INDEX%10==0):
                    INDEX=0
                    QDUOJ_FILE_DIR+="_"
    # shutil.make_archive(tag_str, 'zip', QDUOJ_FILE_DIR, )
|
"""
Given values for various macronutrients in grams, determine the number of
food points represented.
"""
def calculate_food_points(protein=0.0,
                          carbs=0.0,
                          fat=0.0,
                          fiber=0.0,
                          alcohol=0.0,
                          sugar_alcohol=0.0):
    """Convert macronutrient amounts (grams) into whole food points.

    Protein, carbs, fat and alcohol add points; fiber and sugar alcohol
    subtract them. The result is rounded and never negative.
    """
    gains = (
        float(protein) / 10.9375
        + float(carbs) / 9.2105
        + float(fat) / 3.8889
        + float(alcohol) / 3.0147
    )
    reductions = float(fiber) / 12.5 + float(sugar_alcohol) / 23.0263
    points = gains - reductions
    return int(round(points)) if points > 0 else 0
"""
Given an age in years, a weight in kilograms and a height in meters,
determine the daily energy expenditure for a man in kCal.
"""
def calculate_male_daily_energy_expenditure(age, weight, height):
    """Daily energy expenditure (kCal) for a man, from age (years),
    weight (kg) and height (m)."""
    base = 864.0 - (9.72 * float(age))
    activity = 1.12 * (14.2 * float(weight) + 503 * float(height))
    return base + activity
"""
Given an age in years, a weight in kilograms and a height in meters,
determine the daily energy expenditure for a woman in kCal.
"""
def calculate_female_daily_energy_expenditure(age, weight, height):
    """Daily energy expenditure (kCal) for a woman, from age (years),
    weight (kg) and height (m)."""
    base = 387.0 - (7.31 * float(age))
    activity = 1.14 * (10.9 * float(weight) + 660.7 * float(height))
    return base + activity
"""
Adjust a daily energy expenditure value to an arbitrary range that is
used in the target points calculation.
"""
def adjust_daily_energy_expenditure(expenditure):
    """Map a daily energy expenditure onto the arbitrary range used by the
    target-points calculation (scale by 0.9, shift by 200)."""
    return float(expenditure) * 0.9 + 200.0
def _calculate_target_points(age, weight, height, exp_func):
    """Shared male/female daily points target computation.

    Runs *exp_func* to get the energy expenditure, adjusts it, converts to
    points and clamps the result to the inclusive range [26, 71].
    """
    adjusted = adjust_daily_energy_expenditure(exp_func(age, weight, height))
    raw_target = int(round(max(adjusted - 1000.0, 1000.0) / 35.0) - 11.0)
    return min(max(raw_target, 26), 71)
"""
Given an age in years, a weight in kilograms and a height in meters,
determine the daily points target for a man.
"""
def calculate_male_target_points(age, weight, height):
    """Daily points target for a man of the given age (years),
    weight (kg) and height (m)."""
    return _calculate_target_points(
        age, weight, height, calculate_male_daily_energy_expenditure)
"""
Given an age in years, a weight in kilograms and a height in meters,
determine the daily points target for a man.
"""
def calculate_female_target_points(age, weight, height):
    """Daily points target for a woman of the given age (years),
    weight (kg) and height (m)."""
    return _calculate_target_points(
        age, weight, height, calculate_female_daily_energy_expenditure)
"""
Given a number of calories expended, determine the corresponding
activity points.
"""
def calculate_activity_points(calories=0.0):
    """Convert calories expended into activity points (70 kCal per point,
    rounded to the nearest integer)."""
    return int(round(float(calories) / 70.0))
|
from bs4 import BeautifulSoup
import requests
# Fetch the Flipkart landing page and parse it with the lxml parser.
source = requests.get('https://www.flipkart.com').text
soup = BeautifulSoup(source, 'lxml')
itemName = []
itemDicount = []
# Item titles live in divs with class 'iUmrbN' and discounts in 'BXlZdc'
# (site-generated class names change often -- TODO confirm they are current).
for nameofitem in soup.find_all('div', class_='iUmrbN'):
    textnameofitem = nameofitem.text
    itemName.append(textnameofitem)
for discount in soup.find_all('div', class_='BXlZdc'):
    textdiscount = discount.text
    itemDicount.append(textdiscount)
# NOTE(review): assumes both lists are the same length; if the page returns
# fewer discounts than names this raises IndexError -- verify.
for i in range(len(itemName)):
    print(itemName[i])
    print(itemDicount[i])
    print()
from django.apps import AppConfig
class OnmyojiModelsConfig(AppConfig):
    """Django application configuration for the 'onmyoji_models' app."""
    name = 'onmyoji_models'
|
from bibliopixel.animation import BaseMatrixAnim
from bibliopixel import colors
from websocket import create_connection
import threading
import numpy as np
import PIL
from PIL import Image
import cv2
# Dimensions (pixels) of the depth frames streamed by the Kimotion server.
WS_FRAME_WIDTH = 640
WS_FRAME_HEIGHT = 480
WS_FRAME_SIZE = WS_FRAME_WIDTH * WS_FRAME_HEIGHT
def clamp(v, _min, _max):
    """Constrain v to the closed interval [_min, _max]."""
    if v > _max:
        v = _max
    return _min if v < _min else v
def lerp(n, low, high):
    """Normalized position of n within [low, high], clamped to [0.0, 1.0]."""
    fraction = (n - low) / (high - low)
    return clamp(fraction, 0.0, 1.0)
# Depth thresholds (sensor units) for the colour ramps below.
MIN_Z = 440.0
MAX_Z = 1100.0
NEAR_Z = 760.0
MID_Z = ((MAX_Z + NEAR_Z) / 2.0)
FAR_Z = MAX_Z
# MIN_Z = 0.0
# MAX_Z = 65000.0
#
# NEAR_Z = 2000.0
# MID_Z = ((MAX_Z + NEAR_Z) / 2.0)
# FAR_Z = 60000.0
# NOTE(review): the next three literal-colour assignments are dead code --
# they are immediately overwritten by the hex-derived values just below.
near_color = np.array([255, 0, 0]) # np.array(colors.hex2rgb('#e56b00'))
mid_color = np.array([0, 0, 255]) # np.array(colors.hex2rgb('#280072'))
far_color = np.array([0, 255, 0]) # np.array(colors.hex2rgb('#02020c'))
near_color = np.array(colors.hex2rgb('#e56b00'))
mid_color = np.array(colors.hex2rgb('#280072'))
far_color = np.array(colors.hex2rgb('#02020c'))
# far_color = np.array([0, 0, 255])
def z_color(z):
    """Map a depth value z to an alpha-scaled [r, g, b] uint8 list.

    Two linear ramps: near->mid colour for z <= MID_Z, mid->far beyond,
    mirroring the GLSL fragment shader referenced in the comment below.
    """
    z = float(z)
    alpha = 1.0
    if z <= MID_Z:
        ns = lerp(z, NEAR_Z, MID_Z)
        color = (1.0 - ns) * near_color + ns * mid_color
    else: # z must be between MID_Z and FAR_Z
        fs = lerp(z, MID_Z, FAR_Z)
        color = (1.0 - fs) * mid_color + fs * far_color
        # NOTE(review): source indentation was ambiguous -- this fade may have
        # been intended to apply to both branches; confirm against the shader.
        alpha = 1.0 - lerp(z, MIN_Z, MAX_Z)
    # NOTE(review): -MIN_Z is -440, unreachable for unsigned depth input.
    if z <= -MIN_Z:
        alpha = 0.0
    # gl_FragColor = vec4(color, alpha) * texture2D( texture, gl_PointCoord )
    return (color * alpha).astype(np.uint8).tolist()
def rebin(a, shape):
    """Downsample 2D array *a* to *shape* by averaging equal-sized blocks.

    Both target dimensions must divide the corresponding source dimension.
    """
    rows, cols = shape
    blocked = a.reshape(rows, a.shape[0] // rows, cols, a.shape[1] // cols)
    return blocked.mean(axis=-1).mean(axis=1)
def thread_lock():
    """Return a threading.Event dressed up with lock/release/is_released
    aliases (lock==clear, release==set, is_released==is_set); it starts
    in the released state."""
    event = threading.Event()
    event.lock = event.clear
    event.release = event.set
    event.is_released = event.is_set
    event.release()
    return event
class ws_thread(threading.Thread):
    """Daemon thread streaming little-endian uint16 depth frames from a
    Kimotion websocket server into a double buffer."""
    def __init__(self, server):
        super(ws_thread, self).__init__()
        # NOTE(review): setDaemon() is a deprecated alias; daemon=True is the modern spelling.
        self.setDaemon(True)
        self._stop = threading.Event()
        self._reading = thread_lock()
        # frames arrive as little-endian unsigned 16-bit depth values
        self.dt = np.dtype(np.uint16)
        self.dt = self.dt.newbyteorder('<')
        self._data = [np.zeros(WS_FRAME_SIZE, self.dt),
                      np.zeros(WS_FRAME_SIZE, self.dt)]
        # which half of the double buffer was written last
        self._buf = False
        self.ws = create_connection("ws://{}/".format(server))
    def stop(self):
        # ask the reader loop to exit after its current receive
        self._stop.set()
    def stopped(self):
        return self._stop.isSet()
    def get_frame(self):
        """Return a copy of the most recently completed frame.

        NOTE(review): _reading is an Event used as a lock; lock() is not an
        atomic acquire, so this is only safe with a single reader -- confirm.
        """
        self._reading.lock()
        d = np.copy(self._data[0 if self._buf else 1])
        self._reading.release()
        return d
    def run(self):
        while not self.stopped():
            d = self.ws.recv()
            d = np.frombuffer(d, dtype=self.dt)
            # block while a reader holds the buffer, then flip the halves
            self._reading.wait()
            self._data[1 if self._buf else 0] = d
            self._buf = not self._buf
        self.ws.close()
class Kimotion(BaseMatrixAnim):
    """Matrix animation rendering Kimotion depth frames as colours.

    Assumes BaseMatrixAnim provides self.width / self.height / self._led --
    TODO confirm against the installed bibliopixel version.
    """
    def __init__(self, led, server="localhost:1337", mirror=True, crop=True):
        """
        :param led: LED matrix handle supplied by bibliopixel
        :param server: Kimotion server address (host:port, no ws:// prefix)
        :param mirror: flip frames horizontally (webcam-style mirroring)
        :param crop: crop input video to the display size
        """
        super(Kimotion, self).__init__(led)
        self.max_depth = 1200
        self.server = server
        self.mirror = mirror
        self.crop = crop
        self.min = np.iinfo(np.uint16).min
        self.max = np.iinfo(np.uint16).max
        self.fw = WS_FRAME_WIDTH
        self.fh = WS_FRAME_HEIGHT
        self.frame_aspect = (float(WS_FRAME_WIDTH) / float(WS_FRAME_HEIGHT))
        self.aspect = (float(self.width) / float(self.height))
        # Bug fix: these were tuples, but the branches below assign to their
        # elements, which raises TypeError at runtime -- use mutable lists.
        self.resize_box = [self.width, self.height]
        self.crop_box = [0, 0, self.width, self.height]
        if self.frame_aspect > self.aspect:
            # frame wider than display: widen the resize target, crop left/right
            self.resize_box[0] = int(self.height * self.frame_aspect)
            half = (self.resize_box[0] - self.width) / 2
            self.crop_box[0] = half
            self.crop_box[2] = self.resize_box[0] - half
        elif self.frame_aspect < self.aspect:
            # NOTE(review): dividing by self.aspect just reproduces
            # self.height; self.frame_aspect was probably intended -- confirm
            # before changing, so original behavior is preserved here.
            self.resize_box[1] = int(self.width / self.aspect)
            half = (self.resize_box[1] - self.height) / 2
            self.crop_box[1] = half
            self.crop_box[3] = self.resize_box[1] - half
        # Pre-compute a colour for every possible depth value once.
        self.z_colors = np.array([z_color(z) for z in range(0, self.max_depth + 1)]).tolist()
        self._ws_thread = ws_thread(self.server)
        self._ws_thread.start()
    def _exit(self, type, value, traceback):
        """Stop the websocket reader thread when the animation exits."""
        self._ws_thread.stop()
    def step(self, amt=1):
        """Fetch a frame, mirror/downsample it, and paint one LED per cell."""
        d = self._ws_thread.get_frame()
        d = d.reshape(WS_FRAME_HEIGHT, WS_FRAME_WIDTH)
        if self.mirror:
            d = np.fliplr(d)
        d = rebin(d, (self.height, self.width)).astype(np.uint16)
        for y in range(self.height):
            for x in range(self.width):
                c = self.z_colors[d[y][x]]
                self._led.set(x, y, c)
# BiblioPixel animation registry entry describing this module's parameters.
MANIFEST = [
    {
        "class": Kimotion,
        "controller": "matrix",
        "desc": "Pull Kinect data from Michael Clayton's Kimotion server",
        "display": "Kimotion",
        "id": "Kimotion",
        "params": [
            {
                "default": "localhost:1337",
                "help": "Kimotion server address (minus the ws://)",
                "id": "server",
                "label": "Server",
                "type": "str"
            },
            {
                "default": True,
                "help": "Crop input video to display size.",
                "id": "crop",
                "label": "Crop",
                "type": "bool"
            },
            {
                "default": True,
                "help": "Mirrors image along vertical. Useful for webcam video.",
                "id": "mirror",
                "label": "Mirror",
                "type": "bool"
            }
        ],
        "type": "animation"
    }
]
|
#!/usr/bin/python
"""
+-----------------------------------------------------------------------+
| This is an ETL process which extracts invoice line data from the Xero|
| finance application and stages it into S3 before loading into the |
| Redshift data Warehouse. |
| |
| The overall pipeline can be summarised as: |
| |
| Xero API (Extract, Transform) ===> S3 (Stage) ===> Redshift (Load) |
| | |
| The below script takes care of the first part. |
+-----------------------------------------------------------------------+
"""
import psycopg2
import sys
import pymysql
import csv
import time
import json
import datetime
import boto3
import os
from xero import Xero
from xero.auth import PrivateCredentials
def get_line_items(inv, out_file, counter):
    """Fetch one invoice from Xero, flatten its line items and append them as
    brace-delimited JSON fragments to *out_file* and *counter*.

    Uses the module-level `xero` client.
    :param inv: Xero invoice id
    :param out_file: open file receiving the JSON fragments
    :param counter: list the caller uses for API rate limiting
    """
    # serialize dates as ISO-8601; any other non-serializable value becomes null
    date_handler = lambda obj: (
        obj.isoformat()
        if isinstance(obj, datetime.datetime)
        or isinstance(obj, datetime.date)
        else None
    )
    l = 0
    # round-trip through json to turn the SDK response into plain dicts
    y = json.dumps(xero.invoices.get(inv),default=date_handler)
    x = json.loads(y)
    line_item = []
    while l <= (len(x[0]['LineItems'])-1):
        # denormalize: stamp the invoice and contact ids onto each line item
        line_item.append((x[0]['LineItems'][l]))
        line_item[l]['InvoiceID'] = x[0]['InvoiceID']
        line_item[l]['ContactID'] = x[0]['Contact']['ContactID']
        l = l + 1
    # Re-shape '[{...}, {...}]' into '{...} {...}': strip the surrounding
    # brackets and the commas between objects -- presumably the format the
    # downstream S3/Redshift loader expects; TODO confirm.
    line_item1 = json.dumps(line_item)
    line_item2 = line_item1[1:-1]
    line_item3 = line_item2.replace('}, {','} {')
    line_item4 = str(line_item3)
    counter.append(line_item4)
    out_file.write(str(line_item4))
#create client for S3
# NOTE(review): credentials are hard-coded placeholders -- move them to
# environment variables or an AWS profile before use.
s3 = boto3.client('s3'
                  ,aws_access_key_id='XXXXXXXXXXXXXXX'
                  ,aws_secret_access_key='XXXXXXXXXXXXXXXXXXXXX'
                  ,region_name='region-subregion-number')
#retrieve RSA key
with open('/tmp/private_key_xero.key') as keyfile:
    rsa_key = keyfile.read()
#authorise API access
credentials = PrivateCredentials('key', rsa_key)
xero = Xero(credentials)
# Connect to target database (Redshift).
redshift = "host='some-resredshiftcluster-some.cluster.eu-west-1.redshift.amazonaws.com' dbname='public' user='user' password='XXX' port='5439'"
rs_pgconn = psycopg2.connect(redshift)
#set autocommit to true
rs_pgconn.set_session(autocommit=True)
# Create a cursor for target.
rs_cursor = rs_pgconn.cursor()
#query to extract invoices that need invoice lines in redshift - the query execution follows straight after
sql = """
      SELECT DISTINCT i.invoice_id
      FROM xero.invoices i
      WHERE i.date >= (SELECT MAX(date)
      FROM xero.invoices
      WHERE NOT EXISTS (SELECT DISTINCT invoice_id
      FROM xero.invoice_line_items2))
      """
rs_cursor.execute(sql)
invoice_ids = rs_cursor.fetchall()
#this will be the file name for our S3 json
file_name = time.strftime("%d_%m_%Y") + '_xero_line_items.json'
#begin querying Xero API
with open(file_name,'w', encoding='utf-8') as xero_invoices:
    line_item = []
    line_items_counter = []
    seconds = 0
    total_run_time = 0
    for invoice in invoice_ids:
        #iteration start time
        # NOTE(review): time.clock() was removed in Python 3.8; this needs
        # time.perf_counter() to run on modern interpreters.
        t0 = time.clock()
        #retrieve line items
        get_line_items(*invoice,xero_invoices,line_items_counter)
        #calculate run time and append to total time
        # NOTE(review): total_run_time accumulates the running value of
        # `seconds`, not per-iteration durations -- confirm intended.
        seconds += time.clock() - t0
        total_run_time += seconds
        #Check for the following limitation: No more than 59 calls per 59 seconds.
        if len(line_items_counter) == 59 or seconds >= 59:
            time.sleep(60)
            line_items_counter = []
            seconds = 0
        else:
            continue
#stick into S3 bucket
s3.upload_file(file_name,'json-repository',file_name)
#remove staging file from server memory
os.remove(file_name)
#close connection to redshift
rs_cursor.close()
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python3
import config
#from time import time
import time
import telebot
from telebot import types
import requests
from aiohttp import web
import ssl
bot = telebot.TeleBot(config.TOKEN)
@bot.message_handler(commands=['start'])  # verified working
def handler_start(message):
    """Greet the user and show the main reply keyboard (translate / convert / pets)."""
    user_markup = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
    user_markup.row('Переводчик', 'Конвертер', 'Котопёсики')
    bot.send_message(message.from_user.id, 'Привет, {0.first_name}!\nЯ Genesis-бот. Я многое уже умею, например, я могу перевести любую фразу с русского на английский, могу конвертировать руб/usd, могу выдать картинки с котикаме и пёсикаме'.format(message.from_user), reply_markup=user_markup)
@bot.message_handler(commands=['help'])  # verified working
def handler_help(message):
    """Reply to the /help command with a short prompt."""
    bot.send_message(message.chat.id, 'Нужна помощь?')
@bot.message_handler(commands=['about'])  # verified working
def handler_about(message):
    """Reply to the /about command with a joke self-description."""
    bot.send_message(message.chat.id, 'Меня создали в Кибердайн Систем, теперь я буду следить за тобой всегда!')
@bot.message_handler(commands=['contact'])  # verified working
def send_contact(message):
    """Send a static demo contact card to the chat."""
    # bot.send_contact(message.chat.id, request_contact=True)
    bot.send_contact(message.chat.id, phone_number='03', first_name='mynameisbot')
@bot.message_handler(commands=['location'])  # verified working
def send_location(message):
    """Send a fixed location (the Eiffel Tower) to the chat."""
    bot.send_location(message.chat.id, latitude=48.858252, longitude=2.294489)
    # NOTE(review): a /location command message carries no location payload,
    # so this prints None -- confirm whether the debug print is still wanted.
    print(message.location)
@bot.message_handler(commands=['sticker'])  # verified working
def send_sticker(message):
    """Send a hard-coded sticker (by Telegram file id) to the chat."""
    bot.send_sticker(message.chat.id, 'CAADAgADCAsAAi8P8AZv5AABGV_1eF8WBA')
    # bot.send_sticker(message.chat.id, 'CAADAgADRAADq1fEC1nUzBZy6Z-0FgQ')
@bot.message_handler(commands=['photo'])  # verified working
def send_photo(message):
    """Send a hard-coded photo (by Telegram file id) to the chat."""
    bot.send_photo(message.chat.id, 'AgADBAADBqsxGwAB0u1Qno0sRRL27S7A0yAbAAQBAAMCAAN4AAPrGwMAARYE')
@bot.message_handler(commands=['video'])
def sand_video(message):
    """Echo a video back to the chat.

    Bug fix: a plain ``/video`` command message has ``message.video is None``,
    so dereferencing ``file_id`` raised ``AttributeError``. Guard for the
    missing video and prompt the user instead of crashing the handler.
    """
    if message.video is not None:
        bot.send_video(message.chat.id, message.video.file_id)
    else:
        bot.send_message(message.chat.id, 'Пришлите видео, и я отправлю его обратно')
@bot.message_handler(commands=['audio'])
def send_audio(message):
    """Echo an audio file back to the chat.

    Bug fix: the original referenced an undefined global ``file_id`` and
    always raised ``NameError``. Use the audio attached to the message,
    if any, and prompt the user otherwise.
    """
    if message.audio is not None:
        bot.send_audio(message.chat.id, message.audio.file_id)
    else:
        bot.send_message(message.chat.id, 'Пришлите аудио, и я отправлю его обратно')
@bot.message_handler(content_types=['document'])
def send_doc(message):
    """Echo a received document back to the sender.

    Bug fix: the original referenced an undefined global ``file_id``
    (NameError). This handler only fires on 'document' content, so the id
    is reliably available on ``message.document``.
    """
    bot.send_document(message.chat.id, message.document.file_id)
@bot.message_handler(commands=['geophone'])  # verified working
def geophone(message):
    """Ask the user to share a phone number or location via reply buttons."""
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
    button_phone = types.KeyboardButton(text="Отправить номер телефона", request_contact=True)
    button_geo = types.KeyboardButton(text='Отправить геолокацию', request_location=True)
    keyboard.add(button_phone, button_geo)
    bot.send_message(message.from_user.id, 'Для заказа сообщи, пожалуйста, свой номер телефона', reply_markup=keyboard)
@bot.message_handler(commands=['convert'])  # verified working, ReplyKeyboard
def convert(message):
    """Offer the two currency-conversion directions as reply buttons.

    The button texts are matched verbatim by the text() handler below.
    """
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
    button1 = types.KeyboardButton(text='Хочу перевести рубли в доллары')
    button2 = types.KeyboardButton(text='Хочу перевести доллары в рубли')
    keyboard.add(button1)
    keyboard.add(button2)
    bot.send_message(message.chat.id, 'Что будем делать?', reply_markup=keyboard)
@bot.message_handler(commands=['cat'])
def getcat(message):
    """Show a one-off keyboard with the 'get a cat' button."""
    keyboard = types.ReplyKeyboardMarkup(True, True)
    keyboard.row('Получить котика')
    bot.send_message(message.chat.id, 'Нажми кнопарик ниже', reply_markup=keyboard)
@bot.message_handler(regexp="Получить котика")
def cat(message):
# картинки с котиками
url = 'https://api.thecatapi.com/v1/images/search?mime_type=jpg'
res = requests.get(url)
data = res.json()
cat = data[0]['url']
bot.send_photo(message.chat.id, cat)
@bot.message_handler(commands=['dog'])
def getdog(message):
    """Show a one-off keyboard with the 'get a dog' button."""
    keyboard = types.ReplyKeyboardMarkup(True, True)
    keyboard.row('Получить пёсика')
    bot.send_message(message.chat.id, 'Нажми кнопочку ниже', reply_markup=keyboard)
@bot.message_handler(regexp="Получить пёсика")
def dog(message):
# картинки с собачками
url = 'https://api.thedogapi.com/v1/images/search?mime_type=jpg'
res = requests.get(url)
data = res.json()
dog = data[0]['url']
bot.send_photo(message.chat.id, dog)
@bot.message_handler(content_types=["text"])
def text(message):
if message.text == 'Хочу перевести рубли в доллары':
bsm = bot.send_message(message.chat.id, 'Сколько рублей хотите перевести?')
bot.register_next_step_handler(bsm, next_usd)
elif message.text == 'Хочу перевести доллары в рубли':
bsm = bot.send_message(message.chat.id, 'Сколько долларов хотите перевести?')
bot.register_next_step_handler(bsm, next_rub)
elif message.text == 'Конвертер':
bot.send_message(message.from_user.id, 'Командой /convert можете начать конвертацию')
elif message.text == 'Переводчик':
bot.send_message(message.from_user.id, 'Это функция-транслейтер, напишите какую-нибудь фразу на русском языке и я переведу это на английский')
elif message.text == 'Котопёсики':
# keyboard = types.ReplyKeyboardMarkup(True)
# bot.send_message(message.from_user.id, 'Теперь без кнопок', reply_markup=keyboard)
# hide_keyboard = types.ReplyKeyboardRemove()
bot.send_message(message.from_user.id, text='Выберите котика командой /cat или пёсика командой /dog')
else:
json = translate_me(message.text) # перевод введенного текста
bot.send_message(message.chat.id, '__pycache__/'.join(json['text']))
def next_rub(message):
    """Second step of the USD->RUB dialog: message.text is the dollar amount."""
    bot.send_message(message.chat.id, 'Сумма в рублях: ' + str(float(message.text) * rate()))
def next_usd(message):
    """Second step of the RUB->USD dialog: message.text is the rouble amount.

    Also fixes a spelling error in the user-facing reply
    ('доларах' -> 'долларах').
    """
    bot.send_message(message.chat.id, 'Сумма в долларах: ' + str(float(message.text) / rate()))
def rate():
    """Return the current USD->RUB exchange rate as a float."""
    payload = requests.get('https://api.exchangerate-api.com/v4/latest/USD').json()
    return float(payload['rates']['RUB'])
def translate_me(my_text):
    """Translate *my_text* from Russian to English via the Yandex API.

    Returns the decoded JSON response (the translation is under 'text').
    """
    query = {
        'key': config.ya_api_key,
        'text': my_text,
        'lang': 'ru-en',  # translation direction: Russian -> English
    }
    return requests.get(config.ya_api_url, params=query).json()
@bot.message_handler(commands=["reg"])
def start (message):
keyboard = types.InlineKeyboardMarkup()
but_1 = types.InlineKeyboardButton(text="Зарегестрироваться", callback_data="register")
keyboard.add(but_1)
bot.send_message(message.chat.id, "Добро пожаловать!", reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: True)
def callback_inline(call):
    """Handle inline-button presses; only the 'register' callback is known."""
    if call.message:
        if call.data == "register":
            bot.send_message(call.message.chat.id, "Вы были зарегестрированы!")
if __name__ == '__main__':
    # Keep the bot alive: restart long polling after any network/API error.
    while True:
        try:
            bot.polling(none_stop=True)
        except Exception as ex:
            # Broad catch is deliberate here: log the error and retry after
            # a pause rather than letting the bot process die.
            print(ex)
            time.sleep(10)
|
"""
Storing model data in the database
FIXME: Most of this has nothing to do with database storage, but rather manipulating
pandas DataFrames. Move/rename so as not to imply anything I/O related
"""
import logging
from typing import List
import pandas as pd
import yaml
from summer.model import CompartmentalModel
from autumn.core.db.database import get_database, BaseDatabase
from . import process
logger = logging.getLogger(__name__)
class Table:
    """Canonical table names used in the calibration output database."""

    MCMC = "mcmc_run"  # one row per MCMC iteration
    PARAMS = "mcmc_params"  # sampled parameter values per run
    OUTPUTS = "outputs"  # raw compartment outputs
    DERIVED = "derived_outputs"  # post-processed model outputs
def save_mle_params(database_path: str, target_path: str):
    """
    Saves the MCMC parameters for the MLE run as a YAML file in the target path.
    """
    db = get_database(database_path)
    # The maximum-likelihood parameter set is selected from the run and
    # parameter tables by the process module.
    mcmc_df = db.query("mcmc_run")
    param_df = db.query("mcmc_params")
    mle_params = process.find_mle_params(mcmc_df, param_df)
    with open(target_path, "w") as f:
        yaml.dump(mle_params, f)
def save_model_outputs(dest_db: BaseDatabase, **kwargs):
    """
    Convenience wrapper to save a set of model outputs (DataFrames) to a database store.

    Usually used by dumping a dictionary to kwargs.

    Args:
        dest_db: The target database in which to store the specified outputs
        **kwargs: Argument pairs of the form TableName=DataFrame

    Examples:
        >>> save_model_outputs(outputs_db, **collated_output_dict)
    """
    for name, frame in kwargs.items():
        dest_db.dump_df(name, frame)
def build_outputs_table(models: "List[CompartmentalModel]", run_id: int, chain_id=None):
    """
    Collate raw compartment outputs from a list of models into one DataFrame.

    Args:
        models: Models that have already been run (one per scenario).
        run_id: MCMC run index stored alongside the outputs.
        chain_id: Optional MCMC chain index.

    Returns:
        DataFrame with chain/run/scenario/times columns followed by one
        column per model compartment, or None if ``models`` is empty.
    """
    # Annotation is a string so the function can be defined lazily without
    # importing CompartmentalModel at call-definition time.
    frames = []
    for idx, model in enumerate(models):
        names = [str(c) for c in model.compartments]
        # Save model outputs, tagged with run metadata.
        df = pd.DataFrame(model.outputs, columns=names)
        df.insert(0, column="chain", value=chain_id)
        df.insert(1, column="run", value=run_id)
        df.insert(2, column="scenario", value=idx)
        df.insert(3, column="times", value=model.times)
        frames.append(df)
    # Bug fix: DataFrame.append was deprecated and removed in pandas 2.x;
    # concatenate all scenario frames in a single pass instead.
    return pd.concat(frames, ignore_index=True) if frames else None
def build_derived_outputs_table(models: "List[CompartmentalModel]", run_id: int, chain_id=None):
    """
    Collate derived outputs from a list of models into one DataFrame.

    Args:
        models: Models that have already been run (one per scenario).
        run_id: MCMC run index stored alongside the outputs.
        chain_id: Optional MCMC chain index.

    Returns:
        DataFrame with chain/run/scenario/times columns followed by one
        column per derived output, or None if ``models`` is empty.
    """
    # Annotation is a string so the function can be defined lazily without
    # importing CompartmentalModel at call-definition time.
    frames = []
    for idx, model in enumerate(models):
        # Save model derived outputs, tagged with run metadata.
        df = pd.DataFrame.from_dict(model.derived_outputs)
        df.insert(0, column="chain", value=chain_id)
        df.insert(1, column="run", value=run_id)
        df.insert(2, column="scenario", value=idx)
        df.insert(3, column="times", value=model.times)
        frames.append(df)
    # Bug fix: DataFrame.append was deprecated and removed in pandas 2.x;
    # concatenate all scenario frames in a single pass instead.
    return pd.concat(frames, ignore_index=True) if frames else None
|
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
from allauth.socialaccount import app_settings
class PinterestAccount(ProviderAccount):
    """Wraps the extra_data returned by the Pinterest API for one account."""

    def get_profile_url(self):
        # NOTE(review): returns the raw account id, not a full profile URL --
        # confirm whether callers expect a URL here.
        return self.account.extra_data.get('id')

    def get_avatar_url(self):
        # Assumes extra_data always carries an 'image' dict with a '60x60'
        # entry; raises AttributeError otherwise -- TODO confirm upstream.
        return self.account.extra_data.get('image').get("60x60").get("url")

    def to_str(self):
        # Prefer the Pinterest username, falling back to the base-class repr.
        dflt = super(PinterestAccount, self).to_str()
        name = self.account.extra_data.get('username', dflt)
        first_name = self.account.extra_data.get('first_name', None)
        last_name = self.account.extra_data.get('last_name', None)
        #if first_name and last_name:
        #    name = first_name + ' ' + last_name
        return name
class PinterestProvider(OAuth2Provider):
    """django-allauth provider definition for Pinterest OAuth2."""

    id = 'pinterest'
    # Name is displayed to ordinary users -- don't include protocol
    name = 'Pinterest'
    package = 'allauth.socialaccount.providers.pinterest'
    account_class = PinterestAccount

    def extract_uid(self, data):
        """Return the unique Pinterest account id as a string.

        Bug fix: removed a leftover Python-2-style debug statement
        ("print json.dumps(data)") that dumped raw profile data to stdout
        and was a syntax error under Python 3.
        """
        return str(data['id'])

    def get_profile_fields(self):
        """Profile fields requested from the API (overridable via the
        provider's PROFILE_FIELDS setting)."""
        default_fields = ['id',
                          'username',
                          'first_name',
                          'last_name',
                          'bio',
                          'counts',
                          'image',
                          'created_at']
        fields = self.get_settings().get('PROFILE_FIELDS',
                                         default_fields)
        return fields

    def get_default_scope(self):
        """Default OAuth scopes: public read and write."""
        scope = ["read_public", "write_public"]
        return scope

    def extract_common_fields(self, data):
        """Map Pinterest profile data onto allauth's common user fields."""
        return dict(username=data.get("username"),
                    first_name=data.get('first_name'),
                    last_name=data.get('last_name'),
                    avatar=data.get('image')
                    )


providers.registry.register(PinterestProvider)
|
# Osuran Uzaylilar ("Farting Aliens") -- a small interactive text adventure.
def run():
    """Run the text-adventure game loop.

    All interaction happens through print()/input(); scenes return the name
    of the next scene, and the Map class resolves names to scene objects.
    """
    # Imports are local so the game's dependencies load only when it starts.
    from sys import exit
    from random import randint
    from textwrap import dedent

    class Death:
        # Terminal scene: print a random taunt and exit the process.
        quips = ["Ben olsam daha iyi oynardım",
                 "Bir daha dene şansını",
                 "Sen oynayamıyorsun"]

        def enter(self):
            print(Death.quips[randint(0, len(self.quips)-1)])
            exit(1)

    class Tuvalet:
        # Toilet room: any choice eventually returns to the bridge.
        def enter(self):
            print(dedent("""
                Tuvalet'e girdin.
                Hazır gelmişken sıç ta git.
                Sıçmak için 1, işemek için 2, osurmak için 3 yaz
                Sifonu çekmeyi unutma...
                """))
            sic_ise = input('> ')
            if int(sic_ise) == 1:
                print("Sıçıp çıktın, inşallah sifonu çekmişsindir")
                return 'kopru'
            elif int(sic_ise) == 2:
                print("İşeyip çıktın, inşallah sifonu çekmişsindir")
                return 'kopru'
            else:
                print("Osurdun, ooofff kedi mi yedin ne yaptın leş gibi koktu\nÇıktın tuvaletten")
                return 'kopru'

    class Hamam:
        # Bath room: the captive is found here; only choice 1 survives.
        def enter(self):
            print(dedent("""
                Aha Abuzittin'i buldun.
                Puşt hamam da keyif yaparken sıcaktan bayılmış
                ve baygın yatıyor.
                Abuzittin'i aldın.
                Şimdi bir şekilde çıkmanız lazım.
                Sadece bir odada gizli bir çıkış var ve bu çıkışı bulman lazim.
                Hamamdan Köprü'ye geri çıkmak için 1 yaz.
                """))
            selection = input('> ')
            if int(selection) == 1:
                return 'kopru2'
            else:
                return 'death'

    class Kopru2:
        # Bridge after the rescue: only returning to the entrance is safe.
        def enter(self):
            print(dedent("""
                Köprü'ye geri döndün.
                Uzaylılar sizi bulmadan gizli çıkışın olduğu odayı bulman lazim.
                Ya Giriş'e geri dön, ya da Tuvalet veya Hamam'a tekrar git.
                Odalardan birini seç:
                1. Giriş'e geri dön'
                2. Tuvalet
                3. Hamam
                """))
            selection = input('> ')
            if int(selection) == 1:
                return 'giris2'
            elif int(selection) == 2:
                print(dedent("""
                    Tuvalet'e girdin ve çok vakit kaybettin.
                    Az ye az sıç bir daha...
                    Uzaylılar geldi osurarak öldürdüler sizi.
                    """))
                return 'death'
            else:
                print(dedent("""
                    Hamam'a girdiniz ve Çok vakit kaybettiniz.
                    Üç uzaylı geldi ve üzerinize sıçtılar.
                    Bokun altında kalıp öldünüz...
                    """))
                return 'death'

    class Kopru:
        # Bridge: leads to the toilet, or to the bath behind a 2-digit
        # random code (digits 1-2, three attempts).
        def enter(self):
            print(dedent("""
                Köprü'ye girdin.
                Karşına iki uzaylı çıktı ve elindeki odunla ikisininde kafasina vurup öldürdün.
                Simdi, iki tane daha odaya açılan kapı var.
                Odalardan birini seç:
                1. Tuvalet
                2. Hamam
                """))
            selection = input('> ')
            if int(selection) == 1:
                return 'tuvalet'
            else:
                print(dedent("""
                    Kapıda parolalı kilit var.
                    2 rakamlı bir şifresi var(mesela 21).
                    Rakamlar sadece 1 ve 2 den oluşuyor
                    Yalnızca 3 hakkın var
                    """))
                # Random 2-digit code built from digits 1 and 2.
                paswd = int(f"{randint(1,2)}{randint(1,2)}")
                selection = input('Password : ')
                trial = 1
                while int(selection) != paswd and trial < 3:
                    print("Try again!")
                    trial += 1
                    selection = input('Password : ')
                if int(selection) == paswd:
                    print("Tebrikler şifreyi doğru yazdın ve Hamam'a girdin")
                    return 'hamam'
                else:
                    print(dedent("""
                        Çok geç kaldın.
                        2 uzaylı geldi.
                        Üzerine işediler öldün!
                        """))
                    return 'death'

    class Kiler:
        # Cellar before the rescue: a dead end, back to the entrance.
        def enter(self):
            print(dedent("""
                Kiler'e girdin. Karşına bir uzaylı çıktı.
                Kafasına odunla vurup öldürdün.
                Yalnız baktın Kiler'de Abuzittin falan yok.
                Giriş'e geri dönmek için 1 yaz
                """))
            selection = input('> ')
            if int(selection) == 1:
                return 'giris3'
            else:
                return 'death'

    class Kiler2:
        # Cellar after the rescue: hides the exit behind a 3-digit random
        # code (digits 1-3, ten attempts).
        def enter(self):
            print(dedent("""
                Kiler'e geldin. Karşına bir uzaylı çıktı.
                Osurup uzaylıyı öldürdün.
                Etrafa dikkatlice bak.
                Karşında bir ayna olacak.
                1. Aynayı kır
                2. Giriş'e geri dön
                """))
            selection = input('> ')
            if int(selection) == 1:
                print(dedent("""
                    Aynanın arkasında kocaman bir kapı var.
                    Kapı da yine parolalı kilit var.
                    Parolayı doğru bilmen lazım.
                    Parola 3 rakamlı (mesela 311)
                    Rakamlar sadece 1, 2 ve 3 den oluşuyor.
                    Sadece 10 hakkın var.
                    """))
                paswd = int(f"{randint(1,3)}{randint(1,3)}{randint(1,3)}")
                selection = input('Password : ')
                trial = 1
                while int(selection) != paswd and trial < 10:
                    print("Try again!")
                    trial += 1
                    selection = input('Password : ')
                if int(selection) == paswd:
                    print("Tebrikler şifreyi dogru yazdın ve gizli çıkısı buldun")
                    return 'bitis'
                else:
                    print(dedent("""
                        Parolayı bilemedin.
                        10 uzaylı geldi ve osuruk partisi yaptılar.
                        Kokudan zehirlenip ikiniz de öldünüz.
                        """))
                    return 'death'
            else:
                return 'giris2'

    class AnaOda:
        # Main room: always fatal.
        def enter(self):
            print(dedent("""
                Ana odaya girdin. Odada 5 tane uzaylı var.
                Hepsi birden osurdular ve anında kanser olup öldün.
                """))
            return 'death'

    # First entrance scene: choose one of the three rooms.
    class Giris:
        def enter(self):
            print(dedent("""
                Karargahın girişine vardın.
                Bugün şanslı günündesin ve uzaylılar uyku molasında.
                İçeriye girdin ve karşına üç oda çıktı.
                Abuzittin'i kapattıkları odayı bulman gerekiyor.
                Üç odadan birini seç:
                1. Ana oda
                2. Köprü
                3. Kiler
                """))
            selection = input('> ')
            if int(selection) == 1:
                return 'anaoda'
            elif int(selection) == 2:
                return 'kopru'
            else:
                return 'kiler'

    class Giris2:
        # Entrance after the rescue: only the cellar path survives.
        def enter(self):
            print(dedent("""
                Giriş'e geri geldin.
                Karşında 3 oda var.
                Uzaylılar iyice kıllanmadan hemen odayı bul.
                Üç odadan birini seç:
                1. Ana oda
                2. Köprü
                3. Kiler
                """))
            selection = input('> ')
            if int(selection) == 1:
                return 'anaoda'
            elif int(selection) == 2:
                print(dedent("""
                    Köprü'ye tekrar girmeye çalışırken uzaylılar sizi yakaladı.
                    3 uzaylı geldi üzerinize sıçtılar bokun altında kalıp ikinizde öldünüz...
                    """))
                return 'death'
            else:
                return 'kiler2'

    class Giris3:
        # Entrance revisited before the rescue: same choices as Giris.
        def enter(self):
            print(dedent("""
                Giriş'e geri geldin.
                Karşında 3 oda var.
                Abuzittin'i kapattıkları odayı bulman gerekiyor.
                Üç odadan birini seç:
                1. Ana oda
                2. Köprü
                3. Kiler
                """))
            selection = input('> ')
            if int(selection) == 1:
                return 'anaoda'
            elif int(selection) == 2:
                return 'kopru'
            else:
                return 'kiler'

    class Bitis:
        # Winning terminal scene.
        def enter(self):
            print(dedent("""
                Tebrikler!.
                Görevi basarıyla tamamladın.
                Abuzittin'i kurtarıp dünyaya geri getirdin.
                Abuzittin başına gelenlerden sonra artık sadece
                koyundan yapıyor dönerleri.
                """))
            return 'bitis'

    # Scene registry: maps scene names to scene objects.
    class Map:
        # Rooms and terminal states are stored here, keyed by name.
        scenes = {'giris' : Giris(),
                  'giris2' : Giris2(),
                  'giris3' : Giris3(),
                  'anaoda' : AnaOda(),
                  'kopru' : Kopru(),
                  'kopru2' : Kopru2(),
                  'kiler' : Kiler(),
                  'kiler2' : Kiler2(),
                  'death' : Death(),
                  'tuvalet' : Tuvalet(),
                  'hamam' : Hamam(),
                  'bitis': Bitis()
                  }

        def __init__(self, scene_name):
            self.scene_name = scene_name

        def simdiki_oda(self):
            # Return the scene object for the current scene name.
            return Map.scenes.get(self.scene_name)

    # Welcome message / backstory for the game.
    print(dedent("""
        OSURAN UZAYLILAR'a hosgeldiniz\n\n
        Gecenlerde sizin mahalleye 100 kadar uzayli gelmisti Marstan.
        Bizim dönerci Abuzittin uzaylıların kaldiklari yeri keşfetmiş.
        Uzaylıları yakalayıp, kesip dönerlere doldurup koyun eti diye kakalıyormuş.
        Neyse, bunu ögrenen uzaylılar Mars'tan bir ekip yollayıp bizim Abuzittin'i
        kaçırmışlar ve Mars'a götürmüşler.
        Senin görevin Mars'a gidip Abuzittin'i kapattıkları
        karargahtan onu kaçırıp dünyaya geri getirmek.
        Sen de 50 milyon dolar ödülü duyar duymaz
        vakit kaybetmeden galeriden aldın bir uzay gemisi
        yardırdın Mars'a
        """))
    # Main loop: each scene returns the next scene's name; 'bitis' plays
    # the ending scene once more and terminates the loop.
    room_map = Map('giris')
    while True:
        sonraki_oda = room_map.simdiki_oda().enter()
        room_map = Map(sonraki_oda)
        if sonraki_oda == 'bitis':
            sonraki_oda = room_map.simdiki_oda().enter()
            break
|
# Generated by Django 2.0.6 on 2019-09-04 12:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema snapshot generated by ``inspectdb``-style introspection.

    All models use ``managed = False``: Django records them for ORM access
    but will not create, alter or drop the underlying tables.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='AuthGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80, unique=True)),
            ],
            options={
                'db_table': 'auth_group',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='AuthGroupPermissions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                'db_table': 'auth_group_permissions',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='AuthPermission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('codename', models.CharField(max_length=100)),
            ],
            options={
                'db_table': 'auth_permission',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='AuthUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128)),
                ('last_login', models.DateTimeField(blank=True, null=True)),
                ('is_superuser', models.IntegerField()),
                ('username', models.CharField(max_length=150, unique=True)),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=150)),
                ('email', models.CharField(max_length=254)),
                ('is_staff', models.IntegerField()),
                ('is_active', models.IntegerField()),
                ('date_joined', models.DateTimeField()),
            ],
            options={
                'db_table': 'auth_user',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='AuthUserGroups',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                'db_table': 'auth_user_groups',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='AuthUserUserPermissions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                'db_table': 'auth_user_user_permissions',
                'managed': False,
            },
        ),
        # Shopping-cart table (note: 'car' appears to be short for 'cart').
        migrations.CreateModel(
            name='Car',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.IntegerField(blank=True, null=True)),
                ('book_id', models.IntegerField(blank=True, null=True)),
                ('products_price', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
                ('discount_price', models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True)),
                ('products_count', models.IntegerField(blank=True, null=True)),
                ('column_7', models.CharField(blank=True, db_column='Column_7', max_length=20, null=True)),
            ],
            options={
                'db_table': 'car',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='DCategory',
            fields=[
                ('category_id', models.DecimalField(decimal_places=0, max_digits=20, primary_key=True, serialize=False)),
                ('category_name', models.CharField(max_length=20)),
                ('book_counts', models.DecimalField(blank=True, decimal_places=0, max_digits=10, null=True)),
                ('category_pid', models.DecimalField(blank=True, decimal_places=0, max_digits=20, null=True)),
            ],
            options={
                'db_table': 'd_category',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='DjangoAdminLog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action_time', models.DateTimeField()),
                ('object_id', models.TextField(blank=True, null=True)),
                ('object_repr', models.CharField(max_length=200)),
                ('action_flag', models.PositiveSmallIntegerField()),
                ('change_message', models.TextField()),
            ],
            options={
                'db_table': 'django_admin_log',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='DjangoContentType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('app_label', models.CharField(max_length=100)),
                ('model', models.CharField(max_length=100)),
            ],
            options={
                'db_table': 'django_content_type',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='DjangoMigrations',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('app', models.CharField(max_length=255)),
                ('name', models.CharField(max_length=255)),
                ('applied', models.DateTimeField()),
            ],
            options={
                'db_table': 'django_migrations',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='DjangoSession',
            fields=[
                ('session_key', models.CharField(max_length=40, primary_key=True, serialize=False)),
                ('session_data', models.TextField()),
                ('expire_date', models.DateTimeField()),
            ],
            options={
                'db_table': 'django_session',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='DOrderiterm',
            fields=[
                ('shop_id', models.DecimalField(decimal_places=0, max_digits=20, primary_key=True, serialize=False)),
                ('shop_num', models.DecimalField(decimal_places=0, max_digits=20)),
                ('total_price', models.DecimalField(decimal_places=2, max_digits=8)),
            ],
            options={
                'db_table': 'd_orderiterm',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='TAddress',
            fields=[
                ('id', models.DecimalField(decimal_places=0, max_digits=20, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=20)),
                ('detail_address', models.CharField(max_length=100)),
                ('zipcode', models.CharField(max_length=20)),
                ('telphone', models.CharField(blank=True, max_length=20, null=True)),
                ('addr_mobile', models.CharField(blank=True, max_length=20, null=True)),
                ('user_id', models.CharField(blank=True, max_length=20, null=True)),
            ],
            options={
                'db_table': 't_address',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='TBook',
            fields=[
                ('book_id', models.BigAutoField(primary_key=True, serialize=False)),
                ('book_name', models.CharField(blank=True, max_length=255, null=True)),
                ('book_author', models.CharField(blank=True, max_length=255, null=True)),
                ('book_publish', models.CharField(blank=True, max_length=255, null=True)),
                ('publish_time', models.CharField(blank=True, max_length=255, null=True)),
                ('revision', models.IntegerField(blank=True, null=True)),
                ('book_isbn', models.CharField(blank=True, max_length=255, null=True)),
                ('word_count', models.CharField(blank=True, max_length=64, null=True)),
                ('page_count', models.IntegerField(blank=True, null=True)),
                ('open_type', models.CharField(blank=True, max_length=64, null=True)),
                ('book_category', models.IntegerField(blank=True, null=True)),
                ('book_wrapper', models.CharField(blank=True, max_length=255, null=True)),
                ('book_price', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('book_dprice', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('editor_recommendation', models.CharField(blank=True, max_length=2000, null=True)),
                ('content_introduction', models.CharField(blank=True, max_length=2000, null=True)),
                ('author_introduction', models.CharField(blank=True, max_length=2000, null=True)),
                ('menu', models.CharField(blank=True, max_length=2000, null=True)),
                ('media_review', models.CharField(blank=True, max_length=2000, null=True)),
                ('digest_image_path', models.CharField(blank=True, max_length=2000, null=True)),
                ('product_image_path', models.CharField(blank=True, max_length=2000, null=True)),
                ('series_name', models.CharField(blank=True, max_length=128, null=True)),
                ('printing_time', models.DateField(blank=True, null=True)),
                ('impression', models.CharField(blank=True, max_length=64, null=True)),
                ('stock', models.IntegerField(blank=True, null=True)),
                ('shelves_date', models.DateField(blank=True, null=True)),
                ('customer_socre', models.DecimalField(blank=True, decimal_places=2, max_digits=3, null=True)),
                ('book_status', models.DecimalField(blank=True, decimal_places=0, max_digits=1, null=True)),
                ('sales', models.DecimalField(blank=True, decimal_places=0, max_digits=10, null=True)),
                ('book_size', models.CharField(blank=True, max_length=255, null=True)),
                ('book_pager', models.CharField(blank=True, max_length=255, null=True)),
            ],
            options={
                'db_table': 't_book',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='TOrder',
            fields=[
                ('id', models.DecimalField(decimal_places=0, max_digits=20, primary_key=True, serialize=False)),
                ('num', models.DecimalField(blank=True, decimal_places=0, max_digits=20, null=True)),
                ('create_date', models.DateTimeField(blank=True, null=True)),
                ('price', models.DecimalField(decimal_places=0, max_digits=10)),
                ('status', models.DecimalField(blank=True, decimal_places=0, max_digits=1, null=True)),
            ],
            options={
                'db_table': 't_order',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='TUser',
            fields=[
                ('user_id', models.DecimalField(decimal_places=0, max_digits=20, primary_key=True, serialize=False)),
                ('user_email', models.CharField(max_length=50)),
                ('user_password', models.CharField(max_length=300)),
                ('user_name', models.CharField(blank=True, max_length=30, null=True)),
                ('user_status', models.CharField(blank=True, max_length=200, null=True)),
            ],
            options={
                'db_table': 't_user',
                'managed': False,
            },
        ),
    ]
|
"""
#------------------------------------------------------------------------------
# simple_crane_trapezoidal.py
#
# Generate a simple crane model to compare feedback control and input shaping methods
# I hope to use this in a jupyter notebook to serve as a tutorial on input shaping
#
# Created: 1/30/18 - Daniel Newman -- dmn3669@louisiana.edu
#
# Modified:
# * 1/30/18 - DMN -- dmn3669@louisiana.edu
# - Added documentation for this script
# TODO:
# * Eliminate external dependencies
#------------------------------------------------------------------------------
"""
import numpy as np
import control
from scipy.integrate import odeint
import pdb
import sys
import os
# Add my local path to the relevant modules list
sys.path.append('/Users/Daniel/Github/Crawlab-Student-Code/Daniel Newman/Python Modules')
# Import my python modules
# TODO: Break out these modules to demonstrate what exactly I'm doing
import InputShaping as shaping
import Generate_Plots as genplt
# Figures are written to a per-script folder (named after the invoked script)
# so different runs don't overwrite each other's plots.
folder = 'Figures/{}/'.format(
    sys.argv[0],
)
# Ensure that the folder we want to save to exists
if not os.path.exists(folder):
    os.makedirs(folder)
g = 9.81  # gravitational acceleration (m/s^2)
tmax = 15  # simulation duration (s)
dt = 0.01  # integration time step (s)
t = np.arange(0, tmax, dt)
l = 1.  # cable length (m)
f_max = 10.  # actuator force saturation limit
# Initial states
theta0 = 0.0
theta0_dot = 0.
x0 = 0.
x0_dot = 0.
# Desired states
thetad = 0.
thetad_dot = 0.
xd = 1.5
xd_dot = 0.
# NOTE(review): Amax/Vmax look like trapezoidal-profile limits but are not
# used anywhere below -- confirm whether they are leftovers.
Amax = 10.
Vmax = 0.5
# These are the initial and desired states, ordered [theta, theta_dot, x, x_dot]
Xd = np.array([[thetad], [thetad_dot], [xd], [xd_dot]])
X0 = np.array([[theta0], [theta0_dot], [x0], [x0_dot]])
# Disturbance history, one column per state (currently all zeros).
Disturbance = np.zeros([len(t), 4])
#Disturbance[:,1] = shaping.pulse(t,20,0.1,7.5)
def actuator_effort(response, K, Shaper, time=t):
    """
    Reconstruct the feedback actuator effort for a simulated response.

    Args:
        response: (len(time), n_states) array of simulated states.
        K: feedback gain row vector.
        Shaper: input shaper used to build the position reference.
        time: time vector. NOTE: defaults to the module-level ``t``, bound
            once at definition time.

    Returns:
        1-D array of actuator effort at each time step.
    """
    # Reference trajectory: a shaped step in trolley position (state 2) only.
    X_ref = np.zeros([len(time), len(X0)])
    X_ref[:, 2] = shaping.shaped_input(shaping.step_input, time, Shaper, (Xd - X0)[2])
    # Removed an unused local ('shape_step') from the original.
    tau = np.zeros([len(time), 1])
    for i in range(len(time)):
        tau[i, :] = np.matmul(K, (X_ref[i, :] - response[i, :]))
    return tau[:, 0]
def linear_eigs(A, B, Gains):
    """
    Natural frequencies and damping ratios of the closed loop A - B @ Gains.T.

    Args:
        A: (n, n) open-loop state matrix.
        B: (n, 1) input matrix.
        Gains: (n, 1) feedback gain column vector.

    Returns:
        Tuple (nat_freqs, damping_ratios) for the nonzero modes, ordered by
        ascending natural frequency (rad/s).
    """
    closed_loop = A - np.matmul(B, Gains.T)
    eigenvals, eigenvects = np.linalg.eig(closed_loop)
    # Eigenvalues come in conjugate pairs; keep one of each pair.
    eigen = eigenvals[0:A.shape[0]:2]
    eigen_abs = np.abs(eigen)
    # Damping ratio of each mode: |Re(lambda)| / |lambda|.
    damp = np.abs(np.real(eigen)/eigen_abs)
    # Removed an unused local ('damp_index') from the original.
    eigen_index = np.argsort(eigen_abs)
    nat_freq_to_total = np.zeros(int(A.shape[0]/2))
    damp_to_total = np.zeros(int(A.shape[0]/2))
    nat_freq_columns = np.column_stack(eigen_abs[eigen_index][::1][:int(A.shape[0]/2)][0:int(A.shape[0]/2)])
    nat_freq_to_total = np.vstack((nat_freq_to_total,nat_freq_columns))
    damp_columns = np.column_stack(damp[eigen_index][::1][:int(A.shape[0]/2)][0:int(A.shape[0]/2)])
    damp_to_total = np.vstack((damp_to_total, damp_columns))
    # Drop the zero seed rows used for stacking.
    damp_to_total = np.delete(damp_to_total,0,0)
    nat_freq_to_total = np.delete(nat_freq_to_total,0,0)
    # Treat numerically tiny frequencies as rigid-body (zero) modes.
    nat_freq_to_total[np.abs(nat_freq_to_total) < 1e-2] = 0
    return nat_freq_to_total[np.nonzero(nat_freq_to_total)],damp_to_total[np.nonzero(nat_freq_to_total)]
def response(X0,t,Xd,params,input_type,Gains=None,Disturbance=None,optimize=False):
    """
    Simulate the closed-loop crane response for a given control strategy.

    Args:
        X0: initial state column vector [theta, theta_dot, x, x_dot].
        t: time vector.
        Xd: desired state column vector.
        params: (l, f_max) -- cable length and actuator saturation.
        input_type: 'LQR', 'ZV', an ndarray shaper, or anything else for an
            unshaped step reference.
        Gains: optional feedback gains (overwritten internally per strategy).
        Disturbance: optional (len(t), n_states) disturbance history.
        optimize: when True, skip actuator saturation clipping.

    Returns:
        Tuple (states, Shaper, Gains): odeint state history, the shaper and
        the feedback gains actually used.
    """
    l,f_max = params
    X_ref = np.zeros(len(X0))
    Dist = np.zeros(len(X0))
    des_states = np.zeros([len(t),len(X0)])
    # Linearised crane model (same structure as the module-level A/B).
    A = np.array([[0, 1, 0, 0],
                  [-g/l, 0, 0, 0],
                  [0, 0, 0, 1],
                  [0, 0, 0, 0]])
    B = np.array([[0],[1/l],[0],[1]])
    # NOTE(review): C and D are defined but unused in this function.
    C = np.array([[1, 0, 0, 0],  # theta
                  [0, 0, 1, 0],  # x
                  [-l, 0, 1, 0]])  # x - l theta (paylod horiz position)
    D = np.array([[0],[0],[0]])
    if input_type == 'LQR':
        # Unshaped reference (unit impulse shaper); gains from LQR design.
        Shaper = np.array([[0.,1]])
        Q = np.array([[ 1000, 0, 0, 0],
                      [ 0, 10, 0, 0],
                      [ 0, 0, 1000, 0],
                      [ 0, 0, 0, 0]])
        R = 0.01
        Gains,S,E = control.lqr(A,B,Q,R)
        # Generate the reference angular trajectories based on the desired reference input and the input shaper
        shaped_pos = shaping.shaped_input(shaping.step_input,t,Shaper,(Xd - X0)[2])
        des_states[:,2] = shaped_pos
        omegas,zetas = linear_eigs(A,B,np.atleast_2d(Gains).T)
        print('Omegas: {}'.format(omegas))
        print('Zetas: {}'.format(zetas))
    elif input_type == 'ZV':
        # Two-mode ZV shaper designed from the closed-loop eigenstructure.
        Gains = np.array([0,0,1,0])
        omegas,zetas = linear_eigs(A,B,np.atleast_2d(Gains).T)
        omegas /= 2 * np.pi
        print('Omegas: {}'.format(omegas))
        print('Zetas: {}'.format(zetas))
        print('Shaper 1: {}'.format(shaping.ZV(omegas[0],zetas[0]).shaper))
        print('Shaper 2: {}'.format(shaping.ZV(omegas[1],zetas[1]).shaper))
        Shaper = shaping.ZV_2mode(omegas[0],zetas[0],omegas[1],zetas[1]).shaper
        print(Shaper)
        shaped_pos = shaping.shaped_input(shaping.step_input,t,Shaper,(Xd - X0)[2])
        des_states[:,2] = shaped_pos
    elif isinstance(input_type,np.ndarray):
        # Caller supplied an explicit shaper array.
        Gains = np.array([0,0,1,0])
        Shaper = input_type
        shaped_pos = shaping.shaped_input(shaping.step_input,t,Shaper,(Xd - X0)[2])
        des_states[:,2] = shaped_pos
    else:
        # Unshaped baseline: unit impulse shaper, position-only feedback.
        Gains = np.array([0,0,1,0])
        Shaper = np.array([[0,1]])
        omegas,zetas = linear_eigs(A,B,np.atleast_2d(Gains).T)
        print('Omegas: {}'.format(omegas))
        print('Zetas: {}'.format(zetas))
        shaped_pos = shaping.shaped_input(shaping.step_input,t,Shaper,(Xd - X0)[2])
        des_states[:,2] = shaped_pos
    #pdb.set_trace()
    # Closure which computes the state derivatives for odeint.
    def gradient(x, currtime):
        # Interpolate the disturbance and reference at the current time.
        if Disturbance is not None:
            for i in range(len(x)):
                Dist[i] = np.interp(currtime,t,Disturbance[:,i])
        for i in range(0,len(x)):
            X_ref[i] = np.interp(currtime,t,des_states[:,i])
        # Feedback control on the reference tracking error.
        u = np.matmul(Gains,(X_ref - x))
        if optimize == False:
            # Apply actuator saturation unless optimization mode is on.
            u = np.clip(u,-f_max,f_max)
        sol = np.matmul(A,x) + B.T * u
        sol += Dist
        return np.array(sol)[0]
    return odeint(gradient, X0.flatten().tolist(), t),Shaper,Gains
# Top-level study: compare an experimental impulse-pair shaper against
# unshaped, ZV-shaped and LQR responses, then plot effort/angle/position.
# NOTE(review): relies on X0, t, Xd, l, f_max, g, folder, Disturbance and
# helper functions defined earlier in the file (outside this excerpt).
A = np.array([[0, 1, 0, 0],
              [-g/l, 0, 0, 0],
              [0, 0, 0, 1],
              [0, 0, 0, 0]])
B = np.array([[0],[1/l],[0],[1]])
Gains = np.array([0,0,0,0])
# Natural frequency of the pendulum mode, converted to Hz.
omega = np.sqrt(g / l)
omega /= 2 * np.pi
tau = np.pi / (omega)
# Number of impulse pairs in the experimental shaper.
m = 2
t_shape = np.arange(0,tau,tau/m)
#K = np.exp(-zeta1 * np.pi / np.sqrt(1 - zeta1**2))
K = 1
test_shaper = np.zeros([2*m,2])
def impulse_pair(t,K):
    """Return a 2x2 [time, amplitude] impulse pair: one impulse at t and a
    K-weighted partner at t + tau, amplitudes normalized by m and (1+K)."""
    A_1 = (1 / m) / (1 + K)
    A_2 = (K / m) / (1 + K)
    T_2 = t + tau
    pair = np.array([[t,A_1],
                     [T_2, A_2]])
    return pair
# Assemble and time-sort the experimental shaper from the impulse pairs.
for i in range(len(t_shape)):
    test_shaper[2*i:2*(i+1),:] = impulse_pair(t_shape[i],K)
test_shaper = test_shaper[test_shaper[:,0].argsort()]
# Simulate the four control strategies under identical conditions.
test_response, test_shaper, Gains = response(X0,t,Xd,[l,f_max],test_shaper,optimize=True,Disturbance=Disturbance)
unshaped_response, Shaper, Gains = response(X0,t,Xd,[l,f_max],'Unshaped',optimize=True,Disturbance=Disturbance)
shaped_response, ZV_Shaper, Gains = response(X0,t,Xd,[l,f_max],'ZV',optimize=True,Disturbance=Disturbance)
lqr_response, UnShaper, LQR_Gains = response(X0,t,Xd,[l,f_max],'LQR',optimize=True,Disturbance=Disturbance)
shaped_effort = actuator_effort(shaped_response,Gains,ZV_Shaper)
lqr_effort = actuator_effort(lqr_response,LQR_Gains,UnShaper)
test_effort = actuator_effort(test_response,Gains,test_shaper)
print('LQR Gains: {}'.format(LQR_Gains))
# Sensitivity curve of the two-mode ZV shaper, then comparison plots.
freq,amp = genplt.sensplot(ZV_Shaper,0,1,0)
genplt.compare_responses(freq,
                         amp[:,0],' ',
                         #np.ones_like(zvic_amp[:,0]) * 5,'Vtol',
                         name_append='ZV_2modeSens',
                         xlabel=r'Frequency (Hz)',ylabel='Percent Vibration',
                         folder=folder,grid=False,save_data=False,ncol=2,ymax=0.1
                         )
genplt.compare_responses(t,
                         shaped_effort,'ZV',
                         #lqr_effort,'LQR',
                         test_effort,'Test',
                         name_append='Effort',
                         xlabel='Time (s)',ylabel='Actuator Effort (N)',
                         folder=folder,grid=False,save_data=False,ncol=1,legend_loc='upper right',ymax=0.1,
                         )
genplt.compare_responses(t,
                         #np.rad2deg(unshaped_response[:,0]),'Unshaped',
                         np.rad2deg(shaped_response[:,0]),'ZV',
                         np.rad2deg(lqr_response[:,0]),'LQR',
                         name_append='Theta_Unshaped',
                         xlabel='Time (s)',ylabel='Angle (deg)',
                         folder=folder,grid=False,save_data=False,ncol=2,legend_loc='upper right',ymax=0.1,
                         )
genplt.compare_responses(t,
                         shaped_response[:,3],'ZV',
                         lqr_response[:,3],'LQR',
                         name_append='Xdot_Unshaped',
                         xlabel='Time (s)',ylabel=r'$x$ Velocity ($\frac{\mbox{m}}{\mbox{s}}$)',
                         folder=folder,grid=False,save_data=False,ncol=1,legend_loc='upper right',ymax=0.1,
                         )
genplt.compare_responses(t,
                         shaped_response[:,2],'ZV',
                         lqr_response[:,2],'LQR',
                         name_append='X_Unshaped',
                         xlabel='Time (s)',ylabel=r'$x$ (m)',
                         folder=folder,grid=False,save_data=False,ncol=1,legend_loc='lower right',ymax=0.1,
                         )
|
import math
import pyproj
# Sample GPS fixes as (latitude_deg, longitude_deg, altitude_m) tuples,
# used by run_test() below to compare the two ECEF conversions.
coords = [
    (37.4001100556, -79.1539111111, 208.38),
    (37.3996955278, -79.153841, 208.48),
    (37.3992233889, -79.15425175, 208.18),
    (37.3989114167, -79.1532775833, 208.48),
    (37.3993285556, -79.1533773333, 208.28),
    (37.3992801667, -79.1537883611, 208.38),
    (37.3992441111, -79.1540981944, 208.48),
    (37.3992616389, -79.1539428889, 208.58),
    (37.3993530278, -79.1531711944, 208.28),
    (37.4001223889, -79.1538085556, 208.38),
    (37.3992922222, -79.15368575, 208.28),
    (37.3998074167, -79.1529132222, 208.18),
    (37.400068, -79.1542711389, 208.48),
    (37.3997516389, -79.1533794444, 208.38),
    (37.3988933333, -79.1534320556, 208.38),
    (37.3996279444, -79.154401, 208.58),
]
def gps_to_ecef_pyproj(lat, lon, alt):
    """Convert geodetic WGS84 coordinates (degrees, meters) to ECEF meters.

    Fix: pyproj.transform() and Proj-to-Proj transforms are deprecated
    since pyproj 2; use the Transformer API instead. Same (x, y, z) return.
    """
    transformer = pyproj.Transformer.from_crs(
        {"proj": "latlong", "ellps": "WGS84", "datum": "WGS84"},
        {"proj": "geocent", "ellps": "WGS84", "datum": "WGS84"},
    )
    # Transformer takes coordinates in (lon, lat, alt) order here, matching
    # the old latlong Proj definition.
    x, y, z = transformer.transform(lon, lat, alt, radians=False)
    return x, y, z
def gps_to_ecef_custom(lat, lon, alt):
    """Convert geodetic WGS84 coordinates (degrees, meters) to ECEF meters
    using the closed-form ellipsoid equations."""
    phi = lat * (math.pi / 180.0)
    lam = lon * (math.pi / 180.0)
    # WGS84 ellipsoid: semi-major axis and inverse flattening.
    semi_major = 6378137.0
    inv_flattening = 298.257223563
    flattening = 1 / inv_flattening
    # First eccentricity squared.
    ecc_sq = 1 - (1 - flattening) * (1 - flattening)
    sin_phi = math.sin(phi)
    cos_phi = math.cos(phi)
    # Prime vertical radius of curvature at this latitude.
    radius = semi_major / math.sqrt(1 - ecc_sq * sin_phi * sin_phi)
    x = (radius + alt) * cos_phi * math.cos(lam)
    y = (radius + alt) * cos_phi * math.sin(lam)
    z = (radius * (1 - ecc_sq) + alt) * sin_phi
    return x, y, z
def run_test():
    """Print the pyproj and hand-rolled ECEF conversions side by side for
    every sample fix, for eyeball comparison."""
    for lat, lon, alt in coords:
        print("pyproj", gps_to_ecef_pyproj(lat, lon, alt))
        print("custom", gps_to_ecef_custom(lat, lon, alt))


run_test()
|
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from emailr.settings.config import DB_USERNAME, DB_PASSWORD, DB_ENDPOINT, \
DB_DATABASE
'''Database model for the application.'''
# The code below was mostly copied from:
# http://flask.pocoo.org/docs/0.12/patterns/sqlalchemy/#declarative
# engine = create_engine('sqlite:///emailr.db', convert_unicode=True)
# NOTE(review): the URL is built by plain concatenation — a password with
# URL-reserved characters would break it; consider sqlalchemy URL.create.
engine = create_engine('postgresql://' + DB_USERNAME + ':' + DB_PASSWORD + '@'
                       + DB_ENDPOINT + '/' + DB_DATABASE,
                       client_encoding='utf8')
# Thread-local session factory; writes happen only on explicit commit.
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False, bind=engine))
Base = declarative_base()
# Enables Flask-style Model.query lookups on declarative models.
Base.query = db_session.query_property()
def init_db():
    """Create all tables registered on Base.metadata against the engine."""
    # import all modules here that might define models so that
    # they will be registered properly on the metadata. Otherwise
    # you will have to import them first before calling init_db()
    import emailr.models
    # from emailr.models import User, Event
    Base.metadata.create_all(bind=engine)
|
# -*- coding: utf-8 -*-
"""
S3: some wrong functions on binarytrees
"""
from algopy import bintree
def searchBST(x, B):
    """Search for key x in BST B.

    Returns the node whose key equals x, or None when x is absent.

    Bug fix: the recursive calls previously discarded their result (no
    `return`), so every search below the root answered None.
    """
    if B is None:
        return None
    if x == B.key:
        return B
    if x < B.key:
        return searchBST(x, B.left)
    return searchBST(x, B.right)
def insertBST(x, B):
    """Insert key x into BST B and return the (possibly new) root.

    Bug fix: the original returned the freshly created leaf from the
    recursion without relinking it to its parent, so the inserted node was
    lost. The subtree links are now reassigned on the way back up.
    """
    if B is None:
        return bintree.BinTree(x, None, None)
    if x == B.key:
        # Duplicate keys are ignored, as before.
        return B
    if x < B.key:
        B.left = insertBST(x, B.left)
    else:
        B.right = insertBST(x, B.right)
    return B
def copy(B):
    """Return a deep copy of binary tree B (None for an empty tree).

    Bug fixes: the empty-tree branch referenced the misspelled name
    `binTree` (NameError), and the copied root shared its child nodes with
    the original, so mutating one tree changed the other. The children are
    now copied recursively.
    """
    if B is None:
        return None
    return bintree.BinTree(B.key, copy(B.left), copy(B.right))
def copy2(B, C = None):
    """Return a deep copy of binary tree B (None for an empty tree).

    The C parameter is kept for backward compatibility but is no longer
    used: rebinding a parameter inside the function never reached the
    caller, so the original accumulated nothing. It also copied the right
    subtree into C.left and shared children with the source tree — all
    fixed by building the copy bottom-up from the recursive results.
    """
    if B is None:
        return None
    return bintree.BinTree(B.key, copy2(B.left), copy2(B.right))
|
# Script to web scrape vulnerability information into mongodb
from bs4 import BeautifulSoup
from pymongo import MongoClient
from urllib.request import Request, urlopen
import csv
import os
# Rebuild the MongoDB `vulnerabilities` collection from scratch each run.
client = MongoClient('localhost', 27017)
db = client.project
vulns = db.vulnerabilities
vulns.drop()
vulns = db.vulnerabilities
# Ordinal encodings for the scraped CVSS text values:
# mapping1 = privileges required, mapping2 = access gained.
mapping1 = { 'High': 2, 'Low': 1, 'None': 0 }
mapping2 = { 'Admin': 2, 'User': 1, 'None': 0 }
file_input = input("Enter CSV file (including extension) to read CVEs from: ")
filename = file_input.split("/")[-1]
directory = file_input.replace(filename, '')
if directory:
    # NOTE(review): the working directory is changed but never restored —
    # the restoring chdir below is commented out; confirm intentional.
    curr_dir = os.getcwd()
    os.chdir(directory)
    print(os.getcwd())
try:
    with open(filename) as csv_file:
        # os.chdir(curr_dir)
        csv_reader = csv.reader(csv_file)
        for row in csv_reader:
            # First CSV column is the CVE identifier (e.g. CVE-2021-1234).
            CVE = row[0]
            # Scrape "access gained" from cvedetails.com.
            url = "https://www.cvedetails.com/cve/" + CVE
            req = Request(url, headers={ 'User-Agent': 'Mozilla/5.0' })
            html_doc = urlopen(req).read()
            soup = BeautifulSoup(html_doc, 'lxml')
            table = soup.find("table", { 'id': 'cvssscorestable', 'class': 'details' })
            # NOTE(review): row index 6 is tied to the current cvedetails.com
            # markup — brittle if the site layout changes.
            field_row = table.findAll("tr")[6]
            field_value = field_row.find("span").string
            gained_access = mapping2[field_value]
            # Scrape privileges required (CVSSv3) and attack vector (CVSSv2)
            # from the NVD detail page.
            url = "https://nvd.nist.gov/vuln/detail/" + CVE
            html_doc = urlopen(url)
            soup = BeautifulSoup(html_doc, 'lxml')
            tag = soup.find('span', { 'data-testid': 'vuln-cvssv3-pr' })
            if tag:
                field_value = tag.string.strip()
            else:
                field_value = "None" # By default, "None" privileges are required
            required_priv = mapping1[field_value]
            tag = soup.find('span', { 'data-testid': 'vuln-cvssv2-av' })
            attack_vector = tag.string.strip()
            # Add entry
            document = {}
            document['cveName'] = CVE
            document['gained_access'] = gained_access
            document['required_priv'] = required_priv
            document['access_vector'] = attack_vector
            vulns.insert_one(document)
    print("Successfully imported CVE details")
except IOError:
    print("File {} does not exist".format(filename))
    exit()
|
import jax.numpy as jnp
def zero_mean(X):
    """Constant-zero mean function: one zero per row of X.

    Returns a jnp array of shape (X.shape[0],).
    """
    num_points = X.shape[0]
    return jnp.zeros(num_points)
|
import math
import numpy as np
class OffloadPCA:
    """Generates Arduino C code that applies a fitted sklearn-style PCA
    transform (and optional standard scaling) on-device.

    `model` must expose n_components_, mean_ and components_ (sklearn PCA
    attributes); `scaler` must expose mean_ and scale_ (StandardScaler) or
    be falsy for no scaling.
    """
    def __init__(self, model, scaler):
        self.scaler = scaler
        self.n_components = model.n_components_
        self.mean_vector = model.mean_
        # Transposed to [feature][component] layout so the generated C code
        # can accumulate column-wise.
        self.pca_components = model.components_.T
        self.dim = len(self.mean_vector)
        if scaler:
            self.u = scaler.mean_
            # Reciprocal of scale so the device multiplies instead of divides.
            self.p = np.reciprocal(scaler.scale_)
    def get_params(self):
        """Return the PCA parameters as a dict."""
        return {'Number_PCA_Components':self.n_components, 'Mean_Vector':self.mean_vector, 'PCA_Components':self.pca_components}
    def get_pca_params_string(self):
        """Render the component matrix and mean vector as C initializer text."""
        pca_components_terms = []
        for term in self.pca_components:
            temp = '{' + ', '.join([str(x) for x in term]) + '}'
            pca_components_terms.append(temp)
        str_pca_components = ', '.join(pca_components_terms)
        str_mean_vector = ', '.join([str(x) for x in self.mean_vector])
        return str_pca_components, str_mean_vector
    def get_scaling_params_string(self):
        """Render the scaler mean (u) and reciprocal scale (p) as C initializer text."""
        str_u = ', '.join([str(x) for x in self.u])
        str_p = ', '.join([str(x) for x in self.p])
        return str_u, str_p
    def unscaled_pca_arduino_code(self):
        """Return an Arduino sketch that centers a feature vector and
        projects it onto the PCA components (no standard scaling)."""
        str_pca_components, str_mean_vector = self.get_pca_params_string()
        code = f"""double pca_components[{str(self.dim)}][{str(self.n_components)}] = {{{str_pca_components}}};
double mean_vector[] = {{{str_mean_vector}}};
void setup() {{
Serial.begin(9600);
}}
void loop() {{
//Data Section: To Be Coded Manually
float data[{str(self.dim)}]; //This is your feature vector. Retrive your data into this array.
//ML Inference Section
for(int i=0; i<{str(self.dim)}; i++)
{{
data[i] = data[i] - mean_vector[i]; //Center the feature vector.
}}
double data_pca_transformed[{str(self.n_components)}] = {{ 0.0 }};
for(int col=0; col<{str(self.n_components)}; col++)
{{
double temp = 0.0;
for(int i=0; i<{str(self.dim)}; i++)
{{
data_pca_transformed[col] += data[i] * pca_components[i][col];
}}
}}
//Do something with the PCA transformed feature vector: data_pca_transformed.
Serial.println("PCA Transformation Complete");
delay(1000);
}}"""
        return code
    def scaled_pca_arduino_code(self):
        """Return an Arduino sketch that standard-scales, centers and
        projects a feature vector onto the PCA components."""
        str_pca_components, str_mean_vector = self.get_pca_params_string()
        str_u, str_p = self.get_scaling_params_string()
        code = f"""double pca_components[{str(self.dim)}][{str(self.n_components)}] = {{{str_pca_components}}};
double mean_vector[] = {{{str_mean_vector}}};
double u[] = {{{str_u}}};
double p[] = {{{str_p}}};
void setup() {{
Serial.begin(9600);
}}
void loop() {{
//Data Section: To Be Coded Manually
float data[{str(self.dim)}]; //This is your feature vector. Retrive your data into this array.
//ML Inference Section
for(int i=0; i<{str(self.dim)}; i++)
{{
data[i] = (((data[i] - u[i]) * p[i]) - mean_vector[i]); //Standard Scaling and Centering.
}}
double data_pca_transformed[{str(self.n_components)}] = {{ 0.0 }};
for(int col=0; col<{str(self.n_components)}; col++)
{{
double temp = 0.0;
for(int i=0; i<{str(self.dim)}; i++)
{{
data_pca_transformed[col] += data[i] * pca_components[i][col];
}}
}}
//Do something with the PCA transformed feature vector: data_pca_transformed.
Serial.println("PCA Transformation Complete");
delay(1000);
}}"""
        return code
    def get_arduino_code(self):
        """Return the sketch variant matching whether a scaler was provided."""
        if self.scaler:
            return self.scaled_pca_arduino_code()
        return self.unscaled_pca_arduino_code()
|
""" Common class/style used on tables """
table_class = ['table',
'table-bordered',
'table-striped',
'vertical-table',
]
table_style = ['margin-left: auto;',
'margin-right: auto;',
'width: auto;',
]
table_args = "class='{}' style='{}'".format(' '.join(table_class), ' '.join(table_style))
caption_args = "class='h3' style='text-align: center;'"
|
import geoipgen
from os import _exit
from gui import time
import connection as connection
def byCountryCode(country_code):
    """Pick one unscanned CIDR for `country_code`, expand it to individual
    IPs inserted into the `ip` table, then mark the CIDR scanned.

    Security fix: all queries now use driver parameter substitution (%s
    placeholders) instead of string concatenation, which was an SQL
    injection / quoting hazard.
    """
    cur = connection.mydb.cursor()
    cur.execute("select cidr from cidr where country_code=%s and scaned=0 and scanning=0 ORDER BY RAND() LIMIT 1",
                (country_code,))
    resultado = cur.fetchall()
    cur.close()
    cidr = resultado[0][0]
    ip_list = geoipgen.generate.rangeIP(cidr)
    # Claim the CIDR so concurrent workers skip it.
    cur = connection.mydb.cursor()
    cur.execute("UPDATE cidr SET scanning=1 WHERE cidr=%s", (cidr,))
    connection.mydb.commit()
    cur.close()
    for ip in ip_list:
        try:
            cur = connection.mydb.cursor()
            cur.execute("INSERT INTO ip (cidr,ip) VALUES (%s,%s)", (cidr, ip))
            connection.mydb.commit()
            cur.close()
            print("\033[34m[\033[01m"+time()+"]\033[0m\033[34m Added ("+ip+")\033[0m -\033[36m "+country_code+"\033[0m")
        except:
            # Assumed to be a duplicate-key error from the unique index.
            print("\033[31m[ "+ip+" DUPLICATE ENTRY ]\033[0m")
            # NOTE(review): aborting the whole process on the first duplicate
            # is inconsistent with byCIDR/randomCIDR, which just skip —
            # confirm this hard exit is intentional.
            _exit(0)
    # Mark the CIDR done and release the scanning claim.
    cur = connection.mydb.cursor()
    cur.execute("UPDATE cidr SET scaned=1 WHERE cidr=%s", (cidr,))
    connection.mydb.commit()
    cur.close()
    cur = connection.mydb.cursor()
    cur.execute("UPDATE cidr SET scanning=0 WHERE cidr=%s", (cidr,))
    connection.mydb.commit()
    cur.close()
def byCIDR(cidr):
    """Expand the given CIDR into individual IPs inserted into the `ip`
    table, then mark the CIDR scanned.

    Security fix: queries use driver parameter substitution (%s) instead of
    string concatenation, removing an SQL injection vector.
    """
    cur = connection.mydb.cursor()
    cur.execute("select cidr,country_code from cidr where cidr=%s", (cidr,))
    resultado = cur.fetchall()
    cur.close()
    cidr = resultado[0][0]
    country_code = resultado[0][1]
    ip_list = geoipgen.generate.rangeIP(cidr)
    # Claim the CIDR so concurrent workers skip it.
    cur = connection.mydb.cursor()
    cur.execute("UPDATE cidr SET scanning=1 WHERE cidr=%s", (cidr,))
    connection.mydb.commit()
    cur.close()
    for ip in ip_list:
        try:
            cur = connection.mydb.cursor()
            cur.execute("INSERT INTO ip (cidr,ip) VALUES (%s,%s)", (cidr, ip))
            connection.mydb.commit()
            cur.close()
            print("\033[35m[\033[01m"+time()+"]\033[0m\033[35m Added ("+ip+") - "+country_code+"\033[0m")
        except:
            # Best effort: duplicate rows are reported and skipped.
            print("\033[31m[ "+ip+" DUPLICATE ENTRY ]\033[0m")
    # Mark the CIDR done and release the scanning claim.
    cur = connection.mydb.cursor()
    cur.execute("UPDATE cidr SET scaned=1 WHERE cidr=%s", (cidr,))
    connection.mydb.commit()
    cur.close()
    cur = connection.mydb.cursor()
    cur.execute("UPDATE cidr SET scanning=0 WHERE cidr=%s", (cidr,))
    connection.mydb.commit()
    cur.close()
def randomCIDR():
    """Pick a random unscanned CIDR, expand it into individual IPs inserted
    into the `ip` table, then mark it scanned.

    Security/consistency fix: the UPDATE/INSERT statements use driver
    parameter substitution (%s) instead of string concatenation.
    """
    cur = connection.mydb.cursor()
    cur.execute("select cidr,country_code from cidr where scaned=0 and scanning=0 ORDER BY RAND() LIMIT 1")
    resultado = cur.fetchall()
    cur.close()
    cidr = resultado[0][0]
    country_code = resultado[0][1]
    ip_list = geoipgen.generate.rangeIP(cidr)
    # Claim the CIDR so concurrent workers skip it.
    cur = connection.mydb.cursor()
    cur.execute("UPDATE cidr SET scanning=1 WHERE cidr=%s", (cidr,))
    connection.mydb.commit()
    cur.close()
    for ip in ip_list:
        try:
            cur = connection.mydb.cursor()
            cur.execute("INSERT INTO ip (cidr,ip) VALUES (%s,%s)", (cidr, ip))
            connection.mydb.commit()
            cur.close()
            print("\033[35m[\033[01m"+time()+"]\033[0m\033[35m Added ("+ip+") - country: "+country_code+"\033[0m")
        except:
            # Best effort: duplicate rows are reported and skipped.
            print("\033[31m[ "+ip+" DUPLICATE ENTRY ]\033[0m")
    # Mark the CIDR done and release the scanning claim.
    cur = connection.mydb.cursor()
    cur.execute("UPDATE cidr SET scaned=1 WHERE cidr=%s", (cidr,))
    connection.mydb.commit()
    cur.close()
    cur = connection.mydb.cursor()
    cur.execute("UPDATE cidr SET scanning=0 WHERE cidr=%s", (cidr,))
    connection.mydb.commit()
    cur.close()
from jinja2 import Environment, select_autoescape, FileSystemLoader
class Config:
    """Static-site generator configuration: content/asset paths and the
    shared Jinja environment."""
    # Markup
    CONTENT_POST_DIR = "./content/posts"
    # Static directory
    IMAGE_DIR = "./static/img"
    TEMPLATE_DIR = "./static/templates"
    CSS_DIR = "./static/stylesheets"
    # Jinja environment variable
    # NOTE(review): autoescape=False means templates must never render
    # untrusted input — confirm all content is author-controlled.
    JINJA_ENV = Environment(
        loader=FileSystemLoader(TEMPLATE_DIR),
        autoescape=False
    )
    # Build
    BUILD_DIR = "./build"
|
import os
import sys
import subprocess
import json
# Hosts to sweep; the commented-out lists are previous selections.
cpumachines = [3,4,5,6,7,8,9,10,11,12,13,14,16,17,18,19,20,21,22,23,24,25,26,28,29,31,32,33,34,35,36,37,38]
gpumachines = [3,5,6,7,10,11,12,13,14]#[2,3,4,6,7,8,10,11,12,14,15,20]#[2,3,5,7,8,10,11,12,13,14,20]

# Build one "kill all of this user's processes" ssh command per machine.
cmd = []
for m in cpumachines:
    cmd.append('ssh -f vision%02d \'killall -u ztzhang \''%m)
for m in gpumachines:
    # NOTE(review): GPU hosts use `ssh -t` while CPU hosts use `ssh -f` —
    # confirm the asymmetry is intentional.
    cmd.append('ssh -t visiongpu%02d \'killall -u ztzhang\' '%(m))

# Fix: Python-2-only `print` statements replaced with print() calls; the
# single-argument call form behaves identically under both Python 2 and 3.
print('HA!')
for c in cmd:
    print(c)
    subprocess.call(c,shell=True)
|
import sys
import numpy as np
import pandas as pd
sys.path.append("../util/")
import dataloader as dl
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
class MatrixFactorization:
    """Latent-factor recommender: factorizes the user-item rating matrix
    into user and item factor matrices trained by batch gradient descent.

    NOTE: this module uses Python 2 print statements (see
    display_model_setting) — it targets Python 2.
    """
    def __init__(self, dim_of_factor=10, learning_rate=1e-5, regularization=0.015, max_iter=100, tolerance=1e-5):
        self.dim_of_factor = dim_of_factor  # dimension of factor matrix
        self.learning_rate = learning_rate  # learning rate in training process
        self.max_iter = max_iter  # max number of training iterations
        self.regularization = regularization  # L2 regularization to avoid overfitting for both user and item factor matrices
        self.tolerance = tolerance  # tolerance value to determine convergence
        self.ratings = None  # rating instance of Ratings
        self.user_factor_matrix = None  # matrix of user factors
        self.item_factor_matrix = None  # matrix of item factors
        self.complete_rating_matrix = None  # final rating matrix with missing rating filled by the factor matrix production
        self.loss_val = np.inf  # value of loss function for each iteration
    # training process: fit the user factor matrix and item factor matrix using least square optimization
    def fit(self, trainset_feature, trainset_target):
        """Train on (userId, itemId, timestamp) features and rating targets."""
        self.fit_by_raw_matrix_factorization(trainset_feature, trainset_target)
    # apply matrix factorization to the raw rating matrix
    def fit_by_raw_matrix_factorization(self, trainset_feature, trainset_target):
        """Gradient-descent factorization of the raw rating matrix.

        Minimizes squared reconstruction error on observed ratings plus an
        L2 penalty on both factor matrices, until max_iter or until the
        loss falls below tolerance.
        """
        trainset_df = pd.DataFrame(trainset_feature, columns=['userId', 'itemId', 'timestamp'])
        trainset_df['rating'] = pd.Series(trainset_target)
        self.ratings = dl.Ratings(trainset_df)  # load rating data into user-item rating matrix
        # Random initialization of both factor matrices.
        self.user_factor_matrix = np.matrix(np.random.rand(self.ratings.num_of_users, self.dim_of_factor))
        self.item_factor_matrix = np.matrix(np.random.rand(self.ratings.num_of_items, self.dim_of_factor))
        max_rating = self.ratings.max_rating
        min_rating = self.ratings.min_rating
        observe_matrix = (self.ratings.rating_matrix == 0)  # initialize and select unrated values to set zero
        count_iter = 0  # iteration counter
        self.complete_rating_matrix = self.user_factor_matrix * self.item_factor_matrix.transpose()
        #self.complete_rating_matrix[self.complete_rating_matrix > max_rating] = max_rating # rescale predicted ratings by top and bottom limits
        #self.complete_rating_matrix[self.complete_rating_matrix < min_rating] = min_rating
        # Error only on observed (non-zero) ratings.
        err_matrix = self.ratings.rating_matrix - self.complete_rating_matrix
        err_matrix[observe_matrix] = 0
        self.loss_val = np.sum(np.power(err_matrix, 2)) \
            + self.regularization / 2 * np.sum(np.power(self.user_factor_matrix, 2)) \
            + self.regularization / 2 * np.sum(np.power(self.item_factor_matrix, 2))
        while (count_iter<=self.max_iter and self.loss_val>self.tolerance):
            count_iter += 1
            # Gradient steps with L2 shrinkage on each factor matrix.
            user_factor_matrix_update = self.learning_rate * (err_matrix * self.item_factor_matrix
                                                              - self.regularization * self.user_factor_matrix)
            item_factor_matrix_update = self.learning_rate * (err_matrix.transpose() * self.user_factor_matrix
                                                              - self.regularization * self.item_factor_matrix)
            self.user_factor_matrix += user_factor_matrix_update
            self.item_factor_matrix += item_factor_matrix_update
            self.complete_rating_matrix = self.user_factor_matrix * self.item_factor_matrix.transpose()
            #self.complete_rating_matrix[self.complete_rating_matrix > max_rating] = max_rating # rescale predicted ratings
            #self.complete_rating_matrix[self.complete_rating_matrix < min_rating] = min_rating
            err_matrix = self.ratings.rating_matrix - self.complete_rating_matrix
            err_matrix[observe_matrix] = 0
            self.loss_val = np.sum(np.power(err_matrix, 2)) \
                + self.regularization / 2 * np.sum(np.power(self.user_factor_matrix, 2)) \
                + self.regularization / 2 * np.sum(np.power(self.item_factor_matrix, 2))
    # given test feature, predict the rating
    def predict(self, testset_feature):
        """Predict a rating per (userId, itemId, ...) row.

        Falls back to the user's rating bias, the item's rating bias, or
        the global average when user/item were unseen during training.
        """
        results = list()
        for x in testset_feature:
            userId = x[0]
            itemId = x[1]
            find_user = self.ratings.check_user(userId)
            find_item = self.ratings.check_item(itemId)
            if find_user and find_item:
                userIndex = self.ratings.users_indices[userId]
                itemIndex = self.ratings.items_indices[itemId]
                overall_rating = self.complete_rating_matrix[userIndex, itemIndex]
                results.append(overall_rating)
            elif find_user:
                results.append(self.ratings.get_user_rating_bias(userId))
            elif find_item:
                results.append(self.ratings.get_item_rating_bias(itemId))
            else:
                results.append(self.ratings.get_rating_average())
        return results
    # evaluate the model
    def score(self, testset_feature, testset_target):
        """Return RMSE of predictions against the test targets."""
        predictions = self.predict(testset_feature)
        return np.sqrt(mean_squared_error(testset_target, predictions))
    # display the setting of the model
    def display_model_setting(self):
        """Print the hyperparameter configuration (Python 2 print syntax)."""
        print "Model Setting:"
        print "\tnumber of factors: %d" % self.dim_of_factor
        print "\tlearning rate: %f" % self.learning_rate
        print "\tmax number of iterations: %d" % self.max_iter
        print "\tconvergence tolerance: %f" % self.tolerance
        print "\tregularization factor: %f" % self.regularization
# main function to test module
def main():
    """Smoke test: train/evaluate MatrixFactorization on the MovieLens CSV
    with a 90/10 split (Python 2 print syntax)."""
    #datafilepath = '../data/amazon_review/ratings_x.csv'
    datafilepath = '../data/ml-latest-small/ratings_x.csv'
    dataset = dl.get_rating_table_from_csv(datafilepath)
    trainset, testset = train_test_split(dataset, test_size=0.1, random_state=0)
    print "training and test size:"
    print trainset.size, testset.size
    trainset_feature = trainset[['userId', 'itemId', 'timestamp']].values
    trainset_target = trainset['rating'].values
    testset_feature = testset[['userId', 'itemId', 'timestamp']].values
    testset_target = testset['rating'].values
    mf = MatrixFactorization(dim_of_factor=15, max_iter=100)
    mf.display_model_setting()
    mf.fit(trainset_feature, trainset_target)
    predictions = mf.predict(testset_feature)
    print "Prediction vs Actual Value"
    print zip(predictions, testset_target)
    rmse = mf.score(testset_feature, testset_target)
    print "RMSE:"
    print rmse
if __name__ == "__main__":
    main()
from geopy.geocoders import Nominatim
from geopy import distance
import ipregistry
# in the scope of chatbot responses, the Location class is solely responsible for distanceByLatLong.
# getLocation is a helper function because many of the requests require lat/long coords
class Location(object):
    """Geolocation helpers for chatbot responses.

    Fix: both methods were defined without `self` and without
    @staticmethod, so they only worked when called on the class itself and
    would break on an instance. They are now proper static methods —
    existing `Location.getLocation(...)` call sites are unaffected.

    SECURITY NOTE(review): the ipregistry API key below is hardcoded in
    source control; move it to configuration/an environment variable.
    """

    @staticmethod
    def getLocation(place = None):
        """Resolve a place name to a (latitude, longitude) tuple.

        None, 'here' or 'me' geolocates the caller's public IP via
        ipregistry; anything else is geocoded with Nominatim.
        """
        if place is None or place == 'here' or place == 'me':
            client = ipregistry.IpregistryClient("fs9pbuwmnx5r2g", cache=ipregistry.DefaultCache(maxsize=2048, ttl=600))
            ipInfo = client.lookup()
            #print(ipInfo)
            return (ipInfo.location.get("latitude"), ipInfo.location.get("longitude"))
        else:
            geolocator = Nominatim(user_agent="chatbot")
            location = geolocator.geocode(place)
            # NOTE(review): geocode() returns None for unknown places, which
            # would raise AttributeError here — confirm inputs are validated.
            return (location.latitude, location.longitude)

    @staticmethod
    def distanceByLatLong(entities):
        """Distance in km between the first two located entities; with a
        single entity, distance from the caller's IP to it; 0 when empty."""
        if len(entities) > 0:
            places = entities['wit$location:location']
            place1Coords = Location.getLocation(places[0]['value'])
            if len(places) == 1:
                place2Coords = Location.getLocation()
            else:
                place2Coords = Location.getLocation(places[1]['value'])
            return distance.distance(place1Coords, place2Coords).km
        else:
            return 0
import logging
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils.timezone import utc
from django.contrib.auth import authenticate, login, get_user_model, logout
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib import messages
from django.urls import reverse
from datetime import datetime
from .models import Task
from .forms import LoginForm, TaskForm
from .helper_functions import getTasksCategorized
logger = logging.getLogger(__name__)
# Create your views here.
def loginUser(request):
    """Render the login page (GET) or authenticate POSTed credentials.

    Already-authenticated users are sent straight to the task home page.
    """
    if request.user.is_authenticated:
        return redirect(reverse('tasks:home'))
    elif request.method == 'GET':
        return render(request, 'login.html')
    elif request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            # Usernames are normalized to lowercase before authentication.
            user = authenticate(username=cd['username'].lower(),
                                password=cd['password'])
            if user is not None:
                login(request, user)
                return redirect(reverse('tasks:home'))
            else:
                return render(request, 'login.html', {'error':'Username and password doesn\'t match.'})
        else:
            return render(request, 'login.html', {'error':'Please check again your inputs.'})
def logoutUser(request):
    """End the session for an authenticated user, then show the login page."""
    current_user = request.user
    if current_user.is_authenticated:
        logout(request)
    return redirect(reverse('tasks:login'))
@login_required
def home(request):
    """Task dashboard: superusers see all tasks plus the user list; regular
    users see only their own tasks."""
    user = request.user
    groups = Task.Groups.choices
    if user.is_superuser:
        tasks = getTasksCategorized(tasks=Task.objects.all().order_by('-createdDate'))
        users = User.objects.all()
        context={'tasks':tasks, 'users':users, 'groups':groups, 'colors':Task.Groups.colors}
    else:
        tasks = getTasksCategorized(tasks=Task.objects.filter(user=user))
        context={'tasks':tasks, 'groups':groups, 'colors':Task.Groups.colors}
    # Flash messages accumulated by other views (add/done/delete).
    context['messages']= messages.get_messages(request)
    return render(request, 'home.html', context)
@login_required
def addTask(request):
    """Create a task from POSTed form data; superusers only.

    Fix: corrected the user-facing flash message typo
    ('successfuly' -> 'successfully').
    """
    if not request.user.is_superuser:
        messages.error(request, 'Sorry! You don\'t have permission to do this!')
        return redirect(reverse('tasks:home'))
    if request.method == 'POST':
        task = TaskForm(request.POST)
        if task.is_valid():
            task = task.save()
            messages.success(request, 'Task added successfully!')
            return redirect('tasks:home')
        else:
            logger.debug('errors in form. {post} and {errors}'\
                .format(post=request.POST, errors=task.errors))
            messages.error(request, 'Sorry! Some errors happened!')
    else:
        messages.error(request, 'Method GET unsupported!')
    return redirect(reverse('tasks:home'))
@login_required
def doneTask(request, id):
    """Mark task `id` as done; allowed for the task's owner or a superuser.

    Fix: corrected the user-facing flash message typo
    ('successfuly' -> 'successfully').
    """
    user = request.user
    task = get_object_or_404(Task, id=id)
    if user==task.user or request.user.is_superuser:
        task.status = Task.Statuses.done
        # Store an aware UTC timestamp for the completion time.
        task.doneDate = datetime.utcnow().replace(tzinfo=utc)
        task.save()
        messages.success(request, 'Task status changed to done successfully!')
        return redirect(reverse('tasks:home'))
    messages.error(request, 'Sorry! You don\'t have this permission!')
    return redirect(reverse('tasks:home'))
@login_required
def deleteTask(request, id):
    """Delete task `id`; superusers only.

    Fixes: corrected the flash message typo ('successfuly' ->
    'successfully') and flattened the branch into a guard clause to match
    addTask's permission-check style.
    """
    if not request.user.is_superuser:
        messages.error(request, 'Sorry! You don\'t have permission to do this!')
        return redirect(reverse('tasks:home'))
    task = get_object_or_404(Task, id=id)
    task.delete()
    messages.success(request, 'Task deleted successfully!')
    return redirect(reverse('tasks:home'))
def notMentioned(request):
    """Catch-all view: redirect any URL not matched elsewhere to home."""
    return redirect(reverse('tasks:home'))
|
""" Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
    """Two successive (3x3 Conv2d => BatchNorm => ReLU) stages.

    The optional mid_channels sets the width between the two convolutions;
    when omitted it defaults to out_channels.
    """

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        hidden = mid_channels if mid_channels else out_channels
        stages = [
            nn.Conv2d(in_channels, hidden, kernel_size=3, padding=1),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Apply both conv stages; spatial size is preserved (padding=1)."""
        return self.double_conv(x)
class Down(nn.Module):
    """Encoder stage: halve the spatial resolution with 2x2 max-pooling,
    then apply a DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        pool = nn.MaxPool2d(2)
        conv = DoubleConv(in_channels, out_channels)
        self.maxpool_conv = nn.Sequential(pool, conv)

    def forward(self, x):
        """Downsample then convolve."""
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Decoder stage: upsample x1, pad it to match the skip tensor x2,
    concatenate both along channels, then apply a DoubleConv."""
    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        # if bilinear, use the normal convolutions to reduce the number of channels
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            # NOTE(review): in this variant the bilinear path outputs
            # out_channels // 2 channels (mid width in_channels // 2); the
            # caller must size the next stage accordingly — confirm against
            # the network that instantiates these blocks.
            self.conv = DoubleConv(in_channels, out_channels // 2, in_channels // 2)
        else:
            self.up = nn.ConvTranspose2d(in_channels , in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)
    def forward(self, x1, x2):
        x1 = self.up(x1)
        # input is CHW
        # Pad x1 so its spatial size matches the skip connection x2 (sizes
        # can differ by one pixel after pooling/upsampling odd dimensions).
        diffY = torch.tensor([x2.size()[2] - x1.size()[2]])
        diffX = torch.tensor([x2.size()[3] - x1.size()[3]])
        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2])
        # Skip features first, upsampled features second, along channels.
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)
class OutConv(nn.Module):
    """Output head: 1x1 convolution mapping feature channels to the
    requested number of output channels."""

    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        # Pointwise convolution — spatial dimensions are unchanged.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        out = self.conv(x)
        return out
class UNet(nn.Module):
    """U-Net with one shared encoder and three decoder heads: a class/ID
    mask plus separate U and V coordinate masks (presumably a
    DensePose-style UV regression setup — confirm with the training code).
    """
    def __init__(self, n_channels = 3, out_channels_id = 9, out_channels_uv = 256, bilinear=True):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.out_channels_id = out_channels_id
        self.out_channels_uv = out_channels_uv
        self.bilinear = bilinear
        # Shared encoder.
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        factor = 2 if bilinear else 1
        self.down4 = Down(512, 1024//factor)
        #ID MASK
        # Full four-level decoder with skip connections for the ID head.
        self.up1_id = Up(1024, 512, bilinear)
        self.up2_id = Up(512, 256, bilinear)
        self.up3_id = Up(256, 128, bilinear)
        self.up4_id = Up(128, 64 * factor, bilinear)
        self.outc_id = OutConv(64, out_channels_id)
        #U Mask
        # NOTE(review): the U/V heads decode only two skip levels, then run a
        # chain of 1x1 convs and two plain 2x upsamples to reach full
        # resolution — confirm this asymmetry with the ID head is intended.
        self.up1_u = Up(1024, 512, bilinear)
        self.up2_u = Up(512,512,bilinear)
        self.outc_u1 = OutConv(256, out_channels_uv)
        self.outc_u2 = OutConv(256, out_channels_uv)
        self.outc_u3 = OutConv(256, out_channels_uv)
        self.outc_u4 = OutConv(256, out_channels_uv)
        self.up3_u = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.up4_u = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        #V Mask
        # Mirror of the U head with its own weights.
        self.up1_v = Up(1024, 512, bilinear)
        self.up2_v = Up(512,512,bilinear)
        self.outc_v1 = OutConv(256, out_channels_uv)
        self.outc_v2 = OutConv(256, out_channels_uv)
        self.outc_v3 = OutConv(256, out_channels_uv)
        self.outc_v4 = OutConv(256, out_channels_uv)
        self.up3_v = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.up4_v = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    def forward(self, x):
        """Return (logits_id, logits_u, logits_v) for input image batch x."""
        # Encoder: keep every level's features for the skip connections.
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        # ID mask
        x_id = self.up1_id(x5, x4)
        x_id = self.up2_id(x_id, x3)
        x_id = self.up3_id(x_id, x2)
        x_id = self.up4_id(x_id, x1)
        logits_id = self.outc_id(x_id)
        # U mask
        x_u = self.up1_u(x5, x4)
        x_u = self.up2_u(x_u,x3)
        x_u = self.outc_u1(x_u)
        x_u = self.outc_u2(x_u)
        x_u = self.outc_u3(x_u)
        x_u = self.up3_u(x_u)
        x_u = self.up4_u(x_u)
        logits_u = self.outc_u4(x_u)
        # V mask
        x_v = self.up1_v(x5, x4)
        x_v = self.up2_v(x_v,x3)
        x_v = self.outc_v1(x_v)
        x_v = self.outc_v2(x_v)
        x_v = self.outc_v3(x_v)
        x_v = self.up3_v(x_v)
        x_v = self.up4_v(x_v)
        logits_v = self.outc_v4(x_v)
        return logits_id,logits_u, logits_v
import nltk
import pandas as pd
import pickle
from nltk.corpus import stopwords
from textblob import TextBlob
from flask import Flask, render_template, url_for, request
from editdistance import distance
from random2 import choice
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from jieba import analyse
# SUMMARIZE
# Create a table of word frequency
def word_frequency(text_string):
    """Return a dict mapping each non-stopword token of text_string to its
    occurrence count (English stopwords removed; tokens are not lowercased
    here — lowercasing happens in sent_score)."""
    text_words = nltk.word_tokenize(text_string)
    stop_Words = set(stopwords.words('english'))
    freq_table = {}
    for word in text_words:
        if word not in stop_Words:
            if word not in freq_table.keys():
                freq_table[word] = 1
            else:
                freq_table[word] += 1
    return freq_table
# Split the text into sentences
# Calculating the sentence scores
def sent_score(text_sent, freq_table):
    """Score each sentence by summing the frequencies of its known words.

    Only sentences shorter than 30 space-separated words are scored.
    NOTE(review): the first matching word *assigns* (not adds) the score,
    and the <30-words check sits inside the word loop — confirm both
    quirks are intentional.
    """
    Sentscore = {}
    for sent in text_sent:
        for word in nltk.word_tokenize(sent.lower()):
            if word in freq_table.keys():
                if len(sent.split(' ')) < 30:
                    if sent not in Sentscore.keys():
                        Sentscore[sent] = freq_table[word]
                    else:
                        Sentscore[sent] += freq_table[word]
    return Sentscore
# Calculating the average score of sentences and considering it as a threshold
def ave_score(Sentence_score):
    """Return the truncated mean of the sentence scores (used as a threshold).

    Fix: returns 0 for an empty mapping instead of raising ZeroDivisionError
    (which happened whenever no sentence scored at all).
    """
    if not Sentence_score:
        return 0
    return int(sum(Sentence_score.values()) / len(Sentence_score))
# Getting the summary
def run_summarize(sentence, Sentence_Score, threshold):
    """Return the sentences scoring strictly above threshold, space-separated.

    Fix: the original appended '' + sent, so selected sentences ran together
    with no separator at all; they are now joined with a single space.
    """
    picked = [s for s in sentence
              if s in Sentence_Score and Sentence_Score[s] > threshold]
    return ' '.join(picked)
# Combining all the steps and execute
def summary(text):
    """End-to-end extractive summary: frequencies -> sentence scores -> filter by mean."""
    table = word_frequency(text)
    sentences = nltk.sent_tokenize(text)
    scores = sent_score(sentences, table)
    return run_summarize(sentences, scores, ave_score(scores))
# TRANSLATE
def translate(text_string):
    """Detect the input language with TextBlob and translate the text to English."""
    blob = TextBlob(text_string)
    source_lang = blob.detect_language()
    return blob.translate(from_lang=source_lang, to='en')
# SPAM MAIL RECOGNITION
def predict(text):
    """Classify text as ham (0) or spam (1) with a Naive Bayes model.

    Fix: the original re-read spam.csv and retrained the classifier on every
    single call; the fitted vectorizer and classifier are now trained once on
    first use and cached on the function object.

    Returns a numpy array with one element (0 or 1), as before.
    """
    if not hasattr(predict, '_cached'):
        df = pd.read_csv("spam.csv", encoding="latin-1")
        df.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
        # Features and labels
        df['label'] = df['class'].map({'ham': 0, 'spam': 1})
        cv = CountVectorizer()
        X = cv.fit_transform(df['message'])  # fit vocabulary on the full corpus
        X_train, X_test, y_train, y_test = train_test_split(
            X, df['label'], test_size=0.33, random_state=42)
        clf = MultinomialNB()
        clf.fit(X_train, y_train)
        predict._cached = (cv, clf)
    cv, clf = predict._cached
    vect = cv.transform([text]).toarray()
    return clf.predict(vect)
# SPELL CORRECTION
def find_match(source_word):
    """Finds the best match for a source word.

    Returns a random choice among the dictionary words with the smallest edit
    distance to source_word. Fixes: the word file is now closed (the original
    leaked the handle) and the distance is computed once per word instead of
    up to three times.
    """
    min_dist = 100
    optimal_words = []
    # FIXME: Runtime of this is O(n^2). Can we improve this?
    with open('common_words.txt', 'r') as target_file:
        for line in target_file:
            target_word = line.rstrip()
            d = distance(source_word, target_word)
            if d == min_dist:
                # Same distance as the best so far: add as a candidate
                optimal_words.append(target_word)
            elif d < min_dist:
                min_dist = d
                # re-initialize the list, with only this word as a possible correction
                optimal_words = [target_word]
    return choice(optimal_words)
# Flask app: single-page UI that dispatches to the NLP helpers defined above
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def process():
    """Single route: render the form on GET, run the selected NLP tool on POST.

    Fixes:
    - 'Spell Correction' iterated over the *characters* of the input string
      (``for line in text``), so only the last character was ever corrected;
      it now corrects each whitespace-separated word.
    - ``result`` is initialized so an unrecognized option no longer raises
      UnboundLocalError.
    """
    if request.method == 'POST':
        option = request.form['option']
        text = request.form['text']
        result = ''
        if option == 'Translate':
            blob = TextBlob(text)
            ori = blob.detect_language()
            if ori == 'en':
                # Already English: nothing to translate
                result = text
            else:
                result = translate(text)
        elif option == 'Summarize':
            result = summary(text)
        elif option == 'Sentiment Analyse':
            blob_obj = TextBlob(text)
            sentiment_score = blob_obj.sentiment.polarity
            result = 'sentiment analyse: %.2f (-1.0 negative,1.0positive)' % sentiment_score
        elif option == 'Keyword Extraction':
            keywords = analyse.extract_tags(text)
            result = 'Top3 keyword: %s' % (' / '.join(keywords[:3]))
        elif option == 'Spam Mail Recognition':
            result = predict(text)
        elif option == 'Spell Correction':
            # Correct every word, not just the last character of the input
            result = ' '.join(find_match(word) for word in text.split())
        return render_template('index.html', result=result, originaltext=text)
    return render_template('index.html', name=0)
# Run the development server on all interfaces when executed directly
if __name__ == '__main__':
    app.run(host='0.0.0.0',port=8080)
|
class Container(object):
    """Simple attribute bag: Container(a=1, b=2) exposes .a and .b."""

    def __init__(self, **kwargs):
        # items() works on both Python 2 and 3; the original used the
        # Python-2-only iteritems(), which raises AttributeError on Python 3.
        for name, val in kwargs.items():
            setattr(self, name, val)
|
import pytest
from chess.board import Board
def test_board_init_play_white(start_board):
    """A fresh board: human plays white, white moves first, no move history."""
    assert start_board.player_white is True
    assert start_board.white_to_move is True
    assert start_board.moves == []
def test_board_to_array_white(start_board, game_grid_white):
    """to_array() matches the white-oriented start grid.

    The call is repeated — presumably to verify to_array() has no side
    effects on the board; confirm that intent.
    """
    assert start_board.to_array() == game_grid_white
    assert start_board.to_array() == game_grid_white
def test_board_to_array_black(game_grid_black):
    """to_array() matches the black-oriented grid when the human plays black.

    Repeated call — presumably an idempotency check, as in the white test.
    """
    board = Board(player_white=False)
    assert board.to_array() == game_grid_black
    assert board.to_array() == game_grid_black
def test_board_init_from_array():
    """A board built from an explicit array round-trips through to_array(),
    and moved pieces (pawns, rook, king off their home squares) report
    first_move is False. Indexing is exercised both as board[(r, c)] and
    board[r, c] — the two forms are equivalent."""
    test_board = [
        ["br", "bn", "bb", "bq", "--", "bb", "bn", "br"],
        ["--", "--", "--", "bp", "bk", "bp", "bp", "--"],
        ["--", "--", "bp", "--", "bp", "--", "--", "--"],
        ["bp", "bp", "--", "--", "--", "--", "--", "bp"],
        ["wp", "wp", "--", "--", "--", "--", "--", "wp"],
        ["--", "--", "wp", "--", "wp", "--", "--", "--"],
        ["--", "wp", "--", "wp", "--", "wp", "wp", "wr"],
        ["wr", "wn", "wb", "wq", "wk", "wb", "wn", "--"]
    ]
    board = Board(player_white=True, array=test_board, white_to_move=True)
    assert board.to_array() == test_board
    assert board[(3, 1)].first_move is False
    assert board[(1, 5)].name == 'bp'
    assert board[6, 7].name == 'wr'
    assert board[6, 7].first_move is False
    assert board[1, 4].name == 'bk'
    assert board[1, 4].first_move is False
def test_board_print_white(start_board):
    """str(board) from white's perspective: rank 8 on top, files a-h left to right."""
    expected = "\n".join([
        "br bn bb bq bk bb bn br 8",
        "bp bp bp bp bp bp bp bp 7",
        "-- -- -- -- -- -- -- -- 6",
        "-- -- -- -- -- -- -- -- 5",
        "-- -- -- -- -- -- -- -- 4",
        "-- -- -- -- -- -- -- -- 3",
        "wp wp wp wp wp wp wp wp 2",
        "wr wn wb wq wk wb wn wr 1",
        "-a -b -c -d -e -f -g -h",
    ])
    assert start_board.__str__() == expected
def test_board_print_black():
    """str(board) from black's perspective: ranks and files are mirrored."""
    board = Board(player_white=False, white_to_move=True)
    expected = "\n".join([
        "wr wn wb wk wq wb wn wr 1",
        "wp wp wp wp wp wp wp wp 2",
        "-- -- -- -- -- -- -- -- 3",
        "-- -- -- -- -- -- -- -- 4",
        "-- -- -- -- -- -- -- -- 5",
        "-- -- -- -- -- -- -- -- 6",
        "bp bp bp bp bp bp bp bp 7",
        "br bn bb bk bq bb bn br 8",
        "-h -g -f -e -d -c -b -a",
    ])
    assert board.__str__() == expected
@pytest.fixture
def game_grid_white():
    """Starting position as seen by white: black's back rank is row 0."""
    return [
        ["br", "bn", "bb", "bq", "bk", "bb", "bn", "br"],
        ["bp", "bp", "bp", "bp", "bp", "bp", "bp", "bp"],
        ["--", "--", "--", "--", "--", "--", "--", "--"],
        ["--", "--", "--", "--", "--", "--", "--", "--"],
        ["--", "--", "--", "--", "--", "--", "--", "--"],
        ["--", "--", "--", "--", "--", "--", "--", "--"],
        ["wp", "wp", "wp", "wp", "wp", "wp", "wp", "wp"],
        ["wr", "wn", "wb", "wq", "wk", "wb", "wn", "wr"]
    ]
@pytest.fixture
def game_grid_black():
    """Starting position as seen by black: white's back rank is row 0,
    and the king/queen columns are mirrored relative to the white view."""
    return [
        ["wr", "wn", "wb", "wk", "wq", "wb", "wn", "wr"],
        ["wp", "wp", "wp", "wp", "wp", "wp", "wp", "wp"],
        ["--", "--", "--", "--", "--", "--", "--", "--"],
        ["--", "--", "--", "--", "--", "--", "--", "--"],
        ["--", "--", "--", "--", "--", "--", "--", "--"],
        ["--", "--", "--", "--", "--", "--", "--", "--"],
        ["bp", "bp", "bp", "bp", "bp", "bp", "bp", "bp"],
        ["br", "bn", "bb", "bk", "bq", "bb", "bn", "br"]
    ]
|
class vehicle:
    """A generic vehicle described by its name and basic part counts."""

    def __init__(self, name, wheels, engines, seats):
        self.name, self.wheels = name, wheels
        self.engines, self.seats = engines, seats

    def getDetails(self):
        """Print a one-line summary of this vehicle."""
        print("The vehicle", self.name, "has wheels:", self.wheels,
              "has engines:", self.engines, "has seats:", self.seats)
class flyingVehicle(vehicle):
    """A vehicle that flies.

    NOTE: does not call vehicle.__init__, so name/wheels/engines/seats are
    unset — it only overrides getDetails, which uses its own attributes.
    """
    def __init__(self, wingsCount, cocpitSeats):
        # Bug fix: the original stored self.wingCount but getDetails read
        # self.wingsCount, raising AttributeError on every call.
        self.wingsCount = wingsCount
        self.cocpitSeats = cocpitSeats
    def getDetails(self):
        print("this is a flying vehicle with wings:",self.wingsCount,"and cocpitSeats:",self.cocpitSeats)
# Demo: build a two-wing, two-cockpit-seat flying vehicle and print it
fv = flyingVehicle(2,2)
fv.getDetails()
# class car(vehicle):
# pass
# vehicle1 = vehicle("car",4,1,2)
# car1 = car("Mercedes benz",4,1,2)
# car1.getProperties()
#
# car2 = car("Ecosport",4,1,5)
# car2.getProperties()
|
"""Init file for visualization package."""
from __future__ import division, print_function, absolute_import
from fury._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
"""
@file
@brief Data mostly for the first year.
"""
import os
def anyfile(name, local=True, cache_folder=".", filename=True):
    """
    Return a data file shipped with this module, or download it.

    (The original docstring was copy-pasted from the marathon helper.)
    @param      name            name of the file to fetch
    @param      local           local data (data_1a folder) or web download
    @param      cache_folder    where to cache the data if downloaded a second time
    @param      filename        return the filename (True) or the content (False)
    @return                     filename or text content (str)
    """
    if local:
        # Resolve the file next to this module, under data_1a/
        this = os.path.abspath(os.path.dirname(__file__))
        this = os.path.join(this, "data_1a", name)
        if not os.path.exists(this):
            raise FileNotFoundError(this)
    else:
        import pyensae
        this = pyensae.download_data(name, whereTo=cache_folder)
    if filename:
        return this
    else:
        with open(this, "r") as f:
            return f.read()
def marathon(local=True, cache_folder=".", filename=True):
    """
    Marathon times over cities and years.
    @param      local           local data or web
    @param      cache_folder    where to cache the data if downloaded a second time
    @param      filename        return the filename (True) or the content (False)
    @return                     filename or text content (str)
    """
    return anyfile("marathon.txt", local=local, cache_folder=cache_folder, filename=filename)
def donnees_enquete_2003_television(local=True, cache_folder=".", filename=True):
    """
    Data from a 2003 survey about television habits.
    (The original docstring was copy-pasted from the marathon helper.)
    @param      local           local data or web
    @param      cache_folder    where to cache the data if downloaded a second time
    @param      filename        return the filename (True) or the content (False)
    @return                     filename or text content (str)
    """
    return anyfile("donnees_enquete_2003_television.txt", local=local, cache_folder=cache_folder, filename=filename)
|
import binascii
file = open("rgb.bin", "rb")
for i in range(65536*3):
d0 = file.read(1)
x = binascii.b2a_hex(d0)
d1 = file.read(1)
y = binascii.b2a_hex(d1)
d2 = file.read(1)
z = binascii.b2a_hex(d2)
print hex(i),x,y,z
|
"""
state_ps01.py: Get and set vehicle state, parameter and channel-override information. Modified vehicle_state.py
-removed arming code
It also demonstrates how to observe vehicle attribute (state) changes.
Full documentation is provided at http://python.dronekit.io/examples/vehicle_state.html
"""
from droneapi.lib import VehicleMode
from pymavlink import mavutil
import time
# NOTE: Python 2 / DroneKit 1.x script — local_connect() is injected into the
# namespace by the MAVProxy droneapi module; it is not a normal import.
# First get an instance of the API endpoint
api = local_connect()
# Get the connected vehicle (currently only one vehicle can be returned).
vehicle = api.get_vehicles()[0]
# Get all vehicle attributes (state)
print "\nGet all vehicle attribute values:"
print " Location: %s" % vehicle.location
print " Attitude: %s" % vehicle.attitude
print " Velocity: %s" % vehicle.velocity
print " GPS: %s" % vehicle.gps_0
print " Groundspeed: %s" % vehicle.groundspeed
print " Airspeed: %s" % vehicle.airspeed
print " Mount status: %s" % vehicle.mount_status
# commented out for USB powered testing
#print " Battery: %s" % vehicle.battery
#print " Rangefinder: %s" % vehicle.rangefinder
#print " Rangefinder distance: %s" % vehicle.rangefinder.distance
#print " Rangefinder voltage: %s" % vehicle.rangefinder.voltage
print " Mode: %s" % vehicle.mode.name # settable
print " Armed: %s" % vehicle.armed # settable
# Set vehicle mode and armed attributes (the only settable attributes)
print "Set Vehicle.mode=GUIDED (currently: %s)" % vehicle.mode.name
vehicle.mode = VehicleMode("GUIDED")
vehicle.flush() # Flush to guarantee that previous writes to the vehicle have taken place
while not vehicle.mode.name=='GUIDED' and not api.exit: #Wait until mode has changed
    print " Waiting for mode change ..."
    time.sleep(1)
# Show how to add and remove and attribute observer callbacks (using mode as example)
def mode_callback(attribute):
    # Fired by DroneKit whenever the observed 'mode' attribute changes
    print " CALLBACK: Mode changed to: ", vehicle.mode.name
print "\nAdd mode attribute observer for Vehicle.mode"
vehicle.add_attribute_observer('mode', mode_callback)
print " Set mode=STABILIZE (currently: %s)" % vehicle.mode.name
vehicle.mode = VehicleMode("STABILIZE")
vehicle.flush()
print " Wait 2s so callback invoked before observer removed"
time.sleep(2)
# Remove observer - specifying the attribute and previously registered callback function
vehicle.remove_attribute_observer('mode', mode_callback)
# Get Vehicle Home location ((0 index in Vehicle.commands)
print "\nGet home location"
cmds = vehicle.commands
cmds.download()
cmds.wait_valid()
print " Home WP: %s" % cmds[0]
# Get/Set Vehicle Parameters
print "\nRead vehicle param 'THR_MIN': %s" % vehicle.parameters['THR_MIN']
print "Write vehicle param 'THR_MIN' : 10"
vehicle.parameters['THR_MIN']=10
vehicle.flush()
print "Read new value of param 'THR_MIN': %s" % vehicle.parameters['THR_MIN']
# Demo callback handler for raw MAVLink messages
def mavrx_debug_handler(message):
    # Receives every raw MAVLink message while the callback is registered
    print "Raw MAVLink message: ", message
print "\nSet MAVLink callback handler (start receiving all MAVLink messages)"
vehicle.set_mavlink_callback(mavrx_debug_handler)
print "Wait 1s so mavrx_debug_handler has a chance to be called before it is removed"
time.sleep(1)
print "Remove the MAVLink callback handler (stop getting messages)"
vehicle.unset_mavlink_callback()
## Reset variables to sensible values.
# NOTE(review): 130 is presumably this vehicle's normal THR_MIN — confirm.
print "\nReset vehicle attributes/parameters and exit"
vehicle.mode = VehicleMode("STABILIZE")
vehicle.armed = False
vehicle.parameters['THR_MIN']=130
vehicle.flush()
|
# database object
from app import db
# auth model
from app.auth.models import User
# password / encryption helper tools
from werkzeug import check_password_hash, generate_password_hash
# flask dependencies
from flask import url_for
# utils
import re
import string
import random
# generates random strings
# generates random strings
def generate_random_str(size=1, numonly=False):
    """Return a random string of length ``size`` drawn from lowercase letters
    and digits, or digits only when ``numonly`` is True."""
    pool = string.digits if numonly else string.ascii_lowercase + string.digits
    chars = [random.choice(pool) for _ in range(size)]
    return ''.join(chars)
# valid email check
def valid_email(email):
fmt = bool(re.compile(r"([a-z0-9._]+@[a-z]+.[a-z]+)").match(email))
# format is ok i.e. user@company.extension
if fmt:
username = email.split('@')[0]
company, ext = email.split('@')[1].split('.')
# valid email check: usr@mail.com
if len(username) < 3 or len(company) < 4 or len(ext) < 3:
return False
return True
return False
# Generate username
def generate_unique_username(username):
'''Generates unique username for User data model from a given username.'''
# user already exist
if User.query.filter_by(username=username).first():
# update username
username += generate_random_str(size=1, numonly=True)
# recursion
return generate_unique_username(username)
# return the unique username
return username
# E-mail already exist check
def email_exist(email):
if User.query.filter_by(email=email).first():
return True
return False
# Validate register form
def validate_register_form(form):
email = form['email']
passwd = form['password']
re_passwd = form['re-password']
# did not fill all the fields
if not email or not passwd or not re_passwd: return "Please fill up all fields."
# invalid email
if not valid_email(email): return "Please choose a valid email."
# email already exists
if email_exist(email): return "Email already exists. <a href='%s'>login</a> now."%(url_for('auth.login'))
# invalid password length
if len(passwd) < 6: return "Please choose a longer password."
# password didn't match
if passwd != re_passwd: return "Password didn't match."
# no error
return ''
# create new user from register form
def create_new_auth_user(form):
user = User()
user.email = form['email']
user.username = generate_unique_username(form['email'].split('@')[0])
user.password = generate_password_hash(form['password'], method='sha256')
# saving to database
db.session.add(user)
db.session.commit()
|
from caboard import CaBoard
import time
class Controller:
    """Thin wrapper around a CaBoard: one draw() call renders one frame."""
    def __init__(self):
        # 50 is presumably the board size/cell count — confirm against CaBoard
        self.caboard = CaBoard(50)
    def draw(self):
        self.caboard.draw()
        self.caboard.update()
        # Throttle the loop to roughly 10 frames per second
        time.sleep(0.1)
from django.contrib import admin
from chat.models import UserProfile, Message, Room
class UserProfileAdmin(admin.ModelAdmin):
    """Admin list view for user profiles."""
    list_display = ('user', 'created_at', 'avatar')
    # raw id widget avoids rendering every user in a dropdown
    raw_id_fields = ('user', )
class MessageAdmin(admin.ModelAdmin):
    """Admin list view for chat messages."""
    list_display = ('sender', 'timestamp', 'content')
    # raw id widget avoids rendering every user in a dropdown
    raw_id_fields = ('sender',)
class RoomAdmin(admin.ModelAdmin):
    """Admin list view for chat rooms."""
    # NOTE(review): 'is_goup' looks like a typo for 'is_group', but it must
    # match the Room model field name exactly — confirm before renaming.
    list_display = ('id', 'title', 'timestamp', 'is_goup')
# Wire the model admins into the default admin site
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(Message, MessageAdmin)
admin.site.register(Room, RoomAdmin)
|
# APIs Configuration
from django.urls import include, path
from . import views
from rest_framework import routers
# DRF router exposing the v1 product endpoints
router = routers.DefaultRouter()
router.register('api/v1/product', views.product_Set)
router.register('api/v1/seller_product', views.seller_product_Set)
urlpatterns = [
    path('',include(router.urls))
]
|
import numpy as np
from numpy import pi
from numpy.fft import fft2
from numpy.fft import ifft2
from numpy.fft import fftshift
from numpy.fft import ifftshift
import matplotlib.pyplot as plt
from skimage.data import shepp_logan_phantom
from skimage.transform import resize
# Centered 2D FFT/IFFT: shift DC to the array center before/after transforming
FFT = lambda x: fftshift(fft2(ifftshift(x)))
IFFT = lambda x: fftshift(ifft2(ifftshift(x)))
EPS = 1e-18
## 3) Shift in Fourier domain
# exp(j*2pi*a*x) * f(x) <== Fourier Transform ==> FFT(f(x))(kx - a)
N = 512
X = shepp_logan_phantom()
X = resize(X, (N, N), order=0)
X_fft = FFT(X)
# Pixel coordinate grids used to build the linear phase ramp
nx = np.linspace(1, N, N)
ny = np.linspace(1, N, N)
[mx, my] = np.meshgrid(nx, ny)
dx = 100
dy = 100
# Multiplying by this ramp in the spatial domain shifts the spectrum by (dx, dy)
sht = np.exp(1j*2*pi*(dx/N*mx + dy/N*my))
Y = sht*X
Y_fft = FFT(Y)
# Display windows: [vmin, vmax] for magnitude, phase and log-spectrum panels
wndImg = [0, 1]
wndAng = [-pi, pi]
wndFFT = [0, 10]
## Display
plt.subplot(2, 3, 1); plt.imshow(np.abs(X), cmap='gray', vmin=wndImg[0], vmax=wndImg[1]); plt.axis('image'); plt.axis('off'); plt.title('Ground truth : Magitude');
plt.subplot(2, 3, 2); plt.imshow(np.angle(X), cmap='gray', vmin=wndAng[0], vmax=wndAng[1]); plt.axis('image'); plt.axis('off'); plt.title('Ground truth : Phase');
plt.subplot(2, 3, 3); plt.imshow(np.log(np.abs(X_fft) + EPS), cmap='gray', vmin=wndFFT[0], vmax=wndFFT[1]); plt.axis('image'); plt.axis('off'); plt.title('Ground truth : Fourier domain');
plt.subplot(2, 3, 4); plt.imshow(np.abs(Y), cmap='gray', vmin=wndImg[0], vmax=wndImg[1]); plt.axis('image'); plt.axis('off'); plt.title('Spatial Shift_(dx=%d, dy=%d)' % (dx, dy) + ' : Magitude');
plt.subplot(2, 3, 5); plt.imshow(np.angle(Y) % pi, cmap='gray', vmin=wndAng[0], vmax=wndAng[1]); plt.axis('image'); plt.axis('off'); plt.title('Spatial Shift_(dx=%d, dy=%d)' % (dx, dy) + ' : Phase');
plt.subplot(2, 3, 6); plt.imshow(np.log(np.abs(Y_fft) + EPS), cmap='gray', vmin=wndFFT[0], vmax=wndFFT[1]); plt.axis('image'); plt.axis('off'); plt.title('Spatial Shift_(dx=%d, dy=%d)' % (dx, dy) + ' : Fourier domain');
plt.show()
import textwrap
from onegov.core.utils import module_path
from onegov.form import FormCollection
from onegov.org.initial_content import add_filesets, load_content, add_pages
from onegov.org.models import Organisation
def create_new_organisation(app, name, create_files=True, path=None,
                            locale='de_CH'):
    """Create and persist a new Organisation with locale-specific content.

    Loads the YAML content bundle for ``locale``, creates the Organisation
    row and its pages, registers a locale-specific contact form, and
    optionally adds the default filesets. Returns the Organisation.

    The form field labels below are runtime content and deliberately stay in
    the respective language. NOTE(review): the French form reuses the German
    label "Telefon" — possibly a copy-paste slip; confirm before changing.
    """
    locales = {
        'de_CH': 'content/de.yaml',
        'fr_CH': 'content/fr.yaml',
        'it_CH': 'content/it.yaml',
    }
    path = path or module_path('onegov.feriennet', locales[locale])
    content = load_content(path)
    org = Organisation(name=name, **content['organisation'])
    org.meta['locales'] = locale
    session = app.session()
    session.add(org)
    add_pages(session, path)
    forms = FormCollection(session).definitions
    if locale == 'de_CH':
        forms.add(
            name='kontakt',
            title="Kontakt",
            meta={
                'lead': (
                    "Haben Sie Fragen oder eine Anregung? "
                    "Rufen Sie uns einfach an oder benutzen Sie dieses "
                    "Formular."
                )
            },
            definition=textwrap.dedent("""\
            Vorname *= ___
            Nachname *= ___
            Telefon *= ___
            E-Mail *= @@@
            Mitteilung *= ...[12]
            """),
            type='builtin'
        )
    elif locale == 'fr_CH':
        forms.add(
            name='contact',
            title="Contact",
            meta={
                'lead': (
                    "Avez-vous des questions ou des commentaires ? "
                    "Appelez-nous simplement, ou utilisez le formulaire "
                    "suivant."
                )
            },
            definition=textwrap.dedent("""\
            Prénom *= ___
            Nom *= ___
            Telefon *= ___
            Émail *= @@@
            Message *= ...[12]
            """),
            type='builtin'
        )
    elif locale == 'it_CH':
        forms.add(
            name='contatto',
            title="Contatto",
            meta={
                'lead': (
                    "Avete domande o suggerimenti? "
                    "Potete telefonarci o riempire questo formulario."
                )
            },
            definition=textwrap.dedent("""\
            Nome *= ___
            Cognome *= ___
            Telefono *= ___
            E-mail *= @@@
            Comunicazione *= ...[12]
            """),
            type='builtin'
        )
    else:
        raise NotImplementedError
    if create_files:
        add_filesets(
            session, name, module_path('onegov.feriennet', locales[locale])
        )
    return org
|
"""
Test for using a configuration file
"""
import os
import unittest
import tempfile
import logging
import scitokens
import scitokens.utils.config
import configparser
class TestConfig(unittest.TestCase):
    """
    Test the configuration parsing
    """
    def setUp(self):
        # Start every test from a pristine parser seeded with the defaults,
        # so state from a previous test cannot leak in
        self.dir_path = os.path.dirname(os.path.realpath(__file__))
        scitokens.utils.config.configuration = configparser.ConfigParser(scitokens.utils.config.CONFIG_DEFAULTS)
    def tearDown(self):
        # Clear the config back to defaults each time
        scitokens.set_config()
    def test_config_file(self):
        """
        Test the configuration with a regular config file
        """
        # Get the current directory and pass it the path of test_config.ini
        scitokens.set_config(os.path.join(self.dir_path, "test_config.ini"))
        self.assertEqual(scitokens.utils.config.get("log_file"), "")
        self.assertEqual(scitokens.utils.config.get("log_level"), "DEBUG")
    def test_passing_config(self):
        """
        Test the passing of a configuration parser object
        """
        new_config = configparser.ConfigParser()
        new_config.add_section("scitokens")
        new_config.set("scitokens", "log_level", "WARNING")
        scitokens.set_config(new_config)
        self.assertEqual(scitokens.utils.config.get("log_level"), "WARNING")
    def test_passing_config_log(self):
        """
        Test the with log_file
        """
        new_config = configparser.ConfigParser()
        new_config.add_section("scitokens")
        new_config.set("scitokens", "log_level", "WARNING")
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_file = os.path.join(tmp_dir, "tmp.log")
            new_config.set("scitokens", "log_file", tmp_file)
            scitokens.set_config(new_config)
            self.assertEqual(scitokens.utils.config.get("log_level"), "WARNING")
            self.assertEqual(scitokens.utils.config.get("log_file"), tmp_file)
            # Log a line and verify it actually reached the configured file
            logger = logging.getLogger("scitokens")
            logger.error("This is an error")
            self.assertTrue(os.path.getsize(tmp_file) > 0)
            # close the log files so that TemporaryDirectory can delete itself
            for handler in logger.handlers:
                handler.close()
    def test_no_config(self):
        """
        Test when there is no config
        """
        # This should throw an exception if there is an error
        self.assertEqual(scitokens.utils.config.get("cache_location"), "")
|
import os
import sys
import torch
from torch.autograd import Variable
import torch.nn as nn
from qpth.qp import QPFunction
def computeGramMatrix(A, B):
    """
    Linear kernel matrix between batched feature matrices.

    Each row of A and B is a d-dimensional feature vector.
    Parameters:
      A: a (n_batch, n, d) Tensor.
      B: a (n_batch, m, d) Tensor.
    Returns: a (n_batch, n, m) Tensor of pairwise dot products.
    """
    assert A.dim() == 3 and B.dim() == 3
    assert A.size(0) == B.size(0) and A.size(2) == B.size(2)
    return torch.bmm(A, B.permute(0, 2, 1))
def binv(b_mat, device):
    """
    Computes an inverse of each matrix in the batch by solving A X = I.

    Fixes:
    - torch.gesv was deprecated and removed from modern PyTorch; use
      torch.linalg.solve instead.
    - the identity batch is allocated on b_mat's device/dtype instead of a
      hard-coded .cuda() (which crashed on CPU-only machines and ignored
      the ``device`` argument).

    Parameters:
      b_mat: a (n_batch, n, n) Tensor.
    Returns: a (n_batch, n, n) Tensor of inverses.
    """
    n = b_mat.size(-1)
    id_matrix = torch.eye(n, dtype=b_mat.dtype, device=b_mat.device).expand_as(b_mat)
    return torch.linalg.solve(b_mat, id_matrix)
def one_hot(indices, depth, device):
    """
    Returns a one-hot tensor (PyTorch equivalent of tf.one_hot).

    Fixes:
    - scatter along the *last* dimension: the original scattered dim 1,
      which is wrong for batched (n_batch, m) indices.
    - the output is allocated on indices' device instead of a hard-coded
      .cuda(), so CPU tensors work too.

    Parameters:
      indices: a (n_batch, m) Tensor or (m) Tensor of integer class ids.
      depth: a scalar. Represents the depth of the one hot dimension.
    Returns: a (n_batch, m, depth) Tensor or (m, depth) Tensor.
    """
    encoded_indicies = torch.zeros(indices.size() + torch.Size([depth]),
                                   device=indices.device)
    index = indices.view(indices.size() + torch.Size([1]))
    encoded_indicies = encoded_indicies.scatter_(-1, index, 1)
    return encoded_indicies
def batched_kronecker(matrix1, matrix2):
    """Kronecker product per batch element.

    matrix1: (b, n1, m1), matrix2: (b, n2, m2) -> (b, n1*n2, m1*m2).
    Implemented as the batched outer product of the flattened matrices,
    then permuting/reshaping so blocks land in Kronecker order.
    """
    matrix1_flatten = matrix1.reshape(matrix1.size()[0], -1)
    matrix2_flatten = matrix2.reshape(matrix2.size()[0], -1)
    return torch.bmm(matrix1_flatten.unsqueeze(2), matrix2_flatten.unsqueeze(1)).reshape([matrix1.size()[0]] + \
        list(matrix1.size()[1:]) + list(matrix2.size()[1:])).permute([0, 1, 3, 2, 4]).reshape(\
        matrix1.size(0), matrix1.size(1) * matrix2.size(1), matrix1.size(2) * matrix2.size(2))
def MetaOptNetHead_Ridge(query, support, support_labels, n_way, n_shot, device, lambda_reg=100.0, double_precision=False):
    """Kernel ridge-regression classification head (MetaOptNet).

    Fits one ridge regressor per class on the support set by solving a QP
    with qpth, then scores the query set against the fitted coefficients.
    NOTE: requires CUDA — several tensors are moved with hard-coded .cuda().

    query:          (tasks_per_batch, n_query, d)
    support:        (tasks_per_batch, n_support, d)
    support_labels: (tasks_per_batch, n_support) integer class ids
    Returns logits of shape (tasks_per_batch, n_query, n_way).
    """
    tasks_per_batch = query.size(0)
    n_support = support.size(1)
    n_query = query.size(1)
    assert(query.dim() == 3)
    assert(support.dim() == 3)
    assert(query.size(0) == support.size(0) and query.size(2) == support.size(2))
    assert(n_support == n_way * n_shot) # n_support must equal to n_way * n_shot
    # Regularized linear kernel on the support set: K + lambda * I
    kernel_matrix = computeGramMatrix(support, support)
    kernel_matrix += lambda_reg * torch.eye(n_support).expand(tasks_per_batch, n_support, n_support).cuda()
    block_kernel_matrix = kernel_matrix.repeat(n_way, 1, 1) #(n_way * tasks_per_batch, n_support, n_support)
    support_labels_one_hot = one_hot(support_labels.view(tasks_per_batch * n_support), n_way, device) # (tasks_per_batch * n_support, n_way)
    support_labels_one_hot = support_labels_one_hot.transpose(0, 1) # (n_way, tasks_per_batch * n_support)
    support_labels_one_hot = support_labels_one_hot.reshape(n_way * tasks_per_batch, n_support) # (n_way*tasks_per_batch, n_support)
    G = block_kernel_matrix
    e = -2.0 * support_labels_one_hot
    #This is a fake inequality constraint as qpth does not support QP without an inequality constraint.
    id_matrix_1 = torch.zeros(tasks_per_batch*n_way, n_support, n_support)
    C = Variable(id_matrix_1)
    h = Variable(torch.zeros((tasks_per_batch*n_way, n_support)))
    dummy = Variable(torch.Tensor()).cuda()
    if double_precision:
        G, e, C, h = [x.double().cuda() for x in [G, e, C, h]]
    else:
        G, e, C, h = [x.float().cuda() for x in [G, e, C, h]]
    # Solve one QP per (task, class); e encodes the regression targets
    qp_sol = QPFunction(verbose=False)(G, e.detach(), C.detach(), h.detach(), dummy.detach(), dummy.detach())
    #qp_sol = QPFunction(verbose=False)(G, e.detach(), dummy.detach(), dummy.detach(), dummy.detach(), dummy.detach())
    #qp_sol (n_way*tasks_per_batch, n_support)
    qp_sol = qp_sol.reshape(n_way, tasks_per_batch, n_support)
    #qp_sol (n_way, tasks_per_batch, n_support)
    qp_sol = qp_sol.permute(1, 2, 0)
    #qp_sol (tasks_per_batch, n_support, n_way)
    # Compute the classification score.
    compatibility = computeGramMatrix(support, query)
    compatibility = compatibility.float()
    compatibility = compatibility.unsqueeze(3).expand(tasks_per_batch, n_support, n_query, n_way)
    qp_sol = qp_sol.reshape(tasks_per_batch, n_support, n_way)
    logits = qp_sol.float().unsqueeze(2).expand(tasks_per_batch, n_support, n_query, n_way)
    logits = logits * compatibility
    logits = torch.sum(logits, 1)
    return logits
class ClassificationHead(nn.Module):
    """Dispatches forward() to the selected few-shot classification head.

    Only the Ridge head is wired up here; any other base_learner name
    aborts. NOTE(review): ``enable_scale`` is accepted but never used in
    this block — confirm whether it belongs to a fuller upstream version.
    """
    def __init__(self, base_learner='MetaOptNet', enable_scale=False):
        super(ClassificationHead, self).__init__()
        if ('Ridge' in base_learner):
            self.head = MetaOptNetHead_Ridge
        else:
            print ("Cannot recognize the base learner type")
            assert(False)
    def forward(self, query, support, support_labels, n_way, n_shot, device, **kwargs):
        # Delegate with the head's signature unchanged; kwargs pass through
        return self.head(query, support, support_labels, n_way, n_shot, device, **kwargs)
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import sqlite3
import os
import pylast
import time
# "congif"
url = "http://rockradio.si/api/module/json/RadioSchedule/JsonModule/GetStreamInfo"
post = {'RadioStreamId': '1', 'SystemName': '', 'Title=': '', 'Description' : '', 'Icon%5BId%5D' : '0', 'Stream' : '', 'XmlStream' : '', 'Position' : '', 'Activated' : 'true'}
#curl -X POST -F 'RadioStreamId=1' -F 'SystemName=' -F 'Title=' -F 'Description=' -F 'Icon%5BId%5D=0' -F 'Stream=' -F 'XmlStream=' -F 'Position=' -F 'Activated=true'
dbname = 'rockradio.db'
#lastfm
API_KEY = "??"
API_SECRET = "??"
username = "??"
password_hash = pylast.md5("??")
if os.path.isfile(dbname):
conn = sqlite3.connect(dbname)
else:
print "new db..."
conn = sqlite3.connect(dbname)
conn.execute('''CREATE TABLE PlayedSongs
(artist TEXT NOT NULL,
title TEXT NOT NULL,
date integer);''')
conn.execute("insert into PlayedSongs Values (?, ?, strftime('%s', 'now')) ", ("", ""));
print "Table created"
r = requests.post(url, data=post)
data = r.json()
print "Currently playing " + data['data'][0]['artist'].encode('utf-8') + " - " + data['data'][0]['title'].encode('utf-8')
cursor = conn.cursor()
row = cursor.execute('SELECT * FROM PlayedSongs ORDER BY date desc limit 1').fetchone()
if(data['data'][0]['artist'] == row[0] and data['data'][0]['title'] == row[1]):
print "same song"
else:
conn.execute("insert into PlayedSongs Values (?, ?, strftime('%s', 'now')) ", (data['data'][0]['artist'], data['data'][0]['title']));
conn.commit()
#commit to lastfm
if("ROCK RADIO" != data['data'][0]['title']):
network = pylast.LastFMNetwork(api_key = API_KEY, api_secret = API_SECRET, username = username, password_hash = password_hash)
network.scrobble(artist=data['data'][0]['artist'], title=data['data'][0]['title'], timestamp=int(time.time()))
print "_____________________________\r\n\r\n"
for row in conn.execute('SELECT * FROM PlayedSongs ORDER BY date desc limit 10'):
print row
conn.close()
|
# Une y triunfarás
# Se recibieron distintos postulantes para un empleo de traductor. Crear un diccionario en el cuál la key de cada elemento sea el nombre de un candidato y el contenido sea un set con los idiomas que aprendió. Inventar valores para 5 candidatos.
# Mostrar en pantalla los idiomas que todos los candidatos aprendieron.
# Mostrar en pantalla todos los candidatos que aprendieron por lo menos Español e Inglés.
# El usuario luego debe poder ingresar el nombre de un idioma y el programa deberá mostrar en pantalla el nombre de aquellos candidatos que aprendieron ese idioma.
# Candidate name -> set of languages learned (sample data for the exercise)
candidatos = {
    "Pedro": {"Español","Italiano"},
    "Raul": {"Español","Inglés"},
    "Alberto": {"Español","Inglés","Frances","Italiano","Chino"},
    "Luis": {"Español","Frances","Chino"},
    "Ulises": {"Inglés","Italiano"}
}
def mostrar_todos_los_idiomas():
    """Print the union of every language known by any candidate."""
    print(set().union(*candidatos.values()))
# mostrar_todos_los_idiomas()
def mostrar_esp_ing():
    """Print the candidates who learned at least Spanish and English."""
    matches = {nombre for nombre, idiomas in candidatos.items()
               if {"Español", "Inglés"} <= idiomas}
    print(matches)
# mostrar_esp_ing()
def buscar_candidato():
    """Ask the user for a language and print every candidate who learned it."""
    idioma_buscado = input("Ingrese el idioma a buscar: ")
    encontrados = {nombre for nombre, idiomas in candidatos.items()
                   if idioma_buscado in idiomas}
    print(encontrados)
# buscar_candidato() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.