#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################
# #
# extract_data.py: extract data needed for sci. run interruption plots #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 09, 2021 #
# #
#################################################################################
import math
import re
import sys
import os
import string
#
#--- reading directory list
#
path = '/data/mta/Script/Interrupt/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
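#
#--- note: each dir_list entry has the form "<path> : <variable name>"; the exec
#--- above binds the variable name to the path (the path field is assumed to be
#--- quoted in the file so that it evaluates to a string)
#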
#
#--- append a path to a private folder to the python path
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- mta common functions
#
import mta_common_functions as mcf
#
#--- Science Run Interrupt related functions shared
#
import interrupt_suppl_functions as itrf
#
#---- EPHIN/HRC data extraction
#
import extract_ephin as ephin
#
#---- GOES data extraction
#
import extract_goes as goes
#
#---- ACE (NOAA) data extraction
#
import extract_ace_data as ace
#
#---- ACE (NOAA) statistics
#
import compute_ace_stat as astat
#
#---- XMM data/stat/plot
#
import compute_xmm_stat_plot_for_report as xmm
#
#--- adding radiation zone info
#
import sci_run_add_to_rad_zone_list as rzl
#-------------------------------------------------------------------------------------
#--- extract_data: extract ephin and GOES data; this is a control function that calls a few related scripts
#-------------------------------------------------------------------------------------
def extract_data(ifile):
"""
    extract ephin and GOES data; this is a control function that calls a few related scripts
    input:  ifile --- a file containing the data information
e.g., 20170911 2017:09:11:07:51 2017:09:13:22:56 171.6 auto
output: all data files and stat data file for the event(s)
"""
if ifile == '':
        ifile = input('Please put the interrupt timing list: ')
rzl.sci_run_add_to_rad_zone_list(ifile)
#
#--- correct science run interruption time excluding radiation zones
#
itrf.sci_run_compute_gap(ifile)
data = mcf.read_data_file(ifile)
for ent in data:
print( "EXTRACTING DATA FOR: " + ent)
if not ent:
break
        atemp = re.split(r'\s+', ent)
event = atemp[0]
start = atemp[1]
stop = atemp[2]
gap = atemp[3]
itype = atemp[4]
#
#--- extract ephin/hrc data
#
ephin.ephin_data_extract(event, start, stop)
#
#--- compute ephin/hrc statistics
#
ephin.compute_ephin_stat(event, start)
#
#---- extract GOES data
#
try:
goes.extract_goes_data(event, start, stop)
except:
pass
#
#---- compute GOES statistics
#
try:
goes.compute_goes_stat(event, start)
except:
pass
#
#---- extract ACE (NOAA) data
#
try:
ace.extract_ace_data(event, start, stop)
except:
pass
#
#---- compute ACE statistics
#
try:
astat.compute_ace_stat(event, start, stop)
except:
pass
#
#---- extract/compute/plot xmm data
#
try:
xmm.read_xmm_and_process(event)
except:
pass
#-----------------------------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) == 2:
ifile = sys.argv[1]
else:
ifile = ''
extract_data(ifile)
|
#!/usr/bin/python
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
'''
Usage: mapmodels.py [raw-entities-file] [map.cfg]
raw-entities-file is the output when you load a map with
entities, it is a raw JSON dump of the entities. attr2 is the mapmodel
index, which we will convert.
map.cfg is the file that defines the mapmodels, using the mmodel command
'''
import sys
import re
mapmodel_filename = sys.argv[2]
mapmodel_file = open(mapmodel_filename, 'r')
mapmodels = []
for line in mapmodel_file:
line = line.strip()
if 'mmodel' in line:
        line = line.replace('  ', ' ')  # collapse double spaces
mapmodels.append(line.split(' ')[1].replace('"', ''))
mapmodel_file.close()
def convert_mapmodel(index):
return mapmodels[int(index)]
filename = sys.argv[1]
output = filename + ".fixed"
outfile = open(output, 'w')
outfile.write('[\n')
for line in open(filename, 'r'):
line = line.strip()
if len(line)>2:
line_sep = eval(line)[0]
if re.search('^.*@REPLACE_MODEL_PATH@.*$', line):
separated = convert_mapmodel(line_sep[2]['attr2'])
line = re.sub("\"attr2\":\"" + line_sep[2]['attr2'] + "\"", "\"attr2\":\"-1\"", line)
outfile.write(' ' + re.sub('@REPLACE_MODEL_PATH@', separated, line) + '\n')
else:
outfile.write(' ' + str(line) + '\n')
outfile.write(']\n')
outfile.close()
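# Example invocation (hypothetical file names):
#   python mapmodels.py entities.json map.cfg
# The converted entities are written to entities.json.fixed.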
|
from Testing.Core import Core
if __name__ == '__main__':
    core = Core()
    core.main_loop()
|
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from django_registration.forms import RegistrationForm
from users.models import Profile
class NewRegistrationForm(RegistrationForm):
class Meta(UserCreationForm.Meta):
fields = [
'first_name',
'last_name',
get_user_model().USERNAME_FIELD,
get_user_model().get_email_field_name(),
'password1',
'password2'
]
def clean_email(self):
email = self.cleaned_data['email']
if get_user_model().objects.filter(email=email).exists():
raise forms.ValidationError('That email is already taken.')
return email
class UserForm(forms.ModelForm):
class Meta:
model = get_user_model()
fields = [
'first_name',
'last_name',
'username',
'email'
]
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['timezone']
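# A minimal wiring sketch (hypothetical urls.py; assumes django_registration's
# one-step backend and that this module lives at users.forms -- not part of the
# original file):
#
#   from django.urls import path
#   from django_registration.backends.one_step.views import RegistrationView
#   from users.forms import NewRegistrationForm
#
#   urlpatterns = [
#       path('register/',
#            RegistrationView.as_view(form_class=NewRegistrationForm)),
#   ]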
|
#!/usr/bin/python3.4
# -*-coding:Utf-8
ma_list = list()
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from flask import Blueprint
from flask import request, session, flash, redirect, render_template, url_for, current_app
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from lib import db, login_manager, ui
from lib.flask_login import login_user, logout_user
from auth.models import User, Role
from auth.forms import LoginForm
auth_bp = Blueprint('auth', __name__)
@login_manager.user_loader
def load_user(userid):
return db.Session().query(User).options(
joinedload(User.role).joinedload(Role.permissions),
joinedload(User.permissions),
joinedload(User.dashboard),
).get(userid)
@auth_bp.route('/login', methods=['GET', 'POST'])
def login():
""" 用户登录 """
form = LoginForm(request.form, next=request.args.get('next', ''))
if request.method == 'POST' and form.validate():
        db_session = db.Session()  # avoid shadowing flask's session import
        user = db_session.query(User).filter(
or_(User.username == form.username.data, User.email == form.username.data)
).first()
authenticated = user.check_pwd(form.password.data) if user else False
if user and authenticated:
remember = (form.remember.data == 'y')
if login_user(user, remember=remember):
user.ip = request.remote_addr
                session.commit()  # note: the db session created above (db_session) holds this user
if form.next.data:
return redirect(form.next.data)
return redirect('/')
elif not user:
            flash(u'User does not exist', 'error')
else:
            flash(u'Incorrect password', 'error')
form.next.data = request.args.get('next', '')
return render_template('login.html', form=form)
@auth_bp.route('/logout', methods=['GET'])
def logout():
""" 用户登出 """
logout_user()
return redirect('/auth/login')
ui.dashboard.add_widget(ui.Widget('demo1', 'demo1', content='demo1'))
ui.dashboard.add_widget(ui.Widget('demo2', 'demo2', content='demo2'))
ui.dashboard.add_widget(ui.Widget('demo3', 'demo3', content='demo3'))
ui.dashboard.add_widget(ui.Widget('demo4', 'demo4', content='demo4'))
|
from flask import Blueprint
admin_news = Blueprint("admin/news",__name__)
import app.admin.admin_news.views
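# views is imported after the Blueprint exists so its route decorators can
# attach to admin_news without a circular import.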
|
import re
def validate_phone_number(number):
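    # South Korean mobile format: '01' + carrier digit [016789] + a 7-8 digit
    # subscriber part whose first digit is 1-9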
if re.match(r'^01[016789][1-9]\d{6,7}$', number):
return True
return False
print(validate_phone_number('01012312343')) # True
print(validate_phone_number('0101231123')) # True
print(validate_phone_number('010123112')) # False
print(validate_phone_number('0101231234a')) # False
|
for i in range(1,10):
for j in range(1,i+1):
print(j,'x',i,'=',i*j,end=' ')
if i==j:
            print(' ')  # print a line break at the end of each row
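# First rows of the printed table:
# 1 x 1 = 1
# 1 x 2 = 2 2 x 2 = 4
# 1 x 3 = 3 2 x 3 = 6 3 x 3 = 9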
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:search_engine.py
# @Author: Michael.liu
# @Date:2020/4/20 19:12
# @Desc: This code is SearchEngine
import math
import operator
import sqlite3
import configparser
from datetime import *
import os
from chapter2.SegmentExample import pyHanlpSeg
class SearchEngine:
stop_words = set()
config_path = ''
config_encoding = ''
K1 = 0
B = 0
N = 0
AVG_L = 0
HOT_K1 = 0
HOT_K2 = 0
conn = None
def __init__(self, config_path, config_encoding):
self.config_path = config_path
self.config_encoding = config_encoding
config = configparser.ConfigParser()
config.read(config_path, config_encoding)
file_path = os.path.join(os.path.dirname(__file__), config['DEFAULT']['stop_words_path'])
file_encoding = config['DEFAULT']['stop_words_encoding']
f = open(file_path, encoding=file_encoding)
#f = open(config['DEFAULT']['stop_words_path'], encoding=config['DEFAULT']['stop_words_encoding'])
words = f.read()
self.stop_words = set(words.split('\n'))
self.conn = sqlite3.connect(config['DEFAULT']['db_path'])
self.K1 = float(config['DEFAULT']['k1'])
self.B = float(config['DEFAULT']['b'])
self.N = int(config['DEFAULT']['n'])
self.AVG_L = float(config['DEFAULT']['avg_l'])
self.HOT_K1 = float(config['DEFAULT']['hot_k1'])
self.HOT_K2 = float(config['DEFAULT']['hot_k2'])
def __del__(self):
self.conn.close()
def is_number(self, s):
try:
float(s)
return True
except ValueError:
return False
def sigmoid(self, x):
return 1 / (1 + math.exp(-x))
def clean_list(self, seg_list):
cleaned_dict = {}
n = 0
for i in seg_list:
i = i.strip().lower()
if i != '' and not self.is_number(i) and i not in self.stop_words:
n = n + 1
if i in cleaned_dict:
cleaned_dict[i] = cleaned_dict[i] + 1
else:
cleaned_dict[i] = 1
return n, cleaned_dict
def fetch_from_db(self, term):
c = self.conn.cursor()
c.execute('SELECT * FROM postings WHERE term=?', (term,))
return (c.fetchone())
def result_by_BM25(self, sentence):
seg_list = pyHanlpSeg(sentence)
n, cleaned_dict = self.clean_list(seg_list)
BM25_scores = {}
for term in cleaned_dict.keys():
r = self.fetch_from_db(term)
if r is None:
continue
df = r[1]
w = math.log2((self.N - df + 0.5) / (df + 0.5))
docs = r[2].split('\n')
for doc in docs:
docid, date_time, tf, ld = doc.split('\t')
docid = int(docid)
tf = int(tf)
ld = int(ld)
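                # BM25-style term score: w is the IDF-like weight
                # log2((N - df + 0.5) / (df + 0.5)), and the denominator
                # normalizes tf by document length ld against AVG_L
                # (saturation controlled by K1 and B)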
s = (self.K1 * tf * w) / (tf + self.K1 * (1 - self.B + self.B * ld / self.AVG_L))
if docid in BM25_scores:
BM25_scores[docid] = BM25_scores[docid] + s
else:
BM25_scores[docid] = s
BM25_scores = sorted(BM25_scores.items(), key=operator.itemgetter(1))
BM25_scores.reverse()
if len(BM25_scores) == 0:
return 0, []
else:
return 1, BM25_scores
def result_by_time(self, sentence):
seg_list = pyHanlpSeg(sentence)
n, cleaned_dict = self.clean_list(seg_list)
time_scores = {}
for term in cleaned_dict.keys():
r = self.fetch_from_db(term)
if r is None:
continue
docs = r[2].split('\n')
for doc in docs:
docid, date_time, tf, ld = doc.split('\t')
if docid in time_scores:
continue
news_datetime = datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S")
now_datetime = datetime.now()
td = now_datetime - news_datetime
docid = int(docid)
td = (timedelta.total_seconds(td) / 3600) # hour
time_scores[docid] = td
time_scores = sorted(time_scores.items(), key=operator.itemgetter(1))
if len(time_scores) == 0:
return 0, []
else:
return 1, time_scores
def result_by_hot(self, sentence):
seg_list = pyHanlpSeg(sentence)
n, cleaned_dict = self.clean_list(seg_list)
hot_scores = {}
for term in cleaned_dict.keys():
r = self.fetch_from_db(term)
if r is None:
continue
df = r[1]
w = math.log2((self.N - df + 0.5) / (df + 0.5))
docs = r[2].split('\n')
for doc in docs:
docid, date_time, tf, ld = doc.split('\t')
docid = int(docid)
tf = int(tf)
ld = int(ld)
news_datetime = datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S")
now_datetime = datetime.now()
td = now_datetime - news_datetime
BM25_score = (self.K1 * tf * w) / (tf + self.K1 * (1 - self.B + self.B * ld / self.AVG_L))
td = (timedelta.total_seconds(td) / 3600) # hour
# hot_score = math.log(BM25_score) + 1 / td
hot_score = self.HOT_K1 * self.sigmoid(BM25_score) + self.HOT_K2 / td
if docid in hot_scores:
hot_scores[docid] = hot_scores[docid] + hot_score
else:
hot_scores[docid] = hot_score
hot_scores = sorted(hot_scores.items(), key=operator.itemgetter(1))
hot_scores.reverse()
if len(hot_scores) == 0:
return 0, []
else:
return 1, hot_scores
def search(self, sentence, sort_type=0):
if sort_type == 0:
return self.result_by_BM25(sentence)
elif sort_type == 1:
return self.result_by_time(sentence)
elif sort_type == 2:
return self.result_by_hot(sentence)
if __name__ == "__main__":
filename = os.path.join(os.path.dirname(__file__), 'config.ini')
se = SearchEngine(filename, 'utf-8')
    flag, rs = se.search('肺炎', 0)  # query: "pneumonia"
    # TODO: display the document contents
print(rs[:10])
|
import pygame
import random
pygame.init()
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
# set the window width and height
SIZE = width, height = [300, 700]
screen = pygame.display.set_mode(SIZE)
pygame.display.set_caption("Тип Дождь")
speed = 10
# Density
val = 100
# Create an empty list
rain_list = []
# Number of drops spawned (set by range()) at random coordinates
for i in range(val):
x = random.randrange(0, width)
y = random.randrange(0, height)
rain_list.append([x, y])
clock = pygame.time.Clock()
# Loop until the user clicks the close button.
done = False
while not done:
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
# Set the screen background
screen.fill(BLACK)
    # Process each raindrop in the list
    for i in range(len(rain_list)):
        # Draw the raindrop
        pygame.draw.circle(screen, WHITE, rain_list[i], random.randrange(1, 5))
        # Move the raindrop down by `speed` pixels
        rain_list[i][1] += speed
        # If the raindrop has moved off the bottom of the screen
        if rain_list[i][1] > height:
# Reset it just above the top
y = random.randrange(-50, -10)
rain_list[i][1] = y
# Give it a new x position
x = random.randrange(0, width)
rain_list[i][0] = x
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
clock.tick(20)
# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
|
from django.db import models
class counter(models.Model):
    name = models.CharField(max_length=12)
    counter = models.IntegerField()
|
import torch
from torch.utils.data import Dataset
from torchvision.transforms.functional import to_tensor, to_pil_image
import random
import string
import os
import glob
from PIL import Image
characters = ' ' + string.digits
n_classes = len(characters)
n_input_length, n_len = 12, 3
img_dir = 'data/num'
txt_path = 'det_numbers/labeled.txt'
class CaptchaDataset(Dataset):
def __init__(self, characters, input_length, label_length=None, length=None):
super(CaptchaDataset, self).__init__()
self.characters = characters
self.input_length = input_length
self.n_class = len(characters)
self.__img = []
self.__str = []
# img_expr = img_dir + "/*.jpg"
# img_paths = glob.glob(img_expr)
# img_paths = sorted(img_paths)
        txtFile = open(txt_path)
        txtList = txtFile.readlines()
        txtFile.close()
if length is not None:
self.length = length
else:
self.length = len(txtList)
for i, oneline in enumerate(txtList):
if i == length:
break
info = oneline.split(' ')
image_name = info[0]
rec_number = info[1][:-1]
# print(rec_number)
img_path = os.path.join(img_dir, image_name+'.jpg')
image = Image.open(img_path).convert('RGB')
self.__img.append(image)
# print(rec_number)
self.__str.append(rec_number)
print(self.length)
def __len__(self):
return self.length
def __getitem__(self, index):
image_str = self.__str[index]
label_length = len(image_str)
image = to_tensor(self.__img[index])
target = torch.tensor([self.characters.find(x) for x in image_str], dtype=torch.long)
input_length = torch.full(size=(1,), fill_value=self.input_length, dtype=torch.long)
target_length = torch.full(size=(1,), fill_value=label_length, dtype=torch.long)
return image, target, input_length, target_length
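        # input_length/target_length are per-sample lengths in the shape a CTC
        # loss (e.g. torch.nn.CTCLoss) expects, aligning the model's fixed
        # n_input_length output steps with the variable-length label; the
        # training loop itself is not part of this file.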
if __name__ == '__main__':
print(characters, n_classes)
dataset = CaptchaDataset(characters, n_input_length)
image, target, input_length, label_length = dataset[-1]
print(target)
print(''.join([characters[x] for x in target]), input_length, label_length)
img = to_pil_image(image)
img.show()
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from neutron.api.rpc.callbacks import events
from neutron import context
from neutron.objects.qos import policy as policy_object
from neutron.services.qos.notification_drivers import manager as driver_mgr
from neutron.services.qos.notification_drivers import message_queue
from neutron.tests.unit.services.qos import base
DUMMY_DRIVER = ("neutron.tests.unit.services.qos.notification_drivers."
"dummy.DummyQosServiceNotificationDriver")
def _load_multiple_drivers():
cfg.CONF.set_override(
"notification_drivers",
["message_queue", DUMMY_DRIVER],
"qos")
class TestQosDriversManagerBase(base.BaseQosTestCase):
def setUp(self):
super(TestQosDriversManagerBase, self).setUp()
self.config_parse()
self.setup_coreplugin()
config = cfg.ConfigOpts()
config.register_opts(driver_mgr.QOS_PLUGIN_OPTS, "qos")
self.policy_data = {'policy': {
'id': 7777777,
'tenant_id': 888888,
'name': 'test-policy',
'description': 'test policy description',
'shared': True}}
self.context = context.get_admin_context()
self.policy = policy_object.QosPolicy(self.context,
**self.policy_data['policy'])
ctxt = None
self.kwargs = {'context': ctxt}
class TestQosDriversManager(TestQosDriversManagerBase):
def setUp(self):
super(TestQosDriversManager, self).setUp()
#TODO(Qos): Fix this unittest to test manager and not message_queue
# notification driver
rpc_api_cls = mock.patch('neutron.api.rpc.handlers.resources_rpc'
'.ResourcesPushRpcApi').start()
self.rpc_api = rpc_api_cls.return_value
self.driver_manager = driver_mgr.QosServiceNotificationDriverManager()
def _validate_registry_params(self, event_type, policy):
self.rpc_api.push.assert_called_with(self.context, policy,
event_type)
def test_create_policy_default_configuration(self):
#RPC driver should be loaded by default
self.driver_manager.create_policy(self.context, self.policy)
self.assertFalse(self.rpc_api.push.called)
def test_update_policy_default_configuration(self):
#RPC driver should be loaded by default
self.driver_manager.update_policy(self.context, self.policy)
self._validate_registry_params(events.UPDATED, self.policy)
def test_delete_policy_default_configuration(self):
#RPC driver should be loaded by default
self.driver_manager.delete_policy(self.context, self.policy)
self._validate_registry_params(events.DELETED, self.policy)
class TestQosDriversManagerMulti(TestQosDriversManagerBase):
def _test_multi_drivers_configuration_op(self, op):
_load_multiple_drivers()
driver_manager = driver_mgr.QosServiceNotificationDriverManager()
handler = '%s_policy' % op
with mock.patch('.'.join([DUMMY_DRIVER, handler])) as dummy_mock:
rpc_driver = message_queue.RpcQosServiceNotificationDriver
with mock.patch.object(rpc_driver, handler) as rpc_mock:
getattr(driver_manager, handler)(self.context, self.policy)
for mock_ in (dummy_mock, rpc_mock):
mock_.assert_called_with(self.context, self.policy)
def test_multi_drivers_configuration_create(self):
self._test_multi_drivers_configuration_op('create')
def test_multi_drivers_configuration_update(self):
self._test_multi_drivers_configuration_op('update')
def test_multi_drivers_configuration_delete(self):
self._test_multi_drivers_configuration_op('delete')
|
#!/usr/bin/env python
import cgi, cgitb
from roundwared import server
import json
print "Content-type: text/plain"
print
# The following line is what should be here. However the OceanVoices client
# is still expecting a different protocol and thus the hack at the end of this
# file is in place to accommodate it.
#print server.webservice_main(server.form_to_dict(cgi.FieldStorage()))
# BEGIN HACK
val = server.catch_errors(server.form_to_dict(cgi.FieldStorage()))
if val.has_key('STREAM_URL'):
print '"'+val['STREAM_URL']+'"';
else:
print json.dumps(val, sort_keys=True, indent=4)
# END HACK
|
from django.db import models
from django.conf import settings
# Create your models here.
class Post(models.Model):
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
message = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
    is_public = models.BooleanField(default=False, db_index=True)
    ip = models.GenericIPAddressField(null=True, editable=False)
|
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def maxDepth(self, root):
        if root is None: return 0
        if root.left is None and root.right is None: return 1
        elif root.left is not None and root.right is None: return 1 + self.maxDepth(root.left)
        elif root.left is None and root.right is not None: return 1 + self.maxDepth(root.right)
        else: return max(1 + self.maxDepth(root.left), 1 + self.maxDepth(root.right))
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.left = TreeNode(4)
root.left.right = TreeNode(5)
root.left.left = TreeNode(6)  # note: overwrites the TreeNode(4) set two lines up
print(Solution().maxDepth(root))
print(Solution().maxDepth(None))
|
# Python module exposing a line-drawing function (DDA algorithm) for import by another program
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
def ROUND(a):
return int(a+0.5)
def init():
glClearColor(1.0,1.0,1.0,0.0)
    #glColor3f(1.0,0.0,0.0)
glPointSize(3.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0,600.0,0.0,600.0)
def setPixel(x,y):
glBegin(GL_POINTS)
glVertex2i(x,y)
glEnd()
glFlush()
def lineDDA(x0,y0,xEnd,yEnd):
delta_x=xEnd-x0
delta_y=yEnd-y0
dx=abs(xEnd-x0)
dy=abs(yEnd-y0)
x,y=x0,y0
steps=dx if dx>dy else dy
if steps !=0:
change_x=dx/float(steps)
change_y=dy/float(steps)
else:
change_x=0
change_y=0
setPixel(ROUND(x),ROUND(y))
for k in range(steps):
if delta_x >= 0:
x+=change_x
else:
x-=change_x
if delta_y >= 0:
y+=change_y
else:
y-=change_y
setPixel(ROUND(x),ROUND(y))
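# A minimal, hypothetical driver for this module (window setup values assumed,
# not part of the original program):
#
#   glutInit(sys.argv)
#   glutInitWindowSize(600, 600)
#   glutCreateWindow(b"DDA line")
#   init()
#   glutDisplayFunc(lambda: lineDDA(50, 50, 500, 400))
#   glutMainLoop()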
|
from manim import *
NUM_CARDS = 12
SLICE_ANGLE = TAU/NUM_CARDS
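# Each sector spans TAU/NUM_CARDS radians; adding NUM_CARDS/4 before the modulo
# shifts the start angle by a quarter turn, so sector 0 begins at 12 o'clock.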
def make_sector(n):
return Sector(
start_angle=((n + (NUM_CARDS / 4)) % NUM_CARDS) * SLICE_ANGLE,
angle=SLICE_ANGLE,
outer_radius=1.5,
stroke_width=2,
stroke_color=BLUE,
fill_color=BLACK)
class Main(Scene):
def construct(self):
sectors = [make_sector(n) for n in range(NUM_CARDS)]
self.play(*[Create(s) for s in sectors])
self.wait()
self.play(*[FadeToColor(s, color=ORANGE) for s in sectors[:4]])
self.wait()
self.play(*[FadeToColor(s, color=BLUE) for s in sectors[4:]])
self.wait(2)
|
l = [2, "tres", True, [1,"dos",3]]
print "lista l =",l
l2 = l[1];
print "el segundo de la lista = ",l2
l3 = l[3][1]
print "lista de lista l[3][1] =",l3
l3 = l[3][1]=2
print "reaccion lista de lista l[3][1] =",l3
l4 = l[0:3]
print "un segmenteo de la lista l[0:3] =",l4
l5 = l[0:3:2]
print "un segmenteo de lista con intervalo l[0:3] =",l5
l6 = l[0::2]
print "un segmenteo de la lista l[0::2] =",l6
l7 = l[1::2]
print "un segmenteo de la lista l[1::2] =",l7
l[0:2]=[4,3]
print "asignacio de una lista l[0:2]=[4,3] =",l
l[0:2]=[4]
print "asignacio de una lista l[0:2]=[4,3] =",l
l9=l[-1]
print "indices invervios l9=l[-1] =",l9
|
#JTSK-350112
# mod_conversion.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
def in2cm_table(start_length, end_length, step_size):
print("{0:>8} {1:>8}".format("inch", "cm"))
for i in range(start_length, end_length, step_size):
print("{0:>8.1f} {1:>8.1f}".format(i, i * 2.54))
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 2 09:14:26 2015
@author: olaf
"""
import numpy as np
import matplotlib.pyplot as plt
import random
import time
from scipy import weave
def Startconf(anzTeilchen,anzSpinUp,anzZeitschritte):
weltlinien = np.array([[False]*anzTeilchen]*anzZeitschritte)
zahl = []
zahl0 = random.randint(1,anzTeilchen-1)
# weltlinien[:,::2] = True
for k in range(anzSpinUp):
while zahl0 in zahl:
zahl0 = random.randint(1,anzTeilchen-1)
zahl += [zahl0]
weltlinien[:,zahl0] = True
return weltlinien
def loop1(anzZeitschritte, anzTeilchen, anzSpinup, termination, weltlinien):
weltlinienschnitt = 0
spinerhaltung = False
for m in xrange(100):
        x = np.array([0.0]*termination) # initialization
y = np.array([0.0]*termination)
x[0] = random.randint(0,anzZeitschritte-1)
y[0] = random.randint(0,anzTeilchen-1)
while weltlinien[x[0],y[0]] == 0:
x[0] = random.randint(0,anzZeitschritte-1)
y[0] = random.randint(0,anzTeilchen-1)
breakvar = 0
        # plaquette at top left inactive
if x[0]%2 == y[0]%2: walk = 1
else: walk = 4
walkOld = walk
for k in xrange(1,termination):
            if k%2 == 0: # every second step moves twice
walk_array = []
for walk in xrange(1,5):
                    # prevent direction reversal
if not ( (walkOld == 1 and walk == 3) or (walkOld == 3 and walk == 1) or (walkOld == 2 and walk == 4) or (walkOld == 4 and walk == 2) ):
                        # switch for the step direction
                        if walk == 1: # right
                            x[k] = x[k-1]-0.5
                            y[k] = y[k-1]+0.5
                        elif walk == 2: # down
                            x[k] = x[k-1]+0.5
                            y[k] = y[k-1]+0.5
                        elif walk == 3: # left
                            x[k] = x[k-1]+0.5
                            y[k] = y[k-1]-0.5
                        elif walk == 4: # up
                            x[k] = x[k-1]-0.5
                            y[k] = y[k-1]-0.5
                        # periodic boundary conditions
if x[k] == anzZeitschritte: x[k] = 0
if x[k] == -1: x[k] = anzZeitschritte-1
if y[k] == anzTeilchen: y[k] = 0
if y[k] == -1: y[k] = anzTeilchen-1
if (walk == 1 or walk == 4) and weltlinien[x[k],y[k]] == True: walk_array += [walk]
if (walk == 2 or walk == 3) and weltlinien[x[k],y[k]] == False: walk_array += [walk]
if walk_array == []: break
random.shuffle(walk_array)
walk = walk_array[0]
walkOld = walk
            # switch for the step direction
            if walk == 1: # right
                x[k] = x[k-1]-0.5
                y[k] = y[k-1]+0.5
            elif walk == 2: # down
                x[k] = x[k-1]+0.5
                y[k] = y[k-1]+0.5
            elif walk == 3: # left
                x[k] = x[k-1]+0.5
                y[k] = y[k-1]-0.5
            elif walk == 4: # up
                x[k] = x[k-1]-0.5
                y[k] = y[k-1]-0.5
            # periodic boundary conditions
if x[k] == anzZeitschritte: x[k] = 0
if x[k] == -1: x[k] = anzZeitschritte-1
if y[k] == anzTeilchen: y[k] = 0
if y[k] == -1: y[k] = anzTeilchen-1
            # check whether the loop intersects itself
for l in xrange(0,k):
if (x[l]== x[k]) & (y[l] == y[k]):
x = x[l:k]
y = y[l:k]
breakvar = 1
break
if breakvar == 1:
                # if the loop intersects itself, filter out the lattice points
if x[0] == np.floor(x[0]):
x = x[::2]
y = y[::2]
else:
x = x[1::2]
y = y[1::2]
                # check the number of intersections with the world lines
spinerhaltung_mask = np.array([False]*anzTeilchen)
weltlinienschnitt = 0
for n in xrange(len(x)):
if x[n] == 0: spinerhaltung_mask[y[n]] = True
if weltlinien[x[n],y[n]] == True: weltlinienschnitt += 1
spinerhaltung = False
                # conservation of the spin number
L1 = (sum( weltlinien[0] ^ spinerhaltung_mask ) == anzSpinup)
                # equality of start and end configurations
#L2 = np.array_equal(weltlinien[0], weltlinien[1])
if L1: spinerhaltung = True
break
if weltlinienschnitt > 2 and spinerhaltung: break
return x,y,l,k
def walk_neu(anzZeitschritte, anzTeilchen, weltlinien, x, y, k, walkOld, gs, gd, beta):
#w = np.tanh(Jz*beta/anzZeitschritte)
#w = np.exp(beta * (gs_ratio - gd_ratio) )
mask1 = np.array([[0,0],[0,0]])
mask2 = np.array([[1,1],[1,1]])
mask3 = np.array([[1,0],[1,0]])
mask4 = np.array([[0,1],[0,1]])
mask5 = np.array([[1,0],[0,1]])
mask6 = np.array([[0,1],[1,0]])
walk_h = np.array([2, 1, 4, 3])
walk_v = np.array([4, 3, 2, 1])
x_u = x[k-1]+0.5
x_o = x[k-1]-0.5
y_r = y[k-1]+0.5
y_l = y[k-1]-0.5
    # periodic boundary conditions
if x_u == anzZeitschritte: x_u = 0
if x_o == -1: x_o = anzZeitschritte-1
if y_r == anzTeilchen: y_r = 0
if y_l == -1: y_l = anzTeilchen-1
links_unten = weltlinien[x_u, y_l]
rechts_unten = weltlinien[x_u, y_r]
links_oben = weltlinien[x_o, y_l]
rechts_oben = weltlinien[x_o, y_r]
plakette = np.array([[links_oben, rechts_oben],[links_unten,rechts_unten]])
    # plaquettes of type 1+, 1- (see Everz p. 9)
if np.array_equal(plakette, mask1) or np.array_equal(plakette, mask2):
if random.random() < gs / (gs + gd): walk = walk_v[walkOld-1]
else: walk = walkOld
    # plaquettes of type 2+, 2- (see Everz p. 9)
elif np.array_equal(plakette, mask3) or np.array_equal(plakette, mask4):
if random.random() < 1./(1.+gd): walk = walk_v[walkOld-1]
else: walk = walk_h[walkOld-1]
    # plaquettes of type 3+, 3- (see Everz p. 9)
elif np.array_equal(plakette, mask5) or np.array_equal(plakette, mask6):
if random.random() < 1./(1.+gs): walk = walkOld
else: walk = walk_h[walkOld-1]
else:
        print 'forbidden plaquette'
exit(1)
return walk
def walk_neu2(anzZeitschritte, anzTeilchen, weltlinien, x, y, k, walkOld, gs, gd, beta):
#w = np.tanh(Jz*beta/anzZeitschritte)
#w = np.exp(beta * (gs_ratio - gd_ratio) )
mask1 = np.array([[0,0],[0,0]])
mask2 = np.array([[1,1],[1,1]])
mask3 = np.array([[1,0],[1,0]])
mask4 = np.array([[0,1],[0,1]])
mask5 = np.array([[1,0],[0,1]])
mask6 = np.array([[0,1],[1,0]])
walk_h = np.array([2, 1, 4, 3])
walk_v = np.array([4, 3, 2, 1])
x_u = x[k-1]+0.5
x_o = x[k-1]-0.5
y_r = y[k-1]+0.5
y_l = y[k-1]-0.5
    # periodic boundary conditions
if x_u == anzZeitschritte: x_u = 0
if x_o == -1: x_o = anzZeitschritte-1
if y_r == anzTeilchen: y_r = 0
if y_l == -1: y_l = anzTeilchen-1
links_unten = weltlinien[x_u, y_l]
rechts_unten = weltlinien[x_u, y_r]
links_oben = weltlinien[x_o, y_l]
rechts_oben = weltlinien[x_o, y_r]
plakette = np.array([[links_oben, rechts_oben],[links_unten,rechts_unten]])
    # plaquettes of type 1+, 1- (see Everz p. 9)
if np.array_equal(plakette, mask1) or np.array_equal(plakette, mask2):
walk = walk_v[walkOld-1]
    # plaquettes of type 2+, 2- (see Everz p. 9)
elif np.array_equal(plakette, mask3) or np.array_equal(plakette, mask4):
if random.random() < np.tanh(beta/anzZeitschritte): walk = walk_h[walkOld-1]
else: walk = walk_v[walkOld-1]
    # plaquettes of type 3+, 3- (see Everz p. 9)
elif np.array_equal(plakette, mask5) or np.array_equal(plakette, mask6):
walk = walk_h[walkOld-1]
else:
        print 'forbidden plaquette'
exit(1)
return walk
def loop2(anzZeitschritte, anzTeilchen, anzSpinup, termination, weltlinien, Jz, beta, gs, gd):
weltlinienschnitt = 0
spinerhaltung = False
update = True
for m in xrange(100):
x = np.array([0.0]*termination) # Initialisierung
y = np.array([0.0]*termination)
x[0] = random.randint(0,anzZeitschritte-1)
y[0] = random.randint(0,anzTeilchen-1)
while weltlinien[x[0],y[0]] == False:
x[0] = random.randint(0,anzZeitschritte-1)
y[0] = random.randint(0,anzTeilchen-1)
breakvar = 0
        # plaquette at top left inactive
if x[0]%2 == y[0]%2: walk = 1
else: walk = 4
for k in xrange(1,termination):
            if k%2 == 0: # every second step moves twice
walk = walk_neu(anzZeitschritte, anzTeilchen, weltlinien, x, y, k, walk, gs, gd, beta)
            # switch for the step direction
            if walk == 1: # right
                x[k] = x[k-1]-0.5
                y[k] = y[k-1]+0.5
            elif walk == 2: # down
                x[k] = x[k-1]+0.5
                y[k] = y[k-1]+0.5
            elif walk == 3: # left
                x[k] = x[k-1]+0.5
                y[k] = y[k-1]-0.5
            else: # up
                x[k] = x[k-1]-0.5
                y[k] = y[k-1]-0.5
            #TODO: optimize: an xy class
            # periodic boundary conditions
if x[k] == anzZeitschritte: x[k] = 0
if x[k] == -1: x[k] = anzZeitschritte-1
if y[k] == anzTeilchen: y[k] = 0
if y[k] == -1: y[k] = anzTeilchen-1
            #TODO: sort + bisect
            # check whether the loop intersects itself
for l in xrange(0,k-1):
if (x[l]== x[k]) & (y[l] == y[k]):
x = x[l:k]
y = y[l:k]
breakvar = 1
break
if breakvar == 1:
                # if the loop intersects itself, filter out the lattice points
if x[0] == np.floor(x[0]):
x = x[::2]
y = y[::2]
else:
x = x[1::2]
y = y[1::2]
                # check the number of intersections with the world lines
spinerhaltung_mask = np.array([False]*anzTeilchen)
weltlinienschnitt = 0
for n in xrange(len(x)):
                    # look for loop intersections in the first row (x[n] == 0) of the checkerboard
if x[n] == 0: spinerhaltung_mask[y[n]] = True
                    # look for intersections with world lines
if weltlinien[x[n],y[n]] == True: weltlinienschnitt += 1
spinerhaltung = False
                # conservation of the spin number
L1 = (sum( weltlinien[0] ^ spinerhaltung_mask ) == anzSpinup)
                # equality of start and end configurations
#L2 = np.array_equal(weltlinien[0], weltlinien[1])
if L1: spinerhaltung = True
break
if weltlinienschnitt > 2 and spinerhaltung: break
if m == 99: update = False
return x,y,l,k, update
def gewichter(weltlinien):
weltlinien_neu = np.hstack((weltlinien, weltlinien[:,0].reshape(-1, 1)))
    weltlinien_neu = np.vstack((weltlinien_neu, weltlinien_neu[0,:]))  # stack onto weltlinien_neu so the wrapped column from the hstack above is kept
N = int(np.shape(weltlinien_neu)[0])
M = int(np.shape(weltlinien_neu)[1])
ns_py = np.array([0])
nd_py = np.array([0])
n0_py = np.array([0])
val_py= np.array([0])
code = r'''
int ns = 0;
int nd = 0;
int n0 = 0;
int val = 0;
for(int n = 0; n < N-1; n++) {
for(int m = 0; m < M-1; m++) {
if (n%2 != m%2) {
int index1 = (n+1)*M + m;
int index2 = (n+1)*M + m + 1;
int index3 = n*M + m;
int index4 = n*M + m + 1;
int links_unten = weltlinien_neu[index1];
int rechts_unten = weltlinien_neu[index2];
int links_oben = weltlinien_neu[index3];
int rechts_oben = weltlinien_neu[index4];
int valide = 0;
if(links_unten == links_oben && rechts_oben == rechts_unten && rechts_unten != links_oben) {
ns += 1;
valide +=1;
}
if(links_unten != links_oben && rechts_oben != rechts_unten && rechts_unten == links_oben) {
nd += 1;
valide +=1;
}
if(links_unten == links_oben && rechts_oben == rechts_unten && rechts_unten == links_oben) {
n0 += 1;
valide +=1;
}
if(valide!=1){val+=1;}
}
}
}
ns_py[0] = ns;
nd_py[0] = nd;
n0_py[0] = n0;
val_py[0] = val;
'''
weave.inline(code,['N','M','ns_py','nd_py','n0_py','val_py','weltlinien_neu'])
if val_py !=0:
print val_py
exit(1)
return ns_py, nd_py, n0_py
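# Reading of the C snippet above: ns_py/nd_py/n0_py count the straight,
# diagonal and uniform (all-equal) plaquette types on the checkerboard, and
# val_py counts plaquettes matching none or several patterns, which must stay 0.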
def autocorr(x):
result = np.correlate(x, x, mode = 'full')
maxcorr = np.argmax(result)
#print 'maximum = ', result[maxcorr]
result = result / result[maxcorr] # <=== normalization
return result[result.size/2:]
t0 = time.time()
# Parameter
anzTeilchen = 16
anzSpinup = 8
anzZeitschritte = 200
termination = anzTeilchen*anzZeitschritte
anzMarkovZeit = 10000
Jz = 1.
Jx = 1.
beta = 10.
print(np.tanh(beta/anzZeitschritte))
# to actually get the right number of time steps
#anzZeitschritte += 1
gs = np.exp(-Jz*beta/anzZeitschritte) * np.cosh(Jx*beta/anzZeitschritte)
gd = np.exp(-Jz*beta/anzZeitschritte) * np.sinh(Jx*beta/anzZeitschritte)
gs_ratio = Jx/anzZeitschritte * np.tanh(beta*Jx/anzZeitschritte) - Jz/anzZeitschritte #gs' / gs
gd_ratio = Jx/anzZeitschritte / np.tanh(beta*Jx/anzZeitschritte) - Jz/anzZeitschritte #gd' / gd
meanNs = np.array([0]*(anzMarkovZeit-1))
meanNd = np.array([0]*(anzMarkovZeit-1))
meanN0 = np.array([0]*(anzMarkovZeit-1))
energy = np.array([0.]*(anzMarkovZeit-1))
# Start
weltlinien = Startconf(anzTeilchen,anzSpinup,anzZeitschritte)
#print(weltlinien)
#print(gewichter(weltlinien))
#print((-ns*gs_ratio - nd*gd_ratio)/anzTeilchen - Jz/4)
## thermalization ("heat up") loop
#for m in xrange(20):
#
#    # find a loop
#    x,y,l,k = loop1(anzZeitschritte, anzTeilchen, anzSpinup, termination, weltlinien)
#
#    # spin flip
#    for k in xrange(len(x)):
#        weltlinien[int(x[k]),int(y[k])] = weltlinien[int(x[k]),int(y[k])] ^ True
# run simulation
for n in xrange(anzMarkovZeit-1):
print n
    # find a loop
x,y,l,k,update = loop2(anzZeitschritte, anzTeilchen, anzSpinup, termination, weltlinien, Jz, beta, gs, gd)
    # spin flip
if update and 0.5 > random.random():
for k in xrange(len(x)):
weltlinien[int(x[k]),int(y[k])] = weltlinien[int(x[k]),int(y[k])] ^ True
    # compute the weights of the world-line configuration
ns, nd, n0 = gewichter(weltlinien)
test_energy = -ns*gs_ratio - nd*gd_ratio
test_energy /= anzTeilchen
meanNs[n] = int(ns)
meanNd[n] = int(nd)
meanN0[n] = int(n0)
energy[n] = float(test_energy - Jz/4)
mean_ns = np.mean(meanNs[anzMarkovZeit/2:anzMarkovZeit])
mean_nd = np.mean(meanNd[anzMarkovZeit/2:anzMarkovZeit])
std_ns = np.std(meanNs[anzMarkovZeit/2:anzMarkovZeit])
std_nd = np.std(meanNd[anzMarkovZeit/2:anzMarkovZeit])
mean_E = -mean_ns*gs_ratio - mean_nd*gd_ratio
mean_E /= anzTeilchen
mean_E -= Jz/4
std_E = std_ns*gs_ratio + std_nd*gd_ratio
std_E /= anzTeilchen
print('E_mean', mean_E)
#print(np.mean(energy[1000::]) - Jz/4)
#print(t0 - time.time())
#auto = autocorr(energy)
#
#figAuto = plt.figure()
#plt.plot(auto)
#plt.ylabel('Autocorrelation')
#plt.xlabel('Markov Time')
figEnergy = plt.figure()
plt.plot(energy)
plt.ylabel('Energy')
plt.xlabel('Markov Time')
plt.title('Energy = '+str(mean_E)+' +- '+str(std_E))
figPlaketten = plt.figure()
plt.plot(meanN0, '-k', label='S1')
plt.plot(meanNs, '-b', label='S2')
plt.plot(meanNd, '-r', label='S3')
plt.ylabel('Number of plaquettes')
plt.xlabel('Markov Time')
plt.legend()
|
"""
Easy
https://leetcode.com/problems/valid-palindrome/
Given a string s, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.
Example 1:
Input: s = "A man, a plan, a canal: Panama"
Output: true
Explanation: "amanaplanacanalpanama" is a palindrome.
Example 2:
Input: s = "race a car"
Output: false
Explanation: "raceacar" is not a palindrome
"""
import re
class Solution:
def isPalindrome(self, s: str) -> bool:
s = s.lower()
haRegex = re.compile(r'[a-z0-9]')
start = 0
end = len(s) - 1
while start <= end:
if not haRegex.search(s[start]):
start = start + 1
continue
if not haRegex.search(s[end]):
end = end - 1
continue
if s[start] == s[end]:
start = start + 1
end = end - 1
else:
return False
return True
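# Two-pointer scan: each pointer moves at most len(s) steps, so the check runs
# in O(n) time with O(1) extra space.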
|
import tkinter as tk
from PIL import Image, ImageTk
import pygame
root = tk.Tk()
root.title("Tic Tac Toe")
#####################################
pygame.mixer.init()
pygame.mixer.music.load(r"C:\Users\Sam\Documents\Python\Tic Tac Toe\soundtrack_tictactoe.mp3")
pygame.mixer.music.play(loops=100)
####################################
canvas = tk.Canvas(root, height=500, width=500, bg="blue")
canvas.grid(rowspan=7, columnspan=7)
#####################################
image_big = Image.open(r"C:\Users\Sam\Documents\Python\Tic Tac Toe\logo_tictactoe.png")
image= image_big.resize((400, 110))
Logo = ImageTk.PhotoImage(image)
logo = tk.Label(image=Logo)
logo.image = Logo
logo.place(x=50, y=15)
tk.Label(root, bg="blue", width=60, height=1).place(x=50, y=5)
tk.Label(root, bg="blue", width=60, height=1).place(x=50, y=125)
tk.Label(root, bg="blue", width=1, height=10).place(x=40, y=5)
tk.Label(root, bg="blue", width=1, height=10).place(x=450, y=5)
#####################################
#tk.Label(root, text="Tic Tac Toe", font = ("Bauhaus 93", 30), fg='white', bg="blue").grid(row=0, column=2, rowspan=3, columnspan=3)
#####################################
tk.Label(root, text="score o: 0", font = ("Bell MT", 12), bg="#f0f0f0", borderwidth=10, relief="sunken").grid(row=5, column=1, rowspan=6, columnspan=3)
tk.Label(root, text="ties: 0", font = ("Bell MT", 12), bg="#f0f0f0", borderwidth=10, relief="sunken").grid(row=5, column=2, rowspan=6, columnspan=3)
tk.Label(root, text="score x: 0", font = ("Bell MT", 12), bg="#f0f0f0", borderwidth=10, relief="sunken").grid(row=5, column=3, rowspan=6, columnspan=3)
#####################################
button1 = tk.Button(root, text=" ", font = ("Algerian", 10), height=3, width=6, borderwidth=5, command = lambda:button1_response())
button1.grid(row=1, column=1, rowspan=3, columnspan=3)
button2 = tk.Button(root, text=" ", font = ("Algerian", 10), height=3, width=6, borderwidth=5, command = lambda:button2_response())
button2.grid(row=1, column=2, rowspan=3, columnspan=3)
button3 = tk.Button(root, text=" ", font = ("Algerian", 10), height=3, width=6, borderwidth=5, command = lambda:button3_response())
button3.grid(row=1, column=3, rowspan=3, columnspan=3)
button4 = tk.Button(root, text=" ", font = ("Algerian", 10), height=3, width=6, borderwidth=5, command = lambda:button4_response())
button4.grid(row=2, column=1, rowspan=3, columnspan=3)
button5 = tk.Button(root, text=" ", font = ("Algerian", 10), height=3, width=6, borderwidth=5, command = lambda:button5_response())
button5.grid(row=2, column=2, rowspan=3, columnspan=3)
button6 = tk.Button(root, text=" ", font = ("Algerian", 10), height=3, width=6, borderwidth=5, command = lambda:button6_response())
button6.grid(row=2, column=3, rowspan=3, columnspan=3)
button7 = tk.Button(root, text=" ", font = ("Algerian", 10), height=3, width=6, borderwidth=5, command = lambda:button7_response())
button7.grid(row=3, column=1, rowspan=3, columnspan=3)
button8 = tk.Button(root, text=" ", font = ("Algerian", 10), height=3, width=6, borderwidth=5, command = lambda:button8_response())
button8.grid(row=3, column=2, rowspan=3, columnspan=3)
button9 = tk.Button(root, text=" ", font = ("Algerian", 10), height=3, width=6, borderwidth=5, command = lambda:button9_response())
button9.grid(row=3, column=3, rowspan=3, columnspan=3)
button_reset = tk.Button(root, text="reset", font=("Arial Black", 8), fg='white', borderwidth=10, bg="red", height=3, width=6, command = lambda:response_reset())
button_reset.grid(row=5, column=5, rowspan=6, columnspan=6)
#####################################
count = 0
score_x = 0
score_o = 0
score_gelijk = 0
game_over = 0
#####################################
def winner_x():
tk.Label(root, text="x wins", font = ("Berlin Sans FB", 15), width=10, fg='white', bg="blue").grid(row=4, column=2, rowspan=6, columnspan=3)
button1['state'] = tk.DISABLED
button2['state'] = tk.DISABLED
button3['state'] = tk.DISABLED
button4['state'] = tk.DISABLED
button5['state'] = tk.DISABLED
button6['state'] = tk.DISABLED
button7['state'] = tk.DISABLED
button8['state'] = tk.DISABLED
button9['state'] = tk.DISABLED
global score_x
score_x = score_x + 1
score_x_text = "score x: " + str(score_x)
tk.Label(root, text=score_x_text, font = ("Bell MT", 12), bg="#f0f0f0").grid(row=5, column=3, rowspan=6, columnspan=3)
global game_over
game_over = game_over + 1
def winner_o():
tk.Label(root, text="o wins", font = ("Berlin Sans FB", 15), width=10, bg="blue").grid(row=4, column=2, rowspan=6, columnspan=3)
button1['state'] = tk.DISABLED
button2['state'] = tk.DISABLED
button3['state'] = tk.DISABLED
button4['state'] = tk.DISABLED
button5['state'] = tk.DISABLED
button6['state'] = tk.DISABLED
button7['state'] = tk.DISABLED
button8['state'] = tk.DISABLED
button9['state'] = tk.DISABLED
global score_o
score_o = score_o + 1
score_o_text = "score o: " + str(score_o)
tk.Label(root, text=score_o_text, font = ("Bell MT", 12), bg="#f0f0f0").grid(row=5, column=1, rowspan=6, columnspan=3)
global game_over
game_over = game_over + 1
#####################################
def check_winner():
if button1['text'] == 'x' and button2['text'] == 'x' and button3['text'] == 'x':
winner_x()
elif button4['text'] == 'x' and button5['text'] == 'x' and button6['text'] == 'x':
winner_x()
elif button7['text'] == 'x' and button8['text'] == 'x' and button9['text'] == 'x':
winner_x()
elif button1['text'] == 'o' and button2['text'] == 'o' and button3['text'] == 'o':
winner_o()
elif button4['text'] == 'o' and button5['text'] == 'o' and button6['text'] == 'o':
winner_o()
elif button7['text'] == 'o' and button8['text'] == 'o' and button9['text'] == 'o':
winner_o()
elif button1['text'] == 'x' and button4['text'] == 'x' and button7['text'] == 'x':
winner_x()
elif button2['text'] == 'x' and button5['text'] == 'x' and button8['text'] == 'x':
winner_x()
elif button3['text'] == 'x' and button6['text'] == 'x' and button9['text'] == 'x':
winner_x()
elif button1['text'] == 'o' and button4['text'] == 'o' and button7['text'] == 'o':
winner_o()
elif button2['text'] == 'o' and button5['text'] == 'o' and button8['text'] == 'o':
winner_o()
elif button3['text'] == 'o' and button6['text'] == 'o' and button9['text'] == 'o':
winner_o()
elif button1['text'] == 'x' and button5['text'] == 'x' and button9['text'] == 'x':
winner_x()
elif button3['text'] == 'x' and button5['text'] == 'x' and button7['text'] == 'x':
winner_x()
elif button1['text'] == 'o' and button5['text'] == 'o' and button9['text'] == 'o':
winner_o()
elif button3['text'] == 'o' and button5['text'] == 'o' and button7['text'] == 'o':
winner_o()
elif count == 9:
tk.Label(root, text="tie", font = ("Berlin Sans FB", 15), width=10, fg="white", bg="blue").grid(row=4, column=2, rowspan=6, columnspan=3)
global score_gelijk
score_gelijk = score_gelijk + 1
score_gelijk_text = "ties: " + str(score_gelijk)
tk.Label(root, text=score_gelijk_text, font = ("Bell MT", 12), bg="#f0f0f0").grid(row=5, column=2, rowspan=6, columnspan=3)
button1['state'] = tk.DISABLED
button2['state'] = tk.DISABLED
button3['state'] = tk.DISABLED
button4['state'] = tk.DISABLED
button5['state'] = tk.DISABLED
button6['state'] = tk.DISABLED
button7['state'] = tk.DISABLED
button8['state'] = tk.DISABLED
button9['state'] = tk.DISABLED
global game_over
game_over = game_over + 1
#####################################
def response1():
global count
count = count + 1
if button1['text'] == ' ':
if count % 2 == 0:
button1['text'] = 'x'
else:
button1['text'] = 'o'
else:
count = count - 1
check_winner()
#####################################
def response2():
global count
count = count + 1
if button2['text'] == ' ':
if count % 2 == 0:
button2['text'] = 'x'
else:
button2['text'] = 'o'
else:
count = count - 1
check_winner()
#####################################
def response3():
global count
count = count + 1
if button3['text'] == ' ':
if count % 2 == 0:
button3['text'] = 'x'
else:
button3['text'] = 'o'
else:
count = count - 1
check_winner()
#####################################
def response4():
global count
count = count + 1
if button4['text'] == ' ':
if count % 2 == 0:
button4['text'] = 'x'
else:
button4['text'] = 'o'
else:
count = count - 1
check_winner()
#####################################
def response5():
global count
count = count + 1
if button5['text'] == ' ':
if count % 2 == 0:
button5['text'] = 'x'
else:
button5['text'] = 'o'
else:
count = count - 1
check_winner()
#####################################
def response6():
global count
count = count + 1
if button6['text'] == ' ':
if count % 2 == 0:
button6['text'] = 'x'
else:
button6['text'] = 'o'
else:
count = count - 1
check_winner()
#####################################
def response7():
global count
count = count + 1
if button7['text'] == ' ':
if count % 2 == 0:
button7['text'] = 'x'
else:
button7['text'] = 'o'
else:
count = count - 1
check_winner()
#####################################
def response8():
global count
count = count + 1
if button8['text'] == ' ':
if count % 2 == 0:
button8['text'] = 'x'
else:
button8['text'] = 'o'
else:
count = count - 1
check_winner()
#####################################
def response9():
global count
count = count + 1
if button9['text'] == ' ':
if count % 2 == 0:
button9['text'] = 'x'
else:
button9['text'] = 'o'
else:
count = count - 1
check_winner()
#####################################
def response_reset():
button1['text'] = " "
button2['text'] = " "
button3['text'] = " "
button4['text'] = " "
button5['text'] = " "
button6['text'] = " "
button7['text'] = " "
button8['text'] = " "
button9['text'] = " "
tk.Label(root, width=30, bg="blue").grid(row=4, column=2, rowspan=6, columnspan=3)
global count
count = 0
button1['state'] = tk.NORMAL
button2['state'] = tk.NORMAL
button3['state'] = tk.NORMAL
button4['state'] = tk.NORMAL
button5['state'] = tk.NORMAL
button6['state'] = tk.NORMAL
button7['state'] = tk.NORMAL
button8['state'] = tk.NORMAL
button9['state'] = tk.NORMAL
global game_over
if game_over == 1:
game_over = game_over - 1
#########################################
def AI_turn():
    # unless the game is over
if game_over == 0:
        # horizontal
if button1['text'] == 'x' and button2['text'] == 'x' and button3['text'] == ' ':
response3()
elif button1['text'] == 'x' and button3['text'] == 'x' and button2['text'] == ' ':
response2()
elif button2['text'] == 'x' and button3['text'] == 'x' and button1['text'] == ' ':
response1()
elif button4['text'] == 'x' and button5['text'] == 'x' and button6['text'] == ' ':
response6()
elif button4['text'] == 'x' and button6['text'] == 'x' and button5['text'] == ' ':
response5()
elif button5['text'] == 'x' and button6['text'] == 'x' and button4['text'] == ' ':
response4()
elif button7['text'] == 'x' and button8['text'] == 'x' and button9['text'] == ' ':
response9()
elif button7['text'] == 'x' and button9['text'] == 'x' and button8['text'] == ' ':
response8()
elif button8['text'] == 'x' and button9['text'] == 'x' and button7['text'] == ' ':
response7()
        # vertical
elif button1['text'] == 'x' and button4['text'] == 'x' and button7['text'] == ' ':
response7()
elif button1['text'] == 'x' and button7['text'] == 'x' and button4['text'] == ' ':
response4()
elif button4['text'] == 'x' and button7['text'] == 'x' and button1['text'] == ' ':
response1()
elif button2['text'] == 'x' and button5['text'] == 'x' and button8['text'] == ' ':
response8()
elif button2['text'] == 'x' and button8['text'] == 'x' and button5['text'] == ' ':
response5()
elif button5['text'] == 'x' and button8['text'] == 'x' and button2['text'] == ' ':
response2()
elif button3['text'] == 'x' and button6['text'] == 'x' and button9['text'] == ' ':
response9()
elif button3['text'] == 'x' and button9['text'] == 'x' and button6['text'] == ' ':
response6()
elif button6['text'] == 'x' and button9['text'] == 'x' and button3['text'] == ' ':
response3()
        # diagonal
elif button1['text'] == 'x' and button5['text'] == 'x' and button9['text'] == ' ':
response9()
elif button1['text'] == 'x' and button9['text'] == 'x' and button5['text'] == ' ':
response5()
elif button5['text'] == 'x' and button9['text'] == 'x' and button1['text'] == ' ':
response1()
elif button3['text'] == 'x' and button5['text'] == 'x' and button7['text'] == ' ':
response7()
elif button3['text'] == 'x' and button7['text'] == 'x' and button5['text'] == ' ':
response5()
elif button5['text'] == 'x' and button7['text'] == 'x' and button3['text'] == ' ':
response3()
        # horizontal
elif button1['text'] == 'o' and button2['text'] == 'o' and button3['text'] == ' ':
response3()
elif button1['text'] == 'o' and button3['text'] == 'o' and button2['text'] == ' ':
response2()
elif button2['text'] == 'o' and button3['text'] == 'o' and button1['text'] == ' ':
response1()
elif button4['text'] == 'o' and button5['text'] == 'o' and button6['text'] == ' ':
response6()
elif button4['text'] == 'o' and button6['text'] == 'o' and button5['text'] == ' ':
response5()
elif button5['text'] == 'o' and button6['text'] == 'o' and button4['text'] == ' ':
response4()
elif button7['text'] == 'o' and button8['text'] == 'o' and button9['text'] == ' ':
response9()
elif button7['text'] == 'o' and button9['text'] == 'o' and button8['text'] == ' ':
response8()
elif button8['text'] == 'o' and button9['text'] == 'o' and button7['text'] == ' ':
response7()
        # vertical
elif button1['text'] == 'o' and button4['text'] == 'o' and button7['text'] == ' ':
response7()
elif button1['text'] == 'o' and button7['text'] == 'o' and button4['text'] == ' ':
response4()
elif button4['text'] == 'o' and button7['text'] == 'o' and button1['text'] == ' ':
response1()
elif button2['text'] == 'o' and button5['text'] == 'o' and button8['text'] == ' ':
response8()
elif button2['text'] == 'o' and button8['text'] == 'o' and button5['text'] == ' ':
response5()
elif button5['text'] == 'o' and button8['text'] == 'o' and button2['text'] == ' ':
response2()
elif button3['text'] == 'o' and button6['text'] == 'o' and button9['text'] == ' ':
response9()
elif button3['text'] == 'o' and button9['text'] == 'o' and button6['text'] == ' ':
response6()
elif button6['text'] == 'o' and button9['text'] == 'o' and button3['text'] == ' ':
response3()
        # diagonal
elif button1['text'] == 'o' and button5['text'] == 'o' and button9['text'] == ' ':
response9()
elif button1['text'] == 'o' and button9['text'] == 'o' and button5['text'] == ' ':
response5()
elif button5['text'] == 'o' and button9['text'] == 'o' and button1['text'] == ' ':
response1()
elif button3['text'] == 'o' and button5['text'] == 'o' and button7['text'] == ' ':
response7()
elif button3['text'] == 'o' and button7['text'] == 'o' and button5['text'] == ' ':
response5()
elif button5['text'] == 'o' and button7['text'] == 'o' and button3['text'] == ' ':
response3()
###############################
        # if the center square is free, play there
elif button5['text'] == ' ':
response5()
        # if bottom right is not a good option, take bottom left (scenario 1)
elif button1['text'] == 'o' and button6['text'] == 'o' and button7['text'] == ' ':
response7()
        # if bottom right is not a good option, take bottom left (scenario 2)
elif button1['text'] == 'o' and button8['text'] == 'o' and button7['text'] == ' ':
response7()
        # if bottom right is free, play there
elif button9['text'] == ' ':
response9()
        # if bottom left is free, play there
elif button7['text'] == ' ':
response7()
        # pick what's left
elif button1['text'] == ' ':
response1()
elif button2['text'] == ' ':
response2()
elif button3['text'] == ' ':
response3()
elif button4['text'] == ' ':
response4()
elif button6['text'] == ' ':
response6()
elif button8['text'] == ' ':
response8()
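    # Strategy above: first complete or block any two-in-a-row (for either
    # mark), then take the center, then the bottom corners, then whatever
    # square is left.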
#################################
def button1_response():
if button1['text'] == ' ':
response1()
AI_turn()
def button2_response():
if button2['text'] == ' ':
response2()
AI_turn()
def button3_response():
if button3['text'] == ' ':
response3()
AI_turn()
def button4_response():
if button4['text'] == ' ':
response4()
AI_turn()
def button5_response():
if button5['text'] == ' ':
response5()
AI_turn()
def button6_response():
if button6['text'] == ' ':
response6()
AI_turn()
def button7_response():
if button7['text'] == ' ':
response7()
AI_turn()
def button8_response():
if button8['text'] == ' ':
response8()
AI_turn()
def button9_response():
if button9['text'] == ' ':
response9()
AI_turn()
#####################################
root.mainloop()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-01-29 00:33:53
# @Author : Your Name (you@example.org)
# @Link : http://example.org
# @Version : $Id$
import scrapy,pytesseract,json
from scrapy import FormRequest,Request
from PIL import Image
from io import BytesIO
from scrapy.log import logger
class VerificationCodeSpider(scrapy.Spider):
name = 'VerificationCode'
allowed_domains = ['']
start_urls = ['']
def parse(self,response):
pass
    login_url = 'login page URL'
    user = 'username'
    password = 'password'
def start_requests(self):
yield Request(self.login_url,callback=self.parse_login,dont_filter=True)
    def parse_login(self,response): # this method both performs the login and downloads the captcha image
login_response = response.meta.get('login_response')
        # if response.meta['login_response'] exists, the current response is the
        # captcha image; otherwise it is the login page response
        if not login_response: # login page response: extract the captcha image URL
VC_url = response.css('label.field.prepend-icon img::attr(src)').extract_first()
VC_url = response.urljoin(VC_url)
yield Request(VC_url,
callback=self.parse_login,
meta = {'login_response':response},
dont_filter = True)
        else: # this is the captcha image response
fmdata = {
'email':self.user,
'password': self.password,
                'code':self.parse_VC_by_ocr(response.body) # response.body is the image's binary data
}
yield FormRequest.from_response(login_response,
callback=self.parse_login_ed,
formdata = fmdata,
dont_filter = True)
    #**********************************recognition via tesseract-ocr*******************************
def parse_VC_by_ocr(self,data):
img = Image.open(BytesIO(data))
        # data is response.body (binary data); to construct an Image object we wrap it in a file-like object (BytesIO)
img = img.convert(mode='L')
imgtext = pytesseract.image_to_string(img)
img.close()
return imgtext
    #**********************************recognition via an online service************************************
def parse_VC_by_net(self,data):
import requests,base64
url = 'http://ali-checkcode.showapi.com/checkcode'
        appcode = 'f94u2k5h5249850298450' # app code issued by the service to identify the caller
form ={}
        form['convert_to_jpg'] = '0' # do not convert to jpg
        form['img_base64'] = base64.b64encode(data) # base64-encode the image
        form['typeId'] = '4070' # captcha type; 4070 means 7 Chinese characters
        headers = {'Authorization': 'APPCODE ' + appcode}
response = requests.post(url,headers=headers,data=form)
res = response.json()
if res['showapi_res_code'] == 0:
return res['showapi_res_body']['Result']
return ''
    #**********************************manual recognition***************************************
def parse_VC_by_myself(self,data):
img = Image.open(BytesIO(data))
img.show()
        imgtext = input('Enter the captcha: ')
img.close()
return imgtext
#--------------------------------------------------------------------------------
def parse_login_ed(self,response):
        # check whether the login succeeded; if not, log in again
        info = json.loads(response.text) # the form response body is a JSON string with the auth result; convert to a dict and check the 'error' field
if info['error'] == '0':
            logger.info('Login succeeded :-)')
return super().start_requests()
else:
            logger.info('Login failed :-( logging in again...')
return self.start_requests()
|
from logging import warning
from api import gitlab
from utilities import types, validate
gitlab = gitlab.GitLab(types.Arguments().url)
def get_all(project_id, project_url):
merge_requests = []
details = gitlab.get_merge_requests(project_id)
if validate.api_result(details):
warning("[*] Found %s merge requests for project %s", len(details), project_url)
for item in details:
merge_requests.append(types.MergeRequest(item['iid'], item['web_url'], item['title']))
return merge_requests
def sniff_secrets(mr):
monitor = types.SecretsMonitor()
return monitor.sniff_secrets({mr.web_url: mr.description})
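# Hedged usage sketch (added for illustration): the project id and URL
# below are placeholders, not values from the original module.
if __name__ == '__main__':
    for mr in get_all(project_id=1, project_url='https://gitlab.example/group/project'):
        secrets = sniff_secrets(mr)
        if secrets:
            warning("[*] Potential secrets in %s: %s", mr.web_url, secrets)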
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
import django
django.setup()
from some.models import DeviceData
import socket
def saveData(data):
data = data.split(', ')
d = DeviceData()
d.step = DeviceData.objects.count() + 1
d.light = data[0]
d.temperature = data[1]
d.save()
def connect():
sock = socket.socket()
sock.bind(('', 9090))
    sock.listen(1)
    while True:
        conn, addr = sock.accept()
        data = conn.recv(1024)
        if data:
            saveData(data.decode())  # recv returns bytes; the model fields expect str
            conn.send(data.upper())
        conn.close()
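# Hedged client sketch (added for illustration, not part of the original
# script): sends one "light, temperature" reading to the server above and
# returns the upper-cased echo.
def send_reading(light, temperature, host='localhost', port=9090):
    client = socket.socket()
    client.connect((host, port))
    client.send('{}, {}'.format(light, temperature).encode())
    reply = client.recv(1024)
    client.close()
    return reply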
connect()
|
'''
Created on Nov 19, 2010
@author: Jason Huang
'''
from google.appengine.ext import db
class Marker(db.Model):
    '''
    A map marker stored in the App Engine datastore.
    '''
    type = db.StringProperty(choices=('start', 'dest', 'waypoint', 'normal'), required=True)
latitude = db.FloatProperty(required=True)
longitude = db.FloatProperty(required=True)
description = db.TextProperty()
|
from flask import Flask, render_template
from plot import make_plot
app = Flask(__name__)
@app.route("/")
def render_plot():
return render_template("plotly.html", plot_json=make_plot())
if __name__ == "__main__":
app.run(debug=True)
|
import copy
import os
import pickle
import warnings
import numpy as np
import scipy.stats as st
import pandas as pd
import xgboost as xgb
from ensemble.core import EnsembleBaseModel
from ensemble.modelCV import SVRCV, XGBRCV
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression, RidgeCV
from sklearn.model_selection import train_test_split
warnings.filterwarnings('ignore')
class EnsembleRidge(EnsembleBaseModel):
"""Ridge with Random Patches and Random Subspaces
"""
def __init__(self, n_models, col_ratio, row_ratio, scale):
super().__init__(n_models, col_ratio, row_ratio, scale)
def get_model(self):
return RidgeCV()
class EnsembleRFR(EnsembleBaseModel):
"""Ridge with Random Patches and Random Subspaces
"""
def __init__(self, n_models, col_ratio, row_ratio, scale):
super().__init__(n_models, col_ratio, row_ratio, scale)
def get_model(self):
n_estimators = np.random.randint(80, 120)
max_depth = np.random.randint(5, 20)
return RandomForestRegressor(n_estimators=n_estimators,
max_depth=max_depth)
class EnsembleRFC(EnsembleBaseModel):
"""Ridge with Random Patches and Random Subspaces
"""
def __init__(self, n_models, col_ratio, row_ratio, scale):
super().__init__(n_models, col_ratio, row_ratio, scale)
def get_model(self):
n_estimators = np.random.randint(80, 120)
max_depth = np.random.randint(5, 20)
return RandomForestClassifier(n_estimators=n_estimators,
max_depth=max_depth)
class EnsembleGBRCV(EnsembleBaseModel):
"""Ridge with Random Patches and Random Subspaces
"""
def __init__(self, n_models, col_ratio, row_ratio, scale):
super().__init__(n_models, col_ratio, row_ratio, scale)
def get_model(self):
return XGBRCV(n_trials=100)
class EnsembleGBR(EnsembleBaseModel):
"""Ridge with Random Patches and Random Subspaces
"""
def __init__(self, n_models, col_ratio, row_ratio, scale):
super().__init__(n_models, col_ratio, row_ratio, scale)
def get_model(self):
n_estimators = np.random.randint(80, 120)
max_depth = np.random.randint(5, 20)
return xgb.XGBRegressor(n_estimators=n_estimators,
max_depth=max_depth, silent=1)
class EnsembleSVR(EnsembleBaseModel):
"""Ridge with Random Patches and Random Subspaces
"""
def __init__(self, n_models, col_ratio, row_ratio, scale):
super().__init__(n_models, col_ratio, row_ratio, scale)
def get_model(self):
return SVRCV(n_trials=100)
class EnsembleLogit(EnsembleBaseModel):
"""Ridge with Random Patches and Random Subspaces
"""
def __init__(self, n_models, col_ratio, row_ratio, scale):
super().__init__(n_models, col_ratio, row_ratio, scale)
def get_model(self):
return LogisticRegression()
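# Hedged aside (added for illustration, not part of the original module):
# the same "random patches" idea -- each base model fits a row and column
# subsample -- is available off the shelf in scikit-learn's bagging
# ensembles.
from sklearn.ensemble import BaggingRegressor
from sklearn.linear_model import Ridge

def make_random_patches_ridge(n_models=10, col_ratio=0.8, row_ratio=0.8):
    # float max_samples / max_features give fractional row / column
    # subsampling per estimator
    return BaggingRegressor(Ridge(), n_estimators=n_models,
                            max_samples=row_ratio, max_features=col_ratio)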
|
import math
import numpy as np
class DOF3:
def __init__(self, person, ax):
self.ax = ax
self.person = person
self.head_shift = 5
self.isHidden = True
self.prev_dot_head = None
if ax is not None:
self.center_line, = self.ax.plot(-1000, -1000, color='r', linewidth=2.0)
self.left_line, = self.ax.plot(-1000, -1000, color='r', linewidth=2.0)
self.right_line, = self.ax.plot(-1000, -1000, color='r', linewidth=2.0)
self.mid_dot, = self.ax.plot(-1000, -1000, '.', color='c', linewidth=2.0)
self.head_dot, = self.ax.plot(-1000, -1000, '.', color='c', linewidth=2.0)
self.left_leg_dot, = self.ax.plot(-1000, -1000, '.', color='c', linewidth=2.0)
self.right_leg_dot, = self.ax.plot(-1000, -1000, '.', color='c', linewidth=2.0)
def hide(self):
if not self.isHidden:
self.mid_dot.set_data(-1000, -1000)
self.head_dot.set_data(-1000, -1000)
self.left_leg_dot.set_data(-1000, -1000)
self.right_leg_dot.set_data(-1000, -1000)
self.center_line.set_data(-1000, -1000)
self.left_line.set_data(-1000, -1000)
self.right_line.set_data(-1000, -1000)
self.isHidden = True
def angles(self, frame):
center_x, center_y = self.center_calc(frame)
head_x, head_y = self.head_calc(frame)
left_leg_x, left_leg_y = self.left_leg(frame)
right_leg_x, right_leg_y = self.right_leg(frame)
# angles
angles = []
# center - head
body_angle = self.calc_angle(center_x, center_y, head_x, head_y)
angles.append(body_angle)
# center - left_leg
left_leg_angle = self.calc_angle(center_x, center_y, left_leg_x, left_leg_y)
angles.append(left_leg_angle)
# center - right_leg
right_leg_angle = self.calc_angle(center_x, center_y, right_leg_x, right_leg_y)
angles.append(right_leg_angle)
return angles
def show(self, frame):
# dots
self.isHidden = False
center_x, center_y = self.center_calc(frame)
self.mid_dot.set_data(center_x, center_y)
head_x, head_y = self.head_calc(frame)
self.head_dot.set_data(head_x, head_y)
left_leg_x, left_leg_y = self.left_leg(frame)
self.left_leg_dot.set_data(left_leg_x, left_leg_y)
right_leg_x, right_leg_y = self.right_leg(frame)
self.right_leg_dot.set_data(right_leg_x, right_leg_y)
# lines
self.center_line.set_data([head_x, center_x], [head_y, center_y])
self.left_line.set_data([left_leg_x, center_x], [left_leg_y, center_y])
self.right_line.set_data([right_leg_x, center_x], [right_leg_y, center_y])
# angles
angles = []
titles = []
# center - head
body_angle = self.calc_angle(center_x, center_y, head_x, head_y)
angles.append(body_angle)
titles.append("Тело: ")
# center - left_leg
left_leg_angle = self.calc_angle(center_x, center_y, left_leg_x, left_leg_y)
angles.append(left_leg_angle)
titles.append("Левая нога: ")
# center - right_leg
right_leg_angle = self.calc_angle(center_x, center_y, right_leg_x, right_leg_y)
angles.append(right_leg_angle)
titles.append("Правая нога: ")
return angles, titles
def calc_angle(self, part_from_x, part_from_y, part_to_x, part_to_y):
straight_line = [abs(part_from_y - part_to_y), 0]
cur_line = [part_to_x - part_from_x, part_to_y - part_from_y]
if (part_to_y - part_from_y) < 0:
return 360 - angle(straight_line, cur_line)
else:
return angle(straight_line, cur_line)
def center_calc(self, frame):
height = self.person.areas[frame] / self.person.widths[frame]
mid_y = int(height * 0.53)
line = self.person.figures[frame][mid_y]
count_ones = 0
sum_ones = 0
for i in range(len(line)):
if line[i] == 1:
sum_ones += i
count_ones += 1
mid_x = round(sum_ones / count_ones)
return mid_x, mid_y
def head_calc(self, frame):
head_line = self.person.figures[frame][self.head_shift]
height = self.person.areas[frame] / self.person.widths[frame]
count_ones = 0
sum_ones = 0
for i in range(len(head_line)):
if head_line[i] == 1:
sum_ones += i
count_ones += 1
        if count_ones == 0:
            if self.prev_dot_head is not None:
                head_center = self.prev_dot_head[0]  # reuse the last known head x
            else:
                head_center = int(len(head_line) / 2)
else:
head_center = round(sum_ones / count_ones)
self.prev_dot_head = [head_center, height]
return head_center, height
def left_leg(self, frame):
figure = self.person.figures[frame]
height = int((self.person.areas[frame] / self.person.widths[frame]) / 4)
width = int(self.person.widths[frame] / 2)
dots_x = []
dot_y = -1
for i in reversed(range(len(figure) - height, len(figure))):
for j in range(width):
if figure[i][j] == 1:
dot_y = len(figure) - i
dots_x.append(j)
if len(dots_x) > 0:
dot_x = np.mean(dots_x)
return dot_x, dot_y
return width / 2, 0
def right_leg(self, frame):
figure = self.person.figures[frame]
height = int((self.person.areas[frame] / self.person.widths[frame]) / 4)
width = int(self.person.widths[frame] / 2)
dots_x = []
dot_y = -1
for i in reversed(range(len(figure) - height, len(figure))):
for j in reversed(range(width, len(figure[i]))):
if figure[i][j] == 1:
dot_y = len(figure) - i
dots_x.append(j)
if len(dots_x) > 0:
dot_x = np.mean(dots_x)
return dot_x, dot_y
return 3 * width / 2, 0
def angle(v1, v2):
def dotproduct(v1, v2):
return sum((a * b) for a, b in zip(v1, v2))
def length(v):
return math.sqrt(dotproduct(v, v))
return math.degrees(math.acos(dotproduct(v1, v2) / (length(v1) * length(v2))))
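# Quick sanity check of the vector-angle helper (added for illustration,
# not part of the original module).
if __name__ == '__main__':
    print(angle([1, 0], [0, 1]))  # perpendicular unit vectors -> 90.0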
|
from bs4 import BeautifulSoup
import requests
import re
import pandas as pd
with open("all_speakers.htm", 'rb') as f:
lines = f.readlines()
soup = BeautifulSoup("".join(lines), 'html.parser')
out = []
for div in soup.find_all(class_='lumen-tile__title'):
out.append((div.text.strip(), div.a['href']))
df = pd.DataFrame(out, columns=['name', 'url'])
def parse(s):
    p = re.compile(r'\([0-9]+\)')
try: m = p.search(s).group(0)[1:-1]
except Exception: m = 1
return m
df['count'] = df.name.apply(parse).astype(int)
df['name_trimmed'] = df.name.apply(lambda s: s.split('(')[0].strip())
df = df.sort_values('count', ascending=False)
df = df.drop_duplicates('name_trimmed')
df.head(10).to_pickle("top10.pdpkl")
|
import discord
class CustomClient(discord.Client):
async def on_ready(self):
print(f'{self.user} has connected to Discord!')
|
from keras.optimizers import *
from dataset import Dataset
from metrics import iou_metric_all, iou_metric_fronts, iou_metric_hot, iou_metric_cold, \
iou_metric_stationary, iou_metric_occlusion, mixed_loss_gen
from deeplabv3plus import Deeplabv3
from utils import load_indexing, class_weights, trained_models
from config import Config
config = Config(
# model = FPN(backbone_name="resnet34", input_shape=(None, None, 5), classes=5, encoder_weights=None)
# model = Unet(backbone_name="resnet34", encoder_weights=None, input_shape=(256, 256, 5))
# model = Linknet(backbone_name="resnet18", input_shape=(256, 256, 5), classes=5, encoder_weights=None, activation="softmax")
# model = PSPNet50(input_shape=(256, 256, 5), n_labels=5)
# model=Deeplabv3(weights=None, input_shape=(256, 256, 5), classes=5),
model=trained_models['deeplab'],
optimizer=Adam(lr=5e-4),
logdir="/mnt/ldm_vol_DESKTOP-DSIGH25-Dg0_Volume1/DiplomLogs/logs_weighted/deeplab_mixed_loss/",
class_weights=class_weights,
in_shape=(256, 256),
n_classes=5,
varnames=["air", "mslet", "shum", "uwnd", "vwnd"],
filename="/mnt/d4dca524-e11f-4923-8fbe-6066e6efd2fd/NARR/narr.nc",
truth_filename="/mnt/d4dca524-e11f-4923-8fbe-6066e6efd2fd/NARR/plotted_fronts_fat.nc",
batch_size=16,
binary=True,
regularizers=None,
augment=False,
metrics=[
iou_metric_all,
iou_metric_fronts,
iou_metric_hot,
iou_metric_cold,
iou_metric_stationary,
iou_metric_occlusion,
# iou_metric_binary
],
# loss=weighted_categorical_crossentropy(Config.class_weights),
# loss=weighted_jaccard_loss,
loss=mixed_loss_gen(class_weights),
recompile=True
)
train, val, test = load_indexing("indexing.npz")
with Dataset(train, config) as train_dataset, \
Dataset(val, config) as val_dataset, \
Dataset(test, config) as test_dataset:
print(config.model.keras_model.metrics_names, config.model.evaluate(test_dataset))
|
# -*- coding: utf-8 -*-
import yaml
from django.http import HttpResponse
from django.views import View
class GetInventoryView(View):
def get(self, request, *args, **kwargs):
        # `inventory` is expected to come from the surrounding project;
        # it is not defined or imported in this file
        node_list = inventory(kwargs.get('master_id'))
result = {}
for node_name, node in node_list.items():
service_class = []
role_class = []
result[node_name] = {
'roles': role_class,
'services': service_class,
}
return HttpResponse(yaml.dump(result))
class GetHostDataView(View):
def get(self, request, *args, **kwargs):
node_list = inventory(kwargs.get('master_id'))
node = node_list[kwargs.get('minion_id')]
node.pop('__reclass__')
if '_param' in node.get('parameters'):
node.get('parameters').pop('_param')
return HttpResponse(yaml.dump(node.get('parameters')))
|
#cloud-config
packages:
- python
- python-pip
- aws-cli
- unzip
- wget
write_files:
- path: /tmp/tempcloudwatch/config.json
content: |
{
"metrics": {
"append_dimensions":{
"InstanceId":"${aws:InstanceId}"
},
"aggregation_dimensions": [
["InstanceId"],
["AutoScalingGroupName"],
],
"metrics_collected": {
"mem": {
"measurement": [
"mem_used_percent"
],
"metrics_collection_interval": 60
}
}
}
}
- path: /usr/bin/rename_instance.py
content: |
#!/usr/bin/python
import boto.ec2
import boto3
import requests
import random
import time
import subprocess
import json
import os
def create_record(name, record_type, target, zone, client):
client.change_resource_record_sets(
HostedZoneId=zone,
ChangeBatch={
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': {
'Name': name,
'Type': record_type,
'TTL': 60,
'ResourceRecords': [
{
'Value': target
},
]
}
},
]
}
)
time.sleep(random.randrange(5, 30))
instance_data = json.loads((requests.get("http://169.254.169.254/latest/dynamic/instance-identity/document")).content)
conn = boto.ec2.connect_to_region(instance_data['region'])
r53 = boto3.client('route53', instance_data['region'])
priv_ip = instance_data['privateIp']
currentReservation = conn.get_all_instances(instance_ids=instance_data['instanceId'])
currentInstance = [i for r in currentReservation for i in r.instances]
for inst in currentInstance:
instApp = inst.tags['Application']
instType = inst.tags['csNomadClass']
instEnvironment = inst.tags['Environment']
count = 0
instances = {}
allReservations = conn.get_all_instances()
for res in allReservations:
for inst in res.instances:
if 'csNomadClass' in inst.tags:
if inst.tags['csNomadClass'] == instType and \
inst.tags['Environment'] == instEnvironment and \
inst.state == 'running':
instances[inst.id] = inst.tags['Name']
count += 1
for x in range(1, count + 1):
name = instEnvironment + "-" + instApp + "-" + instType + "-" + str(x)
if name not in instances.values():
print "renaming -%s to %s" % (currentInstance, name)
break
for inst in currentInstance:
inst.add_tag('Name', name)
poutput = subprocess.check_output('hostname ' + name + '.cs.int', shell=True)
sed_cmd = ("sed -i 's/HOSTNAME=.*/HOSTNAME={}.cs.int/' "
"/etc/sysconfig/network").format(name)
poutput = subprocess.check_output(sed_cmd, shell=True)
create_record('{}.cs.int'.format(name), "A", priv_ip, "Z1CTUH3DX45339", r53)
create_record('{}.cs.int'.format(currentInstance[0]), "A", priv_ip, "Z1CTUH3DX45339", r53)
create_record("{}.{}.30.172.in-addr.arpa.".format(priv_ip.split(".")[3], priv_ip.split(".")[2]),
"PTR", '{}.cs.int'.format(name), "Z31KWMFMFZK6YQ", r53)
- path: /usr/bin/allocate_eip.py
content: |
#!/usr/bin/python
import requests
import boto3
import json
instance_data = requests.get("http://169.254.169.254/latest/dynamic/instance-identity/document")
response_json = instance_data.json()
region = response_json.get('region')
instance_id = response_json.get('instanceId')
ec2 = boto3.client('ec2', region_name=region)
try:
allocate_eip = ec2.associate_address(AllocationId='eipalloc-0f7526dce6e0f1db8', InstanceId=instance_id)
except:
print("Associate IP failed")
try:
create_tag = ec2.create_tags(Resources=[instance_id], Tags=[{'Key':'ElasticIp', 'Value':'eipalloc-0f7526dce6e0f1db8'}])
except:
print("Create tag failed")
runcmd:
- [ pip, install, boto3 ]
- [ python, /usr/bin/rename_instance.py ]
- [ sleep, 15 ]
- [ python, /usr/bin/allocate_eip.py ]
- [ service, sensu-client, stop ]
- [ salt-call, saltutil.sync_all ]
- [ salt-call, write_ec2tags.write_to_disk ]
- [ salt-call, write_ec2tags.write_minion_id ]
- [ salt-call, saltutil.revoke_auth ]
- [ service, salt-minion, stop ]
- [ rm, -rf, /etc/salt/pki/minion ]
- [ rm, -rf, /opt/consul/data ]
- [ rm, -rf, /var/lib/nomad/tmp/client ]
- [ mv, /etc/salt/minion_id.tmp, /etc/salt/minion_id ]
- [ cd, /tmp/tempcloudwatch ]
- [ wget, "https://s3.amazonaws.com/amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip" ]
- [ unzip, AmazonCloudWatchAgent.zip ]
- [ sudo, ./install.sh ]
- [ sudo, /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl, -a, fetch-config, -m, ec2, -c, "file:config.json", -s ]
- [ service, salt-minion, start ]
- [ sleep, 10 ]
- [ salt-call, state.highstate ]
|
ID_COLS = ['CountryName',
'RegionName',
'Date']
#INDICES = ['ConfirmedCases']
INDICES = []
# Which IPs to choose?
MY_IPS = ['C1_School closing',
'C2_Workplace closing',
'C3_Cancel public events',
'C4_Restrictions on gatherings',
'C5_Close public transport',
'C6_Stay at home requirements',
'C7_Restrictions on internal movement',
'C8_International travel controls',
'H1_Public information campaigns',
'H2_Testing policy',
'H3_Contact tracing',
'H6_Facial Coverings']
|
# -*- coding: utf-8 -*-
"""
Define classifier properties and operations
@author: peter
"""
#import time
class Weak_Classifier(object):
def __init__(self, haar, images, weights):
self.feature = haar
self.images = images
self.weights = weights
self.polarity, self.threshold, self.feature_vals = self.get_best_threshold()
self.predictions, self.error = self.predict()
self.beta = self.error / ( 1 - self.error )
# Train weak classifiers with generated features and define best threshold
# for each classifiers
def get_best_threshold(self):
# Record feature values from input images
haar_vals, feature_vals = [], []
        # Get sum of weights of positive / negative samples
total_pos, total_neg = 0.0, 0.0
for img, weight in zip( self.images, self.weights ):
haar_vals.append ( (img.isPositive , weight, self.feature.get_feature_val( img.integral ) ) )
feature_vals.append( self.feature.get_feature_val( img.integral ) )
if img.isPositive: total_pos += weight
else: total_neg += weight
# sort haar features by value
haar_vals = sorted( haar_vals, key = lambda x: x[2] )
# Loop over threshold values and get the one with smallest error
smallest_error, best_threshold = float('inf') , 0.0
# Get sum of positive / negative sample weights
sum_pos, sum_neg, polarity = 0.0, 0.0, 1
for isPositive, weight, threshold in haar_vals:
# minimum_error = min((S+) + (T-) - (S-), (S-) + (T+) - (S+))
if isPositive:
sum_pos += weight
this_error = sum_pos + total_neg - sum_neg
if this_error < smallest_error:
smallest_error = this_error
best_threshold = threshold
polarity = 1
else:
sum_neg += weight
this_error = sum_neg + total_pos - sum_pos
if this_error < smallest_error:
smallest_error = this_error
best_threshold = threshold
polarity = -1
return polarity, best_threshold, feature_vals
# Return a boolean whether the sample being classified correctly by this classifier
def isHit(self, feature_val ):
return self.polarity * feature_val < self.polarity * self.threshold
# Predict over samples and get accumulation of error ( 0.0 <= err <= 1.0 ) of this classifier
def predict(self):
predictions, sum_error = [], 0.0
for feature_val, weight in zip(self.feature_vals, self.weights):
# While current sample image predicted successfully
if self.isHit(feature_val):
predictions.append(True)
# While not, increment current sample weight
else:
predictions.append(False)
sum_error += weight
# Test
#print(sum_error)
#time.sleep(0.01)
# Test
return predictions, sum_error
    # Return updated weights for the next training iteration
def get_updated_weights(self):
new_weights = []
for predict, weight in zip( self.predictions, self.weights ):
# weight * beta if classified correctly
if predict:
new_weights.append( weight * self.beta )
# keep weight value if not
else:
new_weights.append( weight )
return new_weights
'''
class Strong_Classifier(object):
def __init__(self, cascade):
self.weak_classifiers = cascade
'''
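# Hedged aside (added for illustration, not part of the original module):
# the weight update above in isolation, plus the renormalization step an
# AdaBoost loop applies between rounds -- correctly classified samples
# are scaled by beta = error / (1 - error), then the distribution is
# rescaled to sum to 1.
def adaboost_reweight(weights, correct, error):
    beta = error / (1.0 - error)
    new_weights = [w * beta if ok else w for w, ok in zip(weights, correct)]
    total = sum(new_weights)
    return [w / total for w in new_weights]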
|
#3 4 6
#5 1 2 3 4
def fun(arr, k):
    """Return the first element that is at least as large as each of the
    next k elements; if no such element exists, return the maximum of the
    trailing part of the array."""
n = len(arr)
for i in range(n):
maxUntilNow = arr[i]
if(i<n-k):
for j in range(i+1,i+k+1):
if(arr[j]>arr[i]):
maxUntilNow = arr[j]
if(maxUntilNow == arr[i]):
return arr[i]
else:
for j in range(i,n):
if(arr[j]>maxUntilNow):
maxUntilNow = arr[j]
return maxUntilNow
arr = [2,1,5,4,3,10]
arr1 = [3,1,2]
print(fun(arr1,2))
|
"""Training methods for rhasspyfuzzywuzzy"""
import logging
import typing
from collections import defaultdict
import networkx as nx
import rapidfuzz.utils as fuzz_utils
import rhasspynlu
from .const import ExamplesType
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
def train(intent_graph: nx.DiGraph) -> ExamplesType:
"""Generate examples from intent graph."""
# Generate all possible intents
_LOGGER.debug("Generating examples")
examples: ExamplesType = defaultdict(dict)
for intent_name, words, path in generate_examples(intent_graph):
sentence = fuzz_utils.default_process(" ".join(words))
examples[intent_name][sentence] = path
_LOGGER.debug("Examples generated")
return examples
# -----------------------------------------------------------------------------
def generate_examples(
intent_graph: nx.DiGraph,
) -> typing.Iterable[typing.Tuple[str, typing.List[str], typing.List[int]]]:
"""Generate all possible sentences/paths from an intent graph."""
n_data = intent_graph.nodes(data=True)
# Get start/end nodes for graph
start_node, end_node = rhasspynlu.jsgf_graph.get_start_end_nodes(intent_graph)
assert (start_node is not None) and (
end_node is not None
), "Missing start/end node(s)"
# Generate all sentences/paths
paths = nx.all_simple_paths(intent_graph, start_node, end_node)
for path in paths:
assert len(path) > 2
# First edge has intent name (__label__INTENT)
olabel = intent_graph.edges[(path[0], path[1])]["olabel"]
assert olabel.startswith("__label__")
intent_name = olabel[9:]
sentence = []
for node in path:
word = n_data[node].get("word")
if word:
sentence.append(word)
yield (intent_name, sentence, path)
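# Hedged usage sketch (comment only; the ini-grammar helpers are assumed
# from rhasspynlu's public API, not defined in this module):
#
#     import rhasspynlu
#     graph = rhasspynlu.intents_to_graph(
#         rhasspynlu.parse_ini("[GetTime]\nwhat time is it")
#     )
#     examples = train(graph)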
|
class MovieRepository:
def alreadyNotified(self):
raise NotImplementedError("You're calling an abstract class!")
def add(self, movie):
raise NotImplementedError("You're calling an abstract class!")
class PrintMovieRepository(MovieRepository):
def alreadyNotified(self):
print "(asked for alreadyNotified)"
return []
def add(self, movie):
print "Add to repo:\t[" + movie.title + "]"
|
# Generated by Django 3.2 on 2021-07-12 16:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('film', '0013_auto_20210712_1908'),
]
operations = [
migrations.AddField(
model_name='film',
name='actor',
field=models.ManyToManyField(through='film.acotors_film', to='film.actors'),
),
]
|
import os
import re
import sys
from ete3 import Tree
def read_tree(tree):
lines = open(tree).readlines()
for line in lines:
if (not line.startswith(">")):
return Tree(line, format=1)
return None
def get_title(name):
split = name.split("_")
for i in range(0, len(split)):
split[i] = split[i].title()
return "UUU".join(split)
def title_node_names(input_tree, output_tree):
tree = read_tree(input_tree)
for leaf in tree.get_leaves():
leaf.name = get_title(leaf.name)
tree.write(outfile = output_tree)
if (__name__ == "__main__"):
if (len(sys.argv) != 3):
print("Syntax: python " + os.path.basename(__file__) + " input_tree output_tree")
exit(1)
title_node_names(sys.argv[1], sys.argv[2])
|
number = int(input())
last = []
def geacha(n):
    """Cumulative count of fractions on the first n zigzag diagonals."""
    if n == 1:
        return 1
    else:
        return n + last[n-2]
i = 1
while True:
last.append(geacha(i))
if number <= last[-1]:
break
i += 1
if number > 1:
start = last[i-2]+1
if i % 2 == 0:
boonja = 1 + number - start
boonmo = i - number + start
else:
boonja = i - number + start
boonmo = 1 + number - start
else:
boonja = 1
boonmo = 1
print(str(boonja)+'/'+str(boonmo),end='')
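# Worked example (added for illustration): for number = 6 the loop stops
# at diagonal i = 3 (cumulative counts 1, 3, 6) and start = 4; since i is
# odd, boonja = 3 - 6 + 4 = 1 and boonmo = 1 + 6 - 4 = 3, printing 1/3 --
# the 6th fraction in the zigzag enumeration 1/1, 1/2, 2/1, 3/1, 2/2, 1/3.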
|
import urllib2
import json
import numpy as np
def get_forecast():
f = urllib2.urlopen('http://api.wunderground.com/api/40c1e03239029f36/forecast/q/RI/Providence.json')
json_string = f.read()
parsed_json = json.loads(json_string)
windspeeds = []
for i in parsed_json['forecast']['simpleforecast']['forecastday']:
wind_mph = i['avewind']['mph']
print wind_mph
snow_allday = i['snow_allday']['in']
print snow_allday
high_temp = i['high']['fahrenheit']
print high_temp
low_temp = i['low']['fahrenheit']
print low_temp
windspeeds.append(float(wind_mph))
f.close()
return np.array(windspeeds)
|
# -*- coding: utf-8 -*-
import datetime
from django.test import TestCase
from django.contrib.auth.models import User
from todo.models import Chain, Task
from . import factories
class TaskTest(TestCase):
def setUp(self):
factories.make_fixtures()
        # Employees.
self.manager = User.objects.get(username='alexander')
self.designer = User.objects.get(username='kazimir')
self.programmer = User.objects.get(username='ada')
class ActualStatusTest(TaskTest):
"""Тестирует определение статуса задачи."""
def test_first_task_wait(self):
"""Тестирует статус WAIT у первой задачи.
Задача стоит первой в цепочке и дата начала работы над цепочкой еще
не наступила. Цепочка начнет работать через 1 день, на дизайн выделено
3 дня.
"""
today = datetime.date.today()
chain_start_date = today + datetime.timedelta(days=1)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline = chain_start_date + datetime.timedelta(days=3)
first_task = Task.objects.create(worker=self.designer, task='Design',
deadline=deadline, chain=chain)
self.assertEqual(first_task.actual_status(), Task.WAIT_STATUS)
def test_prev_task_not_done(self):
"""Тестирует статус WAIT, проверяя статус предыдущей задачи.
Статус предыдущей задачи не должен быть DONE. Цепочка начала работать 1
день назад, на дизайн выделено 3 дня, задача выполняется второй день.
Программист ожидает результы работы дизайнера через 1 день
(послезавтра).
"""
today = datetime.date.today()
chain_start_date = today - datetime.timedelta(days=1)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline_first_task = chain_start_date + datetime.timedelta(days=3)
Task.objects.create(worker=self.designer, task='Design', chain=chain,
deadline=deadline_first_task)
deadline_second_task = deadline_first_task + datetime.timedelta(days=2)
second_task = Task.objects.create(worker=self.programmer,
task='Programming',
deadline=deadline_second_task,
chain=chain)
self.assertEqual(second_task.actual_status(), Task.WAIT_STATUS)
def test_first_task_work(self):
"""Тестирует статус WORK у первой задачи.
Задача стоит первой в цепочке и наступила дата начала работы над
цепочкой. Цепочка начала работать 1 день назад, на дизайн выделено
3 дня.
"""
today = datetime.date.today()
chain_start_date = today - datetime.timedelta(days=1)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline = chain_start_date + datetime.timedelta(days=3)
first_task = Task.objects.create(worker=self.designer, task='Design',
deadline=deadline, chain=chain)
self.assertEqual(first_task.actual_status(), Task.WORK_STATUS)
def test_prev_task_done(self):
"""Тестирует статус WORK, проверяя статус предыдущей задачи.
Статус предыдущей задачи должен быть DONE. Цепочка начала работать 2
дня назад, на дизайн выделено 3 дня, задача была выполнена за 2 дня.
Программист досрочно (на 1 день раньше) получил результаты работы
дизайнера.
"""
today = datetime.date.today()
chain_start_date = today - datetime.timedelta(days=2)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline_first_task = chain_start_date + datetime.timedelta(days=3)
Task.objects.create(worker=self.designer, task='Design', chain=chain,
deadline=deadline_first_task, finish_date=today,
status=Task.DONE_STATUS)
deadline_second_task = deadline_first_task + datetime.timedelta(days=2)
second_task = Task.objects.create(worker=self.programmer,
task='Programming',
deadline=deadline_second_task,
chain=chain)
self.assertEqual(second_task.actual_status(), Task.WORK_STATUS)
class StartDateTest(TaskTest):
"""Тестирует определение даты начала работы над задачей."""
def test_first_task(self):
"""Тестирует дату начала работы первой задачи.
Дата начала первой задачи совпадает с датой начала цепочки. Это условие
верно для задач с любым статусом.
"""
today = datetime.date.today()
chain_start_date = today + datetime.timedelta(days=1)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline = chain_start_date + datetime.timedelta(days=3)
first_task = Task.objects.create(worker=self.designer, task='Design',
deadline=deadline, chain=chain)
self.assertEqual(first_task.start_date(), chain.start_date)
def test_wait(self):
"""Тестирует дату начала работы задачи со статусом WAIT.
Дата начала совпадает с датой дедлайна предыдущей задачи (дедлайн
не просрочен). Предыдущая задача может иметь статус WAIT, WORK, STOP.
"""
today = datetime.date.today()
chain_start_date = today - datetime.timedelta(days=1)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline_first_task = chain_start_date + datetime.timedelta(days=3)
Task.objects.create(worker=self.designer, task='Design', chain=chain,
deadline=deadline_first_task)
deadline_second_task = deadline_first_task + datetime.timedelta(days=2)
Task.objects.create(worker=self.programmer, task='Programming',
deadline=deadline_second_task, chain=chain)
first_task = Task.objects.get(task='Design')
second_task = Task.objects.get(task='Programming')
self.assertEqual(second_task.start_date(), first_task.deadline)
def test_work(self):
"""Тестирует дату начала работы задачи со статусом WORK.
Дата начала задачи наступает на следующий день после окончания
предыдущей задачи (DONE). Это условие верно и для задач со статусом
DONE или STOP.
"""
today = datetime.date.today()
chain_start_date = today - datetime.timedelta(days=2)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline_first_task = chain_start_date + datetime.timedelta(days=3)
Task.objects.create(worker=self.designer, task='Design', chain=chain,
deadline=deadline_first_task, finish_date=today,
status=Task.DONE_STATUS)
deadline_second_task = deadline_first_task + datetime.timedelta(days=2)
Task.objects.create(worker=self.programmer, task='Programming',
deadline=deadline_second_task, chain=chain)
design_finish = Task.objects.get(task='Design').finish_date
prog_start = Task.objects.get(task='Programming').start_date()
self.assertEqual(prog_start, design_finish + datetime.timedelta(1))
def test_unpredictable(self):
"""Тестирует непрогнозируемую дату начала работы задачи.
Статус задачи WAIT, предыдущая задача превысила дедлайн. Предыдущая
задача может иметь статус WAIT, WORK, STOP.
"""
today = datetime.date.today()
chain_start_date = today - datetime.timedelta(days=10)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline_first_task = chain_start_date + datetime.timedelta(days=3)
Task.objects.create(worker=self.designer, task='Design', chain=chain,
deadline=deadline_first_task)
deadline_second_task = deadline_first_task + datetime.timedelta(days=2)
Task.objects.create(worker=self.programmer, task='Programming',
deadline=deadline_second_task, chain=chain)
second_task = Task.objects.get(task='Programming')
self.assertEqual(second_task.start_date(), None)
class DeadlineDaysTest(TaskTest):
"""Тестирует определение количества дней до дедлайна и после него."""
def test_before_deadline(self):
"""Тестирует случай, когда дедлайн еще не наступил.
Задача работает второй день, на выполнение отведено 3 дня.
"""
today = datetime.date.today()
chain_start_date = today - datetime.timedelta(days=1)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline = chain_start_date + datetime.timedelta(days=3)
first_task = Task.objects.create(worker=self.designer, task='Design',
deadline=deadline, chain=chain)
self.assertEqual(first_task.remaining_days(), 1)
self.assertEqual(first_task.days_quantity_after_deadline(), None)
def test_after_deadline(self):
"""Тестирует случай, когда дедлайн просрочен.
Задача работает 5 день, на выполнение отведено 3 дня.
"""
today = datetime.date.today()
chain_start_date = today - datetime.timedelta(days=4)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline = chain_start_date + datetime.timedelta(days=3)
first_task = Task.objects.create(worker=self.designer, task='Design',
deadline=deadline, chain=chain)
self.assertEqual(first_task.days_quantity_after_deadline(), 2)
self.assertEqual(first_task.remaining_days(), None)
def task_wait_overdue(self):
"""Просрочен дедлайн у ожидающей задачи из-за предыдущей задачи.
Предыдущая задача превысила свой дедлайн и дедлайн текущей задачи.
"""
def task_work_overdue(self):
"""Работающая задача превысила дедлайн."""
def task_done_overdue(self):
"""Задача выполнена с превышением дедлайна."""
def task_stop_overdue(self):
"""Просрочен дедлайн у остановленной задачи.
Владелец цепочки не решил проблему остановки задачи.
"""
class ExpendedDaysTest(TaskTest):
"""Тестирует определение количества дней, затраченных на задачу."""
def test_wait(self):
"""Тестирует случай, когда задача ожидает начала работы."""
today = datetime.date.today()
chain_start_date = today + datetime.timedelta(days=1)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline = chain_start_date + datetime.timedelta(days=3)
first_task = Task.objects.create(worker=self.designer, task='Design',
deadline=deadline, chain=chain)
self.assertEqual(first_task.expended_days(), 0)
def test_work(self):
"""Тестирует случай, когда задача работает."""
today = datetime.date.today()
chain_start_date = today - datetime.timedelta(days=4)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline = chain_start_date + datetime.timedelta(days=3)
first_task = Task.objects.create(worker=self.designer, task='Design',
deadline=deadline, chain=chain,
status=Task.DONE_STATUS,
finish_date=today)
self.assertEqual(first_task.expended_days(), 5)
def test_done(self):
"""Тестирует случай, когда задача завершена."""
today = datetime.date.today()
chain_start_date = today - datetime.timedelta(days=1)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline = chain_start_date + datetime.timedelta(days=3)
first_task = Task.objects.create(worker=self.designer, task='Design',
deadline=deadline, chain=chain,
status=Task.DONE_STATUS,
finish_date=today)
self.assertEqual(first_task.expended_days(), 2)
def test_stop(self):
"""Тестирует случай, когда задача остановлена."""
today = datetime.date.today()
chain_start_date = today - datetime.timedelta(days=1)
chain = Chain.objects.create(name='Chain', start_date=chain_start_date,
owner=self.manager)
deadline = chain_start_date + datetime.timedelta(days=3)
first_task = Task.objects.create(worker=self.designer, task='Design',
deadline=deadline, chain=chain,
status=Task.STOP_STATUS)
self.assertEqual(first_task.expended_days(), None)
class DaysToStartTest(TaskTest):
"""Тестирует определение количества дней, оставшихся до начала задачи."""
def test_start_date_greater_than_today_to_one_day(self):
"""Дата начала больше текущей даты на один день."""
today = datetime.date.today()
chain = factories.ChainFactory(
start_date=today + datetime.timedelta(days=1)
)
task = factories.TaskFactory(
deadline=chain.start_date + datetime.timedelta(days=5),
chain=chain
)
self.assertEqual(task.days_to_start(), 0)
def test_today_equal_start_date(self):
"""Текущая дата совпадает с датой начала работы над задачей."""
today = datetime.date.today()
chain = factories.ChainFactory(start_date=today)
task = factories.TaskFactory(
deadline=chain.start_date + datetime.timedelta(days=5),
chain=chain
)
self.assertIsNone(task.days_to_start())
def test_prev_task_overdue(self):
"""Предыдущая задача превысила дедлайн."""
today = datetime.date.today()
chain = factories.ChainFactory(
start_date=today - datetime.timedelta(days=7)
)
design = factories.TaskFactory(
deadline=chain.start_date + datetime.timedelta(days=5),
chain=chain
)
layout = factories.TaskFactory(
deadline=design.deadline + datetime.timedelta(days=5),
chain=chain
)
self.assertIsNone(layout.days_to_start())
def test_task_not_wait(self):
"""Задача не ожидает начала работы, а имеет статус WORK/DONE/STOP."""
today = datetime.date.today()
chain = factories.ChainFactory(
start_date=today - datetime.timedelta(days=3)
)
task = factories.TaskFactory(
deadline=chain.start_date + datetime.timedelta(days=5),
chain=chain
)
# WORK.
self.assertIsNone(task.days_to_start())
# STOP.
task.status = task.STOP_STATUS
self.assertIsNone(task.days_to_start())
# DONE.
task.status = task.DONE_STATUS
task.finish_date = today
self.assertIsNone(task.days_to_start())
class DurationTest(TestCase):
"""Тестирует определение количества дней, выделенных на выполнение задачи.
"""
def setUp(self):
"""Создает две задачи. Первой выделено 3 дня, второй 2 дня.
Например, первая задача ограничена сроком [2; 5), вторая -- [5; 7)
"""
today = datetime.date.today()
chain = factories.ChainFactory(start_date=today)
self.first_task = factories.TaskFactory(
deadline=chain.start_date + datetime.timedelta(days=3),
chain=chain
)
self.second_task = factories.TaskFactory(
deadline=self.first_task.deadline + datetime.timedelta(days=2),
chain=chain
)
def test_first_task_in_chain(self):
"""Задача стоит первой в цепочке."""
self.assertEqual(self.first_task.duration(), 3)
def test_second_task_in_chain(self):
"""Задача стоит второй в цепочке."""
self.assertEqual(self.second_task.duration(), 2)
class ChainActualStatusTest(TestCase):
"""Тестирует определение фактического статуса цепочки задач."""
def setUp(self):
factories.make_fixtures()
def test_chain_wait(self):
"""Цепочка ожидает начала работы."""
chain = Chain.objects.get(name='Chain waits')
self.assertEqual(chain.actual_status(), Chain.WAIT_STATUS)
def test_chain_work(self):
"""Цепочка работает."""
chain = Chain.objects.get(name='Chain works')
self.assertEqual(chain.actual_status(), Chain.WORK_STATUS)
def test_chain_stop(self):
"""Цепочка остановлена."""
chain = Chain.objects.get(name='Chain was stopped')
self.assertEqual(chain.actual_status(), Chain.STOP_STATUS)
def test_chain_done(self):
"""Цепочка завершена."""
chain = Chain.objects.get(name='Chain was completed in time')
self.assertEqual(chain.actual_status(), Chain.DONE_STATUS)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import textwrap
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.goals import tailor
from pants.backend.python.goals.tailor import (
PutativePythonTargetsRequest,
classify_source_files,
is_entry_point,
)
from pants.backend.python.macros.pipenv_requirements import PipenvRequirementsTargetGenerator
from pants.backend.python.macros.poetry_requirements import PoetryRequirementsTargetGenerator
from pants.backend.python.macros.python_requirements import PythonRequirementsTargetGenerator
from pants.backend.python.target_types import (
PexBinary,
PythonSourcesGeneratorTarget,
PythonTestsGeneratorTarget,
PythonTestUtilsGeneratorTarget,
)
from pants.core.goals.tailor import AllOwnedSources, PutativeTarget, PutativeTargets
from pants.core.target_types import ResourceTarget
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
def test_classify_source_files() -> None:
test_files = {
"foo/bar/baz_test.py",
"foo/test_bar.py",
"foo/tests.py",
}
source_files = {
"foo/bar/baz.py",
"foo/bar_baz.py",
"foo.pyi",
}
test_util_files = {
"conftest.py",
"foo/bar/baz_test.pyi",
"foo/test_bar.pyi",
"tests.pyi",
}
assert {
PythonTestsGeneratorTarget: test_files,
PythonSourcesGeneratorTarget: source_files,
PythonTestUtilsGeneratorTarget: test_util_files,
} == classify_source_files(test_files | source_files | test_util_files)
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*tailor.rules(),
*target_types_rules.rules(),
QueryRule(PutativeTargets, (PutativePythonTargetsRequest, AllOwnedSources)),
],
target_types=[PexBinary],
)
def test_find_putative_targets(rule_runner: RuleRunner) -> None:
rule_runner.set_options(["--no-python-tailor-ignore-empty-init-files"])
rule_runner.write_files(
{
"3rdparty/Pipfile.lock": "{}",
"3rdparty/pyproject.toml": "[tool.poetry]",
"3rdparty/requirements-test.txt": "",
"pep621/pyproject.toml": textwrap.dedent(
"""\
[project]
dependencies = [
"ansicolors>=1.18.0",
]
"""
),
"pep621/requirements.txt": "", # requirements in same dir as pep621 pyproject.toml causes conflict for name
"already_owned/requirements.txt": "",
"already_owned/Pipfile.lock": "",
"already_owned/pyproject.toml": "[tool.poetry]",
"no_match/pyproject.toml": "# no poetry section",
**{
f"src/python/foo/{fp}": ""
for fp in (
"__init__.py",
"bar/__init__.py",
"bar/baz1.py",
"bar/baz1_test.py",
"bar/baz2.py",
"bar/baz2_test.py",
"bar/baz3.py",
"bar/conftest.py",
)
},
}
)
pts = rule_runner.request(
PutativeTargets,
[
PutativePythonTargetsRequest(
(
"3rdparty",
"already_owned",
"no_match",
"src/python/foo",
"src/python/foo/bar",
"pep621",
)
),
AllOwnedSources(
[
"already_owned/requirements.txt",
"already_owned/Pipfile.lock",
"already_owned/pyproject.toml",
"src/python/foo/bar/__init__.py",
"src/python/foo/bar/baz1.py",
]
),
],
)
assert (
PutativeTargets(
[
PutativeTarget.for_target_type(
PipenvRequirementsTargetGenerator,
path="3rdparty",
name="pipenv",
triggering_sources=["3rdparty/Pipfile.lock"],
),
PutativeTarget.for_target_type(
PoetryRequirementsTargetGenerator,
path="3rdparty",
name="poetry",
triggering_sources=["3rdparty/pyproject.toml"],
),
PutativeTarget.for_target_type(
PythonRequirementsTargetGenerator,
path="3rdparty",
name="reqs",
triggering_sources=["3rdparty/requirements-test.txt"],
kwargs={"source": "requirements-test.txt"},
),
PutativeTarget.for_target_type(
PythonRequirementsTargetGenerator,
path="pep621",
name="reqs",
triggering_sources=["pep621/pyproject.toml"],
kwargs={"source": "pyproject.toml"},
),
PutativeTarget.for_target_type(
PythonRequirementsTargetGenerator,
path="pep621",
name="reqs",
triggering_sources=["pep621/requirements.txt"],
),
PutativeTarget.for_target_type(
PythonSourcesGeneratorTarget, "src/python/foo", None, ["__init__.py"]
),
PutativeTarget.for_target_type(
PythonSourcesGeneratorTarget,
"src/python/foo/bar",
None,
["baz2.py", "baz3.py"],
),
PutativeTarget.for_target_type(
PythonTestsGeneratorTarget,
"src/python/foo/bar",
"tests",
["baz1_test.py", "baz2_test.py"],
),
PutativeTarget.for_target_type(
PythonTestUtilsGeneratorTarget,
"src/python/foo/bar",
"test_utils",
["conftest.py"],
),
]
)
== pts
)
def test_skip_invalid_requirements(rule_runner: RuleRunner) -> None:
rule_runner.set_options(["--no-python-tailor-ignore-empty-init-files"])
rule_runner.write_files(
{
"3rdparty/requirements-valid.txt": b"FooProject >= 1.2",
"3rdparty/requirements-invalid.txt": b"FooProject LOLOLOLOL 1.2",
"pipfile-valid/Pipfile.lock": b"{}",
"pipfile-invalid/Pipfile.lock": b"FNARB",
"poetry-valid/pyproject.toml": b"[tool.poetry]",
"poetry-invalid/pyproject.toml": b"FNARB",
}
)
pts = rule_runner.request(
PutativeTargets,
[
PutativePythonTargetsRequest(
(
"3rdparty",
"pipfile-valid",
"pipfile-invalid",
"poetry-valid",
"poetry-invalid",
)
),
AllOwnedSources([]),
],
)
assert (
PutativeTargets(
[
PutativeTarget.for_target_type(
PythonRequirementsTargetGenerator,
path="3rdparty",
name="reqs",
triggering_sources=["3rdparty/requirements-valid.txt"],
kwargs={"source": "requirements-valid.txt"},
),
PutativeTarget.for_target_type(
PipenvRequirementsTargetGenerator,
path="pipfile-valid",
name="pipenv",
triggering_sources=["pipfile-valid/Pipfile.lock"],
),
PutativeTarget.for_target_type(
PoetryRequirementsTargetGenerator,
path="poetry-valid",
name="poetry",
triggering_sources=["poetry-valid/pyproject.toml"],
),
]
)
== pts
)
def test_find_putative_targets_subset(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
f"src/python/foo/{fp}": ""
for fp in (
"__init__.py",
"bar/__init__.py",
"bar/bar.py",
"bar/bar_test.py",
"baz/baz.py",
"baz/baz_test.py",
"qux/qux.py",
)
}
)
pts = rule_runner.request(
PutativeTargets,
[
PutativePythonTargetsRequest(("src/python/foo/bar", "src/python/foo/qux")),
AllOwnedSources(["src/python/foo/bar/__init__.py", "src/python/foo/bar/bar.py"]),
],
)
assert (
PutativeTargets(
[
PutativeTarget.for_target_type(
PythonTestsGeneratorTarget,
"src/python/foo/bar",
"tests",
["bar_test.py"],
),
PutativeTarget.for_target_type(
PythonSourcesGeneratorTarget, "src/python/foo/qux", None, ["qux.py"]
),
]
)
== pts
)
def test_find_putative_targets_for_entry_points(rule_runner: RuleRunner) -> None:
mains = ("main1.py", "main2.py", "main3.py")
rule_runner.write_files(
{
f"src/python/foo/{name}": textwrap.dedent(
"""
if __name__ == "__main__":
main()
"""
)
for name in mains
}
)
rule_runner.write_files(
{
"src/python/foo/BUILD": textwrap.dedent(
"""\
pex_binary(name='main1', entry_point='main1.py')
pex_binary(name='main2', entry_point='foo.main2')
"""
),
"src/python/foo/__main__.py": "",
}
)
rule_runner.set_options(["--python-tailor-pex-binary-targets"])
pts = rule_runner.request(
PutativeTargets,
[
PutativePythonTargetsRequest(("src/python/foo",)),
AllOwnedSources(
[f"src/python/foo/{name}" for name in mains] + ["src/python/foo/__main__.py"]
),
],
)
assert (
PutativeTargets(
[
PutativeTarget.for_target_type(
PexBinary,
"src/python/foo",
"main3",
[],
kwargs={"entry_point": "main3.py"},
),
PutativeTarget.for_target_type(
PexBinary,
"src/python/foo",
"__main__",
[],
kwargs={"entry_point": "__main__.py"},
),
]
)
== pts
)
def test_find_putative_targets_for_py_typed_marker_files(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"src/python/foo/py.typed": ""})
rule_runner.set_options(["--python-tailor-py-typed-targets"])
pts = rule_runner.request(
PutativeTargets,
[
PutativePythonTargetsRequest(("src/python/foo",)),
AllOwnedSources([]),
],
)
assert (
PutativeTargets(
[
PutativeTarget.for_target_type(
ResourceTarget,
path="src/python/foo",
name="py_typed",
triggering_sources=("py.typed",),
kwargs={"source": "py.typed"},
),
]
)
== pts
)
@pytest.mark.parametrize("ignore", [True, False])
def test_ignore_empty_init(rule_runner: RuleRunner, ignore: bool) -> None:
rule_runner.write_files(
{
"project/__init__.py": "",
"project/d1/__init__.py": "# content",
"project/d2/__init__.py": "",
"project/d2/f.py": "",
}
)
rule_runner.set_options([f"--python-tailor-ignore-empty-init-files={ignore}"])
pts = rule_runner.request(
PutativeTargets,
[
PutativePythonTargetsRequest(
("project", "project/d1", "project/d2"),
),
AllOwnedSources([]),
],
)
result = {
PutativeTarget.for_target_type(
PythonSourcesGeneratorTarget,
"project/d1",
None,
["__init__.py"],
),
PutativeTarget.for_target_type(
PythonSourcesGeneratorTarget,
"project/d2",
None,
["__init__.py", "f.py"],
),
}
if not ignore:
result.add(
PutativeTarget.for_target_type(
PythonSourcesGeneratorTarget,
"project",
None,
["__init__.py"],
)
)
assert result == set(pts)
def test_is_entry_point_true() -> None:
assert is_entry_point(
textwrap.dedent(
"""
# Note single quotes.
if __name__ == '__main__':
main()
"""
).encode()
)
assert is_entry_point(
textwrap.dedent(
"""
# Note double quotes.
if __name__ == "__main__":
main()
"""
).encode()
)
assert is_entry_point(
textwrap.dedent(
"""
# Note weird extra spaces.
if __name__ == "__main__":
main()
"""
).encode()
)
assert is_entry_point(
textwrap.dedent(
"""
# Note trailing comment.
if __name__ == "__main__": # Trailing comment.
main()
"""
).encode()
)
assert is_entry_point(
textwrap.dedent(
"""
# Note trailing comment.
if __name__ == "__main__":# Trailing comment.
main()
"""
).encode()
)
assert is_entry_point(
textwrap.dedent(
"""
# Note trailing comment.
if __name__ == "__main__": # Trailing comment.
main()
"""
).encode()
)
def test_is_entry_point_false() -> None:
assert not is_entry_point(
textwrap.dedent(
"""
# Note commented out.
# if __name__ == "__main__":
# main()
"""
).encode()
)
assert not is_entry_point(
textwrap.dedent(
"""
# Note weird indent.
if __name__ == "__main__":
main()
"""
).encode()
)
assert not is_entry_point(
textwrap.dedent(
"""
# Note some nonsense, as a soundness check.
print(__name__)
"""
).encode()
)
|
import unittest
import sys
import os
sys.path.append(os.path.join('..', 'Src'))
from SentimentalExtraction import SentimentExtraction
class SentimentExtractionTestCase(unittest.TestCase):
def testGeneralSentimentAccuracy(self):
sentimentClass = SentimentExtraction()
sentence = 'The prom was pretty good and worthwhile'
sentiment = sentimentClass.extractSentimentSentence(sentence)
self.assertTrue(sentiment[1]>0.65)
def testStronglyPositiveSentiment(self):
sentimentClass = SentimentExtraction()
sentence = 'The prom was set in a beautiful venue with a marvellous stage'
sentiment = sentimentClass.extractSentimentSentence(sentence)
self.assertEqual(sentiment[0], 'positive')
def testStronglyNegativeSentiment(self):
sentimentClass = SentimentExtraction()
sentence = 'The prom was set in a disastrous venue with a tacky stage'
sentiment = sentimentClass.extractSentimentSentence(sentence)
self.assertEqual(sentiment[0], 'negative')
def testMildlyPositiveSentiment(self):
sentimentClass = SentimentExtraction()
        sentence = 'The prom was great though it was slightly long'
sentiment = sentimentClass.extractSentimentSentence(sentence)
self.assertEqual(sentiment[0], 'positive')
def testMildlyNegativeSentiment(self):
sentimentClass = SentimentExtraction()
sentence = 'The prom was terrible though finished quick'
sentiment = sentimentClass.extractSentimentSentence(sentence)
self.assertEqual(sentiment[0], 'negative')
if __name__ == '__main__':
unittest.main()
|
from work.models import Site, ShiftedQty, ProgressQty, SurveyQty, ShiftedQtyExtra, ProgressQtyExtra, SiteExtra, DprQty, Log, Resolution
from consumers.models import Consumer
import pandas as pd
from .functions import getHabID, formatString
from django.db.models import F, Func
def getCompletedHabs():
num_fields = ['site__hab_id', 'site__village', 'site__census', 'site__habitation', 'site__district',
'status', 'ht', 'pole_ht_8m', 'lt_3p', 'lt_1p', 'pole_lt_8m', 'dtr_100', 'dtr_63', 'dtr_25']
# dfP = pd.DataFrame(ProgressQty.objects.filter(status='completed')
dfP = pd.DataFrame(ProgressQty.objects.all()
.values(*num_fields))
dfP.set_index('site__hab_id', inplace=True)
dfP['rem'] = 'site'
# dfPX = pd.DataFrame(ProgressQtyExtra.objects.filter(status='completed')
dfPX = pd.DataFrame(ProgressQtyExtra.objects.all()
.values(*num_fields))
dfPX.set_index('site__hab_id', inplace=True)
dfPX['rem'] = 'extra'
#df = dfP.add(dfPX, fill_value=0, numeric_only=True)
df = pd.concat([dfP, dfPX])
df.to_excel('outputs/progress_sites.xlsx')
return df
def assignSite():
cs = Consumer.objects.all()
for rec in cs:
hab_id = getHabID(census=rec.census, habitation=rec.habitation)
if(Site.objects.filter(hab_id=hab_id).exists()):
site = Site.objects.get(hab_id=hab_id)
rec.site = site
print(hab_id)
rec.save()
# file = '102LConsumers.xlsx'
# of = pd.ExcelFile(file)
# ss = of.sheet_names[3:]
def importPortalConsumers(file):
of = pd.ExcelFile(file)
ss = of.sheet_names[3:]
dfs = [pd.read_excel(of, sheet_name=s) for s in ss]
[df.dropna(inplace=True) for df in dfs]
return dfs
def markPortal(df, label):
for i, row in df.iterrows():
print('Processing... {}'.format(row))
consumers = Consumer.objects.filter(census=row['Census Code'], consumer_no=row['Consumer No'], name=row['Name'])
row['found'] = False
for consumer in consumers:
print('Found')
# input()
row['found'] = True
consumer.isInPortal = True
if(len(consumers) > 1):
consumer.remark = 'dup'
consumer.save()
df.to_excel('Processed '+ label+'.xlsx')
return df
# dfs = importPortalConsumers(file)
# markPortal(dfs[0], ss[0])
# markPortal(dfs[1], ss[1])
# markPortal(dfs[1], ss[1])
# markPortal(dfs[1], ss[1])
# markPortal(dfs[1], ss[1])
count = 0
def deleteDuplicate():
    global count
    for consumer in Consumer.objects.all():
        print(consumer.name)
        if(Consumer.objects.filter(
            consumer_no = consumer.consumer_no.upper(),
            name = consumer.name.upper(),
            habitation = consumer.habitation.upper(),
            census = consumer.census).count()>1):
            consumer.delete()  # was `row.delete()`; `row` is not defined in this scope
            count += 1
            print(count)
count = 0
def makeUpperStrip():
global count
for consumer in Consumer.objects.all():
count += 1
print(count)
consumer.habitation = " ".join(consumer.habitation.split()).upper()
consumer.name = " ".join(consumer.name.split()).upper()
consumer.consumer_no = " ".join(consumer.consumer_no.split()).upper()
try:
consumer.save()
except:
consumer.delete()
def deleteDups():
qs = Consumer.objects.all()
key_set = set()
delete_ids_list = []
dup_c_no = []
for object in qs:
object_key1 = object.consumer_no
# object_key2 = object.name
# object_key3 = object.habitation
# object_key4 = object.census
# if((object_key1, object_key2, object_key3, object_key4) in key_set):
# if((object_key1, object_key2, object_key4) in key_set):
if(object_key1 in key_set):
# print(object_key2)
delete_ids_list.append(object.id)
dup_c_no.append(object.consumer_no)
else:
# key_set.add((object_key1, object_key2, object_key3, object_key4))
# key_set.add((object_key1, object_key2, object_key4))
key_set.add(object_key1)
Consumer.objects.filter(id__in=delete_ids_list).delete()
return delete_ids_list
def stripUpper(s):
return s.map(lambda x: " ".join(x.__str__().split()).upper())
def ___():  # scratch helper: expects `dfs` from importPortalConsumers(...)
cols = ['Name', 'Consumer No']
for df in dfs:
df['Name'] = stripUpper(df['Name'])
df['Consumer No'] = stripUpper(df['Consumer No'])
# df1['Name'] = stripUpper(df1['Name'])
# consumer_nos = Consumer.objects.all().values_list('consumer_no', flat=True)
# dfNF = []
# for df in dfs:
# df1 = df[~df['Consumer No'].isin(consumer_nos)]
# dfNF.append(df1)
# sum = 0
# for df in dfNF:
# sum += len(df)
# dfa[~dfa['Consumer No'].isin(consumer_nos)]
# dfa[~dfa['Consumer No'].isin(q.values_list('consumer_no'))]
# dfa[~dfa['Consumer No'].isin(f)]
# dfa[~dfa['Consumer No'].isin(['460069'])]
# dfNot = dfNF[0]
# for df in dfNF[1:]:
# dfNot = dfNot.append(df)
def updateNotFoundConsumers():
consumerNotFound = 'missingPortalConsumers.xlsx'
dfNot = pd.read_excel(consumerNotFound)
consumer_nos = Consumer.objects.all().values_list('consumer_no', flat=True)
df1 = dfNot[~dfNot['Consumer No'].isin(consumer_nos)]
df1.to_excel(consumerNotFound)
print(len(df1))
def markPortalConsumer():
consumerNotFound = 'missingPortalConsumers.xlsx'
dfNot = pd.read_excel(consumerNotFound)
modi = False
for i, row in dfNot.iterrows():
print(i)
consumers = Consumer.objects.filter(census=row['Census Code'], name=row['Name'])
dropped = False
for consumer in consumers:
print('Found')
consumer.isInPortal = True
# if(len(consumers) > 1):
# consumer.remark = 'dup'
# consumer.remark = 'truncated'
consumer.save()
modi = True
if(not dropped):
dfNot = dfNot.drop(i)
dropped = True
if(modi):
print(len(dfNot))
dfNot.to_excel(consumerNotFound)
from consumers.models import Consumer
count = 0
for consumer in Consumer.objects.all():
count += 1
cno = consumer.consumer_no
consumer.consumer_no = str(consumer.consumer_no).replace(" ","").upper()
try:
consumer.save()
except:
print('Error {}'.format(cno))
input()
consumer.remark = str(consumer.remark) + "dup_cno"
consumer.consumer_no = cno
consumer.save()
print(count)
for row in Site.objects.all():
try:
row.habitation = formatString(row.habitation)
row.village = formatString(row.village)
row.district = formatString(row.district)
row.division = formatString(row.division)
row.save()
except:
print(row)
input()
for row in Site.objects.all():
try:
row.hab_id = formatString(row.hab_id)
row.save()
except Exception as ex:
print(row)
print(ex.__str__())
Site.objects.get(
hab_id='270949LONKHU',
village='LONKHU',
census=270949,
habitation='LONKHU',
district='CHANDEL',
division='CHANDEL')
def movToSite():
extras = SiteExtra.objects.exclude(site = None)
progress = ProgressQty.objects.filter(site__in = extras.values('site'))
progressxtra = ProgressQtyExtra.objects.filter(site__in = extras)
def copyProgress(obj,model):
return model(**{x:getattr(obj,x) for x in vars(obj) if not x[0]=='_'}).save()
def moveExtraToSite():
# whether in Survey or not can be done by query SurveyQty site
xsites = SiteExtra.objects.all()
#1: move xsites to sites
for sx in xsites:
if(not sx.site == None):
copyProgress(sx.progressqtyextra, ProgressQty)
copyProgress(sx.shiftedqtyextra, ShiftedQty)
from work.models import Site
from django.db.models import F,Q
dtrIssues = Site.objects.exclude(Q(progressqty__dtr_25=F('surveyqty__dtr_25')) & Q(progressqty__dtr_63=F('surveyqty__dtr_63'))).filter(district='CHANDEL', progressqty__status='completed')
import pandas as pd
df = pd.DataFrame(dtrIssues.values('village', 'census', 'habitation', 'progressqty__dtr_25', 'surveyqty__dtr_25', 'progressqty__dtr_63', 'surveyqty__dtr_63'))
|
#!/usr/bin/env python
import daemon, socket
import os, sys, time
from daemon import pidlockfile, DaemonContext
WORKDIR = '/tmp/python_daemon'
LOCKFILE = os.sep.join([WORKDIR, 'lockfile.pid'])
SOCKFILE = os.sep.join([WORKDIR, 'socket.file'])
class DelegateDaemon():
def __init__(self):
self.number = 0
self.__checkPaths__()
        # self.__startDaemon__()
self.__connect__()
self.run()
def run(self):
while True:
self.__getData__()
    def __startDaemon__(self):
self.stdout = open(os.sep.join([WORKDIR, 'stdout.log']), 'w')
self.stderr = open(os.sep.join([WORKDIR, 'stderr.log']), 'w')
self.daemon = DaemonContext(
working_directory=WORKDIR,
pidfile=pidlockfile.PIDLockFile(LOCKFILE),
stdout = self.stdout,
stderr = self.stderr,
)
self.daemon.open()
def __checkPaths__(self):
if not os.path.exists(WORKDIR):
os.makedirs(WORKDIR)
if os.path.exists(LOCKFILE):
sys.exit(1)
if os.path.exists(SOCKFILE):
os.remove(SOCKFILE)
def __connect__(self):
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.bind(SOCKFILE)
def __getData__(self):
self.socket.listen(1)
self.conn, self.addr = self.socket.accept()
data = self.conn.recv(4096)
print 'DATA1: %s' % data
data = self.conn.recv(4096)
print 'DATA2: %s' % data
if __name__ == '__main__':
DelegateDaemon()
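# Hypothetical client sketch (not part of the original file): a peer process
# would talk to the daemon over its unix socket roughly like this.
#
# import socket
# s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# s.connect('/tmp/python_daemon/socket.file')
# s.send('first message')
# s.send('second message')
# s.close()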
|
__author__ = "Narwhale"
class Node(object):
    """Tree node."""
    def __init__(self, elem):
        self.elem = elem
        self.lchild = None
        self.rchild = None

class Tree(object):
    """Binary tree."""
    pass
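# A guess at where this skeleton was headed: a minimal level-order insert,
# sketched as an assumption rather than part of the original file (it presumes
# the Tree gains a `root` attribute).
#
# from collections import deque
#
# def add(self, elem):
#     node = Node(elem)
#     if self.root is None:
#         self.root = node
#         return
#     queue = deque([self.root])
#     while queue:
#         cur = queue.popleft()
#         if cur.lchild is None:
#             cur.lchild = node
#             return
#         if cur.rchild is None:
#             cur.rchild = node
#             return
#         queue.append(cur.lchild)
#         queue.append(cur.rchild)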
|
# libraries
import numpy as np
import pandas as pd
from datetime import datetime
from typing import Union
def split_train_test(df: pd.DataFrame, train_split: int):
"""
Split a data frame into train and test sets
:param df: a data frame to split
:param train_split: split index
:return: train and test data frames
"""
if type(df) is np.ndarray:
df = pd.DataFrame(data=df, columns=['y'])
if df.shape[1] == 1:
df['ds'] = pd.date_range(end=datetime.today(), periods=len(df))
# order columns
df = df[["ds", "y"]]
# split into train and test sets
train, test = df.iloc[0:train_split, :], df.iloc[train_split:len(df), :]
return train, test
def univariate_data(dataset: pd.DataFrame, start_index: int, end_index: Union[int, None],
size_window: int, target_size: int = 0):
"""
Split a dataset into features and labels.
:param dataset: dataset to split.
:param start_index: start index.
:param end_index: end index.
:param size_window: size of the past window of information.
:param target_size: label that needs to be predicted.
:return: two np.arrays with features and labels datasets.
"""
data = []
labels = []
start_index = start_index + size_window
if end_index is None:
end_index = len(dataset) - target_size
for i in range(start_index, end_index):
indices = range(i - size_window, i)
# reshape data from (size_window,) to (size_window, 1)
data.append(np.reshape(dataset[indices], (size_window, 1)))
labels.append(dataset[i + target_size])
data = np.array(data)
labels = np.array(labels)
return data, labels
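# Illustrative usage sketch (synthetic data; every name below is an example,
# not part of the original module):
#
# series = np.sin(np.linspace(0, 20, 200))
# train, test = split_train_test(series, train_split=150)
# x_train, y_train = univariate_data(train['y'].values, 0, None, size_window=10)
# print(x_train.shape, y_train.shape)  # (140, 10, 1) (140,)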
|
#!/usr/bin/env python3
import time
import random
import typing
def pos(data, size=4):
    # Split a byte string into big-endian integers of `size` bytes each.
    ret = []
    for x in range(0, len(data), size):
        ret.append(int.from_bytes(data[x:x+size], 'big'))
    return ret

def neg(data, size=4):
    # Inverse of pos(): join integers back into a big-endian byte string.
    return b''.join([e.to_bytes(size, 'big') for e in data])

def _encrypt(v: typing.List[int], key: typing.List[int]):
    # TEA-style round function with a custom delta; all arithmetic is mod 2**32.
    counter, delta, mask = 0, 0xFACEB00C, 0xffffffff
    for i in range(32):
        counter = counter + delta & mask
        # Mask the running words too, otherwise they outgrow 32 bits and
        # neg() raises OverflowError when packing them back into 4 bytes.
        v[0] = v[0] + (((v[1] << 4) + key[0] ^ (v[1] + counter) & mask ^ (v[1] >> 5) + key[1] & mask) & mask) & mask
        v[1] = v[1] + (((v[0] << 4) + key[2] ^ (v[0] + counter) & mask ^ (v[0] >> 5) + key[3] & mask) & mask) & mask
    return v
def encrypt(clear_text: bytes, key: bytes):
cipher_text = b''
for i in range(0, len(clear_text), 8):
cipher_text += neg(_encrypt(pos(clear_text[i:i+8]), pos(key)))
return cipher_text
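# For completeness, a decryption sketch (not in the original challenge file):
# it replays the rounds in reverse order with the counter wound back, assuming
# the same key layout as _encrypt above.
def _decrypt(v: typing.List[int], key: typing.List[int]):
    delta, mask = 0xFACEB00C, 0xffffffff
    counter = 32 * delta & mask
    for i in range(32):
        v[1] = v[1] - (((v[0] << 4) + key[2] ^ (v[0] + counter) & mask ^ (v[0] >> 5) + key[3] & mask) & mask) & mask
        v[0] = v[0] - (((v[1] << 4) + key[0] ^ (v[1] + counter) & mask ^ (v[1] >> 5) + key[1] & mask) & mask) & mask
        counter = counter - delta & mask
    return v

def decrypt(cipher_text: bytes, key: bytes):
    clear_text = b''
    for i in range(0, len(cipher_text), 8):
        clear_text += neg(_decrypt(pos(cipher_text[i:i+8]), pos(key)))
    return clear_text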
if __name__ == '__main__':
flag = open('flag', 'rb').read()
assert len(flag) == 16
random.seed(int(time.time()))
key = random.getrandbits(128).to_bytes(16, 'big')
cipher_text = encrypt(flag, key)
    print(f'ciphertext = {cipher_text.hex()}')
|
# Generated by Django 2.2 on 2021-08-12 01:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('the_wall_app', '0007_comment_creator'),
]
operations = [
migrations.AddField(
model_name='comment',
name='users_who_liked',
field=models.ManyToManyField(related_name='comments_users_liked', to='the_wall_app.User'),
),
migrations.AddField(
model_name='message',
name='users_who_liked',
field=models.ManyToManyField(related_name='messages_user_liked', to='the_wall_app.User'),
),
]
|
class Solution:
    def __init__(self):
        self.ceil = None

    def getSuccessor(self, root, val):
        # In-order successor in a BST: remember the last node where we went left.
        if root is None:
            return self.ceil
        if root.val <= val:
            return self.getSuccessor(root.right, val)
        # root.val > val: root is a successor candidate; try to find a closer one.
        self.ceil = root
        return self.getSuccessor(root.left, val)
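# Usage sketch (the TreeNode class below is an assumption; any object exposing
# .val/.left/.right works):
#
# class TreeNode:
#     def __init__(self, val, left=None, right=None):
#         self.val, self.left, self.right = val, left, right
#
# root = TreeNode(8, TreeNode(3), TreeNode(10))
# print(Solution().getSuccessor(root, 3).val)  # -> 8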
|
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import subprocess
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# root dir
root = os.path.abspath(os.path.dirname(__file__))
# Package meta-data.
NAME = "EthTx"
DESCRIPTION = "EthTx transaction decoder."
URL = "https://github.com/EthTx/ethtx"
EMAIL = "karol@tfi.team, tomek@tfi.team, piotr.rudnik@tfi.team"
AUTHOR = "Karol Chojnowski, Tomasz Mierzwa, Piotr Rudnik"
REQUIRES_PYTHON = ">=3.7.0"
REQUIRED = []
REQUIRED_TEST = []
about = {
"__version__": subprocess.check_output(
["git", "describe", "--tags"], universal_newlines=True
).strip()
}
try:
with io.open(os.path.join(root, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
def load_requirements(fname):
"""Load requirements from file."""
with open(fname) as file:
return file.read().splitlines()
class UploadCommand(Command):
"""Support setup.py upload."""
description = "Build and publish the package."
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print("\033[1m{0}\033[0m".format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status("Removing previous builds…")
rmtree(os.path.join(root, "dist"))
except OSError:
pass
self.status("Building Source and Wheel (universal) distribution…")
os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
self.status("Uploading the package to PyPI via Twine…")
os.system("twine upload dist/*")
self.status("Pushing git tags…")
os.system("git tag v{0}".format(about["__version__"]))
os.system("git push --tags")
sys.exit()
# *************** INSTALL *****************
setup(
name=NAME,
version=about["__version__"],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
license="Apache-2.0 License",
packages=find_packages(exclude=["tests"]),
install_requires=load_requirements("requirements.txt"),
include_package_data=True,
test_suite="tests",
classifiers=[
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
# $ setup.py publish support.
cmdclass={"upload": UploadCommand},
)
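# Typical release flow for the custom command above (assumes a reachable git
# tag and valid PyPI/Twine credentials; purely illustrative):
#
#   python setup.py upload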
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-24 09:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webjuego', '0008_usuario_avatar'),
]
operations = [
migrations.AlterField(
model_name='usuario',
name='avatar',
field=models.ImageField(blank=True, null=True, upload_to='fotos'),
),
]
|
"""User related tests."""
from django.urls import reverse
from modoboa.core.models import User
from modoboa.lib.tests import ModoTestCase
from ..factories import populate_database
from ..models import Alias
class ForwardTestCase(ModoTestCase):
"""User forward test cases."""
def setUp(self):
super(ForwardTestCase, self).setUp()
populate_database()
def test_set_forward(self):
self.client.logout()
self.client.login(username="user@test.com", password="toto")
url = reverse("user_forward")
self.ajax_post(
url,
{"dest": "user@extdomain.com", "keepcopies": True}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
content = response.json()
self.assertIn("user@extdomain.com", content["content"])
self.assertNotIn("user@test.com", content["content"])
forward = Alias.objects.get(address="user@test.com", internal=False)
sadmin = User.objects.get(username="admin")
self.assertTrue(sadmin.can_access(forward))
domadmin = User.objects.get(username="admin@test.com")
self.assertTrue(domadmin.can_access(forward))
selfalias = Alias.objects.get(address="user@test.com", internal=True)
self.assertTrue(selfalias.enabled)
# Deactivate local copies
self.ajax_post(
url,
{"dest": "user@extdomain.com", "keepcopies": False}
)
selfalias.refresh_from_db()
self.assertFalse(selfalias.enabled)
# Now, empty forward
self.ajax_post(url, {"dest": "", "keepcopies": False})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
content = response.json()
self.assertNotIn("user@extdomain.com", content["content"])
selfalias.refresh_from_db()
self.assertTrue(selfalias.enabled)
|
#! /usr/bin/env python
# ======================= Gen Imports ========================
import sys
import json
from flask import Flask
from flask_cors import CORS
from flask_restful import Api
import os
# Workaround for running the API from a nested folder: depending on where the
# script is launched, the config path resolves differently, and we must chdir
# into the project root so Flask can find its resources.
if 'carbon_black' not in os.getcwd():
with open('./data_operations/config.json') as f:
config = json.load(f)
else:
with open('../data_operations/config.json') as f:
config = json.load(f)
sys.path.insert(1, config['project_root'])
os.chdir(config['project_root'] + 'carbon_black')
# ====================== Custom Imports ======================
from carbon_black.endpoints import *
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
api = Api(app)
api.add_resource(Index, '/', '/<string:date>')
api.add_resource(IndexSpecific, '/api/<string:api_endpoint>',
'/api/<string:api_endpoint>/<string:date>')
api.add_resource(
Future_EOD, '/api/<string:api_endpoint>/future/<int:transaction_id>')
api.add_resource(
EOD, '/api/<string:api_endpoint>/price-eod/<int:transaction_id>/<string:include_anaylsis>')
api.add_resource(
Fundamental, '/api/<string:api_endpoint>/fundamental/<int:transaction_id>')
api.add_resource(News, '/api/<string:api_endpoint>/news/<int:transaction_id>')
api.add_resource(
Peers, '/api/<string:api_endpoint>/peers/<int:transaction_id>')
api.add_resource(SEC, '/api/<string:api_endpoint>/sec/<int:transaction_id>')
api.add_resource(
Weekly, '/api/<string:api_endpoint>/price-weekly/<int:transaction_id>')
api.add_resource(Sectors, '/api/sectors/<string:date>')
api.add_resource(Stats, '/api/stats')
# ============= Non Table Related (custom rolled) ===================
api.add_resource(
Ticker, '/api/<string:api_endpoint>/ticker/<int:transaction_id>')
api.add_resource(Settings, '/api/settings')
api.add_resource(Nostradamus_Settings, '/api/nostradamus-settings')
if __name__ == '__main__':
    app.run(debug=True)
|
from rest_framework import serializers
from django_filters.rest_framework import DjangoFilterBackend
from .models import ChatMessages
class ChatMessageSerializer(serializers.ModelSerializer):
class Meta:
model = ChatMessages
fields = ('id', 'message', 'user', 'chat_room', 'created_at')
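# Hypothetical companion viewset (not in the original file) showing how this
# serializer and the imported DjangoFilterBackend would typically be wired up;
# the filter fields are illustrative assumptions.
from rest_framework import viewsets

class ChatMessageViewSet(viewsets.ModelViewSet):
    queryset = ChatMessages.objects.all()
    serializer_class = ChatMessageSerializer
    filter_backends = (DjangoFilterBackend,)
    filterset_fields = ('chat_room', 'user')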
|
from random import randint

def ask_continue():
    # Prompt once; anything other than 0 continues by default.
    choice = input('Continue? Press 1 to continue, 0 to quit (anything else continues): ')
    if choice.strip() == '0':
        print('Farewell, better luck next time')
        exit()

money = 1000
while money > 0:
    print('Total assets: %d' % money)
    first = randint(1, 6) + randint(1, 6)
    needs_go_on = False
    debt = int(input('How much do you want to bet?: '))
    # Keep asking until the bet is valid, however many tries it takes.
    while debt < 0 or debt > money:
        print('That bet is not allowed, try again')
        debt = int(input('How much do you want to bet?: '))
    print('First roll total: %d' % first)
    if first == 7 or first == 11:
        print('Player wins!')
        money = money + debt
        print('Your remaining assets: %d' % money)
        ask_continue()
    elif first == 2 or first == 3 or first == 12:
        print('Your opponent wins!')
        money = money - debt
        print('Your remaining assets: %d' % money)
        ask_continue()
    else:
        needs_go_on = True
        print('Sorry, you have to keep rolling')
    while needs_go_on:
        needs_go_on = False
        current = randint(1, 6) + randint(1, 6)
        if current == 7:
            print('Your opponent wins!')
            money = money - debt
            print('Your remaining assets: %d' % money)
            ask_continue()
        elif current == first:
            print('Player wins!')
            money = money + debt
            print('Your remaining assets: %d' % money)
            ask_continue()
        else:
            needs_go_on = True
print('You are bankrupt')
exit()
|
# Generated by Django 1.9.5 on 2016-11-05 13:26
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('relaydomains', '0004_auto_20161105_1424'),
]
operations = [
migrations.RemoveField(
model_name='relaydomain',
name='dates',
),
]
|
import numpy as np
from utils import extract_column, results_to_csv, error_rate, plot_data
from spam_utils import load_spam
from decision_tree_starter import DecisionTree, RandomForest
#RandomForest(trees, sample_size, bag_size, type_map, categories_map, seed)
# fit(data, max_depth, min_samples)
#
#DecisionTree(type_map, categories_map)
# fit(data, max_depth, min_samples, bag_size = None)
#
#100, 500, 70 .. 20 -> depth 36: .031884 or 26: .03478
# classifier = RandomForest(100, 500, 70, type_map, categories_map, 20)
def plot_q2_5_3():
data, test_data, feature_names, class_names = load_spam()
type_map = dict((i, 'quantitative') for i in range(data.shape[1]))
categories_map = {}
perm = np.random.RandomState(seed=20).permutation((data.shape[0]))
data = data[perm]
data, valid = data[:4137], data[4137:]
    train_accuracies = []
    valid_accuracies = []
    depths = list(range(1, 41))
    for max_depth in depths:
        print("Computing max depth: ", max_depth)
        idy = valid.shape[1] - 1
        classifier = DecisionTree(type_map, categories_map, feature_names, class_names)
        classifier.fit(data, max_depth, 10)
        train_pred = classifier.predict(data)
        valid_pred = classifier.predict(valid)
        train_actual = extract_column(data, idy)
        valid_actual = extract_column(valid, idy)
        train_acc = 1 - error_rate(train_pred, train_actual)
        valid_acc = 1 - error_rate(valid_pred, valid_actual)
        train_accuracies.append(train_acc)
        valid_accuracies.append(valid_acc)
    plot_data(depths, train_accuracies, valid_accuracies, 'r', 'b', 'Training/Validation Accuracies')
return
def q_2_5_2():
data, test_data, feature_names, class_names = load_spam()
type_map = dict((i, 'quantitative') for i in range(data.shape[1]))
categories_map = {}
perm = np.random.RandomState(seed=20).permutation((data.shape[0]))
data = data[perm]
data, valid = data[:4137], data[4137:]
classifier = DecisionTree(type_map, categories_map, feature_names, class_names)
classifier.fit(data, 8, 15)
samp_point = np.array([valid[0]])
classifier.predict(samp_point, True)
samp_point = np.array([valid[1]])
classifier.predict(samp_point, True)
def kaggle():
"""
#run featurize.py with 5000 samples
data, test_data, feature_names, class_names = load_spam()
type_map = dict((i, 'quantitative') for i in range(data.shape[1]))
categories_map = {}
classifier = RandomForest(100, 500, 70, type_map, categories_map, 20)
classifier.fit(data, 36, 10)
predictions = classifier.predict(test_data)
pred_train = classifier.predict(data)
actual = extract_column(data, 9)
print(error_rate(pred_train, actual))
results_to_csv(predictions.flatten())
#TESTING DECISION TREE
data, test_data, feature_names, class_names = load_spam()
type_map = dict((i, 'quantitative') for i in range(data.shape[1]))
categories_map = {}
perm = np.random.RandomState(seed=20).permutation((data.shape[0]))
data = data[perm]
data, valid = data[:4137], data[4137:]
best_i = -1
best_error = 1
for i in range(2, 50):
classifier = DecisionTree(type_map, categories_map, feature_names, class_names)
classifier.fit(data, 36, 10)
predictions = classifier.predict(valid)
actual = extract_column(valid, valid.shape[1] - 1)
error = error_rate(predictions, actual)
print(i, error)
if error < best_error:
best_error = error
best_i = i
print(best_i, best_error)
# Best at depth 14 with error 0.11594202898550725
"""
"""
data, test_data, feature_names, class_names = load_spam()
type_map = dict((i, 'quantitative') for i in range(data.shape[1]))
categories_map = {}
perm = np.random.RandomState(seed=20).permutation((data.shape[0]))
data = data[perm]
data, valid = data[:4137], data[4137:]
best_i = -1
best_error = 1
best_j = -1
print("Bagging, depth, error")
for i in range(10, 50):
for j in range(30, 31):
classifier = RandomForest(300, 300, i, type_map, categories_map, 20)
classifier.fit(data, j, 10)
predictions = classifier.predict(valid)
actual = extract_column(valid, 9)
error = error_rate(predictions, actual)
print(i, j, error)
if error < best_error:
best_error = error
best_i = i
best_j = j
print(best_i, best_j, best_error)
"""
return
def q2_4():
print("******RUNNING SPAM DATA SET*****")
data, test_data, feature_names, class_names = load_spam()
type_map = dict((i, 'quantitative') for i in range(data.shape[1]))
categories_map = {}
perm = np.random.RandomState(seed=20).permutation((data.shape[0]))
data = data[perm]
data, valid = data[:4137], data[4137:]
idy = data.shape[1] - 1
classifier = DecisionTree(type_map, categories_map)
classifier.fit(data, 14 , 10)
train_predictions = classifier.predict(data)
train_actual = extract_column(data, idy)
valid_predictions = classifier.predict(valid)
valid_actual = extract_column(valid, idy)
print("Decision Tree training Accuracies: ", error_rate(train_predictions, train_actual))
print("Decision Tree Validation Accuracies: ", error_rate(valid_predictions, valid_actual))
classifier = RandomForest(300, 300, 2, type_map, categories_map, 20)
classifier.fit(data, 10, 10)
train_predictions = classifier.predict(data)
train_actual = extract_column(data, idy)
valid_predictions = classifier.predict(valid)
valid_actual = extract_column(valid, idy)
print("Random Forest training Accuracies: ", error_rate(train_predictions, train_actual))
print("Random Forest Validation Accuracies: ", error_rate(valid_predictions, valid_actual))
return
if __name__ == "__main__":
#plot_q2_5_3()
#q_2_5_2()
#kaggle()
q2_4()
|
import utils
utils.get_lib_addr()
|
"""
This module compares Ruler performance to that of the Python standard
re library. The idea is to match the same few lines of text and
compare how long it takes using re and ruler.
Since measurement error is non-deterministic but always positive, we
take many short measurements and compare the fastest ones encountered.
"""
import re
import timeit
import ruler as r
TIMEIT_ITERATIONS = 10000
ATTEMPTS_COUNT = 50
# These are the strings that will be matched
ann_likes_juice = 'Ann likes to drink juice'
peter_likes_tea = 'Peter likes to drink tea'
john_likes_tea_with_milk = 'John likes to drink tea with milk'
class ReTimer(object):
"""
Match and time the strings using the Python standard re library
"""
def __init__(self):
self.grammar = re.compile(r"""
(?P<who> John|Peter|Ann )
[ ]likes[ ]to[ ]drink
[ ](?P<what>
(?P<juice> juice )
|
(?P<tea> tea ([ ]with[ ](?P<milk> milk ))?)
)""", re.VERBOSE)
self.timer = timeit.Timer('self.match()', globals=locals())
def match(self):
g = self.grammar.match(ann_likes_juice).groupdict()
assert g['who'] == 'Ann'
assert g['what'] == 'juice'
assert g['juice'] is not None
assert g['tea'] is None
assert g['milk'] is None
g = self.grammar.match(peter_likes_tea).groupdict()
assert g['who'] == 'Peter'
assert g['what'] == 'tea'
assert g['juice'] is None
assert g['tea'] is not None
assert g['milk'] is None
g = self.grammar.match(john_likes_tea_with_milk).groupdict()
assert g['who'] == 'John'
assert g['what'] == 'tea with milk'
assert g['juice'] is None
assert g['tea'] is not None
assert g['milk'] is not None
def time(self):
return self.timer.timeit(TIMEIT_ITERATIONS)
class RulerTimer(object):
"""
Match and time the strings using Ruler library
"""
def __init__(self):
class MorningDrink(r.Grammar):
who = r.OneOf('John', 'Peter', 'Ann')
juice = r.Rule('juice')
milk = r.Rule('milk')
tea = r.Rule('tea', r.Optional(' with ', milk))
what = r.OneOf(juice, tea)
grammar = r.Rule(who, ' likes to drink ', what)
self.grammar = MorningDrink.create()
self.timer = timeit.Timer('self.match()', globals=locals())
def match(self):
g = self.grammar
assert g.match(ann_likes_juice)
assert g.who.matched == 'Ann'
assert g.what.matched == 'juice'
assert g.what.juice.matched
assert g.what.tea.matched is None
assert g.match(peter_likes_tea)
assert g.who.matched == 'Peter'
assert g.what.matched == 'tea'
assert g.what.juice.matched is None
assert g.what.tea.matched
assert g.match(john_likes_tea_with_milk)
assert g.who.matched == 'John'
assert g.what.matched == 'tea with milk'
assert g.what.juice.matched is None
assert g.what.tea
assert g.what.tea.milk
def time(self):
return self.timer.timeit(TIMEIT_ITERATIONS)
def main():
re_timer = ReTimer()
ruler_timer = RulerTimer()
re_measurements = []
ruler_measurements = []
for attempt in range(ATTEMPTS_COUNT):
print('Attempt {} out of {}...'.format(attempt+1, ATTEMPTS_COUNT))
re_measurements.append(re_timer.time())
ruler_measurements.append(ruler_timer.time())
print(' re: {:.3f} {}'.format(re_measurements[-1],
'New record!' if re_measurements[-1] == min(re_measurements) else ''))
print(' ruler: {:.3f} {}'.format(ruler_measurements[-1],
'New record!' if ruler_measurements[-1] == min(ruler_measurements) else ''))
print('Performance ratio: {}'.format(int(min(ruler_measurements) / min(re_measurements))))
if __name__ == '__main__':
main()
|
# A program to determine employee eligibility for advancement
# Created by: <your name here>
# Copyright CTHS Engineering, Inc., 2021
# This code, or any portion of this code, cannot be reused without
# prior approval from the company CIO or CEO, in writing.
empName = "Sam"
#Project1(P1) - New school wing
#TA - Task Accuracy
#EstBud - Estimated Budget
#ActBud - Actual Budget
#EstMP - Estimated Manpower
#ActMP - Actual Manpower
empP1TA = 92
empP1EstBud = 1285000
empP1ActBud = 1301346
empP1EstMP = 1625
empP1ActMP = 1650
#Project2 - Custom motorcycle company warehouse
empP2TA = 98
empP2EstBud = 650000
empP2ActBud = 624000
empP2EstMP = 525
empP2ActMP = 515
#Project3 - Minor Nascar training track
empP3TA = 96
empP3EstBud = 2500000
empP3ActBud = 3231325
empP3EstMP = 1050
empP3ActMP = 1250
#Project4 - Man cave warehouse and house
empP4TA = 92
empP4EstBud = 825000
empP4ActBud = 830000
empP4EstMP = 400
empP4ActMP = 375
#your code goes below
|
# -*- coding: utf-8 -*-
import requests, base64, time, os, shutil, glob, csv
from subprocess import call
import smtplib, configparser, ftplib
from datetime import datetime
from pytz import timezone
est = timezone('US/Eastern')
#Where am I running from?
dir_path = os.path.dirname(os.path.realpath(__file__))
# Read INI, set which numbers go to what groups
configobject = configparser.ConfigParser()
configobject.read(dir_path + '\\scriptconfig.ini')
#Just some end points
api = 'https://applications.filebound.com/v4/'
login_end = api + 'login?'
docs_end = api + 'documents/'
files_end = api + 'files/'
fbsite = 'fbsite='
url = fbsite + 'https://burriswebdocs.filebound.com'
#Lists which were used in testing or are currently being used
missingbolpo = []
notmissingbolpo = []
missingorder = []
notmissingorder = []
missingdc = []
notmissingdc = []
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + "Script executing in: " + dir_path)
currenttime = datetime.now(est).strftime("%Y%m%d-%H_%M")
#For this run, this is where the PDFs will live
global image_dir
image_dir = dir_path + '\\images\\' + currenttime + '\\'
spreadsheets = dir_path + '\\data'
if not os.path.exists(spreadsheets):
os.makedirs(spreadsheets)
spreadsheetpath = spreadsheets
def login():
u = configobject['WebDocs']['user']
p = configobject['WebDocs']['pass']
data = {
'username': u,
'password': p
}
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Logging into WebDocs as "{}"'.format(u))
login = login_end + url
r = requests.post(login, data)
if r.status_code == 200:
guid = r.json()
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Logged into WebDocs successfully')
return guid
else:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Error when logging into WebDocs. Check your connection and try again.')
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Status code: ' + str(r.status_code))
def customquer2(bolpo):
url = 'https://applications.filebound.com/v3/query/projectId_2/F1_' + bolpo + '/divider_/binaryData?fbsite=https://burriswebdocs.filebound.com' + guid
r = requests.get(url)
if r.status_code != 200:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Network connectivity error when querying {}'.format(bolpo))
if r.status_code == 200:
data = r.json()
if data[0]['files']['Collection']:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Document with PO {} found on site'.format(bolpo))
notmissingbolpo.append(str(bolpo))
return(data)
else:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Document with PO {} not found on site'.format(bolpo))
def bolprocess(data, currentorder, currentDC, currentpo):
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Successfully opened TSM lookup record')
for file in data[0]['files']['Collection']:
#fileleveldir = dir_path + '\\images\\' + '\\' + currenttime + '\\' + currentDC + '\\' + str(currentorder)
fileleveldir = dir_path + '\\images\\' + currenttime + '\\' + str(currentorder)
if not os.path.exists(fileleveldir):
os.makedirs(fileleveldir)
os.chdir(fileleveldir)
doccount = 0
for i in file['documents']['Collection']:
doccount += 1
docId = i['documentId']
extension = i['extension']
binaryData = i['binaryData']
convertedbinaryData = base64.b64decode(binaryData)
playfile = str(str(docId) + '.' + extension)
# print(fileleveldir)
with open(playfile, 'wb') as f:
f.write(convertedbinaryData)
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Downloaded page #' + str(doccount) + ' of PO {}'.format(currentpo))
#currentdirectory = (os.getcwd())
#filesinourdir = [f for f in listdir(currentdirectory) if isfile(join(currentdirectory, f))]
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Saved to: '+fileleveldir)
os.chdir("..")
try:
#call(["C:\\WINDOWS\\system32\\WindowsPowerShell\\v1.0\\powershell.exe", "-windowstyle", "hidden", "convert.exe", fileleveldir + "//*", fileleveldir + '.PDF'])
call(["C:\\WINDOWS\\system32\\WindowsPowerShell\\v1.0\\powershell.exe", "convert.exe", fileleveldir + "//*", fileleveldir + '.PDF'])
shutil.rmtree(fileleveldir)
        print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Converted PO {} to PDF successfully'.format(currentpo))
except:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Error when converting PO {} to PDF'.format(currentpo))
def newopen(path):
for file in glob.glob(spreadsheetpath + '\\*.xls'):
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'TMS lookup file {} found at {}'.format(file ,spreadsheetpath))
newname = file + '.processing'
newnewname = file
os.rename(file, newname)
with open(newname) as csvfile:
neat = csv.DictReader(csvfile, delimiter='\t')
for row in neat:
currentpo = row['PONumber']
currentorder = row['ord_hdrnumber']
currentDC = row['DC']
print('\n' + datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Performing query on PO {} with location code {}'.format(currentpo, currentDC))
data = customquer2(currentpo)
try:
bolprocess(data, currentorder, currentDC, currentpo)
except TypeError as notfound:
missingbolpo.append(str(currentpo))
missingorder.append(str(currentorder))
missingdc.append(str(currentDC))
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Added PO ' + str(currentpo) + ' to the missing log')
except TimeoutError as nointernet:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Cannot reach the WebDocs server. Test and confirm your network connection.')
except ConnectionError as err:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Cannot reach the WebDocs server. Test and confirm your network connection.')
except ConnectionAbortedError:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Cannot reach the WebDocs server. Test and confirm your network connection.')
processed = spreadsheets + '\\completed\\' + currenttime + '\\'
if not os.path.exists(processed):
os.makedirs(processed)
os.rename(newname, newnewname)
shutil.move(newnewname, processed)
os.chdir(processed)
if len(missingbolpo) > 0:
missingfile = open(processed + '\\missing.txt', 'w')
for item in missingbolpo:
missingfile.write("%s\n" % item)
def checkNet():
try:
r = requests.get('http://www.google.com/')
ayy = r.raise_for_status()
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + str(ayy))
return True
    except requests.exceptions.HTTPError as err:
        print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + str(err))
        return False
    except requests.exceptions.ConnectionError as err:
        print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + str(err))
        return False
result = checkNet()
if result == True:
guid = '&guid=' + login()
newopen(spreadsheetpath)
if result == False:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + '\nCannot reach the internet. Check your connection and try again.')
#Create some lists to store which group's docs are missing for emailing
sevenone = []
ohone = []
seventhree = []
fourthree = []
nineone = []
#Assemble the missing PO numbers to the location codes they should be emailed to
for i in range(len(missingbolpo)):
if missingdc[i] == '071':
sevenone.append(missingbolpo[i])
elif missingdc[i] == '001':
ohone.append(missingbolpo[i])
elif missingdc[i] == '073':
seventhree.append(missingbolpo[i])
elif missingdc[i] == '043':
fourthree.append(missingbolpo[i])
elif missingdc[i] == '091':
nineone.append(missingbolpo[i])
else:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'If you see this, then your input spreadsheet has unrecognized values in the DC column.\n')
#Ready the ini's email configs
subject = configobject['Emails']['subject']
body = configobject['Emails']['body']
email_from = configobject['Emails']['from']
p = configobject['Emails']['pass']
#This block of code looks at the length of the missing group arrays
#and if the length of an array is more than 0, it converts the array's content
#to string and then places it inside an email body to send
print('----')
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Lookup and retrievals complete.')
print('----')
if len(missingbolpo) > 0:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Emailing missing PO numbers to email groups')
try:
if len(sevenone) > 0:
email_to = configobject['Emails']['071']
email_sevenone = ", ".join(sevenone)
gm = email_session(email_from, email_to)
gm.send_message(subject, body.format(email_sevenone))
if len(ohone) > 0:
email_to = configobject['Emails']['001']
email_ohone = ", ".join(ohone)
gm = email_session(email_from, email_to)
gm.send_message(subject, body.format(email_ohone))
if len(seventhree) > 0:
email_to = configobject['Emails']['073']
email_seventhree = ", ".join(seventhree)
gm = email_session(email_from, email_to)
gm.send_message(subject, body.format(email_seventhree))
if len(fourthree) > 0:
email_to = configobject['Emails']['043']
email_fourthree = ", ".join(fourthree)
gm = email_session(email_from, email_to)
gm.send_message(subject, body.format(email_fourthree))
if len(nineone) > 0:
email_to = configobject['Emails']['091']
email_nineone = ", ".join(nineone)
gm = email_session(email_from, email_to)
gm.send_message(subject, body.format(email_nineone))
        print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Emails sent successfully')
emailsuccess = True
except:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'WARNING!')
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") +'Unable to send emails. Check SMTP server, and email credentials.\n')
emailsuccess = False
else:
    print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'No emails to send at this time.')
    # Nothing failed, so don't trip the failure message at the end of the run.
    emailsuccess = True
### FTP beyond this point
server = configobject['FTP']['server']
username = configobject['FTP']['username']
password = configobject['FTP']['password']
purgefiles = configobject['housekeeping']['purge_images']
def uploadThis(path):
files = os.listdir(path)
os.chdir(path)
for f in files:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Uploading {} to FTP Server at {} as {}'.format(f, server, username))
if os.path.isfile(path + r'\{}'.format(f)):
fh = open(f, 'rb')
myFTP.storbinary('STOR %s' % f, fh)
fh.close()
elif os.path.isdir(path + r'\{}'.format(f)):
myFTP.mkd(f)
myFTP.cwd(f)
uploadThis(path + r'\{}'.format(f))
myFTP.cwd('..')
os.chdir('..')
myFTP.quit()
if not os.path.exists(image_dir):
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'No TMS lookup file to process in: {}'.format(spreadsheetpath))
else:
downloaded_images = os.listdir(image_dir)
print('\n' + datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Uploading PDFs from {}'.format(image_dir))
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Connecting to FTP at {} as {}'.format(server, username))
try:
myFTP = ftplib.FTP(server, username, password)
uploadThis(image_dir)
if purgefiles == 'True':
print('\n' + datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Downloaded images from this run are set to delete')
shutil.rmtree(image_dir)
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Downloaded images have been removed')
else:
print('\n' + datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Downloaded images are set to NOT delete')
except:
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Unable to reach FTP server at {} as {}'.format(server, username))
exit()
if emailsuccess != True:
print('\n' + datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Script executed, but emails were NOT sent')
print(datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Check the email server and the ini and confirm the email account credentials are correct')
else:
print('\n' + datetime.now(est).strftime("%m/%d/%Y %H:%M:%S - ") + 'Script executed successfully')
|
#
# https://github.com/tensorflow/docs/blob/master/site/en/tutorials/sequences/text_generation.ipynb
from __future__ import absolute_import, division, print_function
import tensorflow as tf
print (tf.__version__)
#tf.enable_eager_execution()
import numpy as np
import os
import time
## Setup
def loss(labels, logits):
return tf.keras.backend.sparse_categorical_crossentropy(labels, logits, from_logits=True)
def split_input_target(chunk):
input_text = chunk[:-1]
target_text = chunk[1:]
return input_text, target_text
class Trainer():
def __init__(self):
pass
def train(self, path_to_file):
text = open(path_to_file, 'rb').read().decode(encoding='utf-8')
vocab = sorted(set(text))
self.char2idx = {u:i for i, u in enumerate(vocab)}
print ("self.char2idx")
print (self.char2idx)
self.idx2char = np.array(vocab)
text_as_int = np.array([self.char2idx[c] for c in text])
seq_length = 100
examples_per_epoch = len(text)//seq_length
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
sequences = char_dataset.batch(seq_length+1, drop_remainder=True)
dataset = sequences.map(split_input_target)
# Batch size
BATCH_SIZE = 64
steps_per_epoch = examples_per_epoch//BATCH_SIZE
BUFFER_SIZE = 10000
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
        print(dataset)
# rnn_units = 256
rnn_units = 1024
embedding_dim = 256
self.model = self.build_model(vocab_size = len(vocab),
embedding_dim=embedding_dim,
rnn_units=rnn_units,
batch_size=BATCH_SIZE)
self.model.compile(optimizer = tf.train.AdamOptimizer(), loss = loss)
# Directory where the checkpoints will be saved
checkpoint_dir = '.\\training_checkpoints'
# Name of the checkpoint files
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}")
checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_prefix, save_weights_only=True)
EPOCHS=2
## Generate Text
history = self.model.fit(dataset.repeat(), epochs=EPOCHS,
steps_per_epoch=steps_per_epoch, callbacks=[checkpoint_callback])
tf.train.latest_checkpoint(checkpoint_dir)
vocab_size = len(vocab)
self.model = self.build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
self.model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
self.model.build(tf.TensorShape([1, None]))
## Build The Model
def build_model(self, vocab_size, embedding_dim, rnn_units, batch_size):
rnn = None
if tf.test.is_gpu_available():
rnn = tf.keras.layers.CuDNNGRU
else:
import functools
rnn = functools.partial(tf.keras.layers.GRU,
recurrent_activation='sigmoid')
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim,
batch_input_shape=[batch_size, None]),
rnn(rnn_units, return_sequences=True,
recurrent_initializer='glorot_uniform',
stateful=True),
rnn(rnn_units, return_sequences=True,
recurrent_initializer='glorot_uniform',
stateful=True),
tf.keras.layers.Dense(vocab_size)])
return model
def generate_text(self, start_string):
# Evaluation step (generating text using the learned model)
# Number of characters to generate
num_generate = 1000
# Converting our start string to numbers (vectorizing)
input_eval = [self.char2idx[s] for s in start_string]
input_eval = tf.expand_dims(input_eval, 0)
# Empty string to store our results
text_generated = []
        # Low temperatures result in more predictable text.
        # Higher temperatures result in more surprising text.
# Experiment to find the best setting.
temperature = 1.0
# Here batch size == 1
self.model.reset_states()
for i in range(num_generate):
predictions = self.model(input_eval)
# remove the batch dimension
predictions = tf.squeeze(predictions, 0)
            # using a multinomial distribution to predict the character returned by the model
predictions = predictions / temperature
predicted_id = tf.multinomial(predictions, num_samples=1)[-1,0].numpy()
            # We pass the predicted character as the next input to the model
            # along with the previous hidden state
input_eval = tf.expand_dims([predicted_id], 0)
text_generated.append(self.idx2char[predicted_id])
return (start_string + ''.join(text_generated))
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
print (path_to_file)
trainer = Trainer()
trainer.train(path_to_file)
#print(trainer.generate_text(start_string=u"ROMEO: "))
#tf.keras.models.save_model(
# model,
# "mine.h5",
# overwrite=True,
# include_optimizer=True)
|
from grafo import Grafo
CERO = 0
UNO = 1
class Parser(object):
def escribir_stable_matching(self, nombre, E, H, Q):
"""Escribe un archivo del tipo Stable Matching"""
try:
mi_arch = open(nombre, 'w')
n = len(E)
mi_arch.write(str(n) + '\n')
for i in range(0, n):
numeros = " ".join(str(x) for x in E[i]) + '\n' # Deberia haber m numeros
mi_arch.write(numeros)
m = len(H)
mi_arch.write(str(m) + '\n')
for i in range(0, m):
numeros = " ".join(str(x) for x in H[i]) + '\n' # Deberia haber n numeros
mi_arch.write(numeros)
numeros = " ".join(str(x) for x in Q) + '\n' # Deberia haber m numeros
mi_arch.write(numeros)
mi_arch.close()
return True
except:
print "Ocurrio un error leyendo el archivo de Stable Matching " + nombre
return False
def _read_line(self, mi_arch):
return mi_arch.readline().strip("\n")
def _read_line_int_list(self, mi_arch):
return [int(i) for i in self._read_line(mi_arch).split(" ")]
def leer_stable_matching(self, nombre):
"""Escribe un archivo del tipo Stable Matching"""
try:
mi_arch = open(nombre, 'r')
n = int(self._read_line(mi_arch))
E = []
for i in range(0, n):
E.append(self._read_line_int_list(mi_arch))
m = int(self._read_line(mi_arch))
H = []
for i in range(0, m):
H.append(self._read_line_int_list(mi_arch))
Q = self._read_line_int_list(mi_arch)
mi_arch.close()
return E, H, Q
except:
print "Ocurrio un error leyendo el archivo"
return False
def leer_grafo_no_dirigido(self, nombre):
"""Lee un archivo de un grafo no dirigido sin peso"""
try:
grafo = Grafo()
grafo.leer_no_dirigido(nombre)
return grafo
except:
print "Ocurrio un error leyendo el archivo de grafo no dirigido " + nombre
return False
def leer_grafo_dirigido(self, nombre):
"""Lee un archivo de un grafo dirigido sin peso"""
try:
grafo = Grafo()
grafo.leer_dirigido(nombre)
return grafo
except:
print "Ocurrio un error leyendo el archivo de grafo dirigido " + nombre
return False
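# Round-trip sketch (illustrative file name and tiny preference lists):
#
# p = Parser()
# E = [[0, 1], [1, 0]]
# H = [[0, 1], [1, 0]]
# Q = [1, 1]
# p.escribir_stable_matching('ejemplo.txt', E, H, Q)
# assert p.leer_stable_matching('ejemplo.txt') == (E, H, Q)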
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" USB3 link-layer abstraction."""
from amaranth import *
from ...stream import USBRawSuperSpeedStream, SuperSpeedStreamArbiter, SuperSpeedStreamInterface
from ..physical.coding import IDL
from .idle import IdleHandshakeHandler
from .ltssm import LTSSMController
from .header import HeaderQueue, HeaderQueueArbiter
from .receiver import HeaderPacketReceiver
from .transmitter import PacketTransmitter
from .timers import LinkMaintenanceTimers
from .ordered_sets import TSTransceiver
from .data import DataPacketReceiver, DataPacketTransmitter, DataHeaderPacket
class USB3LinkLayer(Elaboratable):
""" Abstraction encapsulating the USB3 link layer hardware.
Performs the lower-level data manipulations associated with transporting USB3 packets
from place to place.
"""
def __init__(self, *, physical_layer, ss_clock_frequency=125e6):
self._physical_layer = physical_layer
self._clock_frequency = ss_clock_frequency
#
# I/O port
#
# Header packet exchanges.
self.header_sink = HeaderQueue()
self.header_source = HeaderQueue()
# Data packet exchange interface.
self.data_source = SuperSpeedStreamInterface()
self.data_header_from_host = DataHeaderPacket()
self.data_source_complete = Signal()
self.data_source_invalid = Signal()
self.data_sink = SuperSpeedStreamInterface()
self.data_sink_send_zlp = Signal()
self.data_sink_sequence_number = Signal(5)
self.data_sink_endpoint_number = Signal(4)
self.data_sink_length = Signal(range(1024 + 1))
self.data_sink_direction = Signal()
# Device state for header packets
self.current_address = Signal(7)
# Status signals.
self.trained = Signal()
self.ready = Signal()
self.in_reset = Signal()
# Test and debug signals.
self.disable_scrambling = Signal()
def elaborate(self, platform):
m = Module()
physical_layer = self._physical_layer
# Mark ourselves as always consuming physical-layer packets.
m.d.comb += physical_layer.source.ready.eq(1)
#
# Training Set Detectors/Emitters
#
training_set_source = USBRawSuperSpeedStream()
m.submodules.ts = ts = TSTransceiver()
m.d.comb += [
# Note: we bring the physical layer's "raw" (non-descrambled) source to the TS detector,
# as we'll still need to detect non-scrambled TS1s and TS2s if they arrive during normal
# operation.
ts.sink .tap(physical_layer.raw_source),
training_set_source .stream_eq(ts.source)
]
#
# Idle handshake / logical idle detection.
#
m.submodules.idle = idle = IdleHandshakeHandler()
m.d.comb += idle.sink.tap(physical_layer.source)
#
# U0 Maintenance Timers
#
m.submodules.timers = timers = LinkMaintenanceTimers(ss_clock_frequency=self._clock_frequency)
#
# Link Training and Status State Machine (LTSSM)
#
m.submodules.ltssm = ltssm = LTSSMController(ss_clock_frequency=self._clock_frequency)
m.d.comb += [
ltssm.phy_ready .eq(physical_layer.ready),
# For now, we'll consider ourselves in USB reset iff we detect reset signaling.
# This should be expanded; ideally to also consider e.g. loss of VBUS on some devices.
ltssm.in_usb_reset .eq(physical_layer.lfps_reset_detected | ~physical_layer.vbus_present),
# Link Partner Detection
physical_layer.perform_rx_detection .eq(ltssm.perform_rx_detection),
ltssm.link_partner_detected .eq(physical_layer.link_partner_detected),
ltssm.no_link_partner_detected .eq(physical_layer.no_link_partner_detected),
# Pass down our link controls to the physical layer.
physical_layer.tx_electrical_idle .eq(ltssm.tx_electrical_idle),
physical_layer.engage_terminations .eq(ltssm.engage_terminations),
physical_layer.invert_rx_polarity .eq(ltssm.invert_rx_polarity),
physical_layer.train_equalizer .eq(ltssm.train_equalizer),
# LFPS control.
ltssm.lfps_polling_detected .eq(physical_layer.lfps_polling_detected),
physical_layer.send_lfps_polling .eq(ltssm.send_lfps_polling),
ltssm.lfps_cycles_sent .eq(physical_layer.lfps_cycles_sent),
# Training set detectors
ltssm.tseq_detected .eq(ts.tseq_detected),
ltssm.ts1_detected .eq(ts.ts1_detected),
ltssm.inverted_ts1_detected .eq(ts.inverted_ts1_detected),
ltssm.ts2_detected .eq(ts.ts2_detected),
ltssm.hot_reset_requested .eq(ts.hot_reset_requested),
ltssm.loopback_requested .eq(ts.loopback_requested),
ltssm.no_scrambling_requested .eq(ts.no_scrambling_requested),
# Training set emitters
ts.send_tseq_burst .eq(ltssm.send_tseq_burst),
ts.send_ts1_burst .eq(ltssm.send_ts1_burst),
ts.send_ts2_burst .eq(ltssm.send_ts2_burst),
ts.request_hot_reset .eq(ltssm.request_hot_reset),
ts.request_no_scrambling .eq(ltssm.request_no_scrambling),
ltssm.ts_burst_complete .eq(ts.burst_complete),
# Scrambling control.
physical_layer.enable_scrambling .eq(ltssm.enable_scrambling),
# Idle detection.
idle.enable .eq(ltssm.perform_idle_handshake),
ltssm.idle_handshake_complete .eq(idle.idle_handshake_complete),
            # Link maintenance.
timers.enable .eq(ltssm.link_ready),
# Status signaling.
self.trained .eq(ltssm.link_ready),
self.in_reset .eq(ltssm.request_hot_reset | ltssm.in_usb_reset),
# Test and debug.
ltssm.disable_scrambling .eq(self.disable_scrambling),
]
#
# Packet transmission path.
# Accepts packets from the protocol and link layers, and transmits them.
#
# Transmit header multiplexer.
m.submodules.hp_mux = hp_mux = HeaderQueueArbiter()
hp_mux.add_producer(self.header_sink)
# Core transmitter.
m.submodules.transmitter = transmitter = PacketTransmitter()
m.d.comb += [
transmitter.sink .tap(physical_layer.source),
transmitter.enable .eq(ltssm.link_ready),
transmitter.usb_reset .eq(self.in_reset),
transmitter.queue .header_eq(hp_mux.source),
# Link state management handling.
timers.link_command_received .eq(transmitter.link_command_received),
self.ready .eq(transmitter.bringup_complete),
]
#
# Header Packet Rx Path.
# Receives header packets and forwards them up to the protocol layer.
#
m.submodules.header_rx = header_rx = HeaderPacketReceiver()
m.d.comb += [
header_rx.sink .tap(physical_layer.source),
header_rx.enable .eq(ltssm.link_ready),
header_rx.usb_reset .eq(self.in_reset),
# Bring our header packet interface to the protocol layer.
self.header_source .header_eq(header_rx.queue),
# Keepalive handling.
timers.link_command_transmitted .eq(header_rx.source.valid),
header_rx.keepalive_required .eq(timers.schedule_keepalive),
timers.packet_received .eq(header_rx.packet_received),
# Transmitter event path.
header_rx.retry_required .eq(transmitter.retry_required),
transmitter.lrty_pending .eq(header_rx.lrty_pending),
header_rx.retry_received .eq(transmitter.retry_received),
# For now, we'll reject all forms of power management by sending a REJECT
# whenever we receive an LGO (Link Go-to) request.
header_rx.reject_power_state .eq(transmitter.lgo_received),
]
#
# Link Recovery Control
#
m.d.comb += ltssm.trigger_link_recovery.eq(
timers.transition_to_recovery |
header_rx.recovery_required |
transmitter.recovery_required
)
#
# Data packet handlers.
#
# Receiver.
m.submodules.data_rx = data_rx = DataPacketReceiver()
m.d.comb += [
data_rx.sink .tap(physical_layer.source),
# Data interface to Protocol layer.
self.data_source .stream_eq(data_rx.source),
self.data_header_from_host .eq(data_rx.header),
self.data_source_complete .eq(data_rx.packet_good),
self.data_source_invalid .eq(data_rx.packet_bad),
]
# Transmitter.
m.submodules.data_tx = data_tx = DataPacketTransmitter()
hp_mux.add_producer(data_tx.header_source)
m.d.comb += [
transmitter.data_sink .stream_eq(data_tx.data_source),
# Device state information.
data_tx.address .eq(self.current_address),
# Data interface from Protocol layer.
data_tx.data_sink .stream_eq(self.data_sink),
data_tx.send_zlp .eq(self.data_sink_send_zlp),
data_tx.sequence_number .eq(self.data_sink_sequence_number),
data_tx.endpoint_number .eq(self.data_sink_endpoint_number),
data_tx.data_length .eq(self.data_sink_length),
data_tx.direction .eq(self.data_sink_direction)
]
#
# Transmit stream arbiter.
#
m.submodules.stream_arbiter = arbiter = SuperSpeedStreamArbiter()
# Add each of our streams to our arbiter, from highest to lowest priority.
arbiter.add_stream(training_set_source)
arbiter.add_stream(header_rx.source)
arbiter.add_stream(transmitter.source)
# If we're idle, send logical idle.
with m.If(arbiter.idle):
m.d.comb += [
# Drive our idle stream with our IDL value (0x00)...
physical_layer.sink.valid .eq(1),
physical_layer.sink.data .eq(IDL.value),
physical_layer.sink.ctrl .eq(IDL.ctrl),
# Let the physical layer know it can insert CTC skips whenever data is being accepted
# from our logical idle stream.
physical_layer.can_send_skp .eq(1)
]
# Otherwise, output our stream data.
with m.Else():
m.d.comb += physical_layer.sink.stream_eq(arbiter.source)
return m
|
from django.test import TestCase
from common.models import Injection
from common.models import CRI
from calc.forms import CalcInjForm, CRISimpleForm, CRIAdvancedForm, CRIInsulinForm, CRICPRForm, CRIMetoclopramideForm
class InjectionTest(TestCase):
def test_injection_page_renders_injection_page_template(self):
response = self.client.get('/calc/injection/')
self.assertTemplateUsed(response, 'calc/injection.html')
def test_injection_page_passes_rx_context(self):
med = Injection.objects.create(name='Tramadol')
response = self.client.get('/calc/injection/')
self.assertIn(med, list(response.context['rx'].items())[0])
def test_injection_page_uses_form(self):
response = self.client.get('/calc/injection/')
self.assertIsInstance(response.context['form'], CalcInjForm)
def test_can_unpack_zipped_rx_and_dosage(self):
Injection.objects.create(name='Tramadol', factor=1/50)
response = self.client.get('/calc/injection/',
data={'weight': 9.2}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(list(response.context['rx'].items())[0][0], Injection.objects.first())
self.assertAlmostEqual(list(response.context['rx'].items())[0][1], 0.184)
def test_injection_submit_button_does_not_send_empty_string(self):
response = self.client.get('/calc/injection/', data={'weight': ''})
self.assertNotEqual(500, response.status_code)
class CRISimpleTest(TestCase):
def test_cri_simple_uses_cri_simple_template(self):
response = self.client.get('/calc/cri/simple/')
self.assertTemplateUsed(response, 'calc/cri_simple.html')
def test_cri_simple_calc_uses_cri_simple_form(self):
response = self.client.get('/calc/cri/simple/')
self.assertIsInstance(response.context['form'], CRISimpleForm)
def test_cri_simple_calc_only_retrieves_ez_cri(self):
med1 = CRI.objects.create(name='Morphine', calc_type='ez')
med2 = CRI.objects.create(name='Super Morphine', calc_type='adv')
response = self.client.get('/calc/cri/simple/', data={'weight': 25.00})
self.assertIn(med1, response.context['rx'])
self.assertNotIn(med2, response.context['rx'])
def test_cri_simple_page_returns_correct_dosages(self):
CRI.objects.create(name='Morphine', calc_type='ez', rates=[0.05, 0.5, 0.1, 1.0], factor=1/15, units='mg')
response = self.client.get('/calc/cri/simple/', data={'weight': 25.00},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
dosage = [(0.05, 0.083), (0.5, 0.833), (0.1, 0.167), (1.0, 1.667)]
self.assertIn(dosage, response.context['rx'].values())
def test_cri_simple_calc_does_not_submit_empty_strings(self):
response = self.client.get('/calc/cri/simple/', data={'weight': ''},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertNotEqual(500, response.status_code)
def test_cri_simple_calc_computes_slow_bolus(self):
response = self.client.get('/calc/cri/simple/', data={'weight': 27.50},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
bolus = {'mg': 6.875, 'mL': 1.375}
self.assertEqual(bolus, response.context['bolus'])
class CRIAdvancedTest(TestCase):
def test_cri_advanced_uses_cri_advanced_template(self):
response = self.client.get('/calc/cri/advanced/')
self.assertTemplateUsed(response, 'calc/cri_advanced.html')
def test_cri_advanced_uses_cri_advanced_form(self):
response = self.client.get('/calc/cri/advanced/')
self.assertIsInstance(response.context['form'], CRIAdvancedForm)
def test_cri_advanced_view_only_retrieves_adv_cri(self):
med1 = CRI.objects.create(name='Robitussin', calc_type='ez')
med2 = CRI.objects.create(name='Robitussin DX', calc_type='adv')
response = self.client.get('/calc/cri/advanced/', data={'weight': 25.00,
'rate': 1,
'volume': 250,
'infusion': 10})
self.assertIn(med2, response.context['rx'])
self.assertNotIn(med1, response.context['rx'])
def test_cri_advanced_calc_does_not_submit_empty_strings(self):
response = self.client.get('/calc/cri/advanced/', data={'weight': '',
'rate': '',
'volume': '',
'infusion': ''},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertNotEqual(500, response.status_code)
def test_cri_advanced_page_returns_correct_dosages(self):
CRI.objects.create(name='Dopamine', calc_type='adv', factor=1/40000)
response = self.client.get('/calc/cri/advanced/', data={'weight': 2.5,
'rate': 1,
'volume': 250,
'infusion': 10},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
dosage = {'maint': 6.875, 'maint_plus': 6.042, 'add': 9.375}
self.assertEqual(dosage, list(response.context['rx'].values())[0])
class CRIInsulinTest(TestCase):
def test_cri_insulin_uses_cri_insulin_template(self):
response = self.client.get('/calc/cri/insulin/')
self.assertTemplateUsed(response, 'calc/cri_insulin.html')
def test_cri_insulin_uses_insulin_form(self):
response = self.client.get('/calc/cri/insulin/')
self.assertIsInstance(response.context['form'], CRIInsulinForm)
def test_cri_insulin_returns_correct_dosage(self):
response = self.client.get('/calc/cri/insulin/', data={'weight': 5.5,
'rate': 175,
'volume': 1000,
'replacement': 0.12},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
dosage = {'maint': 15.125, 'maint_plus': 9.792,
'units_dog': 2.881, 'units_cat': 1.44,
'phosphorus': 1.257, 'phosphorus_excess': 5.531}
self.assertEqual(dosage, response.context['rx'])
def test_cri_insulin_does_not_submit_empty_strings(self):
response = self.client.get('/calc/cri/insulin/', data={'weight': '',
'rate': '',
'volume': '',
'replacement': ''},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertNotEqual(500, response.status_code)
class CRICPRTest(TestCase):
def test_cri_cpr_uses_cri_cpr_template(self):
response = self.client.get('/calc/cri/cpr/')
self.assertTemplateUsed(response, 'calc/cri_cpr.html')
def test_cri_cpr_uses_cri_cpr_form(self):
response = self.client.get('/calc/cri/cpr/')
self.assertIsInstance(response.context['form'], CRICPRForm)
def test_cri_cpr_returns_correct_dosage(self):
response = self.client.get('/calc/cri/cpr/', data={'weight': 0.5,
'rate': 1,
'volume': 10,
'dobutamine': 4,
'dopamine': 3,
'lidocaine': 60},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
dosage = {'maint': 1.375, 'maint_plus': 3.542,
'dose_dobutamine': 0.096, 'dose_dopamine': 0.022, 'dose_lidocaine': 0.90, 'dose_epinephrine': 0.30,
'dose_mannitol': 2.0, 'dose_solumedrol': 15.0}
self.assertEqual(dosage, response.context['rx'])
def test_cri_cpr_does_not_submit_empty_strings(self):
response = self.client.get('/calc/cri/cpr', data={'weight': '',
'rate': '',
'volume': '',
'dobutamine': '',
'dopamine': '',
'lidocaine': ''},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertNotEqual(500, response.status_code)
class CRIMetoclopramideTest(TestCase):
def test_cri_metoclopramide_uses_cri_metoclopramide_template(self):
response = self.client.get('/calc/cri/metoclopramide/')
self.assertTemplateUsed(response, 'calc/cri_metoclopramide.html')
def test_cri_metoclopramide_uses_metoclopramide_form(self):
response = self.client.get('/calc/cri/metoclopramide/')
self.assertIsInstance(response.context['form'], CRIMetoclopramideForm)
def test_cri_metoclopramide_returns_correct_dosage(self):
response = self.client.get('/calc/cri/metoclopramide/', data={'weight': 4.0,
'rate': 10,
'volume': 100,
'infusion': 4,
'inc_volume': 100,
'inc_infusion': 0.5},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
dosage = {'maint': 11.0, 'maint_plus': 7.917,
'dose': 1.333, 'concentration': 0.067,
'inc_dose': 0.167, 'inc_infusion': 4.5, 'inc_rate': 11.25}
self.assertEqual(dosage, response.context['rx'])
def test_cri_metoclopramide_does_not_submit_empty_strings(self):
response = self.client.get('/calc/cri/metoclopramide/', data={'weight': '',
'rate': '',
'volume': '',
'infusion': '',
'inc_volume': '',
'inc_infusion': ''},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertNotEqual(500, response.status_code)
def test_increase_dosage_fields_are_optional(self):
response = self.client.get('/calc/cri/metoclopramide/', data={'weight': 4.0,
'rate': 10,
'volume': 100,
'infusion': 4,
'inc_volume': '',
'inc_infusion': ''},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
dosage = {'maint': 11.0, 'maint_plus': 7.917,
'dose': 1.333, 'concentration': 0.067}
self.assertNotEqual(500, response.status_code)
self.assertEqual(dosage, response.context['rx'])
|
# -*- coding:utf8 -*-
import xlrd, os, csv, sys
reload(sys)
sys.setdefaultencoding("utf-8")
crash, maint, frame = {}, {}, {}
dicts = {3:crash, 4:crash, 5:maint, 6:frame}
files = os.listdir('..'+os.sep+'input')
for filename in files:
filedata = {} # all data in the file
workbook = xlrd.open_workbook('..'+os.sep+'input'+os.sep+filename)
for booksheet in workbook.sheets():
for row in range(1, booksheet.nrows):
for col in range(3, booksheet.ncols):
colvalue = str(booksheet.cell(row, col).value).replace('\n', '')
colvalue = colvalue.replace('\r', '').strip()
                if col < 7 and colvalue not in dicts[col]:
dicts[col][colvalue] = filename
outputs = {'crash': crash, 'maint': maint, 'frame': frame}
for fstr, data_dict in outputs.items():
    f = open('..'+os.sep+'00000000'+fstr+'.csv', 'w')
    writer = csv.writer(f, delimiter=',')
    for s in data_dict.items():
        writer.writerow(s)
    f.close()
|
import rake
import operator
import sys
text = sys.argv[1]
rake_object = rake.Rake("stopwords_pt.txt", 4, 3, 0)
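# Assumed RAKE signature: (stopwords file, min chars per word, max words per
# phrase, min keyword frequency) -- as in the common extended RAKE port.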
#sample_file = open("x", 'r')
#text = sample_file.read()
keywords = rake_object.run(text)
print "Keywords:", keywords
print text
kw = []
for name in keywords:
    # keep phrases whose RAKE score is above 1
    if name[1] > 1:
        kw.append(name[0])
myList = ','.join(map(str, kw))
print myList
|
# from bs4 import BeautifulSoup
# import urllib.request
#
# url = "F:/BlogDuCinema/201611110진교준/ㄱ.html"
# soup = BeautifulSoup(urllib.request.urlopen(url).read(), 'html.parser')
# pkg_list = soup.findAll("div", "words")
#
# count = 1
# for i in pkg_list:
# title = i.findAll('a')
# print(count, "위: ", str(title)[str(title).find('title="')+7:str(title).find('">')])
# count += 1
#
alpha = "ㅎ"
sql = open("../dictionaryqueries.sql", "a", encoding="UTF8")
file = open(alpha+".txt", encoding="UTF8")
cnt = 0
for line in file.readlines():
    # lines come in 3-line groups (word / skipped line / definition), judging by the modulo pattern
    if cnt % 3 == 0:
        sql.write("(\"" + alpha + "\", \"" + line.rstrip('\n') + "\", \"")
    elif (cnt - 2) % 3 == 0:
        sql.write(line.rstrip('\n') + "\"),")
        sql.write("\n")
    cnt += 1
sql.write("\n")
sql.close()
|
#!/usr/bin/env python3
import argparse
import re
import sys
import yaml
from matrix_client.client import MatrixClient
# Not going to care for specifics like the underscore.
# Generally match !anything:example.com with unicode support.
room_pattern = re.compile(r'^!\w+:[\w\-.]+$')
def send_message(cfg, args):
client = MatrixClient(cfg["matrix"]["server"])
client.login(username=cfg["matrix"]["username"], password=cfg["matrix"]["password"])
room = client.join_room(room_id_or_alias=args.channel)
    if args.html:  # nargs='?' always sets the attribute, so test the value, not membership
body = None if len(args.text) == 0 else str(args.text)
room.send_html(html=args.html, body=body, msgtype=args.type)
else:
room.client.api.send_message(room_id=room.room_id, text_content=args.text, msgtype=args.type)
def main():
"""
config.yml Example:
matrix:
server: https://matrix.org
username: ...
password: "..."
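    Usage (illustrative):
        notify.py -c '!roomid:example.org' 'plain text' ['<b>optional html</b>']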
"""
with open("config.yml", 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)
parser = argparse.ArgumentParser(description='Notify a matrix channel.')
parser.add_argument('-c', '--channel', required=True, help='the channel to send the message to')
parser.add_argument('-t', '--type', required=False, help='the msgtype',
choices=('m.text', 'm.notice'), default='m.text')
parser.add_argument('text', help='the text message to send to the channel')
parser.add_argument('html', nargs='?', help='the html message to send to the channel')
args = parser.parse_args()
if room_pattern.fullmatch(args.channel) is None:
print("ERROR: Couldn't parse channel as a matrix channel", file=sys.stderr)
sys.exit(1)
send_message(cfg, args)
print("Message sent.", file=sys.stderr)
if __name__ == "__main__":
main()
|
from server.currentaccount.models import CurrentAccount
from django.contrib.auth.models import User
from tastypie.authentication import BasicAuthentication
from tastypie.authorization import DjangoAuthorization
from tastypie.resources import ModelResource
from tastypie import fields
class UserResource(ModelResource):
class Meta:
queryset = User.objects.all()
resource_name = 'user'
excludes = ['email', 'password', 'is_active', 'is_staff', 'is_superuser']
allowed_methods = ['get', ]
authentication = BasicAuthentication()
authorization = DjangoAuthorization()
class CurrentAccountResource(ModelResource):
owner = fields.ForeignKey(UserResource, 'owner')
class Meta:
queryset = CurrentAccount.objects.all()
resource_name = 'account'
authentication = BasicAuthentication()
authorization = DjangoAuthorization()
|
head = 0
tail = 0
import random
for i in range(1, 5001):
    num = random.random()
    num_rounded = round(num)
    if num_rounded == 1:
        head += 1
        print "Attempt #" + str(i) + ": Throwing a coin... it's a head!... Got " + str(head) + " heads so far and " + str(tail) + " tails so far"
    else:
        tail += 1
        print "Attempt #" + str(i) + ": Throwing a coin... it's a tail!... Got " + str(head) + " heads so far and " + str(tail) + " tails so far"
|
from panda3d.core import RenderState, ColorAttrib, Vec4, Point3, GeomNode
from bsp.leveleditor.objectproperties.ObjectPropertiesWindow import ObjectPropertiesWindow
from bsp.leveleditor.geometry.Box import Box
from bsp.leveleditor.geometry.GeomView import GeomView
from bsp.leveleditor.viewport.ViewportType import VIEWPORT_2D_MASK, VIEWPORT_3D_MASK
from bsp.leveleditor import RenderModes
from bsp.leveleditor import LEGlobals
from .SelectionType import SelectionType
from bsp.leveleditor.actions.Delete import Delete
from bsp.leveleditor.actions.ChangeSelectionMode import ChangeSelectionMode
from bsp.leveleditor.DocObject import DocObject
from .GroupsMode import GroupsMode
from .ObjectMode import ObjectMode
from .FaceMode import FaceMode
from .VertexMode import VertexMode
from enum import IntEnum
from functools import partial
from PyQt5 import QtWidgets, QtCore
Bounds3DState = RenderState.make(
ColorAttrib.makeFlat(Vec4(1, 1, 0, 1))
)
Bounds2DState = RenderModes.DashedLineNoZ()
Bounds2DState = Bounds2DState.setAttrib(ColorAttrib.makeFlat(Vec4(1, 1, 0, 1)))
class SelectionManager(DocObject):
Modes = [
GroupsMode,
ObjectMode,
FaceMode,
VertexMode
]
def __init__(self, doc):
DocObject.__init__(self, doc)
self.selectedObjects = []
self.selectionMins = Point3()
self.selectionMaxs = Point3()
self.selectionCenter = Point3()
# We'll select groups by default
self.selectionModes = {}
self.funcs = {}
self.selectionMode = None
self.connected = False
self.acceptGlobal('documentActivated', self.__onDocActivated)
self.acceptGlobal('documentDeactivated', self.__onDocDeactivated)
self.accept('objectTransformChanged', self.handleObjectTransformChange)
self.accept('mapObjectBoundsChanged', self.handleMapObjectBoundsChanged)
self.addSelectionModes()
self.setSelectionMode(SelectionType.Groups)
    def cleanup(self):
        self.selectedObjects = None
        self.selectionMins = None
        self.selectionMaxs = None
        self.selectionCenter = None
        self.disconnectModes()
        self.connected = None
        self.funcs = None
        if self.selectionMode:
            self.selectionMode.disable()
        self.selectionMode = None
        for mode in self.selectionModes.values():
            mode.cleanup()
        self.selectionModes = None
        DocObject.cleanup(self)
def __onDocActivated(self, doc):
if doc != self.doc:
return
if self.selectionMode and not self.selectionMode.activated:
self.selectionMode.activate()
self.connectModes()
def connectModes(self):
if self.connected:
return
for mode in self.selectionModes.values():
action = base.menuMgr.action(mode.KeyBind)
action.setChecked(mode.enabled)
action.setEnabled(True)
action.connect(self.funcs[mode])
self.connected = True
def __onDocDeactivated(self, doc):
if doc != self.doc:
return
if self.selectionMode and self.selectionMode.activated:
self.selectionMode.deactivate()
self.disconnectModes()
def disconnectModes(self):
if not self.connected:
return
for mode in self.selectionModes.values():
action = base.menuMgr.action(mode.KeyBind)
action.setChecked(False)
action.setEnabled(False)
action.disconnect(self.funcs[mode])
self.connected = False
@staticmethod
def addModeActions():
editMenu = base.menuMgr.editMenu
editMenu.addSeparator()
selectBar = base.menuMgr.createToolBar("Select:")
selectBar.setIconSize(QtCore.QSize(24, 24))
selectBar.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
selectMenu = editMenu.addMenu("Select")
group = QtWidgets.QActionGroup(selectBar)
for mode in SelectionManager.Modes:
action = base.menuMgr.addAction(mode.KeyBind, mode.Name, mode.Desc, toolBar=selectBar,
menu=selectMenu, checkable=True, enabled=False, icon=mode.Icon)
group.addAction(action)
def changeSelectionMode(self, mode):
modeInst = self.selectionModes[mode]
base.actionMgr.performAction("Select %s" % modeInst.Name, ChangeSelectionMode(mode))
def getSelectionKey(self):
return self.selectionMode.Key
def getSelectionMask(self):
return self.selectionMode.Mask
def isTransformAllowed(self, bits):
return (self.selectionMode.TransformBits & bits) != 0
def addSelectionMode(self, modeInst):
self.selectionModes[modeInst.Type] = modeInst
self.funcs[modeInst] = partial(self.changeSelectionMode, modeInst.Type)
def addSelectionModes(self):
for mode in self.Modes:
self.addSelectionMode(mode(self))
    def setSelectionMode(self, mode):
        if self.selectionMode is not None:
            oldMode = self.selectionMode.Type
            oldModeInst = self.selectionMode
            # compare against the mode's Type key; the old code compared the
            # type enum to the mode instance itself, which never matches
            if mode == oldMode:
                return
            self.selectionMode.disable()
        else:
            oldMode = None
            oldModeInst = None
        self.deselectAll()
        self.selectionMode = self.selectionModes[mode]
        self.selectionMode.enable()
        self.send('selectionModeChanged', [oldModeInst, self.selectionMode])
def handleMapObjectBoundsChanged(self, mapObject):
if mapObject in self.selectedObjects:
self.updateSelectionBounds()
self.send('selectedObjectBoundsChanged', [mapObject])
def handleObjectTransformChange(self, entity):
if entity in self.selectedObjects:
self.updateSelectionBounds()
self.send('selectedObjectTransformChanged', [entity])
def deleteSelectedObjects(self):
if len(self.selectedObjects) == 0:
# Nothing to delete.
return
selected = list(self.selectedObjects)
base.actionMgr.performAction("Delete %i object(s)" % len(selected), Delete(selected))
self.selectedObjects = []
self.updateSelectionBounds()
self.send('selectionsChanged')
def hasSelectedObjects(self):
return len(self.selectedObjects) > 0
def getNumSelectedObjects(self):
return len(self.selectedObjects)
def isSelected(self, obj):
return obj in self.selectedObjects
    def deselectAll(self, update=True):
for obj in self.selectedObjects:
obj.deselect()
self.selectedObjects = []
if update:
self.updateSelectionBounds()
self.send('selectionsChanged')
def singleSelect(self, obj):
self.deselectAll(False)
self.select(obj)
def multiSelect(self, listOfObjs):
self.deselectAll(False)
for obj in listOfObjs:
self.select(obj, False)
self.updateSelectionBounds()
self.send('selectionsChanged')
    def deselect(self, obj, updateBounds=True):
if obj in self.selectedObjects:
self.selectedObjects.remove(obj)
obj.deselect()
if updateBounds:
self.updateSelectionBounds()
self.send('selectionsChanged')
    def select(self, obj, updateBounds=True):
        if obj not in self.selectedObjects:
self.selectedObjects.append(obj)
obj.select()
if updateBounds:
self.updateSelectionBounds()
self.send('selectionsChanged')
def updateSelectionBounds(self):
if len(self.selectedObjects) == 0:
base.qtWindow.selectedLabel.setText("No selection.")
self.selectionMins = Point3()
self.selectionMaxs = Point3()
self.selectionCenter = Point3()
return
else:
if len(self.selectedObjects) == 1:
obj = self.selectedObjects[0]
base.qtWindow.selectedLabel.setText(obj.getName())
else:
base.qtWindow.selectedLabel.setText("Selected %i objects." % len(self.selectedObjects))
mins = Point3(9999999)
maxs = Point3(-9999999)
for obj in self.selectedObjects:
objMins, objMaxs = obj.getBounds(base.render)
if objMins.x < mins.x:
mins.x = objMins.x
if objMins.y < mins.y:
mins.y = objMins.y
if objMins.z < mins.z:
mins.z = objMins.z
if objMaxs.x > maxs.x:
maxs.x = objMaxs.x
if objMaxs.y > maxs.y:
maxs.y = objMaxs.y
if objMaxs.z > maxs.z:
maxs.z = objMaxs.z
self.selectionMins = mins
self.selectionMaxs = maxs
self.selectionCenter = (mins + maxs) / 2.0
|
from app.models import Ingredients, User, Recipes
from app import db
from flask_login import current_user
from sqlalchemy import func
ing_list = ['Apple', 'Tabantha Wheat', 'Wildberry', 'Monster Extract',
'Acorn', 'Swift Carrot', 'Fresh Milk', 'Bird Egg', 'Hylian Rice',
'Raw Meat', 'Raw Gourmet Meat', 'Raw Whole Bird',
'Raw Bird Drumstick', 'Spicy Pepper',
'Hyrule Shroom', 'Hyrule Herb', 'Hyrule Bass',
'Rock Salt', 'Endura Shroom',
'Courser Bee Honey', 'Palm Fruit', 'Hearty Radish',
'Raw Prime Meat', 'Hearty Blueshell Snail', 'Hearty Truffle',
'Endura Carrot', 'Goat Butter', "Cane Sugar"]
def bulkIngredAdd(ing_list):
for item in ing_list:
db.session.add(Ingredients(
ing_name=item
))
db.session.commit()
def list_ingredients2(recipe):
for item in recipe:
print(item.ing_name)
def list_cup_ing(userquery):
cupboardlist = []
for item in userquery.cupboard:
cupboardlist.append(item.ingredients.ing_name)
return cupboardlist
def find_recipes():
    user = User.query.filter(User.username == 'tom').first()  # kept from the draft; not yet used below
    # sketch, assuming the Recipes.contains relationship hinted at in the
    # draft query: recipes with exactly n ingredients, as (recipe id, n) tuples
    n = 2
    return db.session.query(Recipes.id, func.count(Ingredients.id)).join(
        Recipes.contains).group_by(Recipes.id).having(
        func.count(Ingredients.id) == n).all()
|
from zope import interface
from zope import component
from zope.formlib import form
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import safe_unicode
from Products.CMFPlone.interfaces import IPloneSiteRoot
from Products.CMFDefault.formlib.schema import ProxyFieldProperty
from Products.CMFDefault.formlib.schema import SchemaAdapterBase
from plone.app.controlpanel.form import ControlPanelForm
from raptus.securelogin import interfaces
from raptus.securelogin import SecureLoginMessageFactory as _
class SecureLoginControlPanelAdapter(SchemaAdapterBase):
component.adapts(IPloneSiteRoot)
interface.implements(interfaces.IConfiguration)
def __init__(self, context):
super(SecureLoginControlPanelAdapter, self).__init__(context)
self.portal = context
pprop = getToolByName(context, 'portal_properties')
self.context = pprop.securelogin_properties
self.encoding = pprop.site_properties.default_charset
ip_bypass = ProxyFieldProperty(interfaces.IConfiguration['ip_bypass'])
groups = ProxyFieldProperty(interfaces.IConfiguration['groups'])
email = ProxyFieldProperty(interfaces.IConfiguration['email'])
timeout = ProxyFieldProperty(interfaces.IConfiguration['timeout'])
token = ProxyFieldProperty(interfaces.IConfiguration['token'])
class SecureLoginControlPanel(ControlPanelForm):
form_fields = form.FormFields(interfaces.IConfiguration)
label = _(u'Secure login settings')
description = ''
form_name = ''
|
default_app_config = 'colossus.apps.subscribers.apps.SubscribersConfig'
|
#!/usr/bin/python
#
# Created by Albert Zhang on 4/10/15.
# Copyright (c) 2015 Albert Zhang. All rights reserved.
#
import os
import sys
import errno
import subprocess
import shutil
import json
isShowHelp = False
dirIndex = -1
outIndex = -1
for index, value in enumerate(sys.argv):
if value in ['-h', '--help']:
isShowHelp = True
elif value == '-d':
dirIndex = index + 1
elif value == '-o':
outIndex = index + 1
arglen = len(sys.argv)
if isShowHelp:
print "--------------------------------------------------------------------------"
print 'This script is used to check the missing @2x images in xcassets,'
print 'and if the @3x image exist, fix them by invoking gen_xcassets.py'
print "\n"
print "Syntax:"
print " check_xcassets.py [-h] -d <dir_to_scan> -o <output_dir>"
print "\n"
print "If there is any fix happen, the generated results will be saved in the "
print "directory specified, under the working directory."
print "--------------------------------------------------------------------------"
print "\n"
quit()
if dirIndex < 0 or dirIndex >= arglen:
print "\nSyntax error: No source dir specified.\n"
quit()
if outIndex < 0 or outIndex >= arglen:
print "\nSyntax error: No output dir specified.\n"
quit()
xcassetsDir = sys.argv[dirIndex]
tmpOutputDir = sys.argv[outIndex]
tmpCopiedDir = tmpOutputDir +'/tmp_4_copy'
files = os.listdir(xcassetsDir)
fixedCount = 0
cannotfixCount = 0
goodCount = 0
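# sink for child-process output so the fix step runs quietly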
fnull = open(os.devnull, 'w')
def mkdir_p(path):
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def rmdir_e(path):
if os.path.exists(path):
shutil.rmtree(path)
for fn in files:
#print fp
fnLen = len(fn)
    if fnLen > 9: # length of '.imageset' is 9
last9 = fn[fnLen-9:fnLen]
astName = fn[0:fnLen-9]
#print last9
if last9 == '.imageset':
astDirP = xcassetsDir +'/'+ fn
jsonp = astDirP +'/Contents.json'
file2x = ''
file3x = ''
contentsJson = json.load(open(jsonp))
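            # Contents.json lists one entry per scale; record the 2x and 3x filenames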
for img in contentsJson[u'images']:
try:
scl = img[u'scale']
if scl == u'2x':
file2x = img[u'filename']
if scl == u'3x':
file3x = img[u'filename']
except Exception as ex:
pass
file3xP = ''
file2xP = ''
if len(file3x) > 0:
file3xP = astDirP +'/'+ file3x
if len(file2x) > 0:
file2xP = astDirP +'/'+ file2x
if not os.path.exists(file3xP):
file3x = '' # set to empty to indicate the file missing
if not os.path.exists(file2xP):
file2x = '' # set to empty to indicate the file missing
if len(file2x) > 0 and len(file3x) > 0:
# we are good
print '- [ Good ] '+ astName
goodCount += 1
else:
# missing something
if len(file3x) == 0:
print '- [Missing 3x] '+ astName +' ................... cannot fix'
cannotfixCount += 1
else:
print '- [Missing 2x] '+ astName +' ................... fixing ....'
rmdir_e(tmpCopiedDir)
mkdir_p(tmpCopiedDir)
copiedFp = tmpCopiedDir +'/'+ astName +'.png'
shutil.copyfile(file3xP, copiedFp)
subprocess.check_call(['gen_xcassets.py', '-d', tmpCopiedDir, '-o', tmpOutputDir], stdout=fnull)
print '---- * fixed '+ astName
fixedCount += 1
rmdir_e(tmpCopiedDir)
print "\n"
print "Good: "+ str(goodCount) +", fixed: "+ str(fixedCount) +", cannot fix: "+ str(cannotfixCount) +"\n"
print "Note: you must manually copy the fix results from ["+ tmpOutputDir +"]"
print "\n\n"
|
"""Reports package
"""
__version__ = "$Rev: 10 $"
import pkg_resources
try:
version = pkg_resources.require("reports")[0].version
except Exception:
version = __version__
from .report import Report
from .htmltable import HTMLTable
|
import argparse
import json
import logging
import os
try:
from ripetor import ip2as
except:
import ip2as
from datetime import datetime
from operator import itemgetter
import subprocess
from collections import OrderedDict
from ipaddress import ip_address, ip_network
def filter_ip_addrs(addr_list, ip_version="ipv4"):
if ip_version == "ipv4":
        addrs_v4 = [addr for addr in addr_list if "[" not in addr]
return addrs_v4
elif ip_version == "ipv6":
addrs_v6 = [addr for addr in addr_list if "[" in addr]
return addrs_v6
else:
raise ValueError(f"invalid ip_version: {ip_version}")
def remove_port_from_addr_notation(addr_str):
ip, separator, port = addr_str.rpartition(':')
return ip
#allowed values for ip_version: "ipv4", "ipv6", "ipv4v6"
#allowed values for filter_criteria: "entry", "exit"
def filter_relays(relays, ip_version="ipv4", filter_criteria="entry"):
if "v4" in ip_version:
relays = [r for r in relays if filter_ip_addrs(r.get("or_addresses", []), ip_version="ipv4")]
if "v6" in ip_version:
relays = [r for r in relays if filter_ip_addrs(r.get("or_addresses", []), ip_version="ipv6")]
return relays
def get_current_relays(details):
max_time = max(datetime.strptime(r["last_seen"], "%Y-%m-%d %H:%M:%S") for r in details["relays"])
relays = [r for r in details["relays"] if datetime.strptime(r["last_seen"], "%Y-%m-%d %H:%M:%S") >= max_time]
# calculate summarized ipv6 probabilities (for normalization)
relays_ipv6 = filter_relays(relays, "ipv6")
ipv6_sum_guard_probability = sum(r.get('guard_probability', 0) for r in relays_ipv6)
ipv6_sum_middle_probability = sum(r.get('middle_probability', 0) for r in relays_ipv6)
ipv6_sum_exit_probability = sum(r.get('exit_probability', 0) for r in relays_ipv6)
# add calculated ASN field and normalize v6 probability
for r in relays:
tor_asn = r.get("as")
ips_v4 = filter_ip_addrs(r.get('or_addresses', []), ip_version="ipv4")
ips_v6 = filter_ip_addrs(r.get('or_addresses', []), ip_version="ipv6")
if ips_v4:
ip_v4 = remove_port_from_addr_notation(ips_v4[0])
ip_asn_v4 = ip2as.ip2asn(ip_v4)
r['asn_v4_calculated'] = ip_asn_v4
if ips_v6:
ip_v6 = remove_port_from_addr_notation(ips_v6[0])
ip_asn_v6 = ip2as.ip2asn(ip_v6.strip("[]"))
r['asn_v6_calculated'] = ip_asn_v6
        # normalize ipv6 probabilities (default to 0 when the field is absent,
        # which otherwise raised a TypeError for relays missing a probability)
        r['guard_probability_v6'] = r.get('guard_probability', 0) / ipv6_sum_guard_probability
        r['middle_probability_v6'] = r.get('middle_probability', 0) / ipv6_sum_middle_probability
        r['exit_probability_v6'] = r.get('exit_probability', 0) / ipv6_sum_exit_probability
relays_ipv4 = filter_relays(relays, "ipv4")
relays_ipv6 = filter_relays(relays, "ipv6")
relays_dualstack = filter_relays(relays, "ipv4v6")
return relays, relays_ipv4, relays_ipv6, relays_dualstack
def calculate_basic_tor_relay_stats(relays):
stats = dict()
stats["rc"] = len(relays)
stats["rasn"] = len({i["as"] for i in relays if "as" in i})
stats["rbw"] = sum([i["advertised_bandwidth"] for i in relays]) / 1000 / 1000 / 1000 * 8
# Changed to exit Probability > 0
# exits = [r for r in relays if "Exit" in r["flags"]]
exits = [r for r in relays if r["exit_probability"] > 0]
stats["ec"] = len(exits)
stats["easn"] = len({i["as"] for i in exits if "as" in i})
stats["ebw"] = sum([i["advertised_bandwidth"] for i in exits]) / 1000 / 1000 / 1000 * 8
# Changed to guard Probability > 0
# guards = [r for r in relays if "Guard" in r["flags"]]
guards = [r for r in relays if r["guard_probability"] > 0]
stats["gc"] = len(guards)
stats["gasn"] = len({i["as"] for i in guards if "as" in i})
stats["gbw"] = sum([i["advertised_bandwidth"] for i in guards]) / 1000 / 1000 / 1000 * 8
return stats
def print_basic_stats(s, remark=""):
print(f"{remark}Basic Statistics (Table 1)")
print("--------------------------")
print(" All Relays & {rc:4d} & {rasn:4d} & {rbw:2.2f} \\\\".format(**s))
print(" Exit Relays & {ec:4d} & {easn:4d} & {ebw:2.2f} \\\\".format(**s))
print("Guard Relays & {gc:4d} & {gasn:4d} & {gbw:2.2f} \\\\".format(**s))
def rank_probes_per_uptime(probes):
# overall uptime or time since last boot?
    # sort by uptime to get good probes first
probes.sort(key=lambda x: x.get('total_uptime', 0), reverse=True)
return probes
def filter_probes(probes, ip_version="ipv4"):
if "v4" in ip_version:
probes = [p for p in probes if p["asn_v4"]]
if "v6" in ip_version:
probes = [p for p in probes if p["asn_v6"]]
return probes
def get_probe_ips(probe):
ip_v4 = probe.get("address_v4", None)
ip_v6 = probe.get("address_v6", None)
network_v4 = probe.get("prefix_v4") #prefix_v4': '84.114.0.0/15',
network_v6 = probe.get("prefix_v6")
# sometimes address_v4 and address_v6 are censored --> take prefix_v4': '84.114.0.0/15', 'prefix_v6': instead
if not ip_v4 and network_v4:
network = ip_network(network_v4)
ip_v4 = str(next(network.hosts())) # just take the first host
if not ip_v6 and network_v6:
network = ip_network(network_v6)
ip_v6 = str(next(network.hosts())) # just take the first host
return ip_v4, ip_v6
def get_current_probes(probes):
connected_probes = [p for p in probes["objects"] if p["status_name"] == "Connected"]
connected_probes = rank_probes_per_uptime(connected_probes)
# add calculated as field
for p in connected_probes:
asn_v4 = p.get('asn_v4') or 0
asn_v6 = p.get('asn_v6') or 0
asn_v4 = f"AS{asn_v4}"
asn_v6 = f"AS{asn_v6}"
ip_asn_v4, ip_asn_v6 = get_probe_ips(p)
if ip_asn_v4:
ip_asn_v4 = ip2as.ip2asn(ip_asn_v4)
p['asn_v4_calculated'] = ip_asn_v4
#if asn_v4 != ip_asn_v4:
# print(f"{asn_v4} != {ip_asn_v4}")
if ip_asn_v6:
ip_asn_v6 = ip2as.ip2asn(ip_asn_v6)
p['asn_v6_calculated'] = ip_asn_v6
#if asn_v6 != ip_asn_v6:
# print(f"{asn_v6} != {ip_asn_v6}")
#map(lambda x: x['asn']=x['asn_v4'], connected_probes)
connected_probes_v4 = filter_probes(connected_probes, "ipv4")
connected_probes_v6 = filter_probes(connected_probes, "ipv6")
connected_probes_dual_stack = filter_probes(connected_probes, "ipv4v6")
return connected_probes, connected_probes_v4, connected_probes_v6, connected_probes_dual_stack
def get_ordered_dict_by_value_len(unordered_dict):
ret = OrderedDict()
# construct helper list with (key, rank, value)
temp = list()
for key, value in unordered_dict.items():
new_elem = [key, len(value), value]
temp.append(new_elem)
# order helper list
temp.sort(key=lambda x: x[1], reverse=True)
# insert in right order
for key, rank, value in temp:
ret[key] = value
return ret
def get_probes_per_asn(probes):
probes_per_as_v4 = dict()
probes_per_as_v6 = dict()
for p in probes:
if p["asn_v4"]:
probes_per_as_v4.setdefault(p["asn_v4"], []).append(p)
if p["asn_v6"]:
probes_per_as_v6.setdefault(p["asn_v6"], []).append(p)
return get_ordered_dict_by_value_len(probes_per_as_v4), get_ordered_dict_by_value_len(probes_per_as_v6)
def get_probes_per_country(probes):
probes_per_country = dict()
for p in probes:
if p.get("country_code"):
probes_per_country.setdefault(p["country_code"], []).append(p)
return get_ordered_dict_by_value_len(probes_per_country)
def get_probes_per_country_asn_ranked(probes, ip_version="ipv4"):
ret_v4 = OrderedDict()
ret_v6 = OrderedDict()
probes_per_country = get_probes_per_country(probes)
for country, country_probes in probes_per_country.items():
        probes_per_as_v4, probes_per_as_v6 = get_probes_per_asn(country_probes)
ret_v4[country] = probes_per_as_v4
ret_v6[country] = probes_per_as_v6
return ret_v4 if ip_version=="ipv4" else ret_v6
def calculate_basic_ripe_stats(probes, ip_version="ipv4"):
    probes_per_as_v4, probes_per_as_v6 = get_probes_per_asn(probes)
stats = dict()
stats["connected_probes"] = probes
stats["probes_per_as"] = probes_per_as_v4 if ip_version=="ipv4" else probes_per_as_v6
return stats
def get_example_set_by_asn(probes, asn, ip_version="ipv4v6"):
ret_set = {"probes": [], "addresses": [], "asn":[]}
if isinstance(asn, str):
asn = int(asn.strip("AS"))
unfiltered, probes_v4, probes_v6, probes_ds = get_current_probes(probes)
if ip_version == "ipv4":
        probes_per_as_v4, probes_per_as_v6 = get_probes_per_asn(probes_v4)
per_asn = probes_per_as_v4
elif ip_version == "ipv6":
        probes_per_as_v4, probes_per_as_v6 = get_probes_per_asn(probes_v6)
per_asn = probes_per_as_v6
elif ip_version == "ipv4v6":
        probes_per_as_v4, probes_per_as_v6 = get_probes_per_asn(probes_ds)
per_asn = probes_per_as_v4 # take v4 because more reliable?
matches = per_asn.get(asn, [])
if not matches:
print(f"no matches for AS{asn} and {ip_version}")
for p in matches:
id = p["id"]
asn_v4 = p.get('asn_v4') or 0
asn_v6 = p.get('asn_v6') or 0
ip_v4, ip_v6 = get_probe_ips(p)
addrs = []
if ip_v4:
addrs.append(f"{ip_v4}:0")
if ip_v6:
addrs.append(f"[{ip_v6}]:0")
asn = asn_v4 if "v4" in ip_version else asn_v6
ret_set["probes"].append(id)
ret_set["addresses"].append(addrs)
ret_set["asn"].append(asn)
return ret_set
def print_basic_ripe_stats(stats, remark=""):
print(f"{remark}Basic RIPE Statistics")
print("---------------------")
print("Connected probes: %6d" % len(stats["connected_probes"]))
print("in different AS: %6d" % len(stats["probes_per_as"]))
def generate_gnuplot_dat_files(relays, probes, ipv6=False):
print("Generate gnuplot .dat Files")
print("---------------------------")
# TODO Change File structure to save in other directory structure
if not ipv6:
        logging.info('Creating plot data for IPv4')
probes_as = {"AS" + str(p["asn_v4"]) for p in probes}
proto = "ipv4"
else:
        logging.info('Creating plot data for IPv6')
probes_as = {"AS" + str(p["asn_v6"]) for p in probes}
proto = "ipv6"
for flag in ("exit", "guard"):
filtered_relays = [r for r in relays if r[flag+"_probability"] > 0]
relays_per_as = dict()
for r in filtered_relays:
if "as" in r:
relays_per_as.setdefault(r["as"], []).append(r)
as_values = [(asn,
sum([r["%s_probability" % flag] for r in relays if "%s_probability" % flag.lower() in r]),
len(relays)
) for asn, relays in relays_per_as.items()]
as_values.sort(key=itemgetter(1), reverse=True)
with open(f"gnuplot/{flag}_{proto}_as.dat", "w+") as as_fp:
_, s_p, s_c = zip(*as_values)
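            # cumulative curves: entry k is (rank, summed probability, summed relay count) over the top-k ASes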
summed_values = [(idx+1, sum(s_p[:idx+1]), sum(s_c[:idx+1])) for idx in range(len(s_p))]
as_fp.write("0 0 0\n")
as_fp.write("\n".join("%d %f %d" % line for line in summed_values))
with open(f"gnuplot/{flag}_{proto}_probes_as.dat", "w+") as as_probes_fp:
_, s_p, s_c = zip(* filter(lambda x: x[0] in probes_as, as_values))
summed_values = [(idx+1, sum(s_p[:idx + 1]), sum(s_c[:idx + 1])) for idx in range(len(s_p))]
as_probes_fp.write("0 0 0\n")
as_probes_fp.write("\n".join("%d %f %d" % line for line in summed_values))
def execute_gnuplot():
print("Execute gnuplot")
print("---------------")
p = subprocess.Popen(["gnuplot", "exit_as.gnuplot"], cwd="gnuplot")
p.wait()
p = subprocess.Popen(["gnuplot", "guard_as.gnuplot"], cwd="gnuplot")
p.wait()
def calculate_top_as_without_ripe_probe(relays, probes):
probes_as = {"AS" + str(p["asn_v4"]) for p in probes}
relays_wo_probe_per_as = dict()
relays_total = dict()
for r in relays:
if "as" in r and r["as"] not in probes_as:
relays_wo_probe_per_as.setdefault(str(r["as"]), []).append(r)
relays_total.setdefault(str(r["as"]), []).append(r)
top_as = [{"as": asn,
"nr_relays": len(relays),
"bw_sum": sum([r["advertised_bandwidth"] for r in relays]) / 1000/1000/1000*8 ,
"exit_sum": sum([r["exit_probability"] for r in relays if "exit_probability" in r]),
"guard_sum": sum([r["guard_probability"] for r in relays if "guard_probability" in r])
}
for asn, relays in relays_wo_probe_per_as.items()]
top_as_total = [{"as": asn,
"has_probe": 1 if asn in probes_as else 0 ,
"as_name": ip2as.get_as_name(asn),
"nr_relays": len(relays),
"bw_sum": sum([r["advertised_bandwidth"] for r in relays]) / 1000/1000/1000*8 ,
"exit_sum": sum([r["exit_probability"] for r in relays if "exit_probability" in r]),
"guard_sum": sum([r["guard_probability"] for r in relays if "guard_probability" in r])
}
for asn, relays in relays_total.items()]
return {"exit": sorted(top_as, key=itemgetter("exit_sum"), reverse=True),
"guard": sorted(top_as, key=itemgetter("guard_sum"), reverse=True)},{"exit": sorted(top_as_total, key=itemgetter("exit_sum"), reverse=True),
"guard": sorted(top_as_total, key=itemgetter("guard_sum"), reverse=True)}
def print_top_as_without_ripe_probe(top_exit, top_guard, remark=""):
print(f"{remark}Top AS without RIPE Probe")
print("-------------------------")
print(" AS\\# & Nr. Relays & Sum BW (Gbit/s) & Exit prob. & Guard prob. \\\\ \\hline\\hline")
for s in top_exit[:10]:
print("%8s & %10d & %6.2f & %10.3f & %11.3f \\\\" %
(s["as"], s["nr_relays"], s["bw_sum"], s["exit_sum"], s["guard_sum"]))
print("\\hline")
for s in top_guard[:5]:
print("%8s & %10d & %6.2f & %10.3f & %11.3f \\\\" %
(s["as"], s["nr_relays"], s["bw_sum"], s["exit_sum"], s["guard_sum"]))
print("Sum exit prob for %d AS if RIPE installed: %.3f" % (5, sum(s["exit_sum"] for s in top_exit[:5])))
print("Sum exit prob for %d AS if RIPE installed: %.3f" % (10, sum(s["exit_sum"] for s in top_exit[:10])))
def print_total_tor_as(total_exit,total_guard,remark=""):
print(f"{remark}Top AS Probes")
print("-------------------------")
print(" AS\\# & RIPE & AS Name & Nr. Relays & Sum BW (Gbit/s) & Exit prob. & Guard prob. \\\\ \\hline\\hline")
for s in total_exit[:20]:
print("%8s & %1d & %s & %10d & %6.2f & %10.3f & %11.3f \\\\" %
(s["as"], s["has_probe"],s["as_name"],s["nr_relays"], s["bw_sum"], s["exit_sum"], s["guard_sum"]))
def print_country_statistic(relays, probes, remark=""):
# TODO Split details and probes, since they are not depending
print(f"{remark}Country statistics")
print("------------------")
probes_per_country = {}
for p in probes:
probes_per_country.setdefault(p["country_code"], []).append(p)
sorted_ppc = sorted([(k, len(v)) for k, v in probes_per_country.items()], key=itemgetter(1), reverse=True)
print(f"Top probe countries: {sorted_ppc[:10]}")
# TODO Only take running relays for stats (see basic-statistics)
relays_per_country = {}
for r in relays:
if "country" in r:
relays_per_country.setdefault(r["country"], []).append(r)
sorted_rpc = sorted([(k, len(v)) for k, v in relays_per_country.items()], key=itemgetter(1), reverse=True)
print(f"Top relay countries: {sorted_rpc[:10]}")
def main():
"""Print statistics depending on provided files"""
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--details", type=str, help="details .json file from onioo")
parser.add_argument("-p", "--probes", type=str, help="probes .json file")
parser.add_argument("-i", "--ip2asn", type=str, help="ip2asn csv file")
args = parser.parse_args()
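    # Illustrative invocation (file names assumed):
    #   python stats.py -d details.json -p probes.json -i ip2asn.csv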
# Open Detail file
if args.details and os.path.isfile(args.details):
with open(args.details, 'r') as f:
details = json.load(f)
else:
details = None
print("No valid details file")
# Open Probes file
if args.probes and os.path.isfile(args.probes):
with open(args.probes, 'r') as f:
probes = json.load(f)
else:
probes = None
print("No valid probes file")
# Open Ip2asn file
if args.ip2asn and os.path.isfile(args.ip2asn):
ip2as.load(args.ip2asn)
else:
print("No valid ip2asn file")
# Print Basic Stats
if details:
unfiltered, relays_v4, relays_v6, relays_ds = get_current_relays(details)
basic_stats = calculate_basic_tor_relay_stats(relays_v4)
print_basic_stats(basic_stats, "[IPv4] ")
print()
basic_stats = calculate_basic_tor_relay_stats(relays_v6)
print_basic_stats(basic_stats, "[IPv6] ")
print()
# Print Stats for Probes
if probes:
unfiltered, probes_v4, probes_v6, probes_ds = get_current_probes(probes)
ripe_stats = calculate_basic_ripe_stats(probes_v4, "ipv4")
print_basic_ripe_stats(ripe_stats, "[IPv4] ")
print()
ripe_stats = calculate_basic_ripe_stats(probes_v6, "ipv6")
print_basic_ripe_stats(ripe_stats, "[IPv6] ")
print()
# Execute Gnuplot and Calculate Top AS
if details and probes:
generate_gnuplot_dat_files(relays_v4, probes_v4)
generate_gnuplot_dat_files(relays_v6, probes_v6, ipv6=True)
# execute_gnuplot()
print()
top,top_total = calculate_top_as_without_ripe_probe(relays_v4, probes_v4)
print_top_as_without_ripe_probe(top["exit"], top["guard"], "[IPv4] ")
print_total_tor_as(top_total["exit"], top_total["guard"], "[IPv4] ")
print()
top_v6,top_v6_total = calculate_top_as_without_ripe_probe(relays_v6, probes_v6)
print_top_as_without_ripe_probe(top_v6["exit"], top_v6["guard"], "[IPv6] ")
print()
print_country_statistic(relays_v4, probes_v4, "[IPv4] ")
print()
print_country_statistic(relays_v6, probes_v6, "[IPv6] ")
print()
ranked_v4 = get_probes_per_country_asn_ranked(probes_v4)
ranked_v6 = get_probes_per_country_asn_ranked(probes_v6)
print("GERMANY TOP v4")
print(list(ranked_v4['DE'].keys())[0:10])
print("GERMANY TOP v6")
print(list(ranked_v6['DE'].keys())[0:10])
print("USA TOP v4")
print(list(ranked_v4['US'].keys())[0:10])
print("USA TOP v6")
print(list(ranked_v6['US'].keys())[0:10])
print("RUSSIA TOP v4")
print(list(ranked_v4['RU'].keys())[0:10])
print("RUSSIA TOP v6")
print(list(ranked_v6['RU'].keys())[0:10])
if __name__ == '__main__':
main()
|
from wtforms import TextAreaField, StringField, Form, IntegerField, SelectField
from wtforms.fields.html5 import DateField
from wtforms.validators import InputRequired, ValidationError
class AdForm(Form):
title = StringField("Title", validators=[InputRequired()])
content = TextAreaField("Content", validators=[InputRequired()])
price = IntegerField("Price", validators=[InputRequired()])
address = StringField("Address", validators=[InputRequired()])
city = StringField("City", validators=[InputRequired()])
    def validate_price(form, field):
        if not isinstance(field.data, int):
            raise ValidationError('Price must be integer')
CHOICES = [(str(i), str(i)) for i in range(1, 6)]
class AdEditForm(Form):
title = StringField(validators=[InputRequired()])
content = TextAreaField(validators=[InputRequired()])
price = IntegerField(validators=[InputRequired()])
address = StringField(validators=[InputRequired()])
city = StringField(validators=[InputRequired()])
    def validate_price(form, field):
        if not isinstance(field.data, int):
            raise ValidationError('Price must be integer')
class ReviewForm(Form):
content = TextAreaField(validators=[InputRequired()])
grade = SelectField('Ad grade', choices=CHOICES)
class ReviewEditForm(Form):
content = TextAreaField(validators=[InputRequired()])
grade = SelectField('Ad grade', choices=CHOICES)
class RentForm(Form):
start_date = DateField(validators=[InputRequired()])
end_date = DateField(validators=[InputRequired()])
|
class Node(object):
def __init__(self,data):
self.value = data
self.less = None
self.more = None
def addL(self, data):
n = Node(data)
self.less=n
return(n)
def addR(self, data):
n = Node(data)
self.more=n
return(n)
def printer(self,lev):
print (self.value)
if (self.less):
print "left:"
self.less.printer(lev+1)
if (self.more):
print "right:"
self.more.printer(lev+1)
print "level->", lev
    def isLeaf(self):
        return not self.less and not self.more
    # returns the parent of the smallest node in the subtree
    # (None when the root of the subtree is itself the smallest)
    def prev(self):
        if not self.less:
            return None
        if not self.less.less:
            return self
        return self.less.prev()
root = Node(10)
n1 = root.addR(12)
n2 = root.addL(8)
n3 = n1.addL(11)
n4 = n2.addL(5)
n5 = n4.addR(6)
res = None
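# Repeatedly unlink the smallest remaining node and push it onto the front of a
# `less`-linked chain, so `res` collects the values in descending order.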
while(not root.isLeaf()):
p = root.prev()
if (p):
sm = p.less
p.less = sm.more
else:
sm = root
root = root.more
sm.less = None
sm.more = None
sm.less = res
res = sm
# attach the last remaining (largest) node to the head of the chain;
# the original wrote to an unused attribute `root.l`, which looks like a typo
root.less = res
res = root
print "***********"
res.printer(0)
|
import random
# Define a function that generates a verification code of length n
def generate_checkcode(n):
    s = '0987654321qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM'
code = ''
for i in range(n):
ran = random.randint(0, len(s)-1)
code += s[ran]
return code
def login():
    username = input("Please enter a username: ")
    password = input("Please enter a password: ")
    # generate a verification code
    code = generate_checkcode(4)
    print(code)
    code1 = input("Please enter the verification code: ")
    if code.lower() == code1.lower():
        if username == '李佳琪' and password == "123456":
            print("Login successful")
        else:
            print("Incorrect username or password")
    else:
        print("Incorrect verification code")
login()
|
employees = [{
'name': 'John Mckee',
'age': 38,
'department': 'sales'
}, {
'name': 'Lisa Crawford',
'age': 29,
'department': 'marketing'
}, {
'name': 'Sujan Patel',
'age': 33,
'department': 'hr'
}]
print(employees[1])
items = ['apple', 'orange', 'banana']
quantity = [5, 3, 2]
orders = zip(items, quantity)
print(list(orders))
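# zip() yields a one-shot iterator, so it is rebuilt before each conversion below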
orders = zip(items, quantity)
print(tuple(orders))
orders = zip(items, quantity)
orders = dict(orders)
print(list(orders.keys()))
for pair in orders.items():  # avoid shadowing the built-in name `tuple`
    print(pair)
|
#!/usr/bin/env python
# coding: utf-8
import csv
from pathlib import Path
input_file = Path('Resources','budget_data.csv')
total_number_of_months=[]
profit_loss=[]
average_change=[]
with open(input_file, 'r') as csv_file:
csv_reader = csv.reader(csv_file,delimiter=',')
next(csv_reader)
data=[row for row in csv_reader]
for row in data:
total_number_of_months.append(row[0])
profit_loss.append(int(row[1]))
for i in range(len(profit_loss)-1):
average_change.append(profit_loss[i+1] - profit_loss[i])
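# average_change[k] is the month-over-month change from month k to month k+1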
max_increase_month = average_change.index(max(average_change)) + 1
min_increase_month = average_change.index(min(average_change)) + 1
output_file = Path('Analysis','Financial_Analysis.txt')
with open(output_file, 'w') as file:
file.write('Financial Analysis')
file.write('\n')
file.write('-------------------')
file.write('\n')
file.write('Total Months: {}'.format(len(total_number_of_months)))
file.write('\n')
file.write('Total:'+'$'+str(sum(profit_loss)))
file.write('\n')
file.write(f"Average Change:${round(sum(average_change)/len(average_change),2)}")
file.write('\n')
file.write('Greatest Increase in Profits:{} ({}{})'.format(total_number_of_months[max_increase_month],'$',max(average_change)))
file.write('\n')
file.write('Greatest Decrease in Profits:{} ({}{})'.format(total_number_of_months[min_increase_month],'$',min(average_change)))
# Print statements
print("Financial Analysis\n",'-'*len("Financial Analysis"))
print('Total Months: {}'.format(len(total_number_of_months)))
print('Total:'+'$'+str(sum(profit_loss)))
print(f"Average Change: {round(sum(average_change)/len(average_change),2)}")
print('Greatest Increase in Profits:{} ({}{})'.format(total_number_of_months[max_increase_month],'$',max(average_change)))
print('Greatest Decrease in Profits:{} ({}{})'.format(total_number_of_months[min_increase_month],'$',min(average_change)))
|
# The Slither class: the player's snake
class Slither:
    step = 44
    updateCountMax = 2

    # Set up a snake of the given length in the game
    def __init__(self, length):
        self.length = length
        self.direction = 0
        self.updateCount = 0
        # per-instance position lists (class-level lists would be shared by
        # every snake); far off-screen placeholders avoid phantom collisions
        self.x = [220] + [-10000] * 280
        self.y = [308] + [-10000] * 280
        # initial positions, no collision.
        self.x[1] = 1 * 44
        self.x[2] = 2 * 44
# Define Slither Move
def update(self):
self.updateCount = self.updateCount + 1
if self.updateCount > self.updateCountMax:
# update previous positions
for i in range(self.length - 1, 0, -1):
self.x[i] = self.x[i - 1]
self.y[i] = self.y[i - 1]
# update position of head of snake
if self.direction == 0:
self.x[0] = self.x[0] + self.step
if self.direction == 1:
self.x[0] = self.x[0] - self.step
if self.direction == 2:
self.y[0] = self.y[0] - self.step
if self.direction == 3:
self.y[0] = self.y[0] + self.step
self.updateCount = 0
# Check direction of Slither
def moveRight(self):
self.direction = 0
def moveLeft(self):
self.direction = 1
def moveUp(self):
self.direction = 2
def moveDown(self):
self.direction = 3
# Show Slither
def draw(self, surface, image):
if self.length <= 5:
for i in range(0, self.length):
surface.blit(image, (self.x[i], self.y[i]))
else:
for i in range(0, self.length - 5):
surface.blit(image, (self.x[i], self.y[i]))
|
from django.apps import AppConfig
class PatrocinadoresConfig(AppConfig):
name = 'patrocinadores'
|
# Given an array of length N whose each element is a tuple (base, exponent),
# find the position in the array (1-indexing) such that base ^ exponent
# is largest!
# Methodology: A ^ B > C ^ D iff B * ln(A) > D * ln(C)
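# e.g. (2, 10) vs (10, 3): 10*ln(2) ≈ 6.93 > 3*ln(10) ≈ 6.91,
# so 2^10 = 1024 indeed beats 10^3 = 1000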
from math import log
def findMaxExpoValue():
arrInput = readInputFromFile()
arrLen = len(arrInput)
maxIndex = 0
# For each arrInput[I], we have:
# arrInput[I][0] contains the base
# arrInput[I][1] contains the exponent
for index in range(1, arrLen):
curValue = arrInput[index][1] * log(arrInput[index][0])
curMaxValue = arrInput[maxIndex][1] * log(arrInput[maxIndex][0])
if curValue > curMaxValue:
maxIndex = index
# The final result must be 1-indexing!
return maxIndex + 1
def readInputFromFile():
    strFile = input('Enter the path of the input file: ')
    arrInput = []
    with open(strFile, 'r') as file:
        for line in file:
            twoNum = line.split(',')
            arrInput.append((int(twoNum[0]), int(twoNum[1])))
    return arrInput
|
"""Top-level project Main Module."""
from IncomeAccountOverhead import income_account_overhead
from BalanceSheetOverhead import balance_sheet_overhead
from StockBondOverhead import stock_bond_overhead
from ReferenceOverhead import reference_overhead
from EndSearchOverhead import end_search_overhead
import ZoneNeutralOps as zno
import sys
sys.path.append('../../../runtime_data/')
import RunTimeData
def main():
"""Top-level project Main function."""
# trigger console printing function and define variables for mid-runtime print statements.
starting_data = RunTimeData.starting_print_statement()
start_time = starting_data[0]
time_elapsed = starting_data[1]
# read in ZoneClassifications files.
zones_full = '../from_summit/ZoneClassifications.csv'
zones_small = '../from_summit/ZoneClassifications_Smaller.csv'
# trigger file_operations classes to search zones in zone files for identifying strings.
zone_data_account = income_account_overhead(zones_full, zones_small)
zone_data_balance = balance_sheet_overhead(zones_full, zones_small)
zone_data_stock_bond = stock_bond_overhead(zones_full, zones_small)
zone_data_reference = reference_overhead(zones_full, zones_small)
zone_data_end_search = end_search_overhead(zones_full, zones_small)
# prepare output dataframes from above classes as part of original zoning file and save.
zno.update_and_output(zones_small, zone_data_account.output_dataframe,
zone_data_balance.output_dataframe,
zone_data_stock_bond.output_dataframe,
zone_data_reference.output_dataframe,
zone_data_end_search.output_dataframe)
# print concluding job console statement with summarising data.
RunTimeData.concluding_print_statement(start_time, time_elapsed)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import Queue
import ctypes
import select
import socket
import logging
import threading
from time import sleep
from datetime import datetime, timedelta
from threading import Event, Timer
from signal import signal, SIGINT
from collections import deque
from binascii import a2b_hex, b2a_hex
try:
from mushroom_pb2 import *
sensor_type_dict = {
TEMP : 'temperature',
LIGHT: 'light',
HUMI : 'humidity',
CO2 : 'co2',
}
controller_type_dict = {
XUNHUAN_FAN : 'xunhuan_fan',
JINFENG_FAN : 'jinfeng_fan',
PAIFENG_FAN : 'paifeng_fan',
JIASHIQI : 'jiashiqi',
YASUOJI : 'yasuoji',
NEIJI : 'neiji',
YELLOW_LIGHT : 'yello_light',
RED_LIGHT : 'red_light',
BLUE_LIGHT : 'blue_light',
}
except Exception:
pass
sys_config_dict = {
'TIME_SYNC_CYCLE' : 50,
}
room_dict = {
'sensor': [],
'controller': [],
}
# on
ON = 1
# off
OFF = 0
# success
SUC = 0
# failure
FAI = -1
# error
ERR = -2
# global thread registry
thread_dict = {}
THREAD_TASK = 'task_deliver'
THREAD_ARM = 'arm_server'
THREAD_DJANGO = 'django_server'
THREAD_POLICY = 'threshold_loader'
THREAD_SENSOR_DATA_STORAGE = 'sensor_data_storage'
#=========== policy execution =================#
# Environment bounds, refreshed continuously by a dedicated thread. Keyed by
# room number; each value is a queue of fixed length 2: the first element is a
# tuple holding the bounds currently in effect, the second the next refresh time.
threshold = {}
#: load cycle for the environment bounds (s)
THRESHOLD_LOAD_CYCLE = 5
#============ task queue module configuration ==============#
#: task timeout (s)
TASK_TIMEOUT = 5
#: maximum task id
MAX_TASK_ID = 99999
# wait period for the task thread's condition variable
TASK_WAIT_CIRCLE = 1
#: task state: ready
TASK_READY = 0
#: task state: waiting
TASK_WAITING = 1
#: task state: finished
TASK_FINISHED = 2
#============== database module configuration =============#
try:
import pyodbc
except Exception:
print "pyodbc not installed, please install it first"
sys.exit()
#: database connection parameters
db_conn_info = {
# "HOST" : "localhost\\sqlserver2008",
# "USER" : "sa",
# "PASSWORD" : "cslg123456,./",
"HOST" : "127.0.0.1",
"USER" : "wsngump",
"PASSWORD" : "wsngump",
"DATABASE" : "mushroom",
}
POLICY_NEW = 2
POLICY_RUNNING = 1
POLICY_OLD = 0
db_reconnect_cycle = [10, 30, 60, 120]
#============ socket queue module configuration =============#
#: select timeout
SELECT_TIMEOUT = 2
#: idle time after which a socket connection is treated as dead
SOCKET_TIMEOUT = 10
#: address and port of the connection service offered to the ARM boards
ARM_SERVER_ADDR = ['10.18.50.66', 9000]
#: address and port of the connection service offered to Django
DJANGO_SERVER_ADDR = ['10.18.50.66', 9001]
#: direction; in this system either ARM or Django
BIRTH_TYPE_MANUAL = 0
BIRTH_TYPE_AUTO = 1
arm_client_list = []
arm_client_dic = {}
django_client_list = []
django_client_dic = {}
#============= logging module configuration ===============#
#: logging configuration parameters
log_conf = {
'ERROR' : ON,
'COMMUNICATION' : ON,
'DEBUG' : ON,
'WORK' : ON,
}
log_file = {
    'ERROR'         : r'..\log\error.txt',
    'COMMUNICATION' : r'..\log\communication.txt',
    'WORK'          : r'..\log\work.txt',
    'DEBUG'         : r'..\log\debug.txt',
}
LOG_TIMER_CYCLE = 1
#: global log manager
try:
from log_manager import Logger
# log_manager = LogManager()
log_handler = Logger('', )
except Exception:
pass
#============= wire protocol module configuration ===============#
#----- controller ——> data layer -------#
#: packet header marker
A_HEAD = 'MUSHROOM'
#: packet terminator
A_END = a2b_hex('13')
#: version of the ARM-side message packets
A_VERSION = 1
#: bytes used for the packet length field
A_pkg_byte = 3
#: bytes used for the packet version field
# A_version_byte = 2
#: bytes used for the application-layer message header
A_header_byte = 3
# receive timeout
RECV_TIMEOUT = 3
#: header marker for messages exchanged with Django
D_HEAD = 'MUSHROOM'
#: version of the packets exchanged with Django
D_VERSION = 1
#: bytes used for the packet version field
D_version_byte = 1
#: bytes used for the application-layer message header
D_lenght_byte = 4
#============ data storage module configuration ===============#
MAX_BUFFER_LEN = 1000000
# buffered sensor data awaiting storage
sensor_data_queue = deque(maxlen = MAX_BUFFER_LEN)
DATA_STORING_CYCLE = 10
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 10 11:19:29 2019
@author: Vall
"""
import iv_analysis_module as iva
import matplotlib.pyplot as plt
import iv_save_module as ivs
import iv_utilities_module as ivu
import numpy as np
#%%
# Parameters
home = r'C:\Users\Usuario\OneDrive\Labo 6 y 7'
name = 'M_20190610_02'
# Save parameters
autosave = True
overwrite = True
# Plot parameters
plot_params = dict(
plot = False,
interactive = False,
autoclose = True,
)
plot_params = ivu.InstancesDict(plot_params)
# Fit parameters
fit_params = dict(
use_full_mean = True,
use_experiments = [1], # First is 0, not 1!
send_tail_to_zero = True,
tail_method = 'mean', # Could also be 'min' or 'max' or any numpy function
use_fraction = .2,
choose_t0 = True,
choose_tf = False,
max_svalues = 100,
)
fit_params = ivu.InstancesDict(fit_params)
# Create full filename
filename = ivs.filenameToMeasureFilename(name, home)
# Load data
t, V, details = ivs.loadNicePumpProbe(filename)
# Choose data to fit
if fit_params.use_full_mean:
data = np.mean(V, axis=1)
else:
data = np.mean(V[:, fit_params.use_experiments], axis=1)
# Choose time interval to fit
t0 = 40.12385 # This is an initial time we think that optimizes it
i = np.argmin(np.abs(t-t0)) # We'll take this index as main initial time
Ni = 40 # We'll try this many indices to the right and left of the main index
svalues = 10 # number of singular values
#%%
# Now iterate, fitting on different initial times
results = []
other_results = []
fit_terms = []
jgood = [] # Here we'll collect the indices that allow fitting
j0 = max(i - Ni, 0) # first candidate index; the lists below are positional, offset by j0
for j in range(j0, i + Ni):
# Choose initial time t0
t0j = t[j]
# print(t0j, j)
    # Crop data accordingly
tj, dataj = iva.cropData(t0j, t, data)
fit_params.time_range = (t0j, t[-1])
fit_params.voltage_zero = 0
# Use linear prediction, if allowed
try:
res, other, plot = iva.linearPrediction(tj,
dataj,
details['dt'],
svalues=svalues,
printing=False)
jgood.append(j)
results.append(res)
other_results.append(other)
fit_terms.append(plot.fit)
except:
results.append(None)
other_results.append(None)
fit_terms.append(None)
    # res/other/plot may be unbound when the fit failed, so only drop the locals we always set
    del t0j, tj, dataj
# Now select only the fits that satisfy us
jreallygood = []
jrare = [] # The ones that hold only one oscillant term
frequencies = []
quality = []
chi = []
meanqdiff = []
stdqdiff = []
nterms = []
for j in jgood:
    # results/other_results/fit_terms are positional lists, so shift by j0
    res = results[j - j0]
    other = other_results[j - j0]
if res.shape[0]!=1:
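        # pick the fitted term whose frequency is closest to 9 GHz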
imax = np.argmin(np.abs(res[:,0] - 9 * np.ones(len(res[:,0]))))
if res[imax,0] != 0:
frequencies.append(res[imax,0])
quality.append(res[imax,2])
chi.append(other['chi_squared'])
jreallygood.append(j)
            term = fit_terms[j - j0]
meanqdiff.append( np.mean( (term[:,1]-term[:,imax+2])**2 ) )
stdqdiff.append( np.std( (term[:,1]-term[:,imax+2])**2 ))
nterms.append(res.shape[0])
else:
if res[0,0] != 0:
frequencies.append(res[0,0])
quality.append(res[0,2])
chi.append(other['chi_squared'])
jreallygood.append(j)
jrare.append(j)
            term = fit_terms[j - j0]
meanqdiff.append( np.mean( (term[:,1]-term[:,3])**2 ) )
            stdqdiff.append(np.std((term[:,1] - term[:,3])**2))  # single-term fit is column 3 (imax is not defined in this branch)
nterms.append(res.shape[0])
del res, other
#%%
fig, axs = plt.subplots(2, 3)
axs[0, 0].plot(jreallygood, frequencies, 'x')
axs[0, 0].grid()
axs[0, 1].plot(jreallygood, quality, 'o')
axs[0, 1].grid()
axs[0, 2].plot(jreallygood, nterms, 'o')
axs[0, 2].grid()
axs[1, 0].plot(jreallygood, chi, '.')
axs[1, 0].grid()
axs[1, 1].plot(jreallygood, meanqdiff, 'x')
axs[1, 1].grid()
axs[1, 2].plot(jreallygood, stdqdiff, 'x')
axs[1, 2].grid()
plt.show()
#%%
plt.figure()
plt.plot(jreallygood, frequencies, 'x')
# frequencies etc. are positional over jreallygood; find where the main index landed
ii = jreallygood.index(i) if i in jreallygood else None
if ii is not None:
    plt.plot(i, frequencies[ii], 'xr')
plt.ylabel('Frequency (GHz)')
plt.grid()
plt.figure()
plt.plot(jreallygood, quality, 'o')
if ii is not None:
    plt.plot(i, quality[ii], 'or')
plt.ylabel('Quality factor')
plt.grid()
plt.figure()
plt.plot(jreallygood, chi, '.')
if ii is not None:
    plt.plot(i, chi[ii], 'xr')
plt.ylabel('Chi squared')
plt.grid()
plt.figure()
plt.plot(jreallygood, meanqdiff, 'x')
if ii is not None:
    plt.plot(i, meanqdiff[ii], 'xr')
plt.ylabel('Mean squared difference')
plt.grid()
plt.figure()
plt.plot(jreallygood, stdqdiff, 'x')
if ii is not None:
    plt.plot(i, stdqdiff[ii], 'xr')
plt.ylabel('Standard deviation of the squared difference')
plt.grid()
plt.figure()
plt.plot(jreallygood, nterms, 'o')
if ii is not None:
    plt.plot(i, nterms[ii], 'xr')  # was stdqdiff[i], a copy-paste slip
plt.ylabel('Number of fitted terms')
plt.grid()
|
from django.contrib import admin
from .models import Visitors, Entry_Schedule
# Register your models here.
admin.site.register(Visitors)
admin.site.register(Entry_Schedule)
|
import os
from collections import OrderedDict
import torch
import logging
from easydict import EasyDict as edict
import yaml
def print_to_screen(loss, lr, its, epoch, its_num,
logger, data_time, train_time, mem, acc=0):
logger.info(("[%d][%d/%d]\t"%(epoch, its, its_num)+
"Loss:%.5f\t"%(loss)+"Lr:%.6f\t"%(lr)
+"Data:%.4dms\t"%(data_time*1000)+"Train:%.4dms\t"%(train_time*1000))
+"Mem:%.2fGb\t"%(mem) +"Prec@1:%.4f"%(acc))
def save_checkpoints(save_path, model, opt, epoch, lrs=None):
    states = {'model_state': model.state_dict(),
              'epoch': epoch + 1,
              'opt_state': opt.state_dict()}
    # only store the scheduler state when a scheduler is in use;
    # the old else-branch called lrs.state_dict() on None
    if lrs is not None:
        states['lrs'] = lrs.state_dict()
    torch.save(states, save_path)
def load_checkpoints(model, opt, save_path, logger, lrs=None):
try:
states = torch.load(save_path.EXPS+save_path.NAME+save_path.MODEL)
model.load_state_dict(states['model_state'])
opt.load_state_dict(states['opt_state'])
current_epoch = states['epoch']
if lrs is not None:
lrs.load_state_dict(states['lrs'])
logger.info('loading checkpoints success')
except:
current_epoch = 0
logger.info("no checkpoints")
return current_epoch
def model_complexity(model,cfg,logger):
from ptflops import get_model_complexity_info
flops, params = get_model_complexity_info(model, (cfg.MODEL.IN_DIM, cfg.TRAIN.CROP, cfg.TRAIN.CROP),
as_strings=True, print_per_layer_stat=True)
logger.info('{:<30} {:<8}'.format('Computational complexity: ', flops))
logger.info('{:<30} {:<8}'.format('Number of parameters: ', params))
def load_cfg(cfg):
cfg_name = cfg.PATH.EXPS+cfg.PATH.NAME+'/'+cfg.PATH.NAME+'.yaml'
if not os.path.exists(cfg.PATH.EXPS+cfg.PATH.NAME):
os.mkdir(cfg.PATH.EXPS+cfg.PATH.NAME)
# for log path, can only change by code file
logging.basicConfig(format='%(message)s',
level=logging.DEBUG,
filename=cfg.PATH.EXPS+cfg.PATH.NAME+cfg.PATH.LOG)
stream_handler = logging.StreamHandler()
logger = logging.getLogger(cfg.PATH.NAME)
logger.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
if os.path.exists(cfg_name):
logger.info('start loading config files...')
seed_add = 10
with open(cfg_name) as f:
old_cfg = edict(yaml.load(f, Loader=yaml.FullLoader))
for k, v in old_cfg.items():
if k in cfg:
if isinstance(v, dict):
for vk, vv in v.items():
if vk in cfg[k]:
cfg[k][vk] = vv
else:
logger.error("{} not exist in config.py".format(vk))
else:
cfg[k] = v
else:
logger.error("{} not exist in config.py".format(k))
logger.info('loading config files success')
cfg.DETERMINISTIC.SEED += seed_add
logger.info('change random seed success')
else:
logger.info('start creating config files...')
cfg_dict = dict(cfg)
for k, v in cfg_dict.items():
if isinstance(v, edict):
cfg_dict[k] = dict(v)
with open(cfg_name, 'w') as f:
yaml.dump(dict(cfg_dict), f, default_flow_style=False)
logger.info('update config files success')
return logger
class SelfData(object):
def __init__(self):
self.value = 0
self.counter = 0 + 1e-8
def add_value(self,add_value):
self.counter += 1
self.value += add_value.data.cpu().numpy()
def avg(self):
return self.value/self.counter
class CalculateAcc(object):
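    # Running top-k accuracy accumulated over batches; print_() returns the fraction.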
def __init__(self,topk=1):
self.count_success_a = 0 + 1e-8
self.count = 0+ 1e-8
self.topk = topk
def add_value(self,output,target):
self.count += output.shape[0]
_, preds = output.data.topk(self.topk,1,True,True)
preds = preds.t()
for pred in preds:
self.count_success_a += pred.eq(target.data.view_as(pred)).sum().numpy()
def print_(self):
return (self.count_success_a/self.count)
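# Usage sketch (illustrative): accumulate per batch, read out at epoch end.
#
#   acc_meter = CalculateAcc(topk=1)
#   for images, labels in loader:
#       outputs = model(images)
#       acc_meter.add_value(outputs, labels)
#   print(acc_meter.print_())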
def load_test_checkpoints(model, save_path, logger, use_best=False):
    if use_best:
        ckpt_path = save_path.EXPS + save_path.NAME + save_path.BESTMODEL
    else:
        ckpt_path = save_path.EXPS + save_path.NAME + save_path.MODEL
    states = torch.load(ckpt_path) if torch.cuda.is_available() \
        else torch.load(ckpt_path, map_location=torch.device('cpu'))
    try:
        model.load_state_dict(states['model_state'])
    except RuntimeError:
        # checkpoint saved from a DataParallel model: strip the leading
        # 'module.' prefix from every key, then retry
        states_no_module = OrderedDict()
        for k, v in states['model_state'].items():
            states_no_module[k[7:]] = v
        model.load_state_dict(states_no_module)
    logger.info('checkpoint loaded successfully')
def plot_result_data(acc_total, acc_val_total, loss_total, loss_val_total, cfg_path, epoch):
    import matplotlib.pyplot as plt
    y = range(epoch)
    plt.plot(y, acc_total, linestyle="-", linewidth=1, label='acc_train')
    plt.plot(y, acc_val_total, linestyle="-", linewidth=1, label='acc_val')
    plt.legend(('acc_train', 'acc_val'), loc='upper right')
    plt.xlabel("Training Epoch")
    plt.ylabel("Acc on dataset")
    plt.savefig('{}/acc.png'.format(cfg_path))
    plt.cla()
    plt.plot(y, loss_total, linestyle="-", linewidth=1, label='loss_train')
    plt.plot(y, loss_val_total, linestyle="-", linewidth=1, label='loss_val')
    plt.legend(('loss_train', 'loss_val'), loc='upper right')
    plt.xlabel("Training Epoch")
    plt.ylabel("Loss on dataset")
    plt.savefig('{}/loss.png'.format(cfg_path))
|
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import warnings
from dataclasses import dataclass
from functools import wraps
from types import FunctionType, MethodDescriptorType, MethodType, ModuleType
from typing import Any, Container, Mapping, Optional, cast
import numpy as np
from typing_extensions import Protocol
from .runtime import runtime
from .utils import find_last_user_frames, find_last_user_stacklevel
__all__ = ("clone_module", "clone_np_ndarray")
FALLBACK_WARNING = (
"cuNumeric has not implemented {name} "
+ "and is falling back to canonical numpy. "
+ "You may notice significantly decreased performance "
+ "for this function call."
)
MOD_INTERNAL = {"__dir__", "__getattr__"}
NDARRAY_INTERNAL = {
"__array_finalize__",
"__array_function__",
"__array_interface__",
"__array_prepare__",
"__array_priority__",
"__array_struct__",
"__array_ufunc__",
"__array_wrap__",
}
def filter_namespace(
ns: Mapping[str, Any],
*,
omit_names: Optional[Container[str]] = None,
omit_types: tuple[type, ...] = (),
) -> dict[str, Any]:
omit_names = omit_names or set()
return {
attr: value
for attr, value in ns.items()
if attr not in omit_names and not isinstance(value, omit_types)
}
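# Illustrative example (hypothetical names): keep everything in a module's
# namespace except the attribute "seed" and any submodules:
#
#   filter_namespace(vars(some_pkg), omit_names={"seed"},
#                    omit_types=(ModuleType,))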
class AnyCallable(Protocol):
def __call__(self, *args: Any, **kwargs: Any) -> Any:
...
@dataclass(frozen=True)
class CuWrapperMetadata:
implemented: bool
single: bool = False
multi: bool = False
class CuWrapped(AnyCallable, Protocol):
_cunumeric: CuWrapperMetadata
__wrapped__: Any
__name__: str
__qualname__: str
def implemented(
func: AnyCallable, prefix: str, name: str, reporting: bool = True
) -> CuWrapped:
name = f"{prefix}.{name}"
wrapper: CuWrapped
if reporting:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
location = find_last_user_frames(
not runtime.args.report_dump_callstack
)
runtime.record_api_call(
name=name,
location=location,
implemented=True,
)
return func(*args, **kwargs)
else:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
return func(*args, **kwargs)
# This is incredibly ugly and unpleasant, but @wraps(func) doesn't handle
# ufuncs the way we need it to. The alternative would be to vendor and
# modify a custom version of @wraps
if hasattr(wrapper.__wrapped__, "_name"):
wrapper.__name__ = wrapper.__wrapped__._name
wrapper.__qualname__ = wrapper.__wrapped__._name
# TODO (bev) Scraping text to set flags seems a bit fragile. It would be
# preferable to start with flags, and use those to update docstrings.
multi = "Multiple GPUs" in (getattr(func, "__doc__", None) or "")
single = "Single GPU" in (getattr(func, "__doc__", None) or "") or multi
wrapper._cunumeric = CuWrapperMetadata(
implemented=True, single=single, multi=multi
)
return wrapper
def unimplemented(
func: AnyCallable,
prefix: str,
name: str,
reporting: bool = True,
self_fallback: Optional[str] = None,
) -> CuWrapped:
name = f"{prefix}.{name}"
# Skip over NumPy's `__array_function__` dispatch wrapper, if present.
# NumPy adds `__array_function__` dispatch logic through decorators, but
# still makes the underlying code (which converts all array-like arguments
# to `numpy.ndarray` through `__array__`) available in the
# `_implementation` field.
# We have to skip the dispatch wrapper, otherwise we will trigger an
# infinite loop. Say we're dealing with a call to `cunumeric.foo`, and are
# trying to fall back to `numpy.foo`. If we didn't skip the dispatch
# wrapper of `numpy.foo`, then NumPy would ask
# `cunumeric.ndarray.__array_function__` to handle the call to `numpy.foo`,
# then `cunumeric.ndarray.__array_function__` would call `cunumeric.foo`,
# and we would end up here again.
func = getattr(func, "_implementation", func)
wrapper: CuWrapped
if reporting:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
location = find_last_user_frames(
not runtime.args.report_dump_callstack
)
runtime.record_api_call(
name=name,
location=location,
implemented=False,
)
if self_fallback:
self_value = getattr(args[0], self_fallback)()
args = (self_value,) + args[1:]
return func(*args, **kwargs)
else:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
stacklevel = find_last_user_stacklevel()
warnings.warn(
FALLBACK_WARNING.format(name=name),
stacklevel=stacklevel,
category=RuntimeWarning,
)
if self_fallback:
self_value = getattr(args[0], self_fallback)()
args = (self_value,) + args[1:]
return func(*args, **kwargs)
wrapper._cunumeric = CuWrapperMetadata(implemented=False)
return wrapper
def clone_module(
origin_module: ModuleType, new_globals: dict[str, Any]
) -> None:
"""Copy attributes from one module to another, excluding submodules
Function types are wrapped with a decorator to report API calls. All
other values are copied as-is.
Parameters
----------
    origin_module : ModuleType
Existing module to clone attributes from
new_globals : dict
a globals() dict for the new module to clone into
Returns
-------
None
"""
mod_name = origin_module.__name__
missing = filter_namespace(
origin_module.__dict__,
omit_names=set(new_globals).union(MOD_INTERNAL),
omit_types=(ModuleType,),
)
reporting = runtime.args.report_coverage
from ._ufunc.ufunc import ufunc as lgufunc
for attr, value in new_globals.items():
# Only need to wrap things that are in the origin module to begin with
if attr not in origin_module.__dict__:
continue
if isinstance(value, (FunctionType, lgufunc)):
wrapped = implemented(
cast(AnyCallable, value), mod_name, attr, reporting=reporting
)
new_globals[attr] = wrapped
from numpy import ufunc as npufunc
for attr, value in missing.items():
if isinstance(value, (FunctionType, npufunc)):
wrapped = unimplemented(value, mod_name, attr, reporting=reporting)
new_globals[attr] = wrapped
else:
new_globals[attr] = value
def should_wrap(obj: object) -> bool:
return isinstance(obj, (FunctionType, MethodType, MethodDescriptorType))
def clone_np_ndarray(cls: type) -> type:
"""Copy attributes from np.ndarray to cunumeric.ndarray
Method types are wrapped with a decorator to report API calls. All
other values are copied as-is.
"""
origin_class = np.ndarray
class_name = f"{origin_class.__module__}.{origin_class.__name__}"
missing = filter_namespace(
origin_class.__dict__,
# this simply omits ndarray internal methods for any class. If
# we ever need to wrap more classes we may need to generalize to
# per-class specification of internal names to skip
omit_names=set(cls.__dict__).union(NDARRAY_INTERNAL),
)
reporting = runtime.args.report_coverage
for attr, value in cls.__dict__.items():
# Only need to wrap things that are in the origin class to begin with
if not hasattr(origin_class, attr):
continue
if should_wrap(value):
wrapped = implemented(value, class_name, attr, reporting=reporting)
setattr(cls, attr, wrapped)
for attr, value in missing.items():
if should_wrap(value):
wrapped = unimplemented(
value,
class_name,
attr,
reporting=reporting,
self_fallback="__array__",
)
setattr(cls, attr, wrapped)
else:
setattr(cls, attr, value)
return cls
def is_implemented(obj: Any) -> bool:
return hasattr(obj, "_cunumeric") and obj._cunumeric.implemented
def is_single(obj: Any) -> bool:
return hasattr(obj, "_cunumeric") and obj._cunumeric.single
def is_multi(obj: Any) -> bool:
return hasattr(obj, "_cunumeric") and obj._cunumeric.multi
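# Usage sketch (hypothetical bootstrap, modeled on how a wrapper package
# could shadow NumPy; not taken verbatim from this repository):
#
#   import numpy
#   clone_module(numpy, globals())        # wrap module-level functions
#
#   @clone_np_ndarray
#   class ndarray(thunk_based_ndarray):   # wrap np.ndarray methods
#       ...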
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time, math
import socket
import numpy as np
import rospy
from std_msgs.msg import String
#
# IMPORTANT NOTE: Don't add a coding line here! It's not necessary for
# site files
#
# IBUKI MODULE 2.0
#
#==============================================================================
# IBUKI'S CONTROLLER IP LIST
#
# IP addresses of Ibuki's Raspberry Pis and mbeds.
# Three Raspberry Pis and ten mbeds are in use in the Ibuki test platform;
# there is no need to change anything here unless an additional device is added.
#==============================================================================
ip_dict = {'master':"192.168.99.101",
'ibuki2':"192.168.99.102",
'ibuki3':"192.168.99.103",
'handr':"192.168.99.2",
'handl':"192.168.99.3",
'armr':"192.168.99.12",
'arml':"192.168.99.13",
'neck':"192.168.99.22",
'headl':"192.168.99.31",
'headc':"192.168.99.32",
'headr':"192.168.99.33",
'hip':"192.168.99.42",
'wheel':"192.168.99.52",
'codex':"192.168.99.99",
'test':"119.63.197.151"
}
#==============================================================================
# IBUKI'S CONTROLLER PORT LIST
#
# Ports of Ibuki's Raspberry Pis and mbeds.
# The wheel controller uses two ports;
# there is no need to change anything here unless an additional device is added.
#==============================================================================
port_dict = {'handl':10006,
'handr':10007,
'tts':10008,
'tts2':10009,
'tts3':10010,
'headl':10011,
'headc':10012,
'headr':10013,
'arml':10014,
'armr':10015,
'neck':10016,
'hip':10017,
'wheel':10018,
'wheel2':10019,
'executor':10099,
'test':56
}
#==============================================================================
# IBUKI'S CONTROLLER DEFAULT POSITION LIST
#
# Default joint positions for every controller.
# Protocol: |0xxxx|0xxxx|0xxxx|0xxxx|0xxxx| where xxxx is the joint position * 1000.
# Please be careful when you change these values.
# TODO: change it to tuples or other immutable sequences
#==============================================================================
default_dict = {'handl':'0437304104042240389604015',
'handr':'0604505925059250574605537',
'headl':'035000430042000410005900',
'headc':'0340006300049000485004000',
'headr':'0410005400045000600004790',
'arml':'0730005000050000500005000',
'armr':'0270004600045000630005300',
'neck':'0800003000048300500005100',
'hip':'0500004871052360500005000',
'wheel':'0500005000010000500004000',
'test':'0500005000050000500005000'
}
#==============================================================================
# IBUKI'S POSTURES LIST
#
# ibuki's posture
# TODO: enable the function of perform mapping destination
#==============================================================================
pose_dict = {'rightarm':'0600005000050000500005000'}
"socket initialization"
motorsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def ip(__devicename):
return ip_dict[__devicename]
def port(__portname):
return port_dict[__portname]
def default(__devicename):
return default_dict[__devicename]
def pose(__posename):
return pose_dict[__posename]
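# Quick usage sketch (values taken from the dictionaries above):
#
#   ip('neck')       -> "192.168.99.22"
#   port('neck')     -> 10016
#   default('neck')  -> '0800003000048300500005100'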
#==============================================================================
# PATTERN GENERATOR
#
# read .ysq file and return motion-time and timing-time (list)
#==============================================================================
def pattern_generator(_filename):
    "read .ysq file"
    motion_list = []
    "get list of timing and motion [('timing','motion')]"
    # the with-block closes the file even if parsing fails
    with open('/root/ibuki_ws/src/ibuki_motion/src/symposium/'+_filename, 'r') as motion_file:
        for line in motion_file.readlines():
            line = line.strip()
            motion_line = line.split('\t')
            motion_list.append(motion_line)
"get time sequences"
temp_timing = []
inte_timing = []
#same effect with lambda function
for index in range(0,len(motion_list)):
temp_timing.append(motion_list[index][0])
for index in range(0,len(temp_timing)):
temp_timing[index]=float(temp_timing[index])
if (index == 0):
inte_timing.append(temp_timing[index])
else:
inte_timing.append(temp_timing[index]-temp_timing[index-1])
#print(inte_timing)
"get motion sequences"
motion_pattern = []
#emotion_sign = []
for index in range(0,len(motion_list)):
motion_pattern.append(motion_list[index][1])
# for index in range(0,len(motion_list)):
# emotion_sign.append(motion_list[index][2])
"what return is two lists: motion-time and timing-time."
return motion_pattern, inte_timing#, emotion_sign
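# For a .ysq file whose tab-separated lines are "<time>\t<command>", e.g.
# (illustrative):
#   0.0     0500005000050000500005000
#   1.5     0600005000050000500005000
# pattern_generator() returns the command strings plus the intervals between
# consecutive timestamps: (['05000...', '06000...'], [0.0, 1.5]).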
#==============================================================================
# MOTION GENERATOR
#
# read .ysq file, generate mapping motion to the correct controller
#
# mainly for stable demos; not recommended when you are doing research
#
# the weak point of the .ysq format is that its input is an absolute motor
# command, so if a default value is changed, your whole .ysq file is shifted
# relative to the new default.
# TODO: a new file format is needed, or first develop a function for
# shifting .ysq files
#==============================================================================
def motion_generator(_filename,_devicename):
"socket initialization"
#motorsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
message_init = default(_devicename)
"Timer"
start_clk = time.time() #actual time of running start
"THRESHOLD: USUALLY WE DON'T NEED PRECISE SYNC BUT IF PLEASE KEEP EYE ON"
inte_threshold = 0.005
generated_motion_pattern = []
interval_timing = []
#empty = []
"PATTERN GENERATOR"
generated_motion_pattern, interval_timing = pattern_generator(_filename)
#print(start_clk)
for index in range(0,len(interval_timing)):
#motorsock.sendto(message_init,(ip(_devicename), port(_devicename)))
"Make correct timing for each motion, MODE:lay-back"
if (index == 0):
motorsock.sendto(message_init,(ip(_devicename), port(_devicename)))
time.sleep(inte_threshold)
time.sleep(interval_timing[index])
"Change to motor command encode"
message_motor = generated_motion_pattern[index]
print(message_motor)
motorsock.sendto(message_motor,(ip(_devicename), port(_devicename)))
end_clk = time.time()
print('[RUNNING_TIME] ',end_clk-start_clk)
#==============================================================================
# EXECUTE MOTOR (ROUND-TRIP)
#
# enable the a specific controller, and control the joints to a specific
# position, and keep it for a time, and then return to the default position
#
# CAUTION: WE DON'T CONTROL SPEED IN THIS FUNCTION!!!
#
# usually used in fast motion(mouth, finger, blink)
# So the interval_time should not be set more than 1 sec
#==============================================================================
def exec_motor(message_motor, _devicename, interval_time = 0.2):
"Timer"
start_clk = time.time() #actual time of running start
"Threshold: Because there is a delay when Ibuki due playing .wav"
#print(start_clk)
message_init = default(_devicename)
print(message_motor)
motorsock.sendto(message_motor, (ip(_devicename), port(_devicename)))
time.sleep(interval_time*0.5)
motorsock.sendto(message_init,(ip(_devicename), port(_devicename)))
time.sleep(interval_time*0.5)
end_clk = time.time()
print('runningtime:', end_clk-start_clk)
#==============================================================================
# RESET
#
# reset to default position
# nothing good to say
# TODO: build a slow reset system, mbed controller feedback, new ports needed
#==============================================================================
def reset(_devicename):
message_init = default(_devicename)
motorsock.sendto(message_init,(ip(_devicename), port(_devicename)))
print('reset to ',message_init)
#time.sleep(interval_time*0.5)
#==============================================================================
# MERGE FUNCTION OF IBUKI
#
# merge the int list [x,xx,xxx,xxxx,xxxx] into a complete string
# return sendable mbed command string
# the INPUT must be in global use
#==============================================================================
def merge_them(_global_joint_now):
_message = ''
_joint_send = []
for them in range(len(_global_joint_now)):
_joint_send.append(str(_global_joint_now[them]).zfill(5))
_message = _message.join(_joint_send)
_message.replace(" ","")
#print(_message)
return _message
#==============================================================================
# SEPARATE COMMAND STRING
#
# separate the command string "0xxxx0xxxx0xxxx0xxxx0xxxx" into
# an int list [xxxx, xxxx, xxxx, xxxx, xxxx]
#==============================================================================
def seperate_command_string(_command):
list_command =[]
for index in range(0,5):
list_command.append(int(_command[index*5:index*5+5]))
return list_command
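# Round-trip sketch: a 25-character command splits into five ints, and
# merge_them() zero-pads them back into the same string:
#
#   seperate_command_string('0500005000050000500005000')
#       -> [5000, 5000, 5000, 5000, 5000]
#   merge_them([5000, 5000, 5000, 5000, 5000])
#       -> '0500005000050000500005000'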
#==============================================================================
# SEPARATE ANGLES
#
# usually used for reading mbed sensor values
# protocol: 'axxaxxxa' where xx is a sensor value (int)
# returns a list of two sensor readings
# TODO: increase the usability
#==============================================================================
def seperate_angles(_rosmessage):
if type(_rosmessage.data) != int:
_string = _rosmessage.data
_list_angle = [0,0,0]
if _string.startswith('a'):
_list_angle = _string.split('a')
        # start from index 1: index 0 is the empty string before the first 'a'
_list_angle[1] = int(_list_angle[1])
_list_angle[2] = int(_list_angle[2])
print ([_list_angle[1], _list_angle[2]])
return [_list_angle[1], _list_angle[2]]
#==============================================================================
# JOINT TO WHERE
#
# THIS FUNCTION USES A .YSQ FILE
# uses joint_now (global) to move the joints linearly to a specific position
# joint_now is also updated, although we don't get any feedback;
# accuracy depends on the precision of that controller's PID control
#==============================================================================
def joint_to_where(_joint_now,_filename,_devicename):
"np the joint_now"
_joint_now_np = np.array(_joint_now)
"open the .ysq file"
#old_timing = 0
new_timing = 0
target_motion_pattern = []
time_needed = []
#emotion = []
start_clk = time.time() #actual time of running start
target_motion_pattern, time_needed = pattern_generator(_filename)
"change the motion command to the joint position list"
for i in range(len(target_motion_pattern)):
"init"
mrg_to_target = _joint_now_np
list_target_pos = seperate_command_string(target_motion_pattern[i])
new_timing = time_needed[i]
"where is joint now? compare it with the nst target position"
mrg_to_target = np.array(list_target_pos) - mrg_to_target
"how long is the time needed, generate the steps"
        inteval_jtw = 0.05
        time_inteval_jtw = new_timing# - old_timing
        # at least one step, so very short intervals cannot divide by zero
        lines_nb = max(1, int(time_inteval_jtw/inteval_jtw))
        #old_timing = new_timing
        step_to_target = mrg_to_target/lines_nb
        "for line = 0 to line = maxline, add the step gradually"
for lines in range(0, lines_nb):
_joint_now_np = _joint_now_np + step_to_target
_joint_now_int = list(np.array(_joint_now_np,dtype = 'int32'))
_message_ = merge_them(_joint_now_int)
"send the signal to the correct device"
motorsock.sendto(_message_, (ip(_devicename),port(_devicename)))
time.sleep(inteval_jtw)
print(_message_)
"deal with the joint now lists"
end_clk = time.time()
print('[RUNNING_TIME] ',end_clk-start_clk)
_joint_now = list(_joint_now_np)
return _joint_now
#==============================================================================
# JOINT TO THERE
#
# uses joint_now (global) to move the joints linearly to a specific position
# joint_now is also updated, although we don't get any feedback;
# accuracy depends on the precision of that controller's PID control
#==============================================================================
def joint_to_there(_joint_now,_joint_there,_time_needed,_devicename):
new_timing = 0
start_clk = time.time() #actual time of running start
"np the joint_now"
_joint_now_np = np.array(_joint_now)
target_motion_pattern, time_needed = _joint_there, _time_needed
mrg_to_target = _joint_now_np
list_target_pos = seperate_command_string(target_motion_pattern)
new_timing = time_needed
"where is joint now? compare it with the nst target position"
mrg_to_target = np.array(list_target_pos) - mrg_to_target
"how long is the time needed, generate the steps"
    inteval_jtw = 0.05
    time_inteval_jtw = new_timing# - old_timing
    # at least one step, so very short intervals cannot divide by zero
    lines_nb = max(1, int(time_inteval_jtw/inteval_jtw))
    #old_timing = new_timing
    step_to_target = mrg_to_target/lines_nb
    "for line = 0 to line = maxline, add the step gradually"
for lines in range(0, lines_nb):
_joint_now_np = _joint_now_np + step_to_target
_joint_now_int = list(np.array(_joint_now_np,dtype = 'int32'))
_message_ = merge_them(_joint_now_int)
"send the signal to the correct device"
motorsock.sendto(_message_, (ip(_devicename),port(_devicename)))
time.sleep(inteval_jtw)
print(_message_)
"deal with the joint now lists"
end_clk = time.time()
print('[RUNNING_TIME] ',end_clk-start_clk)
_joint_now = list(_joint_now_np)
return _joint_now
"positionname is a string"
def to_position(_pos_name, _dev_name):
motorsock.sendto(_pos_name, (ip(_dev_name), port(_dev_name)))
#==============================================================================
# JOINT
#
# class Joint
# joint_now is also updated, although we don't get any feedback;
# accuracy depends on the precision of that controller's PID control
#==============================================================================
class Joint(object):
def __init__(self):
self.name = 'test'
self.spinrate = 20
self.threshold = [0,0,0,0]
self.fromwhich = 2
self.joint_default = seperate_command_string(default(self.name))
self.joint_now = seperate_command_string(default(self.name))
self.key = None
self.loop_rate = rospy.Rate(self.spinrate)
#Publisher
self.pub = rospy.Publisher('info_'+self.name, String, queue_size = 25)
#Subscriber
rospy.Subscriber('keys', String, self.callback)
rospy.Subscriber('peer', String, self.move_limb) #peer from PC
#rospy.Subscriber('peer',, self.move_arml)
def callback(self, msg):
self.key = msg.data
if(msg.data == 'j'):
print("hello")
            msg.data = 'm'
elif(msg.data == 'r'):
self.joint_now = joint_to_where(self.joint_now,'motion_'+self.name+'a.ysq',self.name)
elif(msg.data == 't'):
self.joint_now = joint_to_where(self.joint_now,'motion_'+self.name+'b.ysq',self.name)
elif(msg.data == 'g'):
self.pub.publish('stop please..')
elif(msg.data == 'b'):
reset(self.name)
def start(self):
rospy.loginfo(self.name)
while not rospy.is_shutdown():
#message = ibuki.merge_them(self.joint_now)
#self.pub.publish(message)
self.loop_rate.sleep()
"in neck, _which = 0 and 1"
def change_joint_now(self, sensor_value, _which = 2):
self.joint_now[_which] = int(self.joint_default[_which]+ self.threshold[0] +\
self.threshold[1]*math.sin(sensor_value*2*math.pi/360))
self.joint_now[_which+1] = int(self.joint_default[_which + 1] + self.threshold[2] +\
self.threshold[3]*math.sin(sensor_value*2*math.pi/360))
"""UNIQUE FUNCITONS"""
def move_limb(self, msg):
list_sensor = seperate_angles(msg)
# print(list_sensor)
the_value_i_want = int(list_sensor[0]) + 90
#print the_value_i_want
self.change_joint_now(the_value_i_want,self.fromwhich)
#print(self.joint_now)
_message_wait = merge_them(self.joint_now)
        print(_message_wait)
to_position(_message_wait,self.name)
#==============================================================================
# IDLE JOINT
#
# class IdleJoint
# joint_now is also updated, although we don't get any feedback;
# accuracy depends on the precision of that controller's PID control
#==============================================================================
class IdleJoint(object):
def __init__(self):
self.name = 'test'
self.spinrate = 20
self.whichkind = 0
self.joint_default = seperate_command_string(default(self.name))
self.joint_now = seperate_command_string(default(self.name))
self.key = None
self.loop_rate = rospy.Rate(self.spinrate)
#Publisher
self.pub = rospy.Publisher('info_'+self.name, String, queue_size = 25)
#Subscriber
rospy.Subscriber('keys', String, self.callback)
if self.name == 'headl':
rospy.Subscriber('blink', String, self.keys_blink)
#rospy.Subscriber('peer',, self.move_arml)
def callback(self, msg):
self.key = msg.data
if(msg.data == 'j'):
print("hello")
            msg.data = 'm'
elif(msg.data == 'r'):
self.joint_now = joint_to_where(self.joint_now,'motion_'+self.name+'a.ysq',self.name)
elif(msg.data == 't'):
self.joint_now = joint_to_where(self.joint_now,'motion_'+self.name+'b.ysq',self.name)
elif(msg.data == 'g'):
self.pub.publish('stop please..')
elif(msg.data == 'b'):
reset(self.name)
    def keys_blink(self, msg):
        print(msg.data)
        if(msg.data == 'blink'):
            exec_motor('0350005800052000410005900',self.name)
            msg.data = 'm'
def start(self):
rospy.loginfo(self.name)
while not rospy.is_shutdown():
#message = ibuki.merge_them(self.joint_now)
#self.pub.publish(message)
self.loop_rate.sleep()
|
def result(coords):
q1, q2, q3, q4, axis = 0, 0, 0, 0, 0
for c in coords:
if c[0] == 0 or c[1] == 0:
axis += 1
elif c[0] > 0:
if c[1] > 0:
q1 += 1
else:
q4 += 1
else:
if c[1] > 0:
q2 += 1
else:
q3 += 1
print("Q1: {0}\nQ2: {1}\nQ3: {2}\nQ4: {3}\nAXIS: {4}".format(
q1, q2, q3, q4, axis))
tc = int(input())
coords = []
for t in range(tc):
coords.append(list(map(int, input().split())))
result(coords)
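# Illustrative run: for the input
#   3
#   1 2
#   -1 -2
#   0 5
# the script prints Q1: 1, Q2: 0, Q3: 1, Q4: 0, AXIS: 1.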
|