from loc import nearby_search
from flask import Flask
import json
import requests
# 2. Create the app object
app = Flask(__name__)
@app.route('/')
def home():
return 'Server is Online'
@app.route('/article', methods=['GET'])
def art():
# Open the JSON file and return its contents as a dictionary
with open('article.json') as f:
data = json.load(f)
return data
@app.route('/nearby', methods=['GET'])
def nearby():
res = requests.get('https://ipinfo.io/')
data = res.json()
loc=data['loc']
r = nearby_search(loc)
return r
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
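# Example requests against the routes above (hypothetical client session, assuming the server is running locally on port 8080):
#   GET http://127.0.0.1:8080/          -> 'Server is Online'
#   GET http://127.0.0.1:8080/article   -> contents of article.json
#   GET http://127.0.0.1:8080/nearby    -> nearby_search() result for the caller's location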
|
import util_tools
from pathlib import Path
files = Path('../outputs_santi/class_outputs/').glob('*')
|
print("pastrami has been sold out\n")
sandwish_orders=['StrawBerry','pastrami','Banana','pastrami','pastrami','Apple','Pine','watermelon']
while 'pastrami' in sandwish_orders:
sandwish_orders.remove('pastrami')
print("Here are left sandwish")
for sandwish in sandwish_orders:
print(sandwish)
|
# -*- coding: utf-8 -*-
from architect.repository.client import BaseClient
from celery.utils.log import get_logger
logger = get_logger(__name__)
class EspClient(BaseClient):
def __init__(self, **kwargs):
super(EspClient, self).__init__(**kwargs)
def check_status(self):
return True
def generate_image(self, params):
return True
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 4 10:27:24 2019
@author: se14
"""
# simple ROC analysis of results (not the FROC analysis!)
import pandas as pd
import sklearn.metrics
import matplotlib
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'qt5')
matplotlib.rcParams['figure.figsize'] = (8.0, 6.0)
matplotlib.rcParams['font.size'] = 14
#%% load the results file
results_file = 'final_results/3D_CNN_FP_reduction.csv'
results_df = pd.read_csv(results_file)
labels = results_df['class']
predictions = results_df['probability']
fpr,tpr,_ = sklearn.metrics.roc_curve(labels, predictions)
auc = sklearn.metrics.roc_auc_score(labels, predictions)
plt.plot(fpr, tpr, label=f'AUC={auc:.3f}')
plt.legend()
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('Multi-view 2D false positive reduction (3 views)')
|
import pygame, sys
from pygame.locals import *
pygame.init()
DISPLAYSURF = pygame.display.set_mode((400, 300))
# note: pygame has a single display window, so this second set_mode call reconfigures it to 500x800
MYSURF = pygame.display.set_mode((500, 800))
pygame.display.set_caption('Hello World')
pygame.draw.line(DISPLAYSURF, (255,255,255), (0,0), (50,50))
pygame.draw.rect(DISPLAYSURF, (255,255,255), (0,0,25,25))
pygame.draw.rect(MYSURF, (255,255,255), (25,25,100,100))
pygame.draw.circle(MYSURF, (255,255,255), (150,150), 70)
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
|
"""
# Counting sort
"""
import os
import logging
import itertools
logger = logging.getLogger(__name__)
def counting_sort(arr: list):
"""计数排序"""
counts = [0] * (max(arr) + 1) # 0~max包含max+1个元素
for element in arr:
# 计数,统计每个元素出现的次数, counts的索引代表arr中的元素值
counts[element] += 1
counts = list(itertools.accumulate(counts)) # 统计arr中某个元素应该出现位置, 即统计arr有多少个比某个元素小
new_arr = [0] * len(arr)
for element in reversed(arr):
# 数组反转,是为了保证数组中相同的元素先出现依然排在前面
index = counts[element] - 1 # 获取当前元素在排序数组中位置,-1因为下标索引从0开始
new_arr[index] = element
counts[element] -= 1 # 取出一个元素之后那么比此元素小的数量就要-1了
arr = new_arr
return arr
if __name__ == '__main__':
logging.basicConfig(format="[%(asctime)s %(filename)s: %(lineno)s] %(message)s",
level=logging.INFO,
filemode="a",
filename=None)
logger.info("计数排序结果: {}".format(counting_sort([1, 3, 6, 2, 4, 7, 9, 5, 8])))
|
from typing import Callable, Tuple, Union, Optional, Dict
import numpy as np
from inspect import signature
class Op:
def __init__(self, name: str, description: str, op: Callable, partial_difs: Tuple[Callable]):
assert len(signature(op).parameters) == len(partial_difs)
self._name = name
self._desc = description
self._op = op
self._partials = partial_difs
def __call__(self, *args):
return self._op.__call__(*args)
def __str__(self):
return f"{self._name}: {self._desc}"
@property
def name(self):
return self._name
@property
def description(self):
return self._desc
def partial(self, i):
return self._partials[i]
class OpTable:
def __init__(self, *ops: Op):
self._ops = { op.name: op for op in ops }
def __getitem__(self, op_name: str) -> Op:
return self._ops[op_name]
def __len__(self):
return len(self._ops)
def op_descriptions(self) -> Dict[str, str]:
return {op.name: op.description for op in self._ops.values()}
add = Op(
"add",
"Scalar or vecrtor addition. If one arg is a matrix then the other arg "
"must be a matrix of the same shape",
lambda x, y: x + y,
(
lambda x, y, c: c,
lambda x, y, c: c
)
)
smul = Op(
"smul",
"Scalar multiplication. The first arg must be a scalar, the second arg "
"may be a scalar or a matrix",
lambda x, y: x * y,
(
lambda x, y, c: (c * y).sum(),
lambda x, y, c: c * x * np.ones_like(y),
)
)
mmul = Op(
"mmul",
"Matrix multiplication. Both args must be matrices and have compatible "
"shapes.",
lambda x, y: x @ y,
(
lambda x, y, c: c @ y.T,
lambda x, y, c: x.T @ c,
)
)
relu = Op(
"relu",
"For each elament x in a matrix set x = max(x, 0)",
lambda x: np.maximum(x, 0.0),
(
lambda x, c: np.where(x > 0, 1.0, 0.0),
),
)
loss = Op(
"loss",
"Calculate the RMS loss between a target and observed values",
lambda target, actual: np.sqrt(np.mean(np.square(target - actual))),
(
lambda t, a, c: c * 0.5 * (t - a) * t.size,
lambda t, a, c: c * 0.5 * (a - t) * a.size,
)
)
default_op_table = OpTable(add, smul, mmul, relu, loss)
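# Brief usage sketch (illustration only, assuming just the Op/OpTable classes above):
# evaluate an op from the table and apply its partials to an upstream cotangent c,
# the way a reverse-mode autodiff step would.
example_x = np.array([[1.0, 2.0], [3.0, 4.0]])
example_y = np.array([[5.0, 6.0], [7.0, 8.0]])
example_z = default_op_table["mmul"](example_x, example_y)  # forward pass: x @ y
example_c = np.ones_like(example_z)  # upstream cotangent
example_dx = default_op_table["mmul"].partial(0)(example_x, example_y, example_c)  # c @ y.T
example_dy = default_op_table["mmul"].partial(1)(example_x, example_y, example_c)  # x.T @ c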
|
# coding=utf-8
from myspider.items import QiushiItem
from scrapy.http import Request
from scrapy.spiders import CrawlSpider
class qiushi(CrawlSpider):
name = 'qiushi'
allowed_domains = ['www.qiushibaike.com']
start_urls = ['https://www.qiushibaike.com/8hr/']
# Qiushibaike (Chinese joke-sharing site)
def parse(self, response):
content_left_div = response.xpath('//div[@id="content-left"]')
content_div_list = content_left_div.xpath('./div[contains(@id, "qiushi_tag_")]')
for duan in content_div_list:
item = QiushiItem()
user_page = duan.xpath('div[contains(@class, "author")]/a/@href').extract()
if user_page:
item['user_page'] = response.urljoin(user_page[0])
item['user_gravator'] = duan.xpath('div[contains(@class, "author")]/a/img/@src').extract_first()
item['user_nickname'] = duan.xpath('div[contains(@class, "author")]/a/img/@alt').extract_first()
item['user_age'] = duan.xpath('div[contains(@class, "author")]/div/text()').extract_first()
gender = duan.xpath('div[contains(@class, "author")]/div/@class').extract_first()
if gender:
item['user_gender'] = gender.split(' ')[1]
url = duan.xpath('a[@class="contentHerf"]/@href').extract_first()
item['url'] = response.urljoin(url)
item['duan_content'] = duan.xpath('a[@class="contentHerf"]/div[@class="content"]').extract_first()
tu = duan.xpath('div[@class="thumb"]').extract_first()
if tu:
item['duan_content'] += tu
item['duan_pos'] = duan.xpath('div[@class="stats"]/span[@class="stats-vote"]/i[@class="number"]/text()').extract_first()
yield item
next_page = content_left_div.xpath('./ul[@class="pagination"]/li[last()]/a/@href').extract()
if next_page:
next_url = response.urljoin(next_page[0])
yield Request(url=next_url, callback=self.parse)
|
from django.db import models
from django.contrib.auth.models import User
from stocks.models import Stock
from cryptocurrencies.models import Cryptocurrency
class StockInvestment(models.Model):
investor = models.ForeignKey(User, on_delete=models.CASCADE)
asset = models.ForeignKey(Stock, on_delete=models.CASCADE)
class CryptoInvestment(models.Model):
investor = models.ForeignKey(User, on_delete=models.CASCADE)
asset = models.ForeignKey(Cryptocurrency, on_delete=models.CASCADE)
|
# coding=utf-8
#####################################
# Imports
#####################################
# Python native imports
from PyQt5 import QtCore, QtWidgets, QtGui
import logging
from time import time
import paramiko
#####################################
# Global Variables
#####################################
THREAD_HERTZ = 5
IP = "192.168.1.10"
USER = "nvidia"
PASS = "nvidia"
#####################################
# BashConsole Class Definition
#####################################
class BashConsole(QtCore.QThread):
text_update_ready__signal = QtCore.pyqtSignal(str)
def __init__(self, shared_objects):
super(BashConsole, self).__init__()
# ########## Reference to class init variables ##########
self.shared_objects = shared_objects
self.left_screen = self.shared_objects["screens"]["left_screen"]
self.console_text_edit = self.left_screen.console_line_edit # type: QtWidgets.QTextEdit
self.ssh_console_command_line_edit = self.left_screen.ssh_console_command_line_edit # type:QtWidgets.QLineEdit
self.ssh_scan_for_hosts_button = self.left_screen.ssh_scan_for_hosts_button # type: QtWidgets.QPushButton
self.ssh_host_line_edit = self.left_screen.ssh_host_line_edit # type: QtWidgets.QLineEdit
self.ssh_list_wifi_button = self.left_screen.ssh_list_wifi_button # type: QtWidgets.QPushButton
self.ssh_equipment_login_button = self.left_screen.ssh_equipment_login_button # type: QtWidgets.QPushButton
self.ssh_equipment_logout_button = self.left_screen.ssh_equipment_logout_button # type: QtWidgets.QPushButton
self.ssh_equipment_status_button = self.left_screen.ssh_equipment_status_button # type: QtWidgets.QPushButton
self.ssh_equipment_start_button = self.left_screen.ssh_equipment_start_button # type: QtWidgets.QPushButton
self.ssh_equipment_stop_button = self.left_screen.ssh_equipment_stop_button # type: QtWidgets.QPushButton
self.ssh_ssid_line_edit = self.left_screen.ssh_ssid_line_edit # type:QtWidgets.QLineEdit
self.ssh_connect_ssid_push_button = self.left_screen.ssh_ssid_push_button # type: QtWidgets.QPushButton
self.ssh_disconnect_wifi_button = self.left_screen.ssh_disconnect_wifi_button # type: QtWidgets.QPushButton
# ########## Get the settings instance ##########
self.settings = QtCore.QSettings()
# ########## Get the Pick And Plate instance of the logger ##########
self.logger = logging.getLogger("groundstation")
# ########## Thread Flags ##########
self.run_thread_flag = True
# ########## Class Variables ##########
self.bash_process = None
self.new_widget = None
self.window = None
self.wait_time = 1.0 / THREAD_HERTZ
self.ssh_client = None
self.set_text_contents = ""
self.new_command_text = ""
self.new_command = False
def run(self):
while not self.ssh_client:
try:
self.ssh_client = paramiko.SSHClient()
self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh_client.connect(IP, username=USER, password=PASS, compress=True)
except Exception:
print("No connection")
if not self.run_thread_flag:
return
self.ssh_client = None
self.msleep(1000)
while self.run_thread_flag:
start_time = time()
if self.new_command:
_, ssh_stdout, ssh_stderr = self.ssh_client.exec_command(self.new_command_text)
stdout_read = ssh_stdout.read()
stderr_read = ssh_stderr.read()
output = ""
output += "\n%s@%s:$" % (USER, IP)
output += self.new_command_text + "\n"
output += stdout_read.decode("utf-8") if stdout_read else ""
output += stderr_read.decode("utf-8") if stderr_read else ""
self.set_text_contents += output
self.text_update_ready__signal.emit(self.set_text_contents)
self.new_command = False
time_diff = time() - start_time
self.msleep(max(int((self.wait_time - time_diff) * 1000), 0))  # msleep expects milliseconds
del self.bash_process
def on_text_readout_updated__slot(self):
self.console_text_edit.moveCursor(QtGui.QTextCursor.End)
def on_text_editing_finished__slot(self):
self.new_command_text = self.ssh_console_command_line_edit.text()
self.new_command = True
def on_list_wifi_button_pressed__slot(self):
self.new_command_text = "nmcli dev wifi list"
self.new_command = True
def on_login_button_pressed__slot(self):
current_ip = self.ssh_host_line_edit.text()
self.new_command_text = "python equipment_servicing_interface.py '%s' 'LOGIN MTECH GITRDONE' HELP" % current_ip
print(self.new_command_text)
self.new_command = True
def on_logout_button_pressed__slot(self):
current_ip = self.ssh_host_line_edit.text()
self.new_command_text = "python equipment_servicing_interface.py '%s' LOGOUT" % current_ip
self.new_command = True
def on_status_button_pressed__slot(self):
current_ip = self.ssh_host_line_edit.text()
self.new_command_text = "python equipment_servicing_interface.py '%s' STATUS" % current_ip
self.new_command = True
def on_start_button_pressed__slot(self):
current_ip = self.ssh_host_line_edit.text()
self.new_command_text = "python equipment_servicing_interface.py '%s' START" % current_ip
self.new_command = True
def on_stop_button_pressed__slot(self):
current_ip = self.ssh_host_line_edit.text()
self.new_command_text = "python equipment_servicing_interface.py '%s' STOP" % current_ip
self.new_command = True
def on_ssh_scan_for_hosts_pressed__slot(self):
current_ip = self.ssh_host_line_edit.text()
find_dot = current_ip.rfind(".")
if find_dot > 0:
current_ip = current_ip[:find_dot + 1] + "0"
self.new_command_text = "nmap -sP %s/24 -oG - | awk '/Up$/{print $2}'" % current_ip
self.new_command = True
else:
self.set_text_contents += "IP address for range search not valid. Try again."
self.text_update_ready__signal.emit(self.set_text_contents)
def on_connect_ssid_button_pressed__slot(self):
ssid_text = self.ssh_ssid_line_edit.text()
self.new_command_text = "sudo nmcli dev wifi connect %s" % ssid_text
self.new_command = True
def on_disconnect_ssid_button_pressed__slot(self):
ssid_text = self.ssh_ssid_line_edit.text()
self.new_command_text = "sudo nmcli con down id %s ; sudo nmcli connection delete %s" % (ssid_text, ssid_text)
self.new_command = True
def connect_signals_and_slots(self):
self.text_update_ready__signal.connect(self.console_text_edit.setText)
self.ssh_console_command_line_edit.editingFinished.connect(self.on_text_editing_finished__slot)
self.console_text_edit.textChanged.connect(self.on_text_readout_updated__slot)
self.ssh_scan_for_hosts_button.clicked.connect(self.on_ssh_scan_for_hosts_pressed__slot)
self.ssh_equipment_login_button.clicked.connect(self.on_login_button_pressed__slot)
self.ssh_equipment_logout_button.clicked.connect(self.on_logout_button_pressed__slot)
self.ssh_equipment_status_button.clicked.connect(self.on_status_button_pressed__slot)
self.ssh_equipment_start_button.clicked.connect(self.on_start_button_pressed__slot)
self.ssh_equipment_stop_button.clicked.connect(self.on_stop_button_pressed__slot)
self.ssh_list_wifi_button.clicked.connect(self.on_list_wifi_button_pressed__slot)
self.ssh_connect_ssid_push_button.clicked.connect(self.on_connect_ssid_button_pressed__slot)
self.ssh_disconnect_wifi_button.clicked.connect(self.on_disconnect_ssid_button_pressed__slot)
def setup_signals(self, start_signal, signals_and_slots_signal, kill_signal):
start_signal.connect(self.start)
signals_and_slots_signal.connect(self.connect_signals_and_slots)
kill_signal.connect(self.on_kill_threads_requested__slot)
def on_kill_threads_requested__slot(self):
self.run_thread_flag = False
|
import scapy.all as scapy
def sniff(interface):
scapy.sniff(iface = interface, store = False, prn = process_sniffed_packet, filter = 'port 22')
def process_sniffed_packet(packet):
print(packet.summary())
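# Note: sniffing raw packets generally requires root privileges, and the 'filter'
# argument passed to scapy.sniff uses BPF syntax, so only port-22 traffic reaches the callback.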
sniff("eth0")
|
import argparse
import numpy as np
import tensorflow as tf
import tensorflow_compression as tfc
import os
from scipy import misc
import CNN_recurrent
import motion
import functions
import helper
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--path", default='BasketballPass')
parser.add_argument("--frame", type=int, default=100)
parser.add_argument("--f_P", type=int, default=6)
parser.add_argument("--b_P", type=int, default=6)
parser.add_argument("--mode", default='PSNR', choices=['PSNR', 'MS-SSIM'])
parser.add_argument("--python_path", default='path_to_python')
parser.add_argument("--CA_model_path", default='path_to_CA_EntropyModel_Test')
parser.add_argument("--l", type=int, default=1024, choices=[8, 16, 32, 64, 256, 512, 1024, 2048])
parser.add_argument("--N", type=int, default=128, choices=[128])
parser.add_argument("--M", type=int, default=128, choices=[128])
args = parser.parse_args()
# settings
_, GOP_size, GOP_num, \
path, path_com, path_bin, path_lat = helper.configure_decoder(args)
# decode I frames
for g in range(GOP_num + 1):
I_index = g * GOP_size + 1
if I_index <= args.frame:
helper.decode_I(args, I_index, path_com, path_bin)
F1 = misc.imread(path_com + 'f001.png')
Height = np.size(F1, 0)
Width = np.size(F1, 1)
# placeholder
string_mv_tensor = tf.placeholder(tf.string, [])
string_res_tensor = tf.placeholder(tf.string, [])
# decode motion latent
entropy_bottleneck = tfc.EntropyBottleneck(dtype=tf.float32, name='entropy_bottleneck')
motion_latent_hat = entropy_bottleneck.decompress(
tf.expand_dims(string_mv_tensor, 0), [Height//16, Width//16, args.M], channels=args.M)
# decode residual latent
entropy_bottleneck_2 = tfc.EntropyBottleneck(dtype=tf.float32, name='entropy_bottleneck_1_1')
residual_latent_hat = entropy_bottleneck_2.decompress(
tf.expand_dims(string_res_tensor, 0), [Height//16, Width//16, args.M], channels=args.M)
# load model
saver = tf.train.Saver(max_to_keep=None)
model_path = './model/RAE_' + args.mode + '_' + str(args.l)
saver.restore(sess, save_path=model_path + '/model.ckpt')
for g in range(GOP_num + 1):
I_index = g * GOP_size + 1
if I_index <= args.frame:
# if there exists forward P frame(s), I_index + 1 is decoded by the bottleneck
if args.f_P > 0 and I_index + 1 <= args.frame:
with open(path_bin + 'f' + str(I_index + 1).zfill(3) + '.bin', "rb") as ff:
mv_len = np.frombuffer(ff.read(2), dtype=np.uint16)
string_mv = ff.read(np.int(mv_len))
string_res = ff.read()
latent_mv, latent_res = sess.run([motion_latent_hat, residual_latent_hat], feed_dict={
string_mv_tensor: string_mv,
string_res_tensor: string_res})
np.save(path_lat + '/f' + str(I_index + 1).zfill(3) + '_mv.npy', latent_mv)
np.save(path_lat + '/f' + str(I_index + 1).zfill(3) + '_res.npy', latent_res)
print('Decoded latents frame', I_index + 1)
# if there exists backward P frame(s), I_index - 1 is decoded by the bottleneck
if args.b_P > 0 and I_index - 1 >= 1:
with open(path_bin + 'f' + str(I_index - 1).zfill(3) + '.bin', "rb") as ff:
mv_len = np.frombuffer(ff.read(2), dtype=np.uint16)
string_mv = ff.read(np.int(mv_len))
string_res = ff.read()
latent_mv, latent_res = sess.run([motion_latent_hat, residual_latent_hat], feed_dict={
string_mv_tensor: string_mv,
string_res_tensor: string_res})
np.save(path_lat + '/f' + str(I_index - 1).zfill(3) + '_mv.npy', latent_mv)
np.save(path_lat + '/f' + str(I_index - 1).zfill(3) + '_res.npy', latent_res)
print('Decoded latents frame', I_index - 1)
|
from celery import shared_task
@shared_task
def debug_task(msg: str) -> None:
print(msg)
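# Typical invocation from application code (assumes a configured Celery app and broker):
#   debug_task.delay("hello from celery")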
|
import random
from PiratesTreasure.ServerPackage import Cell
from PiratesTreasure.ServerPackage import Player
class World:
def __init__(self,players = []):
self.M_rows = 10
self.N_columns = (10 + len(players))
#self.listOfPlayersOnBoard = self.makeListOfPlayersOnTheBoard(players)
self.CreateWorld(self.M_rows, self.N_columns)
def MovePlayer(self, Player_id, NewI, NewJ, players=[]):
for player in players:
if player.PlayerID == Player_id:
player.i = NewI
player.j = NewJ
"""""
def makeListOfPlayersOnTheBoard(self,players):
listPlayers = []
for c in players:
listPlayers[c].append(PlayersOnTheBoard(c)) # i add a list of players on the board to know their location
return listPlayers
"""
def CreateWorld(self, rows, column):
self.cell_board = [[None] * column for _ in range(rows)]
self.FillBoard(self.cell_board, rows, column)
def FillBoard(self, cell_board, rows, columns):
for y in range(rows):
for x in range(columns):
cell_board[y][x] = Cell(0)  # fill the whole board with sea
island_cells = (rows * columns) // 3  # one third of the board becomes island
while island_cells > 0:
cell_board[random.randint(0, rows - 1)][random.randint(0, columns - 1)] = Cell(1)
island_cells -= 1
|
from django.contrib import admin
from . models import Produto, Pedido, InformacaoPedido
# Register your models here.
admin.site.register(Produto)
admin.site.register(Pedido)
admin.site.register(InformacaoPedido)
|
def myFunc():
print("Inside mainFn.py and myFunc")
print("Inside mainFn.py")
if __name__=="__main__":
print("Inside mainFn.py, main is called")
else:
print("Inside mainFn.py, main is not called")
|
from django.apps import AppConfig
class MantenimientotablaConfig(AppConfig):
name = 'mantenimientoTabla'
|
dicto = dict()
|
import os
import time
from ..utils import PublicKey
import toml
server_key_filename = "~/_mypaas/authorized_keys"
config_filename = "~/_mypaas/config.toml"
last_key_read = 0
_authorized_keys = {}
def get_public_key(fingerprint):
"""Get the public key for the given fingerprint"""
# Read the keys from the filesystem at most once each few seconds,
# to prevent attacks on the auth service.
global last_key_read
if last_key_read < time.time() - 5:
last_key_read = time.time()
_authorized_keys.clear()
_authorized_keys.update(get_authorized_keys(server_key_filename))
return _authorized_keys.get(fingerprint, None)
def get_authorized_keys(filename):
"""Read the authorized public keys from the file system.
Returns a dict of PublicKey objects.
"""
if filename.startswith("~"):
filename = os.path.expanduser(filename)
if not os.path.isfile(filename):
return {}
with open(filename, "rb") as f:
text = f.read().decode(errors="ignore")
keys = {}
for line in text.split("\n"):
line = line.strip()
if not line or line.startswith("#"):
continue
try:
key = PublicKey.from_str(line)
except Exception:
print(f"Does not look like a public key: {line}")
else:
keys[key.get_id()] = key
return keys
def load_config():
"""Load config from disk."""
filename = os.path.expanduser(config_filename)
try:
with open(filename, "rb") as f:
return toml.loads(f.read().decode())
except Exception:
return {"init": {}, "env": {}}
def save_config(config):
"""Save config to disk."""
filename = os.path.expanduser(config_filename)
with open(filename, "wb") as f:
f.write(toml.dumps(config).encode())
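# Usage sketch (hypothetical caller; "EXAMPLE_VAR" is an illustrative key, not part of mypaas):
#   config = load_config()
#   config.setdefault("env", {})["EXAMPLE_VAR"] = "example-value"
#   save_config(config)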
|
import timeit
start = timeit.default_timer()
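# Homophonic substitution demo: each letter maps to a pool of numbers, and repeated
# occurrences of the same letter cycle through that pool before wrapping around.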
A=[9,12,33,47,53,67,78,92]
B=[48,81]
C=[13,41,62]
D=[1,3,45,79]
E=[14,16,24,44,46,55,57,64,74,82,87,98]
F=[10,31]
G=[6,25]
H=[23,39,50,56,65,68]
I=[32,70,73,83,88,93]
J=[15]
K=[4]
L=[26,37,51,84]
M=[22,27]
N=[18,58,59,66,71,91]
O=[0,5,7,54,72,90,99]
P=[38,95]
Q=[94]
R=[29,35,40,42,77,80]
S=[11,19,36,76,86,96]
T=[17,20,30,43,49,69,75,85,97]
U=[8,61,63]
V=[34]
W=[60,89]
X=[28]
Y=[21,52]
Z=[2]
arrsp=['#']
count2=0
count1=0
count3=0
count4=0
count5=0
count6=0
count7=0
count8=0
count9=0
count10=0
count11=0
count12=0
count13=0
count14=0
count15=0
count16=0
count17=0
count18=0
count19=0
count20=0
count21=0
count22=0
count23=0
count24=0
count25=0
count26=0
count27=0
n=input("Enter a text:")
n=n.upper()
l=list(n)
l1=[]
#encryption
for i in range(len(l)):
if l[i]== 'A':
temp1=A[count1]
l1.append(temp1)
count1=count1+1
if count1>=8:
count1=0
elif l[i]== "B":
temp1=B[count2]
l1.append(temp1)
count2=count2+1
if count2>=2:
count2=0
elif l[i]== "C":
temp1=C[count3]
l1.append(temp1)
count3=count3+1
if count3>=3:
count3=0
elif l[i]== "D":
temp1=D[count4]
l1.append(temp1)
count4=count4+1
if count4>=4:
count4=0
elif l[i]== "E":
temp1=E[count5]
l1.append(temp1)
count5=count5+1
if count5>=12:
count5=0
elif l[i]== "F":
temp1=F[count6]
l1.append(temp1)
count6=count6+1
if count6>=2:
count6=0
elif l[i]== "G":
temp1=G[count7]
l1.append(temp1)
count7=count7+1
if count7>=2:
count7=0
elif l[i]== "H":
temp1=H[count8]
l1.append(temp1)
count8=count8+1
if count8>=6:
count8=0
elif l[i]== "I":
temp1=I[count9]
l1.append(temp1)
count9=count9+1
if count9>=6:
count9=0
elif l[i]== "J":
temp1=J[count10]
l1.append(temp1)
count10=count10+1
if count10>=1:
count10=0
elif l[i]== "K":
temp1=K[count11]
l1.append(temp1)
count11=count11+1
if count11>=1:
count11=0
elif l[i]== "L":
temp1=L[count12]
l1.append(temp1)
count12=count12+1
if count12>=4:
count12=0
elif l[i]== "M":
temp1=M[count13]
l1.append(temp1)
count13=count13+1
if count13>=2:
count13=0
elif l[i]== "N":
temp1=N[count14]
l1.append(temp1)
count14=count14+1
if count14>=6:
count14=0
elif l[i]== "O":
temp1=O[count15]
l1.append(temp1)
count15=count15+1
if count15>=7:
count15=0
elif l[i]== "P":
temp1=P[count16]
l1.append(temp1)
count16=count16+1
if count16>=2:
count16=0
elif l[i]== "Q":
temp1=Q[count17]
l1.append(temp1)
count17=count17+1
if count17>=1:
count17=0
elif l[i]== "R":
temp1=R[count18]
l1.append(temp1)
count18=count18+1
if count18>=6:
count18=0
elif l[i]== "S":
temp1=S[count19]
l1.append(temp1)
count19=count19+1
if count19>=6:
count19=0
elif l[i]== "T":
temp1=T[count20]
l1.append(temp1)
count20=count20+1
if count20>=9:
count20=0
elif l[i]== "U":
temp1=U[count21]
l1.append(temp1)
count21=count21+1
if count21>=3:
count21=0
elif l[i]== "V":
temp1=V[count22]
l1.append(temp1)
count22=count22+1
if count22>=1:
count22=0
elif l[i]== "W":
temp1=W[count23]
l1.append(temp1)
count23=count23+1
if count23>=2:
count23=0
elif l[i]== "X":
temp1=X[count24]
l1.append(temp1)
count24=count24+1
if count24>=1:
count24=0
elif l[i]== "Y":
temp1=Y[count25]
l1.append(temp1)
count25=count25+1
if count25>=2:
count25=0
elif l[i]== "Z":
temp1=Z[count26]
l1.append(temp1)
count26=count26+1
if count26>=1:
count26=0
elif l[i]== " ":
temp1=arrsp[count27]
l1.append(temp1)
count27=count27+1
if count27>=1:
count27=0
print("ENCRYPTION:")
print(l1)
j=[]
for i in range(len(l1)):
if (l1[i] in A):
j.append('a')
elif (l1[i] in B):
j.append('b')
elif (l1[i] in C):
j.append('c')
elif (l1[i] in D):
j.append('d')
elif (l1[i] in E):
j.append('e')
elif (l1[i] in F):
j.append('f')
elif (l1[i] in G):
j.append('g')
elif (l1[i] in H):
j.append('h')
elif (l1[i] in I):
j.append('i')
elif (l1[i] in J):
j.append('j')
elif (l1[i] in K):
j.append('k')
elif (l1[i] in L):
j.append('l')
elif (l1[i] in M):
j.append('m')
elif (l1[i] in N):
j.append('n')
elif (l1[i] in O):
j.append('o')
elif (l1[i] in P):
j.append('p')
elif (l1[i] in Q):
j.append('q')
elif (l1[i] in R):
j.append('r')
elif (l1[i] in S):
j.append('s')
elif (l1[i] in T):
j.append('t')
elif (l1[i] in U):
j.append('u')
elif (l1[i] in V):
j.append('v')
elif (l1[i] in W):
j.append('w')
elif (l1[i] in X):
j.append('x')
elif (l1[i] in Y):
j.append('y')
elif (l1[i] in Z):
j.append('z')
elif (l1[i] in arrsp):
j.append(' ')
print("DECRYPTION:")
print(j)
stop = timeit.default_timer()
print (stop - start )
|
from django.shortcuts import render, get_object_or_404
from .serializers import SnsSerializer, TodoSerializer, SnsCreateSerializer, CommentSerializer
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from django.http import JsonResponse, HttpResponse
from rest_framework.permissions import IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from .models import SnsModel, CommentModel
from django.contrib.auth import get_user_model
from accounts.models import User
from accounts.serializers import ImageSerializer
# Create your views here.
@api_view(['POST'])
@permission_classes([IsAuthenticated])
@authentication_classes([JSONWebTokenAuthentication]) # authenticate and authorize using JWT
def sns_create(request):
serializer = SnsSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return HttpResponse(status=400)
@api_view(['POST'])
@permission_classes([IsAuthenticated,])
@authentication_classes([JSONWebTokenAuthentication]) # authenticate and authorize using JWT
def todo_create(request):
serializer = TodoSerializer(data=request.POST)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return HttpResponse(status=400)
@api_view(['POST'])
def snscreate(request):
aaa = request.FILES['file']
title = request.POST['title']
user = request.POST['user']
username = request.POST['user']
user = get_object_or_404(get_user_model(), username=user)
nickname= user.nickname
img = SnsModel.objects.create(image=aaa, title=title, user=user, username=username, nickname=nickname)
user.sns_count += 1
user.save()
return JsonResponse({"asd":"asd"})
def snslist(request):
sns = SnsModel.objects.all().order_by('-id')
if request.method == 'GET':
serializer = SnsSerializer(sns, many=True)
return JsonResponse(serializer.data, safe=False)
@api_view(['POST'])
def commentcreate(request,id):
sns = get_object_or_404(SnsModel, id=id)
content = request.POST['content']
user = request.POST['user']
username = request.POST['user']
user = get_object_or_404(get_user_model(), username=user)
img = CommentModel.objects.create(content=content, sns=sns, create_user=user, username=username)
user.comment_count += 1
user.save()
serializer = CommentSerializer(instance=img)
return JsonResponse(serializer.data)
@api_view(['GET','DELETE'])
def comment(request,id):
if request.method == 'GET':
sns = get_object_or_404(SnsModel, id=id)
comments = sns.commentmodel_set.all()
serializer = CommentSerializer(comments, many=True)
return JsonResponse(serializer.data, safe=False)
elif request.method == 'DELETE':
comment = get_object_or_404(CommentModel, id=id)
user = get_object_or_404(get_user_model(), username=comment.username)
user.comment_count -= 1
user.save()
serializer = CommentSerializer(instance=comment)
comment.delete()
return JsonResponse(serializer.data)
@api_view(['DELETE'])
def snsdelete(request, id):
sns = get_object_or_404(SnsModel, id=id)
user = get_object_or_404(get_user_model(), nickname=sns.nickname)
user.sns_count += -1
user.save()
serializer = SnsSerializer(instance=sns)
sns.delete()
return JsonResponse(serializer.data)
def getranker(request):
snsrankers = User.objects.all().order_by('-sns_count')[:5]
sns_rankers = ['',0,'','',0,'','',0,'','',0,'','',0,'',]
idx = 0
for ranker in snsrankers:
sns_rankers[idx] = ranker.nickname
sns_rankers[idx+1] = ranker.sns_count
origin = ranker.username
origin = origin[0:2] + '****'
sns_rankers[idx+2] = origin
idx += 3
commentrankers = User.objects.all().order_by('-comment_count')[:5]
comment_rankers = ['',0,'','',0,'','',0,'','',0,'','',0,'',]
idx = 0
for ranker in commentrankers:
comment_rankers[idx] = ranker.nickname
comment_rankers[idx+1] = ranker.comment_count
origin = ranker.username
origin = origin[0:2] + '****'
comment_rankers[idx+2] = origin
idx += 3
rankerdata = {
'sns1_name':sns_rankers[0],'sns1_count':sns_rankers[1],'sns1_id':sns_rankers[2],
'sns2_name':sns_rankers[3],'sns2_count':sns_rankers[4],'sns2_id':sns_rankers[5],
'sns3_name':sns_rankers[6],'sns3_count':sns_rankers[7],'sns3_id':sns_rankers[8],
'sns4_name':sns_rankers[9],'sns4_count':sns_rankers[10],'sns4_id':sns_rankers[11],
'sns5_name':sns_rankers[12],'sns5_count':sns_rankers[13],'sns5_id':sns_rankers[14],
'comment1_name':comment_rankers[0],'comment1_count':comment_rankers[1],'comment1_id':comment_rankers[2],
'comment2_name':comment_rankers[3],'comment2_count':comment_rankers[4],'comment2_id':comment_rankers[5],
'comment3_name':comment_rankers[6],'comment3_count':comment_rankers[7],'comment3_id':comment_rankers[8],
'comment4_name':comment_rankers[9],'comment4_count':comment_rankers[10],'comment4_id':comment_rankers[11],
'comment5_name':comment_rankers[12],'comment5_count':comment_rankers[13],'comment5_id':comment_rankers[14],
}
return JsonResponse(rankerdata)
def getuser(request,id):
sns = get_object_or_404(SnsModel, id=id)
user = get_object_or_404(get_user_model(), nickname=sns.nickname)
serializer = ImageSerializer(user)
return JsonResponse(serializer.data)
|
#coding:utf-8
from PyQt5.QtCore import *
import FLUS_Utils
import numpy as np
import time, os
import xml.etree.cElementTree as ET
from sklearn.externals import joblib
from gdalconst import *
class NNTrainingThread(QThread):
"""
采样 + NN 训练 + 预测的外部线程,与界面主线程不同,可以引入防止界面假死
"""
# 结束信号
finished = pyqtSignal()
# 发送信号,传给主界面,用于显示在状态栏
sendMsg = pyqtSignal(str)
# 发送信号,传给主界面,用于弹出对话框
sendMsg2Box = pyqtSignal(str, str)
def __init__(self, parent):
QThread.__init__(self, parent)
def setParam(self, wd, xmlfn):
"""
Set the thread parameters.
:param wd: working directory, used to locate the xml file and write the development probability file
:param xmlfn: xml file name
:return:
"""
self.sWorkDirectory = wd
self.xmlfn = xmlfn
def parseXML(self):
"""
Parse the xml config.
:return: None if fail, 1 if success
"""
xmlfn = self.sWorkDirectory + "/" + self.xmlfn
if (not os.path.exists(xmlfn)):
# print("ann_config.xml does not exist")
self.sendMsg2Box.emit("Please set ANN-based Probability\nEstimation Parameters First!", "error")
return None
else:
tree = ET.parse(xmlfn)
root = tree.getroot()
# = = = = parse the xml file = = = = = = = = = = =
self.sLUFileName = root.find("FilePaths").find("LandUseFilePath").text # land-use data file name
self.sPgFileName = root.find("FilePaths").find("ProbabilityFilePath").text # output development-probability file name
self.vsFeatFileName = []
for feat in root.find("FilePaths").find("FeatureFilePaths").findall("FeatureFilePath"):
self.vsFeatFileName.append(feat.text) # spatial driver file names; a list because there may be several images
# read the sampling method and sampling ratio from the xml
self.sSampleMethod = root.find("Sampling").find("Method").text
self.dSampleRate = float(root.find("Sampling").find("Ratio").text)
# read the neural-network training parameters from the xml
self.nHhiddenlayersize = int(root.find("NeuralNetwork").find("HiddenLayer").text)
self.nMaxIter = int(root.find("NeuralNetwork").find("MaximumIteration").text)
self.dAlpha = float(root.find("NeuralNetwork").find("Alpha").text)
self.dLearningRate = float(root.find("NeuralNetwork").find("LearningRate").text)
return 1
def sampling(self):
"""
Collect training samples.
:return:
"""
self.poLU, msg = FLUS_Utils.loadImage(self.sLUFileName)
self.dLUNodata = self.poLU.mdInvalidValue
# load the spatial driver datasets and record their nodata values
self.pppFeats = np.zeros((len(self.vsFeatFileName), self.poLU.mnRows, self.poLU.mnCols))
self.vdFeatNodata = []
for i in range(len(self.vsFeatFileName)):
flusimg, msg = FLUS_Utils.loadImage(self.vsFeatFileName[i])
self.vdFeatNodata.append(flusimg.mdInvalidValue)
self.pppFeats[i] = flusimg.mpArray[0]
# draw the samples
self.sendMsg.emit("Sampling... [ %s / %.3f ]" % (self.sSampleMethod, self.dSampleRate))
if (self.sSampleMethod == "Uniform"):
self.ppSamples = FLUS_Utils.uniformSample2table(self.pppFeats, self.poLU.mpArray, self.dSampleRate, self.dLUNodata , self.vdFeatNodata)
else:
self.ppSamples = FLUS_Utils.randomSample2table(self.pppFeats, self.poLU.mpArray, self.dSampleRate, self.dLUNodata, self.vdFeatNodata)
self.sendMsg.emit("Sample Success! Valid Sample Number=%d" % (self.ppSamples.shape[0]))
# save the samples to a file named like training_sample_20171220121212.csv
self.sendMsg.emit("Save Sample File...")
timestr = time.strftime('%Y%m%d%H%M%S')
if(not os.path.exists(self.sWorkDirectory+"/training_samples")):
os.mkdir(self.sWorkDirectory+"/training_samples")
np.savetxt(self.sWorkDirectory + "/training_samples/training_sample_" + timestr + ".csv", self.ppSamples, delimiter=',')
self.sendMsg.emit("Save Sample File Success!")
def trainNN(self):
"""
Train the neural-network model.
:return:
"""
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
# split the samples into X, Y for training
X = self.ppSamples[:, :-1]
Y = self.ppSamples[:, -1]
del self.ppSamples
# build the neural network and train it
self.clf = MLPClassifier(hidden_layer_sizes=(self.nHhiddenlayersize,), max_iter=self.nMaxIter, alpha=self.dAlpha,
solver='sgd', tol=1e-9,
learning_rate_init=self.dLearningRate)
# self.clf = RandomForestClassifier(n_estimators= 10)
self.sendMsg.emit("Training...")
self.clf.fit(X, Y)
score = self.clf.score(X, Y)
del X, Y
joblib.dump(self.clf, self.sWorkDirectory + "/ANNModel.pkl")
#self.sendMsg.emit("Training Success! Score = %.2f%%"%(score*100))
self.sendMsg2Box.emit("Training Success! Score = %.2f%%"%(score*100), "info")
def predictPg(self):
"""
Predict the development probability.
:return:
"""
self.sendMsg.emit("Predicting...")
# first get the number of land-use classes
validVals = np.unique(self.poLU.mpArray)
classNum = validVals.shape[0]
if (self.dLUNodata in validVals):
classNum = validVals.shape[0] - 1
probability = np.zeros((classNum, self.pppFeats.shape[1], self.pppFeats.shape[2]))
# this loop could be parallelized
for i in range(self.pppFeats.shape[1]):
for j in range(self.pppFeats.shape[2]):
arr = np.zeros((1, self.pppFeats.shape[0]))
# if the cell is nodata, skip it and assign nodata to its probability
nodataflag = abs(self.poLU.mpArray[0, i, j] - self.dLUNodata) > 1e-6
for k in range(self.pppFeats.shape[0]):
arr[0, k] = self.pppFeats[k, i, j]
nodataflag = nodataflag and abs(arr[0, k] - self.vdFeatNodata[k]) > 1e-6
if (nodataflag == False):
for k in range(classNum):
probability[k, i, j] = self.vdFeatNodata[0]
continue
# predict the class probabilities and store them in the matrix
prob = self.clf.predict_proba(arr)
for k in range(classNum):
probability[k, i, j] = prob[0, k]
#self.progressBar.setValue(i / self.pppFeats.shape[1])
if((i+1)%10 == 0 or (i+1)==self.pppFeats.shape[1]):
self.sendMsg.emit("Predicting...%d/%d"%(i+1, self.pppFeats.shape[1]))
# save the output file
self.sendMsg.emit("Saving Probability File...")
if (FLUS_Utils.array2Tif(probability, self.sPgFileName, GDT_Float32, self.vdFeatNodata[0], self.poLU.mpoDataset.GetGeoTransform(),
self.poLU.mpoDataset.GetProjection()) == None):
self.sendMsg2Box.emit("File Exists!\nPlease Delete it first!", "error")
self.finished.emit()
# free memory
del self.poLU, self.pppFeats, probability
# override the run method
def run(self):
#self.finished.emit()
if(self.parseXML()!= None):
self.sampling()
self.trainNN()
self.predictPg()
self.finished.emit() # careful: this must stay finished.emit(), not finished().emit() - that mistake took a long time to debug
# dynamic typing is convenient up front, but it makes later refactoring painful
def __del__(self):
self.clf = None
class CAThread(QThread):
"""
Cellular automaton (CA) simulation thread
"""
finished = pyqtSignal()
sendMsg = pyqtSignal(str)
sendMsg2Box = pyqtSignal(str, str)
def __init__(self, parent):
QThread.__init__(self, parent)
self.sWorkDirectory = None
self.xmlfn = None
self.nRows = 1 # number of rows
self.nCols = 1 # number of columns
self.nMaxIter = 300 # maximum number of iterations
self.nWinSize = 7 # neighborhood window size
self.dAccelerate = 0.1 # acceleration rate
self.nLUClassNum = 2 # number of land-use classes
self.pCurrentAmount = np.zeros((self.nLUClassNum)) # current pixel count of each class, 1d array
self.pTargetAmount = np.zeros((self.nLUClassNum)) # target pixel count of each class, 1d array
self.ppCostMatrix = np.ones((self.nLUClassNum, self.nLUClassNum)) # conversion cost matrix between classes, 2d array
self.pNeighborWeight = np.zeros((self.nLUClassNum)) # neighborhood weight of each class, 1d array
self.pppProbability = np.zeros((self.nLUClassNum, self.nRows, self.nCols)) # development suitability from the trained network, 3d array
self.ppRestrictive = np.zeros((self.nRows, self.nCols)) # restriction factors
self.ppCurrentLU = np.zeros((self.nRows, self.nCols)) # current land-use map, 2d array
self.ppSimulateLU = np.zeros((self.nRows, self.nCols)) # simulated land-use map, 2d array
def setParam(self, wd, xmlfn):
"""
Set the thread parameters.
:param wd: working directory, used to locate the xml file and write the development probability file
:param xmlfn: xml file name
:return:
"""
self.sWorkDirectory = wd
self.xmlfn = xmlfn
def parseXML(self):
"""
Parse the simulation parameters from the xml.
:return:
"""
xmlfn = self.sWorkDirectory + "/" + self.xmlfn
if (not os.path.exists(xmlfn)):
# print("ann_config.xml does not exist")
self.sendMsg2Box.emit("Please set ANN-based Probability\nEstimation Parameters First!", "error")
return None
else:
tree = ET.parse(xmlfn)
root = tree.getroot()
# = = = = parse the xml file = = = = = = = = = = =
self.sCurLUFileName = root.find("FilePaths").find("InitialLandUseFilePath").text
self.sSimLUFileName = root.find("FilePaths").find("SimulationLandUseFilePath").text
self.sDevelopProbFileName = root.find("FilePaths").find("ProbabilityFilePath").text
self.sRestrictiveFileName = root.find("FilePaths").find("RestrictiveFilePath").text
self.nWinSize = int(root.find("SimulationParameters").find("NeighborSize").text)
if(self.nWinSize % 2 == 0):
self.nWinSize += 1
self.nMaxIter = int(root.find("SimulationParameters").find("MaxIterationNum").text)
self.dAccelerate = float(root.find("SimulationParameters").find("Accelerate").text)
self.nLUClassNum = int(root.find("SimulationParameters").find("ClassNum").text)
# inertia coefficients
self.pInertia = np.ones((self.nLUClassNum))
# read the neighborhood weight vector
self.pNeighborWeight = np.zeros((self.nLUClassNum))
weightNodes = root.find("SimulationParameters").find("NeighborWeights").findall("ClassWeight")
for i in range(len(weightNodes)):
self.pNeighborWeight[i] = float(weightNodes[i].text)
# read the current pixel count of each class
self.pCurrentAmount = np.zeros((self.nLUClassNum))
currentNodes = root.find("SimulationParameters").find("CurrentAmounts").findall("ClassAmount")
for i in range(len(currentNodes)):
self.pCurrentAmount[i] = int((currentNodes[i].text))
# read the target pixel count of each class
self.pTargetAmount = np.zeros((self.nLUClassNum))
targetNodes = root.find("SimulationParameters").find("TargetAmounts").findall("ClassAmount")
for i in range(len(targetNodes)):
self.pTargetAmount[i] = int((targetNodes[i].text))
# development demand (target minus current)
self.pDemandAmount = self.pTargetAmount - self.pCurrentAmount
# read the conversion cost matrix between classes
self.ppCostMatrix = np.zeros((self.nLUClassNum, self.nLUClassNum))
costNodes = root.find("SimulationParameters").find("CostMatrix").findall("CostRow")
for i in range(len(targetNodes)):
costlist = (costNodes[i].text).split(",")
for j in range(len(costlist)):
self.ppCostMatrix[i,j] = float(costlist[j])
self.sendMsg.emit("parse xml success!")
def init(self):
"""
Load the input files and allocate memory: initial and simulated land-use data, development probability, and restriction factors.
:return:
"""
# load the images
luimg, msg = FLUS_Utils.loadImage(self.sCurLUFileName)
self.nRows = luimg.mnRows
self.nCols = luimg.mnCols
self.pGeoTransform = luimg.mpoDataset.GetGeoTransform()
self.sProjectionRef = luimg.mpoDataset.GetProjection()
self.ppCurrentLU = luimg.mpArray[0]
self.ppSimulateLU = luimg.mpArray[0]
self.dLUNodata = luimg.mdInvalidValue
self.gDataType = luimg.mgDataType
del luimg
probimg,msg = FLUS_Utils.loadImage(self.sDevelopProbFileName)
self.pppProbability = probimg.mpArray
self.dProbNodata = probimg.mdInvalidValue
del probimg
restrictiveimg, msg = FLUS_Utils.loadImage(self.sRestrictiveFileName)
self.ppRestrictive = restrictiveimg.mpArray[0]
self.dRestrictiveNodata = restrictiveimg.mdInvalidValue
del restrictiveimg
self.sendMsg2Box.emit("init success!", "info")
def calNeighbor(self, r, c):
"""
Compute the neighborhood effect at position (r, c).
:param r: row
:param c: column
:return: a 1d numpy array of n floats, where n is the number of land-use classes
"""
# get half size
nHalfSize = (self.nWinSize - 1) / 2
nLeft = int(c - nHalfSize)
nRight = int(c + nHalfSize)
nBottom = int(r + nHalfSize)
nTop = int(r - nHalfSize)
if(not c > nHalfSize):
nLeft = 0
if(not c < self.nCols-nHalfSize):
nRight = self.nCols-1
if(not r > nHalfSize):
nTop = 0
if(not r < self.nRows-nHalfSize):
nBottom = self.nRows - 1
pNeighborEffect = np.zeros(self.nLUClassNum)
dNeighborTotal = 0.0
for i in range(nTop, nBottom+1):
for j in range(nLeft, nRight+1):
val = int(self.ppCurrentLU[i,j])
if (val > self.nLUClassNum or val < 1 or i==j):
continue
pNeighborEffect[val-1] += self.pNeighborWeight[val-1]*1
dNeighborTotal += self.pNeighborWeight[val-1]
if(dNeighborTotal > 1):
pNeighborEffect /= (dNeighborTotal)
else:
pNeighborEffect = np.zeros(self.nLUClassNum)
return pNeighborEffect
def iteration(self):
nIter = 0
bFlag = False
while not bFlag and nIter < self.nMaxIter:
# bFlag = True
# for current, target in zip(self.pCurrentAmount, self.pTargetAmount):
# bFlag = bFlag and (current < target)
# # stop once every class has reached its target pixel count
# if(not bFlag):
# break
print ("iter %d current demand: "%(nIter+1), self.pDemandAmount, end = '')
self.sendMsg.emit("iteration %d/%d..."%(nIter+1, self.nMaxIter))
samplenum = 0
# randomly sample about 1% of the cells in each iteration
while samplenum < self.nRows*self.nCols*0.01:
r = np.random.randint(0, self.nRows)
c = np.random.randint(0, self.nCols)
val = int(self.ppSimulateLU[r,c])
pc = self.ppRestrictive[r, c] # restriction factor
# skip nodata cells
if(abs(val - self.dLUNodata) < 1e-6 or abs(self.pppProbability[0,r,c] - self.dProbNodata) < 1e-6 or abs(pc - self.dRestrictiveNodata) < 1e-6):
continue
# skip cells where development is restricted
if (pc < 1e-6):
continue
pg = self.pppProbability[:, r, c]
pn = self.calNeighbor(r, c)
#pr = np.random.uniform()
pd = pg*(0.5+0.5*pn)*(0.1+pc*0.9)*self.pInertia*self.ppCostMatrix[val-1]
if(pd.sum() < 1e-6):
continue
ind, p = FLUS_Utils.rouletteWheelSelection(pd)
if(p < 0.1):
continue
if((self.pDemandAmount[ind] > 0 and self.pCurrentAmount[ind] < self.pTargetAmount[ind]) ):
# update the current pixel counts
self.ppSimulateLU[r,c] = ind + 1
self.pCurrentAmount[ind] += 1
self.pCurrentAmount[val-1] -= 1
# print (ind, val)
samplenum += 1
# compute the remaining development demand and adjust the inertia values
pDemand_t_1 = self.pTargetAmount - self.pCurrentAmount
for i in range(self.pInertia.shape[0]):
if(abs(pDemand_t_1[i]) < abs(self.pDemandAmount[i])):
continue
if(self.pDemandAmount[i] < 0 and pDemand_t_1[i] < 0):
self.pInertia[i] *= (self.pDemandAmount[i]/pDemand_t_1[i])
elif(self.pDemandAmount[i] > 0 and pDemand_t_1[i] > 0):
self.pInertia[i] *= (pDemand_t_1[i]/self.pDemandAmount[i])
# update the development demand
self.pDemandAmount = pDemand_t_1
print ("\t\tinertia: ", self.pInertia)
nIter += 1
bFlag = True
for current, target in zip(self.pCurrentAmount, self.pTargetAmount):
bFlag = bFlag and abs(current - target) < 1
self.sendMsg.emit("save file...")
if (FLUS_Utils.array2Tif(self.ppSimulateLU.reshape(1, self.nRows, self.nCols), self.sSimLUFileName, self.gDataType, self.dLUNodata,
self.pGeoTransform,
self.sProjectionRef) == None):
self.sendMsg2Box.emit("File Exists!\nPlease Delete it first!", "error")
self.finished.emit()
def run(self):
"""
Run the thread.
:return:
"""
self.parseXML()
self.init()
self.iteration()
self.finished.emit()
pass
|
#!/home/pi/.pyenv/shims/python
# E-book project
# Provides e-book downloads
# Provides a send-to-Kindle push service
# Provides online EPUB reading
import aiopg
import os.path
import psycopg2
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.locks
import tornado.options
import tornado.web
import unicodedata
# define
from tornado.options import define, options
# Request Handler
import MubenMain
import MubenAdmin
import MubenAuth
define("port", default=8888, help="default listen on port 8888", type=int)
define("db_host", default=db_host, help="blog database host")
define("db_port", default=db_port, help="postgresql database default port 5432")
define("db_database", default=db_database, help="ebooks website database")
define("db_user", default=db_user, help="ebooks website database user")
define("db_passwd", default=db_password, help="ebooks website database password")
async def maybe_create_tables(db):
try:
with (await db.cursor()) as cur:
await cur.execute("SELECT COUNT(*) FROM ebooks LIMIT 1")
await cur.fetchone()
except psycopg2.ProgrammingError:
with open("schema.sql") as f:
schema = f.read()
with (await db.cursor()) as cur:
await cur.execute(schema)
class Application(tornado.web.Application):
def __init__(self, db):
self.db = db
handlers = [
(r"/", MubenMain.HomeHandler).
(r"/index", MubenMain.HomeHandler),
(r"/search", MubenMain.SearchHandler),
# admin page
(r"/admin/login", MubenAdmin.AuthLoginHandler),
(r"/admin/logout", MubenAdmin.AuthLogoutHandler),
(r"/admin/index", MubenAdmin.IndexHandler),
(r"/admin/ebooks/manager", MubenAdmin.BooksMangerHandler),
# auth page
(r"/auth/login", MubenAuth.AuthLoginHandler),
(r"/auth/logout", MubenAuth.AuthLogouthandler),
(r"/auth/register", MubenAuth.AuthRegisterHandler),
]
settings = dict(
project_title=u"Muben Ebooks for Kindle Post and EPUB Reader",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "ebookstatic"),
xsrf_cookies=True,
cookie_secret="__TODO:_GENERATE_YOUROWN_RANDOM_VALUE_HERE_",
login_url="/auth/login",
debug=True,
)
super(Application, self).__init__(handlers, **settings)
async def main():
tornado.options.parse_command_line()
# Create the global connection pool
async with aiopg.create_pool(
host=options.db_host,
port=options.db_port,
user=options.db_user,
password=options.db_passwd,
dbname=options.db_database,
) as db:
await maybe_create_tables(db)
app = Application(db)
app.listen(options.port)
# Simply shutdown with Ctrl-C
# More gracefully should call shutdown_event.set()
shutdown_event = tornado.locks.Event()
await shutdown_event.wait()
if __name__ == "__main__":
tornado.ioloop.IOLoop.current().run_sync(main)
|
# Copyright Amazon.com, Inc. and its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
from datalake_library.commons import init_logger
from datalake_library.configuration.resource_configs import DynamoConfiguration, SQSConfiguration, S3Configuration
from datalake_library.interfaces.dynamo_interface import DynamoInterface
from datalake_library.interfaces.sqs_interface import SQSInterface
from datalake_library import octagon
from datalake_library.octagon import Artifact, EventReasonEnum, peh
logger = init_logger(__name__)
octagon_client = (
octagon.OctagonClient()
.with_run_lambda(True)
.build()
)
def lambda_handler(event, context):
"""Updates the S3 objects metadata catalog
Arguments:
event {dict} -- Dictionary with details on previous processing step
context {dict} -- Dictionary with details on Lambda context
Returns:
{dict} -- Dictionary with outcome of the process
"""
try:
component = context.function_name.split('-')[-2].title()
peh.PipelineExecutionHistoryAPI(octagon_client).retrieve_pipeline_execution(event['body']['peh_id'])
octagon_client.update_pipeline_execution(status="Pre-Stage {} Processing".format(component), component=component)
logger.info('Fetching transformed objects')
processed_keys = event['body']['processedKeys']
team = event['body']['team']
pipeline = event['body']['pipeline']
dataset = event['body']['dataset']
logger.info('Initializing DynamoDB config and Interface')
dynamo_config = DynamoConfiguration()
dynamo_interface = DynamoInterface(dynamo_config)
logger.info('Storing metadata to DynamoDB')
for key in processed_keys:
object_metadata = {
'bucket': S3Configuration().stage_bucket,
'key': key,
'team': team,
'pipeline': pipeline,
'dataset': dataset,
'stage': 'pre-stage'
}
dynamo_interface.update_object_metadata_catalog(object_metadata)
logger.info('Sending messages to next SQS queue if it exists')
sqs_config = SQSConfiguration(team, pipeline, dataset)
sqs_interface = SQSInterface(sqs_config.get_post_stage_queue_name)
sqs_interface.send_batch_messages_to_fifo_queue(processed_keys, 10, '{}-{}'.format(team, dataset))
octagon_client.end_pipeline_execution_success()
except Exception as e:
logger.error("Fatal error", exc_info=True)
octagon_client.end_pipeline_execution_failed(component=component,
issue_comment="Pre-Stage {} Error: {}".format(component, repr(e)))
raise e
return 200
|
import math
import unittest
from game import Game
from model.maps.generators.forest_generator import ForestGenerator
from model.maps.area_map import AreaMap
class TestForestGenerator(unittest.TestCase):
def test_generate_generates_trees(self):
Game()
width, height = (10, 10)
expected_num_trees = math.floor(ForestGenerator.TREE_PERCENTAGE * width * height)
actual_num_trees = 0
num_trees_set = set()
for _ in range(10):
Game.instance.area_map = AreaMap(width, height)
ForestGenerator(Game.instance.area_map).generate()
for y in range(height):
for x in range(width):
if not Game.instance.area_map.tiles[x][y].is_walkable:
actual_num_trees += 1
num_trees_set.add(actual_num_trees)
self.assertTrue(any(x >= expected_num_trees for x in num_trees_set))
def test_generate_fills_holes(self):
Game()
# Generate a bunch of trees with a known seed that generates holes.
# This is fragile, but there's no other way to test this.
# This is valuable, because there's a ton of code/complexity behind
# this (breadth-first search, etc.).
width, height = (60, 40)
Game.instance.area_map = AreaMap(width, height)
pre_fill_num_trees = math.floor(ForestGenerator.TREE_PERCENTAGE * width * height)
Game.instance.random.seed(1)
ForestGenerator(Game.instance.area_map).generate()
actual_num_trees = 0
for y in range(height):
for x in range(width):
if not Game.instance.area_map.tiles[x][y].is_walkable:
actual_num_trees += 1
# Strictly more trees because of filled holes
# With 60x40 and seed=1, fills 6 gaps with trees
self.assertGreater(actual_num_trees, pre_fill_num_trees)
def test_generate_generates_monsters(self):
Game()
width, height = 15, 15
Game.instance.area_map = AreaMap(width, height)
fg = ForestGenerator(Game.instance.area_map)
fg.generate()
self.assertTrue(fg._area_map is Game.instance.area_map)
self.assertGreaterEqual(len(Game.instance.area_map.entities), 1)
|
from rlpy.Tools.run import run
run("examples/mdp_chain/mdp_chain_post.py","./Results/Tests/mdp_chain/PSRL",ids=range(5), parallelization ="joblib")
run("examples/mdp_chain/mdp_chain_lspi.py","./Results/Tests/mdp_chain/LSPI",ids=range(5), parallelization ="joblib")
run("examples/mdp_chain/mdp_chain_sarsa.py","./Results/Tests/mdp_chain/SARSA",ids=range(5), parallelization ="joblib")
run("examples/mdp_chain/mdp_chain_ucrl.py","./Results/Tests/mdp_chain/UCRL",ids=range(5), parallelization ="joblib")
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 11 15:03:17 2021
An example showing how to write a pd.DataFrame into Excel through openpyxl
@author: renfo
"""
import pandas as pd
import numpy as np
import random
import openpyxl
from openpyxl.utils.dataframe import dataframe_to_rows
import os
mainfolder = os.getcwd()
os.chdir(mainfolder)
# generate a dataframe
data = {
"position":["Top", "Mid", "Btm"]*3,
# random.gauss(mu, sig)
"A":np.array([ random.gauss(3,1.2) for x in range(9) ]),
"B":np.array([ random.gauss(8,0.5) for x in range(9) ]),
"C":np.array([ random.gauss(48,1.5) for x in range(9) ]),
}
df = pd.DataFrame(data)
# write pd.DataFrame into excel via openpyxl
wb = openpyxl.Workbook()
ws = wb.active
for r in dataframe_to_rows(df, index=False, header=True):
ws.append(r)
wb.save("pandas_openpyxl.xlsx")
|
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import lru_cache
from typing import Optional, List, Dict, Tuple
from ethtx.decoders.decoders.semantics import decode_events_and_functions
from ethtx.models.semantics_model import (
AddressSemantics,
ContractSemantics,
ParameterSemantics,
ERC20Semantics,
TransformationSemantics,
FunctionSemantics,
EventSemantics,
Signature,
SignatureArg,
)
from ethtx.providers import EtherscanProvider, Web3Provider, ENSProvider
from ethtx.providers.semantic_providers.database import ISemanticsDatabase
from ethtx.semantics.protocols_router import amend_contract_semantics
from ethtx.semantics.solidity.precompiles import precompiles
from ethtx.semantics.standards.erc20 import ERC20_FUNCTIONS, ERC20_EVENTS
from ethtx.semantics.standards.erc721 import ERC721_FUNCTIONS, ERC721_EVENTS
class SemanticsRepository:
def __init__(
self,
database_connection: ISemanticsDatabase,
etherscan_provider: EtherscanProvider,
web3provider: Web3Provider,
ens_provider: ENSProvider,
):
self.database = database_connection
self.etherscan = etherscan_provider
self._web3provider = web3provider
self._ens_provider = ens_provider
self._records: Optional[List] = None
def record(self) -> None:
"""Records is an array used to hold semantics used in tx decing process.
This recording is used just for logging"""
self._records = []
def end_record(self) -> List:
tmp_records = self._records
self._records = None
return tmp_records
def _read_stored_semantics(
self, address: str, chain_id: str
) -> Optional[AddressSemantics]:
def decode_parameter(_parameter):
components_semantics = []
if "components" in _parameter:
for component in _parameter["components"]:
components_semantics.append(decode_parameter(component))
decoded_parameter = ParameterSemantics(
parameter_name=_parameter["parameter_name"],
parameter_type=_parameter["parameter_type"],
components=components_semantics,
indexed=_parameter["indexed"],
dynamic=_parameter["dynamic"],
)
return decoded_parameter
if not address:
return None
ZERO_HASH = "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
raw_address_semantics = self.database.get_address_semantics(chain_id, address)
if raw_address_semantics:
if raw_address_semantics.get("erc20"):
erc20_semantics = ERC20Semantics(
name=raw_address_semantics["erc20"]["name"],
symbol=raw_address_semantics["erc20"]["symbol"],
decimals=raw_address_semantics["erc20"]["decimals"],
)
else:
erc20_semantics = None
if raw_address_semantics["contract"] == ZERO_HASH:
contract_semantics = ContractSemantics(
code_hash=raw_address_semantics["contract"], name="EOA"
)
else:
raw_contract_semantics = self.database.get_contract_semantics(
raw_address_semantics["contract"]
)
events = {}
for signature, event in raw_contract_semantics["events"].items():
parameters_semantics = []
for parameter in event["parameters"]:
parameters_semantics.append(decode_parameter(parameter))
events[signature] = EventSemantics(
signature=signature,
anonymous=event["anonymous"],
name=event["name"],
parameters=parameters_semantics,
)
functions = {}
for signature, function in raw_contract_semantics["functions"].items():
inputs_semantics = []
for parameter in function["inputs"]:
inputs_semantics.append(decode_parameter(parameter))
outputs_semantics = []
for parameter in function["outputs"]:
outputs_semantics.append(decode_parameter(parameter))
functions[signature] = FunctionSemantics(
signature=signature,
name=function["name"],
inputs=inputs_semantics,
outputs=outputs_semantics,
)
transformations = {}
for signature, parameters_transformations in raw_contract_semantics[
"transformations"
].items():
transformations[signature] = {}
for parameter, transformation in parameters_transformations.items():
transformations[signature][parameter] = TransformationSemantics(
transformed_name=transformation["transformed_name"],
transformed_type=transformation["transformed_type"],
transformation=transformation["transformation"],
)
contract_semantics = ContractSemantics(
code_hash=raw_contract_semantics["code_hash"],
name=raw_contract_semantics["name"],
events=events,
functions=functions,
transformations=transformations,
)
name = raw_address_semantics.get("name", address)
if name == address and not raw_address_semantics["is_contract"]:
name = self._ens_provider.name(
provider=self._web3provider._get_node_connection(chain_id),
address=address,
)
address_semantics = AddressSemantics(
chain_id=chain_id,
address=address,
name=name,
is_contract=raw_address_semantics["is_contract"],
contract=contract_semantics,
standard=raw_address_semantics["standard"],
erc20=erc20_semantics,
)
return address_semantics
return None
@lru_cache(maxsize=128)
def get_semantics(self, chain_id: str, address: str) -> Optional[AddressSemantics]:
if not address:
return None
ZERO_HASH = "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
address_semantics = self._read_stored_semantics(address, chain_id)
if not address_semantics:
            # try to read the semantics from the Etherscan provider
provider = self._web3provider
code_hash = provider.get_code_hash(address, chain_id)
if code_hash != ZERO_HASH:
# smart contract
raw_semantics, decoded = self.etherscan.contract.get_contract_abi(
chain_id, address
)
if decoded and raw_semantics:
# raw semantics received from Etherscan
events, functions = decode_events_and_functions(
raw_semantics["abi"]
)
standard, standard_semantics = self._decode_standard_semantics(
address, raw_semantics["name"], events, functions
)
if standard == "ERC20":
erc20_semantics = standard_semantics
else:
proxy_erc20 = provider.guess_erc20_proxy(address, chain_id)
if proxy_erc20:
erc20_semantics = ERC20Semantics(**proxy_erc20)
else:
erc20_semantics = None
contract_semantics = ContractSemantics(
code_hash=code_hash,
name=raw_semantics["name"],
events=events,
functions=functions,
transformations={},
)
address_semantics = AddressSemantics(
chain_id=chain_id,
address=address,
name=raw_semantics["name"],
is_contract=True,
contract=contract_semantics,
standard=standard,
erc20=erc20_semantics,
)
else:
                    # try to guess if the address is a token
potential_erc20_semantics = provider.guess_erc20_token(
address, chain_id
)
if potential_erc20_semantics:
standard = "ERC20"
erc20_semantics = ERC20Semantics(
name=potential_erc20_semantics["name"],
symbol=potential_erc20_semantics["symbol"],
decimals=potential_erc20_semantics["decimals"],
)
else:
standard = None
erc20_semantics = None
contract_semantics = ContractSemantics(
code_hash=code_hash, name=address
)
address_semantics = AddressSemantics(
chain_id=chain_id,
address=address,
name=address,
is_contract=True,
contract=contract_semantics,
standard=standard,
erc20=erc20_semantics,
)
else:
# externally owned address
contract_semantics = ContractSemantics(code_hash=ZERO_HASH, name="EOA")
name = self._ens_provider.name(
provider=self._web3provider._get_node_connection(chain_id),
address=address,
)
address_semantics = AddressSemantics(
chain_id=chain_id,
address=address,
name=name,
is_contract=False,
contract=contract_semantics,
)
self.update_semantics(address_semantics)
# amend semantics with locally stored updates
amend_contract_semantics(address_semantics.contract)
if self._records is not None:
self._records.append(address)
return address_semantics
def _decode_standard_semantics(
self, address, name, events, functions
) -> Tuple[Optional[str], Optional[ERC20Semantics]]:
standard = None
standard_semantics = None
if not address:
return standard, standard_semantics
if all(erc20_event in events for erc20_event in ERC20_EVENTS) and all(
erc20_function in functions for erc20_function in ERC20_FUNCTIONS
):
standard = "ERC20"
try:
provider = self._web3provider
token_data = provider.get_erc20_token(address, name, functions)
standard_semantics = ERC20Semantics(
name=token_data["name"],
symbol=token_data["symbol"],
decimals=token_data["decimals"],
)
except Exception:
standard_semantics = ERC20Semantics(name=name, symbol=name, decimals=18)
elif all(erc721_event in events for erc721_event in ERC721_EVENTS) and all(
erc721_function in functions for erc721_function in ERC721_FUNCTIONS
):
standard = "ERC721"
standard_semantics = None
return standard, standard_semantics
@lru_cache(maxsize=128)
def get_event_abi(self, chain_id, address, signature) -> Optional[EventSemantics]:
if not address:
return None
semantics = self.get_semantics(chain_id, address)
event_semantics = (
semantics.contract.events.get(signature) if semantics else None
)
return event_semantics
@lru_cache(maxsize=128)
def get_transformations(
self, chain_id, address, signature
) -> Optional[Dict[str, TransformationSemantics]]:
if not address:
return None
semantics = self.get_semantics(chain_id, address)
if semantics:
transformations = semantics.contract.transformations.get(signature)
else:
transformations = None
return transformations
@lru_cache(maxsize=128)
def get_anonymous_event_abi(self, chain_id, address) -> Optional[EventSemantics]:
if not address:
return None
semantics = self.get_semantics(chain_id, address)
event_semantics = None
if semantics:
anonymous_events = {
signature
for signature, event in semantics.contract.events.items()
if event.anonymous
}
if len(anonymous_events) == 1:
event_signature = anonymous_events.pop()
event_semantics = semantics.contract.events[event_signature]
return event_semantics
@lru_cache(maxsize=128)
def get_function_abi(
self, chain_id, address, signature
) -> Optional[FunctionSemantics]:
if not address:
return None
semantics = self.get_semantics(chain_id, address)
function_semantics = (
semantics.contract.functions.get(signature) if semantics else None
)
return function_semantics
@lru_cache(maxsize=128)
def get_constructor_abi(self, chain_id, address) -> Optional[FunctionSemantics]:
if not address:
return None
semantics = self.get_semantics(chain_id, address)
constructor_semantics = (
semantics.contract.functions.get("constructor") if semantics else None
)
if constructor_semantics:
constructor_semantics.outputs.append(
ParameterSemantics(
parameter_name="__create_output__",
parameter_type="ignore",
indexed=False,
dynamic=True,
)
)
return constructor_semantics
def get_address_label(self, chain_id, address, proxies=None) -> str:
if not address:
return ""
if int(address, 16) in precompiles:
contract_label = "Precompiled"
else:
semantics = self.get_semantics(chain_id, address)
if semantics.erc20:
contract_label = semantics.erc20.symbol
elif proxies and address in proxies:
contract_label = proxies[address].name
else:
contract_label = (
semantics.name if semantics and semantics.name else address
)
return contract_label
@lru_cache(maxsize=128)
def check_is_contract(self, chain_id, address) -> bool:
if not address:
return False
semantics = self.get_semantics(chain_id, address)
is_contract = semantics is not None and semantics.is_contract
return is_contract
@lru_cache(maxsize=128)
def get_standard(self, chain_id, address) -> Optional[str]:
if not address:
return None
semantics = self.get_semantics(chain_id, address)
standard = semantics.standard if semantics is not None else None
return standard
def get_token_data(
self, chain_id, address, proxies=None
) -> Tuple[Optional[str], Optional[str], Optional[int], Optional[str]]:
if not address:
return None, None, None, None
semantics = self.get_semantics(chain_id, address)
if semantics and semantics.erc20:
token_name = (
semantics.erc20.name if semantics and semantics.erc20 else address
)
token_symbol = (
semantics.erc20.symbol if semantics and semantics.erc20 else "Unknown"
)
token_decimals = (
semantics.erc20.decimals if semantics and semantics.erc20 else 18
)
elif proxies and address in proxies and proxies[address].token:
token_name = proxies[address].token.name
token_symbol = proxies[address].token.symbol
token_decimals = proxies[address].token.decimals
else:
token_name = address
token_symbol = "Unknown"
token_decimals = 18
return token_name, token_symbol, token_decimals, "ERC20"
def update_address(self, chain_id, address, contract) -> Dict:
updated_address = {"network": chain_id, "address": address, **contract}
self.database.insert_address(address=updated_address, update_if_exist=True)
return updated_address
def update_semantics(self, semantics) -> None:
if not semantics:
return
contract_id = self.database.insert_contract(
contract=semantics.contract.dict(), update_if_exist=True
)
updated_address_semantics = semantics.copy()
updated_address_semantics.contract = (
updated_address_semantics.contract.code_hash
)
self.database.insert_address(
address=updated_address_semantics.dict(), update_if_exist=True
)
if contract_id:
self.insert_contract_signatures(semantics.contract)
def insert_contract_signatures(self, contract_semantics: ContractSemantics) -> None:
for _, v in contract_semantics.functions.items():
if not v.signature.startswith("0x"):
continue
if v.inputs and v.inputs[0].parameter_type == "tuple":
args = [
SignatureArg(name=param.parameter_name, type=param.parameter_type)
for param in v.inputs[0].components
]
else:
args = (
[
SignatureArg(
name=param.parameter_name, type=param.parameter_type
)
for param in v.inputs
]
if v.inputs
else []
)
new_signature = Signature(
signature_hash=v.signature, name=v.name, args=args
)
self.update_or_insert_signature(new_signature)
def get_most_used_signature(self, signature_hash: str) -> Optional[Signature]:
signatures = list(
self.database.get_signature_semantics(signature_hash=signature_hash)
)
if signatures:
most_common_signature = max(signatures, key=lambda x: x["count"])
signature = Signature(
signature_hash=most_common_signature["signature_hash"],
name=most_common_signature["name"],
args=most_common_signature["args"],
count=most_common_signature["count"],
tuple=most_common_signature["tuple"],
guessed=most_common_signature["guessed"],
)
return signature
return None
def update_or_insert_signature(self, signature: Signature) -> None:
signatures = self.database.get_signature_semantics(
signature_hash=signature.signature_hash
)
for sig in signatures:
if (
signature.name == sig["name"]
and signature.signature_hash == sig["signature_hash"]
and len(signature.args) == len(sig["args"])
):
if signature.args and any(
arg for arg in list(sig["args"][0].values()) if "arg" in arg
):
for index, argument in enumerate(sig["args"]):
argument["name"] = signature.args[index].name
argument["type"] = signature.args[index].type
sig["count"] += 1
sig["guessed"] = False
self.database.insert_signature(signature=sig, update_if_exist=True)
break
else:
self.database.insert_signature(signature=signature.dict())
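# Illustrative sketch (not part of ethtx): how the record()/end_record() pattern above
# interacts with @lru_cache. Because get_semantics() is cached, only cache MISSES end up
# in the recorded list; repeated lookups of the same address are not logged again.
# _DummyRepository and the addresses below are hypothetical stand-ins and reuse the
# functools/typing imports at the top of this module.
class _DummyRepository:
    def __init__(self):
        self._records: Optional[List] = None

    def record(self) -> None:
        self._records = []

    def end_record(self) -> List:
        recorded, self._records = self._records, None
        return recorded

    @lru_cache(maxsize=128)
    def get_semantics(self, chain_id: str, address: str) -> str:
        if self._records is not None:
            self._records.append(address)
        return "semantics:%s:%s" % (chain_id, address)


if __name__ == "__main__":
    repo = _DummyRepository()
    repo.record()
    repo.get_semantics("mainnet", "0xabc")
    repo.get_semantics("mainnet", "0xabc")  # served from the cache, not re-recorded
    print(repo.end_record())  # -> ['0xabc']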
|
# Generated by Django 2.2.2 on 2019-08-10 17:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0003_chart'),
]
operations = [
migrations.CreateModel(
name='Option_Chain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('call_symbol', models.CharField(max_length=40)),
('strike_price', models.FloatField(max_length=40)),
('put_symbol', models.CharField(max_length=40)),
],
),
migrations.DeleteModel(
name='Chart',
),
]
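# For reference, a hypothetical models.py definition that would generate this migration
# (reconstructed from the fields above; the real model may differ, and note that
# max_length has no effect on FloatField):
#
#     from django.db import models
#
#     class Option_Chain(models.Model):
#         call_symbol = models.CharField(max_length=40)
#         strike_price = models.FloatField()
#         put_symbol = models.CharField(max_length=40)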
|
# -*- coding: utf-8 -*-
from flask import Blueprint
from flask import jsonify
from shutil import copyfile, move
from google.cloud import storage
from google.cloud import bigquery
from google.oauth2 import service_account
from flask import request
from google.auth.transport.requests import AuthorizedSession
import dataflow_pipeline.gestion_humana.gestiones_beam as gestiones_beam
import dataflow_pipeline.gestion_humana.documentos_pendientes_beam as documentos_pendientes_beam
import dataflow_pipeline.gestion_humana.parametros_dias_beam as parametros_dias_beam
import dataflow_pipeline.gestion_humana.e_d_preguntas_beam as e_d_preguntas_beam
import dataflow_pipeline.gestion_humana.e_d_tipo_respuestas_beam as e_d_tipo_respuestas_beam
import dataflow_pipeline.gestion_humana.e_d_competencia_beam as e_d_competencia_beam
import dataflow_pipeline.gestion_humana.e_d_detalle_evaluacion_beam as e_d_detalle_evaluacion_beam
import dataflow_pipeline.gestion_humana.e_d_rel_competencia_beam as e_d_rel_competencia_beam
import dataflow_pipeline.gestion_humana.e_d_historico_evaluacion_beam as e_d_historico_evaluacion_beam
import dataflow_pipeline.gestion_humana.e_d_rel_usuarios_beam as e_d_rel_usuarios_beam
import dataflow_pipeline.gestion_humana.e_d_tb_usuarios_beam as e_d_tb_usuarios_beam
import dataflow_pipeline.gestion_humana.e_d_centro_costos_beam as e_d_centro_costos_beam
import dataflow_pipeline.gestion_humana.e_d_uen_beam as e_d_uen_beam
import dataflow_pipeline.gestion_humana.e_d_cargos_beam as e_d_cargos_beam
import dataflow_pipeline.gestion_humana.e_d_perfiles_beam as e_d_perfiles_beam
import dataflow_pipeline.gestion_humana.e_d_base_beam as e_d_base_beam
import cloud_storage_controller.cloud_storage_controller as gcscontroller
import dataflow_pipeline.massive as pipeline
import os
import time
import socket
import _mssql
import datetime
import sys
gto_api = Blueprint('gto_api', __name__)
fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
@gto_api.route("/gestiones")
def gestiones():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Gestiones_EPS"')
query_job = client.query(QUERY1)
rows = query_job.result()
data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
    # VoBo_Ejecucion_RPA_Seguimiento is selected here because the row loop below writes it to the CSV
    conn.execute_query('SELECT Id_Gestion,Persona_que_reporta,Sede,Cedula,Nombre_completo,Eps,Cod_Centro_Costos,Nombre_Centro_Costos,Tipo_incapacidad,Buscar_diagnostico,Codigo_diagnostico,Nombre_diagnostico,Numero_incapacidad,Dias_incapacidad,Fecha_inicial_liquidacion,Fecha_real_inicial,Fecha_final_incapacidad,Ajuste_incapacidad_salario_minimo,Prorroga,Documento_prorroga,Accidente_transito,IBC_mes_anterior,IBC_Cotizacion_especifico,Fecha_recibido_incapacidad,Mes_aplicaco_nomina,VoBo_Ejecucion_RPA,VoBo_Ejecucion_RPA_Seguimiento,Tiene_Transcripcion,Documentacion_transcripcion,Documento_pendiente,Correo_responsable,Fecha_Notificacion_Docs_Incompletos,Fecha_Envio_Docs_Incompletos,Fecha_Envio_Docs_Correcto,Documentacion_Completa,Nro_Incapacidad,Insert_date,Transcrito_Por,Fecha_Sol_Transcripcion,Fecha_Transcripcion,Fecha_Max_Transcripcion,Fecha_Sol_Cobro,Fecha_Cobro,Fecha_Max_Cobro,Valor_pagado,Fecha_pago,Fecha_Max_Pago,Estado_Gossem,Marca_Gosem,Fecha_Proceso_Gossem,Duracion_Gossem FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['Id_Gestion']).encode('utf-8') + "|"
text_row += str(row['Persona_que_reporta']).encode('utf-8') + "|"
text_row += str(row['Sede']).encode('utf-8') + "|"
text_row += str(row['Cedula']).encode('utf-8') + "|"
text_row += str(row['Nombre_completo']).encode('utf-8') + "|"
text_row += str(row['Eps']).encode('utf-8') + "|"
text_row += str(row['Cod_Centro_Costos']).encode('utf-8') + "|"
text_row += str(row['Nombre_Centro_Costos']).encode('utf-8') + "|"
text_row += str(row['Tipo_incapacidad']).encode('utf-8') + "|"
text_row += str(row['Buscar_diagnostico']).encode('utf-8') + "|"
text_row += str(row['Codigo_diagnostico']).encode('utf-8') + "|"
text_row += str(row['Nombre_diagnostico']).encode('utf-8') + "|"
text_row += str(row['Numero_incapacidad']).encode('utf-8') + "|"
text_row += str(row['Dias_incapacidad']).encode('utf-8') + "|"
text_row += str(row['Fecha_inicial_liquidacion']).encode('utf-8') + "|"
text_row += str(row['Fecha_real_inicial']).encode('utf-8') + "|"
text_row += str(row['Fecha_final_incapacidad']).encode('utf-8') + "|"
text_row += str(row['Ajuste_incapacidad_salario_minimo']).encode('utf-8') + "|"
text_row += str(row['Prorroga']).encode('utf-8') + "|"
text_row += str(row['Documento_prorroga']).encode('utf-8') + "|"
text_row += str(row['Accidente_transito']).encode('utf-8') + "|"
text_row += str(row['IBC_mes_anterior']).encode('utf-8') + "|"
text_row += str(row['IBC_Cotizacion_especifico']).encode('utf-8') + "|"
text_row += str(row['Fecha_recibido_incapacidad']).encode('utf-8') + "|"
text_row += str(row['Mes_aplicaco_nomina']).encode('utf-8') + "|"
text_row += str(row['VoBo_Ejecucion_RPA']).encode('utf-8') + "|"
text_row += str(row['VoBo_Ejecucion_RPA_Seguimiento']).encode('utf-8') + "|"
text_row += str(row['Tiene_Transcripcion']).encode('utf-8') + "|"
text_row += str(row['Documentacion_transcripcion']).encode('utf-8') + "|"
text_row += str(row['Documento_pendiente']).encode('utf-8') + "|"
text_row += str(row['Correo_responsable']).encode('utf-8') + "|"
text_row += str(row['Fecha_Notificacion_Docs_Incompletos']).encode('utf-8') + "|"
text_row += str(row['Fecha_Envio_Docs_Incompletos']).encode('utf-8') + "|"
text_row += str(row['Fecha_Envio_Docs_Correcto']).encode('utf-8') + "|"
text_row += str(row['Documentacion_Completa']).encode('utf-8') + "|"
text_row += str(row['Nro_Incapacidad']).encode('utf-8') + "|"
text_row += str(row['Insert_date']).encode('utf-8') + "|"
text_row += str(row['Transcrito_Por']).encode('utf-8') + "|"
text_row += str(row['Fecha_Sol_Transcripcion']).encode('utf-8') + "|"
text_row += str(row['Fecha_Transcripcion']).encode('utf-8') + "|"
text_row += str(row['Fecha_Max_Transcripcion']).encode('utf-8') + "|"
text_row += str(row['Fecha_Sol_Cobro']).encode('utf-8') + "|"
text_row += str(row['Fecha_Cobro']).encode('utf-8') + "|"
text_row += str(row['Fecha_Max_Cobro']).encode('utf-8') + "|"
text_row += str(row['Valor_pagado']).encode('utf-8') + "|"
text_row += str(row['Fecha_pago']).encode('utf-8') + "|"
text_row += str(row['Fecha_Max_Pago']).encode('utf-8') + "|"
text_row += str(row['Estado_Gossem']).encode('utf-8') + "|"
text_row += str(row['Marca_Gosem']).encode('utf-8') + "|"
text_row += str(row['Fecha_Proceso_Gossem']).encode('utf-8') + "|"
text_row += str(row['Duracion_Gossem']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "gestiones" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.Gestiones` WHERE 1=1"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = gestiones_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("gestiones" + ".csv")
# Eliminar el archivo en la variable
blob.delete()
# return jsonify(flowAnswer), 200
return "Cargue exitoso Tabla de gestiones " + flowAnswer
######################################## DOCUMENTOS PENDIENTES ######################################
@gto_api.route("/d_pendientes")
def d_pendientes():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "D_pendientes"')
query_job = client.query(QUERY1)
rows = query_job.result()
data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT MAD_Mail_Documentos,MAD_Id_Gestion,MAD_Correo_Responsble,MAD_Nombre_Completo,MAD_Documento,MAD_Fecha_Real_Inicial,MAD_Documento_Pendiente,MAD_Insert_Date FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['MAD_Mail_Documentos']).encode('utf-8') + "|"
text_row += str(row['MAD_Id_Gestion']).encode('utf-8') + "|"
text_row += str(row['MAD_Correo_Responsble']).encode('utf-8') + "|"
text_row += str(row['MAD_Nombre_Completo']).encode('utf-8') + "|"
text_row += str(row['MAD_Documento']).encode('utf-8') + "|"
text_row += str(row['MAD_Fecha_Real_Inicial']).encode('utf-8') + "|"
text_row += str(row['MAD_Documento_Pendiente']).encode('utf-8') + "|"
text_row += str(row['MAD_Insert_Date']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "d_pendientes" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.D_pendientes` WHERE 1=1"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = documentos_pendientes_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("d_pendientes" + ".csv")
# Eliminar el archivo en la variable
blob.delete()
# return jsonify(flowAnswer), 200
return "Cargue exitoso Tabla de documentos pendientes " + flowAnswer
####################################### DIAS HABILES #####################################
@gto_api.route("/dias")
def dias():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Info_EPS"')
query_job = client.query(QUERY1)
rows = query_job.result()
data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT Eps,Url,User_Eps,Pass,Correo_Notificaciones,Estado,Tiempo_Transcripcion_Empresa,Tiempo_Cobro_Empresa,Tiempo_Transcripcion_EPS,Tiempo_Pago_EPS,Insert_Date FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['Eps']).encode('utf-8') + "|"
text_row += str(row['Url']).encode('utf-8') + "|"
text_row += str(row['User_Eps']).encode('utf-8') + "|"
text_row += str(row['Pass']).encode('utf-8') + "|"
text_row += str(row['Correo_Notificaciones']).encode('utf-8') + "|"
text_row += str(row['Estado']).encode('utf-8') + "|"
text_row += str(row['Tiempo_Transcripcion_Empresa']).encode('utf-8') + "|"
text_row += str(row['Tiempo_Cobro_Empresa']).encode('utf-8') + "|"
text_row += str(row['Tiempo_Transcripcion_EPS']).encode('utf-8') + "|"
text_row += str(row['Tiempo_Pago_EPS']).encode('utf-8') + "|"
text_row += str(row['Insert_Date']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "dias" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.Parametros_dias` WHERE 1=1"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = parametros_dias_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("dias" + ".csv")
# Eliminar el archivo en la variable
blob.delete()
# return jsonify(flowAnswer), 200
return "Cargue exitoso Tabla de parametros dias " + flowAnswer
##################################### INFORME EVALUACION DE DESEMPENO ###########################################
############################################## TABLA PREGUNTAS ##################################################
@gto_api.route("/preguntas")
def preguntas():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Preguntas"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_pregunta,nombre_pregunta,estado,fecha_modif,user_modif FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_pregunta']).encode('utf-8') + "|"
text_row += str(row['nombre_pregunta']).encode('utf-8') + "|"
text_row += str(row['estado']).encode('utf-8') + "|"
text_row += str(row['fecha_modif']).encode('utf-8') + "|"
text_row += str(row['user_modif']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/preguntas" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.E_D_Preguntas` WHERE id_pregunta > 1"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_preguntas_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/preguntas" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
return "Cargue exitoso Tabla de preguntas " + flowAnswer
################################ TIPO REPUESTA #################################
@gto_api.route("/tipo_respuestas")
def respuestas():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Tipo_respuesta"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_tipo_respuesta,nombre_respuesta,estado,fecha_modif,usuario_modif FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_tipo_respuesta']).encode('utf-8') + "|"
text_row += str(row['nombre_respuesta']).encode('utf-8') + "|"
text_row += str(row['estado']).encode('utf-8') + "|"
text_row += str(row['fecha_modif']).encode('utf-8') + "|"
text_row += str(row['usuario_modif']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/tipo_respuestas" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.E_D_tipo_respuesta` WHERE id_tipo_respuesta <> "" "
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_tipo_respuestas_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/tipo_respuestas" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
return "Cargue exitoso Tabla de tipo_respuestas " + flowAnswer
############################ TB COMPETENCIA #################################
@gto_api.route("/competencia")
def competencia():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Competencia"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_competencia,nombre_competencia,id_cargo,estado,fecha_modif,usuario_modif,peso FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_competencia']).encode('utf-8') + "|"
text_row += str(row['nombre_competencia']).encode('utf-8') + "|"
text_row += str(row['id_cargo']).encode('utf-8') + "|"
text_row += str(row['estado']).encode('utf-8') + "|"
text_row += str(row['fecha_modif']).encode('utf-8') + "|"
text_row += str(row['usuario_modif']).encode('utf-8') + "|"
text_row += str(row['peso']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/competencia" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.E_D_competencia` WHERE id_competencia <> "" "
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_competencia_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/competencia" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
return "Cargue exitoso Tabla de competencia " + flowAnswer
############################ TB EVALUACION DETALLE ############################
@gto_api.route("/detalle_eva")
def detalle_eva():
dateini = request.args.get('dateini')
dateend = request.args.get('dateend')
if dateini is None:
dateini = ""
else:
dateini = dateini
if dateend is None:
dateend = ""
else:
dateend = dateend
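    # Example request (hypothetical dates): GET /detalle_eva?dateini=2020-01-01&dateend=2020-01-31
    # When no dates are supplied the whole source table is extracted and the entire
    # BigQuery target is truncated before the Beam load.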
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Detalle_evaluacion"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
# if dateini == "":
# conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
# conn.execute_query('SELECT id_evaluacion_detalle,notaXPregunta,estado,fecha_modif,usuario_modif,id_relcompetencia,pesoXCompetencia,respuestaAbierta,id_evaluacion_historico FROM ' + tabla_bd + ' WHERE CONVERT(DATE, fecha_modif) = CONVERT(DATE,GETDATE())')
if dateini == "":
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_evaluacion_detalle,notaXPregunta,estado,fecha_modif,usuario_modif,id_relcompetencia,pesoXCompetencia,respuestaAbierta,id_evaluacion_historico FROM ' + tabla_bd )
else:
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_evaluacion_detalle,notaXPregunta,estado,fecha_modif,usuario_modif,id_relcompetencia,pesoXCompetencia,respuestaAbierta,id_evaluacion_historico FROM ' + tabla_bd + ' WHERE CONVERT(DATE, fecha_modif)' ' between ' + "'" + dateini + "'" +" and " + "'" + dateend + "'" )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_evaluacion_detalle']).encode('utf-8') + "|"
text_row += str(row['notaXPregunta']).encode('utf-8') + "|"
text_row += str(row['estado']).encode('utf-8') + "|"
text_row += str(row['fecha_modif']).encode('utf-8') + "|"
text_row += str(row['usuario_modif']).encode('utf-8') + "|"
text_row += str(row['id_relcompetencia']).encode('utf-8') + "|"
text_row += str(row['pesoXCompetencia']).encode('utf-8') + "|"
text_row += str(row['respuestaAbierta']).encode('utf-8') + "|"
text_row += str(row['id_evaluacion_historico']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/detalle_eva" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
if dateini == "":
# deleteQuery = 'DELETE FROM `contento-bi.gestion_humana.E_D_detalle_eva` WHERE CAST(SUBSTR(fecha_modif,0,10) AS DATE) = CURRENT_DATE()'
deleteQuery = 'DELETE FROM `contento-bi.gestion_humana.E_D_detalle_eva` WHERE 1 = 1'
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
else:
deleteQuery2 = 'DELETE FROM `contento-bi.gestion_humana.E_D_detalle_eva` WHERE CAST(SUBSTR(fecha_modif,0,10) AS DATE) between ' + "'" + dateini + "'" +" and " + "'" + dateend + "'"
client = bigquery.Client()
query_job = client.query(deleteQuery2)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_detalle_evaluacion_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/detalle_eva" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
return "Cargue exitoso Tabla de competencia " + flowAnswer
################################### REL COMPETENCIA ##############################
@gto_api.route("/rel_competencia")
def rel_competencia():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Rel_competencia"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_relcompetencia,id_competencia,id_pregunta,id_tipo_respuesta,estado,fecha_modif,usuario_modif FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_relcompetencia']).encode('utf-8') + "|"
text_row += str(row['id_competencia']).encode('utf-8') + "|"
text_row += str(row['id_pregunta']).encode('utf-8') + "|"
text_row += str(row['id_tipo_respuesta']).encode('utf-8') + "|"
text_row += str(row['estado']).encode('utf-8') + "|"
text_row += str(row['fecha_modif']).encode('utf-8') + "|"
text_row += str(row['usuario_modif']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/rel_competencia" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.E_D_rel_competencia` WHERE 1 = 1 "
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_rel_competencia_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/rel_competencia" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
return "Cargue exitoso Tabla de competencia " + flowAnswer
################################### HISTORICO EVALUACIONES ##############################
@gto_api.route("/historico_eva")
def historico_eva():
dateini = request.args.get('dateini')
dateend = request.args.get('dateend')
if dateini is None:
dateini = ""
else:
dateini = dateini
if dateend is None:
dateend = ""
else:
dateend = dateend
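    # Example request (hypothetical dates): GET /historico_eva?dateini=2020-01-01&dateend=2020-01-31
    # Without dates the full history is extracted and the BigQuery target is truncated before the load.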
client = bigquery.Client()
QUERY = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Historico_evaluacion"')
query_job = client.query(QUERY)
rows = query_job.result()
data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
# Nos conectamos a la BD y obtenemos los registros
# if dateini == "":
# conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
# conn.execute_query('SELECT id_evaluacion_historico,observaciones,documento_evaluador,id_cargo_evaluador,documento_evaluado,id_cargo_evaluado,id_centrocosto,fecha_cierre,fecha_evaluacion FROM ' + tabla_bd + ' WHERE CONVERT(DATE, fecha_evaluacion) = CONVERT(DATE,GETDATE())')
# cloud_storage_rows = ""
if dateini == "":
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_evaluacion_historico,observaciones,documento_evaluador,id_cargo_evaluador,documento_evaluado,id_cargo_evaluado,id_centrocosto,fecha_cierre,fecha_evaluacion FROM ' + tabla_bd )
cloud_storage_rows = ""
else:
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_evaluacion_historico,observaciones,documento_evaluador,id_cargo_evaluador,documento_evaluado,id_cargo_evaluado,id_centrocosto,fecha_cierre,fecha_evaluacion FROM ' + tabla_bd + ' WHERE CONVERT(DATE, fecha_evaluacion)' ' between ' + "'" + dateini + "'" +" and " + "'" + dateend + "'" )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_evaluacion_historico']).encode('utf-8') + "|"
text_row += str(row['observaciones']).encode('utf-8').replace('\n','') + "|"
text_row += str(row['documento_evaluador']).encode('utf-8') + "|"
text_row += str(row['id_cargo_evaluador']).encode('utf-8') + "|"
text_row += str(row['documento_evaluado']).encode('utf-8') + "|"
text_row += str(row['id_cargo_evaluado']).encode('utf-8') + "|"
text_row += str(row['id_centrocosto']).encode('utf-8') + "|"
text_row += str(row['fecha_cierre']).encode('utf-8') + "|"
text_row += str(row['fecha_evaluacion']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/historico_eva" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
if dateini == "":
# deleteQuery = 'DELETE FROM `contento-bi.gestion_humana.E_D_historico_eva` WHERE CAST(SUBSTR(fecha_evaluacion,0,10) AS DATE) = CURRENT_DATE()'
deleteQuery = 'DELETE FROM `contento-bi.gestion_humana.E_D_historico_eva` WHERE 1=1'
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
else:
deleteQuery2 = 'DELETE FROM `contento-bi.gestion_humana.E_D_historico_eva` WHERE CAST(SUBSTR(fecha_evaluacion,0,10) AS DATE) between ' + "'" + dateini + "'" +" and " + "'" + dateend + "'"
client = bigquery.Client()
query_job = client.query(deleteQuery2)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_historico_evaluacion_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/historico_eva" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
# return jsonify(flowAnswer), 200
return "tabla historico de evaluacion cargada" + flowAnswer
############################### REL USUARIOS #############################
@gto_api.route("/rel_usuarios")
def rel_usuarios():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Rel_usuarios"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_Relusuario,documento,id_Centrocosto,id_cargo,id_perfil,estado,fecha_modf,usuario_modif FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_Relusuario']).encode('utf-8') + "|"
text_row += str(row['documento']).encode('utf-8') + "|"
text_row += str(row['id_Centrocosto']).encode('utf-8') + "|"
text_row += str(row['id_cargo']).encode('utf-8') + "|"
text_row += str(row['id_perfil']).encode('utf-8') + "|"
text_row += str(row['estado']).encode('utf-8') + "|"
text_row += str(row['fecha_modf']).encode('utf-8') + "|"
text_row += str(row['usuario_modif']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/rel_usuarios" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.E_D_rel_usuarios` WHERE 1 = 1 "
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_rel_usuarios_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/rel_usuarios" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
return "Cargue exitoso Tabla de rel ususarios " + flowAnswer
############################### TB USUARIOS ###############################
@gto_api.route("/tb_usuarios")
def tb_usuarios():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Usuarios"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT documento,nombre_usuario,da,estado,fecha_modf,usuario_modif,fecha_ingreso FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['documento']).encode('utf-8') + "|"
text_row += str(row['nombre_usuario']).encode('utf-8') + "|"
text_row += str(row['da']).encode('utf-8') + "|"
text_row += str(row['estado']).encode('utf-8') + "|"
text_row += str(row['fecha_modf']).encode('utf-8') + "|"
text_row += str(row['usuario_modif']).encode('utf-8') + "|"
text_row += str(row['fecha_ingreso']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/tb_usuarios" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.E_D_tb_usuarios` WHERE 1 = 1 "
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_tb_usuarios_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/tb_usuarios" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
return "Cargue exitoso Tabla de tb ususarios " + flowAnswer
################################# TB CENTRO DE COSTOS ################################
@gto_api.route("/centro_costos")
def centro_costos():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Centros_de_costos"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_Centrocosto,nombre_centro,id_uen,sede,estado,fecha_modif,usuario_modif FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_Centrocosto']).encode('utf-8') + "|"
text_row += str(row['nombre_centro']).encode('utf-8') + "|"
text_row += str(row['id_uen']).encode('utf-8') + "|"
text_row += str(row['sede']).encode('utf-8') + "|"
text_row += str(row['estado']).encode('utf-8') + "|"
text_row += str(row['fecha_modif']).encode('utf-8') + "|"
text_row += str(row['usuario_modif']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/centro_costos" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.E_D_tb_centro_costos` WHERE 1 = 1 "
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_centro_costos_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/centro_costos" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
return "Cargue exitoso Tabla de tb centro costos " + flowAnswer
############################## TB UEN #############################
@gto_api.route("/uen")
def uen():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "uen"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_uen,nombre_uen,estado,fecha_modif,user_modif FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_uen']).encode('utf-8') + "|"
text_row += str(row['nombre_uen']).encode('utf-8') + "|"
text_row += str(row['estado']).encode('utf-8') + "|"
text_row += str(row['fecha_modif']).encode('utf-8') + "|"
text_row += str(row['user_modif']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/uen" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.E_D_uen` WHERE 1 = 1 "
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_uen_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/uen" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
return "Cargue exitoso Tabla de uen " + flowAnswer
################################# TB CARGO ##############################
@gto_api.route("/cargos")
def cargos():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Cargos"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_cargo,nombre_cargo,estado,fecha_modif,usuario_modif FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_cargo']).encode('utf-8') + "|"
text_row += str(row['nombre_cargo']).encode('utf-8') + "|"
text_row += str(row['estado']).encode('utf-8') + "|"
text_row += str(row['fecha_modif']).encode('utf-8') + "|"
text_row += str(row['usuario_modif']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/cargos" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.E_D_cargos` WHERE cast(id_cargo as int64) > 0 "
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_cargos_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/cargos" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
return "Cargue exitoso Tabla de cargos " + flowAnswer
####################################### PERFIL ####################################
@gto_api.route("/perfil")
def perfil():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Perfil"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
#Nos conectamos a la BD y obtenemos los registros
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_perfil,nombre_perfil,estado,fecha_modif,usuario_modif,id_perfil_padre FROM ' + tabla_bd )
cloud_storage_rows = ""
# Debido a que los registros en esta tabla pueden tener saltos de linea y punto y comas inmersos
for row in conn:
text_row = ""
text_row += str(row['id_perfil']).encode('utf-8') + "|"
text_row += str(row['nombre_perfil']).encode('utf-8') + "|"
text_row += str(row['estado']).encode('utf-8') + "|"
text_row += str(row['fecha_modif']).encode('utf-8') + "|"
text_row += str(row['usuario_modif']).encode('utf-8') + "|"
text_row += str(row['id_perfil_padre']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/perfil" + ".csv"
#Finalizada la carga en local creamos un Bucket con los datos
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.E_D_perfil` WHERE id_perfil <> "" "
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
#Primero eliminamos todos los registros que contengan esa fecha
# time.sleep(60)
flowAnswer = e_d_perfiles_beam.run()
# time.sleep(60)
# Poner la ruta en storage cloud en una variable importada para posteriormente eliminarla
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/perfil" + ".csv")
# Eliminar el archivo en la variable
# blob.delete()
return "Cargue exitoso Tabla de perfiles " + flowAnswer
############################## BASE ##################################
@gto_api.route("/base")
def base():
client = bigquery.Client()
QUERY1 = ('select Tabla,servidor,usuario,contrasena,data_base,tabla_bd from `contento-bi.Contento.Usuario_conexion_bases` where Tabla = "Base"')
query_job = client.query(QUERY1)
rows = query_job.result()
# data = ""
for row in rows:
servidor = row.servidor
usuario = row.usuario
contrasena = row.contrasena
data_base = row.data_base
tabla_bd = row.tabla_bd
reload(sys)
sys.setdefaultencoding('utf8')
# HOY = datetime.datetime.today().strftime('%Y-%m-%d')
# Connect to the database and fetch the records
conn = _mssql.connect(server=servidor, user=usuario, password=contrasena, database=data_base)
conn.execute_query('SELECT id_cargue,documento,nombre_usuario,id_uen,id_Centrocosto,id_cargo,ciudad,fecha_ingreso,id_perfil,Marca FROM ' + tabla_bd )
cloud_storage_rows = ""
# Records in this table may contain embedded line breaks and semicolons
for row in conn:
text_row = ""
text_row += str(row['id_cargue']).encode('utf-8') + "|"
text_row += str(row['documento']).encode('utf-8') + "|"
text_row += str(row['nombre_usuario']).encode('utf-8') + "|"
text_row += str(row['id_uen']).encode('utf-8') + "|"
text_row += str(row['id_Centrocosto']).encode('utf-8') + "|"
text_row += str(row['id_cargo']).encode('utf-8') + "|"
text_row += str(row['ciudad']).encode('utf-8') + "|"
text_row += str(row['fecha_ingreso']).encode('utf-8') + "|"
text_row += str(row['id_perfil']).encode('utf-8') + "|"
text_row += str(row['Marca']).encode('utf-8')
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "evaluacion_desempeno/base" + ".csv"
# Once the local load is finished, upload the data to a Cloud Storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-gto")
try:
deleteQuery = "DELETE FROM `contento-bi.gestion_humana.E_D_base` WHERE id_cargue > 0 "
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
# First we delete all records for that date
# time.sleep(60)
flowAnswer = e_d_base_beam.run()
# time.sleep(60)
# Keep the Cloud Storage path in a variable so the file can be deleted later
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-gto')
blob = bucket.blob("evaluacion_desempeno/base" + ".csv")
# Delete the file referenced by the variable
# blob.delete()
return "Cargue exitoso Tabla base " + flowAnswer
|
# import logging
#
# # create a file handler
# handler = logging.FileHandler('hello.log')
# # handler.setLevel(logging.INFO)
#
# # create a logging format
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# handler.setFormatter(formatter)
#
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.INFO)
# logger.addHandler(handler)
#
# logger.info('aaa')
# logger.debug('hehe')
from osresmorning import mylog
mylog.set_config({
'level':'DEBUG'
})
logger = mylog.get_log(__name__)
logger.debug('debug')
logger.info('info')
|
import Queue
def update_queue(current_time, tasks, queue):
# This ugly implementation is to solve the problem raised below
new_queue = Queue.Queue()
while tasks and tasks[0].arrive_time <= current_time:
new_queue.put(tasks[0])
tasks = tasks[1:]
while not queue.empty():
new_queue.put(queue.get())
return tasks, new_queue
def RR_scheduling(tasks, time_quantum):
result_schedule = []
process_list_len = len(tasks)
queue = Queue.Queue()
current_time = 0
waiting_time = 0
current_process = -1
previous_process = -1
for process in tasks:
waiting_time -= process.burst_time + process.arrive_time
while not queue.empty() or tasks:
tasks, queue = update_queue(current_time, tasks, queue)
if queue.empty():
current_time = tasks[0].arrive_time
continue
current_process = queue.get()
if previous_process != current_process:
result_schedule.append((current_time, current_process.id))
if current_process.burst_time > time_quantum:
current_process.burst_time -= time_quantum
current_time += time_quantum
# There is a problem here:
# If a new process arrives,
# it should have a priority to all existing processes or not?
# In this version, the answer is 'yes'.
tasks, queue = update_queue(current_time, tasks, queue)
queue.put(current_process)
else:
current_time += current_process.burst_time
waiting_time += current_time
previous_process = current_process
average_waiting_time = waiting_time/float(process_list_len)
return result_schedule, average_waiting_time
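# --- Usage sketch (not part of the original module) ---
# RR_scheduling only needs task objects with a mutable `burst_time` plus
# `arrive_time` and `id` attributes; the Task class below is a minimal stand-in
# for whatever record the caller actually uses (an assumption, not the real type).
class Task(object):
    def __init__(self, id, arrive_time, burst_time):
        self.id = id
        self.arrive_time = arrive_time
        self.burst_time = burst_time

if __name__ == '__main__':
    # tasks must be pre-sorted by arrive_time, as update_queue() only inspects tasks[0]
    demo_tasks = [Task(1, 0, 5), Task(2, 1, 3), Task(3, 2, 1)]
    schedule, avg_wait = RR_scheduling(demo_tasks, time_quantum=2)
    print(schedule)   # list of (start_time, task_id) pairs, one per dispatch
    print(avg_wait)   # average waiting time across the three tasks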
|
#!/usr/bin/python3
from dns import reversename, resolver
import sys
import argparse
import csv
import socket
import xlrd
from xlutils.copy import copy
file_name_input = ""
file_name_output = ""
rnd_nw = "10.12." # default network pattern
firstrow = 1 # default second row
dnscolumn = 1 # default second column
parser = argparse.ArgumentParser(description='This script read the first column of the given excel sheet and write back the reverse DNS to the second column')
parser.add_argument('i', metavar='inputfile', action="store", help="input excel file name")
parser.add_argument('-o', metavar='', action="store", help="output excel file name")
parser.add_argument('--nw', metavar='', action="store", help="network pattern to filter IP")
parser.add_argument('--first_row', metavar='', action="store", help="first row where we start to process the IPs", type=int)
parser.add_argument('--dns_column', metavar='', action="store", help="dns column where we put the DNS names", type=int)
if parser.parse_args().i:
file_name_input = parser.parse_args().i
if parser.parse_args().o:
file_name_output = parser.parse_args().o
else:
file_name_output = file_name_input
if parser.parse_args().nw:
rnd_nw = parser.parse_args().nw
if parser.parse_args().first_row:
firstrow = parser.parse_args().first_row
if parser.parse_args().dns_column:
dnscolumn = parser.parse_args().dns_column
def reversedns(cella):
revdns = reversename.from_address(cella)
try:
return str(resolver.query(revdns,"PTR")[0])
except resolver.NXDOMAIN as e:
return "NXDOMAIN"
def validateip(ipaddr):
try:
socket.inet_aton(ipaddr)
except socket.error:
return False
return True
xbook1 = xlrd.open_workbook(file_name_input)
xsheet1 = xbook1.sheet_by_index(0)
nrows = xsheet1.nrows
xbook2 = copy(xbook1)
xsheet2 = xbook2.get_sheet(0)
dname = "reversednsfailed"
for row_idx in range(firstrow, nrows):
celly = xsheet1.cell_value(row_idx, 0)
if validateip(celly):
if celly[:len(rnd_nw)] == rnd_nw:
dname = reversedns(celly)
xsheet2.write(row_idx, dnscolumn, dname)
xbook2.save(file_name_output)
|
Python 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 19:29:22) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> # exercise 3
>>>
>>> n=1000
>>> x=0
>>> f=1
>>> for i in range(2,n):
f=f*i
x=x+(1/f)
e=2+x
>>> e
2.718281828459045
>>>
>>> # The n = 0 and 1 terms are taken directly as 2; the code computes the terms from 2 onward.
|
import os
import time
import cv2
import albumentations as A
from albumentations.pytorch import ToTensorV2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.cuda import amp
import torchvision.transforms as T
from model import SegmentationModel, save_model
from util import time_to_string, prepare_image, get_data, pil_loader, np_loader, add_weight_decay
class FolderDataset(Dataset):
def __init__(self, data, labels, transforms=None, load_in_ram=False):
self.transforms = T.Compose([T.ToTensor()])
if transforms:
self.transforms = transforms
self.data = data
self.labels = labels
self.load_in_ram = load_in_ram
self.length = len(self.data)
def __len__(self):
return self.length
def __getitem__(self, idx):
if self.load_in_ram:
img = self.data[idx]
else:
img = np_loader(self.data[idx])
img = self.transforms(image=img)["image"] / 255.0
label = self.labels[idx]
mask = torch.empty(img.shape[1], img.shape[2], dtype=torch.long).fill_(label)
target = torch.tensor((label), dtype=torch.long)
return img, mask, target
if __name__ == "__main__":
MANUAL_SEED = 42
np.random.seed(MANUAL_SEED)
torch.manual_seed(MANUAL_SEED)
torch.cuda.manual_seed(MANUAL_SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
save_epochs = True # Write a single copy that gets updated every epoch, like a checkpoint that gets overwritten each epoch
graph_metrics = True
view_results = True
checkpoint_epoch = 20 # epoch save interval
use_amp = True
# Model Config
in_channels = 3
filters = 16
activation = "silu" # relu, leaky_relu, silu, mish
# Training Hyperparameters
input_size = 128
num_epochs = 100
# Dataloader parameters
batch_size = 256
shuffle = True
num_workers = 6
drop_last = False
pin_memory = True
# Optimization
optim_type = 'adamw' # sgd 1e-5, adam 4e-4, adamw 4e-4
base_lr = 4e-4
momentum = 0.98
nesterov = True
weight_decay = 1e-5 # 0, 1e-5, 3e-5, *1e-4, 3e-4, *5e-4, 3e-4, 1e-3, 1e-2
scheduler_type = 'plateau' # step, plateau, exp
lr_milestones = [150, 180] # for step
lr_gamma = 0.8
plateau_patience = 20
use_classifer_grad = True # Uses the classifer gradients to update the encoder
class_multi = 100.0
cutmix_beta = 0.5
# Dataset parameters
data_root = "data/memes256"
validation_split = 0.06 # percent used for validation as a decimal
load_in_ram = False # can speed up small datasets <2000 images, num_workers=0
set_mean = [0.527, 0.487, 0.459]
set_std = [0.247, 0.236, 0.245]
train_transforms = A.Compose([
# Resizing
A.RandomResizedCrop(input_size, input_size, scale=(0.2, 1.0), ratio=(3./4., 4./3.), interpolation=cv2.INTER_LINEAR),
# Spatial transforms
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.1),
A.Rotate(limit=20, p=0.5),
# A.RandomRotate90(p=0.25),
# A.OneOf([
# # A.GridDistortion(num_steps=5, distort_limit=0.03, always_apply=True),
# A.IAAPerspective(scale=(0.05, 0.1), keep_size=True, always_apply=True),
# A.IAAAffine(shear=(-15, 15), always_apply=True),
# ], p=1.0),
# Color transforms
A.OneOf([
A.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1, always_apply=True),
# A.Posterize(num_bits=[4,6], always_apply=True),
A.RGBShift(r_shift_limit=25, g_shift_limit=25, b_shift_limit=25, always_apply=True),
A.RandomGamma(gamma_limit=(50, 150), always_apply=True),
], p=0.5),
# Blurring and sharpening
A.OneOf([
A.IAASharpen(alpha=(0.2, 0.5), lightness=(0.5, 1.0), always_apply=True),
A.GaussianBlur(blur_limit=(1, 5), always_apply=True),
# A.GlassBlur(sigma=0.7, max_delta=4, iterations=1, always_apply=True, mode='fast'),
A.ISONoise(color_shift=(0.01, 0.05), intensity=(0.1, 0.5), always_apply=True),
# A.MedianBlur(blur_limit=5, always_apply=True),
# A.MotionBlur(blur_limit=3, always_apply=True),
A.RandomShadow(shadow_roi=(0, 0, 1, 1), num_shadows_lower=1, num_shadows_upper=2, shadow_dimension=3, always_apply=True),
A.GaussNoise(var_limit=5.0, mean=0, p=1.0),
], p=1.0),
A.ToGray(p=0.25),
ToTensorV2(),
])
val_transforms = A.Compose([
A.SmallestMaxSize(input_size),
A.CenterCrop(input_size, input_size),
ToTensorV2(),
])
""" Prepare the data """
# Load the images from the data_root folder
print(" * Loading data from {}...".format(data_root))
t0 = time.time()
path_list, label_list, num_to_cat = get_data(data_root)
num_cat = len(num_to_cat)
print("Number of categories :", num_cat)
print("Categories :", num_to_cat)
print(f"{len(path_list)} Images")
print(" * Creating datasets...")
print("Spliting with {:.0f}% for validation.".format(100*validation_split))
x_train, x_val, y_train, y_val = train_test_split(path_list, label_list, test_size=validation_split, random_state=MANUAL_SEED , stratify=label_list)
if load_in_ram:
x_train = [pil_loader(path) for path in x_train]
x_val = [pil_loader(path) for path in x_val]
train_dataset = FolderDataset(data=x_train, labels=y_train, transforms=train_transforms, load_in_ram=load_in_ram)
print("Train length :", len(train_dataset))
val_dataset = FolderDataset(data=x_val, labels=y_val, transforms=val_transforms, load_in_ram=load_in_ram)
print("Validation length :", len(val_dataset))
duration = time.time() - t0
rate = 'a lot of' if duration==0 else "{:.1f}".format(len(path_list)/duration)
print("Found {} images in {}. {} images per second.".format(len(path_list), time_to_string(duration), rate))
print(" * Creating dataloaders...")
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size ,shuffle=shuffle,
num_workers=num_workers, drop_last=drop_last, persistent_workers=(True if num_workers > 0 else False),
pin_memory=pin_memory)
val_loader = DataLoader(dataset=val_dataset, batch_size=batch_size ,shuffle=False,
num_workers=num_workers, drop_last=drop_last, persistent_workers=(True if num_workers > 0 else False),
pin_memory=pin_memory)
""" Setup the model, optimizer, and scheduler """
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device :', device)
print(" * Creating model...")
model = SegmentationModel(in_channels, num_cat, filters, activation, set_mean, set_std,
num_to_cat=num_to_cat, input_size=input_size).to(device)
if weight_decay != 0:
params = add_weight_decay(model, weight_decay)
else:
params = model.parameters()
# Setup optimizer
print(" * Creating optimizer...")
if optim_type == 'sgd':
optimizer = optim.SGD(params, lr=base_lr, momentum=momentum, nesterov=nesterov, weight_decay=weight_decay)
elif optim_type == 'adam':
optimizer = optim.Adam(params, lr=base_lr)
elif optim_type == 'adamw':
optimizer = optim.AdamW(params, lr=base_lr, weight_decay=weight_decay)
# Setup scheduler
print(" * Creating scheduler...")
if scheduler_type == 'step':
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=lr_milestones, gamma=lr_gamma)
elif scheduler_type == 'plateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=lr_gamma, patience=plateau_patience, verbose=True)
elif scheduler_type == 'exp':
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_gamma)
else:
scheduler = None
# Setup run data
base_save = f"runs/save"
save_path = lambda r, t : base_save + '/run{:05d}_{}.pth'.format(r, t)
if not os.path.exists(base_save):
os.makedirs(base_save)
current_run = 0
with open('runs/LASTRUN.txt') as f:
current_run = int(f.read()) + 1
with open('runs/LASTRUN.txt', 'w') as f:
f.write("%s\n" % current_run)
print("current run :", current_run)
scalar = amp.GradScaler()
""" Training """
print(" * Start training...")
criterion = nn.CrossEntropyLoss()
run_stats = []
t1 = time.time()
iterations = 0
model.train()
try:
for epoch in range(num_epochs):
t2 = time.time()
model.train()  # put the model back in training mode (validation below switches to eval)
train_seg_loss_total = 0.0
train_cla_loss_total = 0.0
train_seg_correct_total = 0.0
train_seg_seen_total = 0.0
train_cla_correct_total = 0.0
train_cla_seen_total = 0.0
for batch_idx, (data, true_masks, true_labels) in enumerate(train_loader):
data = data.to(device)
true_masks = true_masks.to(device)
true_labels = true_labels.to(device)
optimizer.zero_grad()
if cutmix_beta > 0:
cutmix_beta = 1.0
lam = np.random.beta(cutmix_beta, cutmix_beta)
rand_index = torch.randperm(data.size()[0]).to(data.device)
target_a = true_labels
target_b = true_labels[rand_index]
# Now the bboxes for the input and mask
_, _, w, h = data.size()
cut_rat = np.sqrt(1. - lam)
# Box size
cut_w, cut_h = int(w*cut_rat), int(h*cut_rat)
# Box center
cx, cy = np.random.randint(w), np.random.randint(h)
bbx1 = np.clip(cx - cut_w // 2, 0, w)
bbx2 = np.clip(cx + cut_w // 2, 0, w)
bby1 = np.clip(cy - cut_h // 2, 0, h)
bby2 = np.clip(cy + cut_h // 2, 0, h)
data[:, :, bbx1:bbx2, bby1:bby2] = data[rand_index, :, bbx1:bbx2, bby1:bby2]
true_masks[:, bbx1:bbx2, bby1:bby2] = true_masks[rand_index, bbx1:bbx2, bby1:bby2]
# Adjust the classification loss based on pixel area ratio
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w*h))
# The data and labels are now cutmixed
# if use_amp:
with amp.autocast(enabled=use_amp):
logits, encoding = model(data)
seg_loss = criterion(logits, true_masks)
encoding = encoding if use_classifer_grad else encoding.detach()
class_logits = model.classifier(encoding)
if cutmix_beta > 0:
class_loss = criterion(class_logits, target_a)*lam + criterion(class_logits, target_b)*(1.0-lam)
else:
class_loss = criterion(class_logits, true_labels)
loss = seg_loss+class_multi*class_loss
# else:
# logits, encoding = model(data)
# seg_loss = criterion(logits, true_masks)
# encoding = encoding if use_classifer_grad else encoding.detach()
# class_logits = model.classifier(encoding)
# if cutmix_beta > 0:
# class_loss = criterion(class_logits, target_a)*lam + criterion(class_logits, target_b)*(1.0-lam)
# else:
# class_loss = criterion(class_logits, true_labels)
# loss = seg_loss+class_multi*class_loss
if use_amp:
with amp.autocast():
scalar.scale(loss).backward()
scalar.step(optimizer)
scalar.update()
else:
loss.backward()
optimizer.step()
iterations += 1
# Update running metrics
train_seg_loss_total += seg_loss
train_cla_loss_total += class_loss
_, tags = torch.max(logits, dim=1)
train_seg_correct_total += (tags == true_masks).sum()
train_seg_seen_total += true_masks.numel()
_, tags = torch.max(class_logits, dim=1)
train_cla_correct_total += (tags == true_labels).sum()
train_cla_seen_total += true_labels.numel()
model.eval()
val_seg_loss_total = 0.0
val_cla_loss_total = 0.0
val_seg_correct_total = 0.0
val_seg_seen_total = 0.0
val_cla_correct_total = 0.0
val_cla_seen_total = 0.0
with torch.set_grad_enabled(False):
for batch_idx, (data, true_masks, true_labels) in enumerate(val_loader):
data = data.to(device)
true_masks = true_masks.to(device)
true_labels = true_labels.to(device)
logits, encoding = model(data)
class_logits = model.classifier(encoding)
loss = criterion(logits, true_masks)
class_loss = criterion(class_logits, true_labels)
# Update running metrics
val_seg_loss_total += loss
val_cla_loss_total += class_loss
_, tags = torch.max(logits, dim=1)
val_seg_correct_total += (tags == true_masks).sum()
val_seg_seen_total += true_masks.numel()
_, tags = torch.max(class_logits, dim=1)
val_cla_correct_total += (tags == true_labels).sum()
val_cla_seen_total += true_labels.numel()
# EPOCH TRAIN AND VALIDATE
train_loss = (train_seg_loss_total+train_cla_loss_total)/len(train_loader)
train_seg_loss = train_seg_loss_total/len(train_loader)
train_cla_loss = train_cla_loss_total/len(train_loader)
train_seg_acc = 100*train_seg_correct_total/train_seg_seen_total
train_cla_acc = 100*train_cla_correct_total/train_cla_seen_total
val_loss = (val_seg_loss_total+val_cla_loss_total)/len(val_loader)
val_seg_loss = val_seg_loss_total/len(val_loader)
val_cla_loss = val_cla_loss_total/len(val_loader)
val_seg_acc = 100*val_seg_correct_total/val_seg_seen_total
val_cla_acc = 100*val_cla_correct_total/val_cla_seen_total
epoch_metrics = {
"epoch": epoch+1,
"iterations": iterations,
"elapsed_time": time.time()-t1,
"lr": optimizer.param_groups[0]['lr'],
"train_loss": train_loss.item(),
"train_seg_loss": train_seg_loss.item(),
"train_cla_loss": train_cla_loss.item(),
"train_seg_acc": train_seg_acc.item(),
"train_cla_acc": train_cla_acc.item(),
"val_loss": val_loss.item(),
"val_seg_loss": val_seg_loss.item(),
"val_cla_loss": val_cla_loss.item(),
"val_seg_acc": val_seg_acc.item(),
"val_cla_acc": val_cla_acc.item(),
}
run_stats.append(epoch_metrics)
duration = time.time()-t2
remaining = duration*(num_epochs-epoch-1)
print("epoch {}. {}. loss={:.3f}/{:.3f}. s={:.1f}/{:.1f}. c={:.1f}/{:.1f}. lr={:2e}. elapsed={}. remaining={}.".format(epoch+1, time_to_string(duration), train_loss, val_loss, train_seg_acc, val_seg_acc, train_cla_acc, val_cla_acc, optimizer.param_groups[0]['lr'], time_to_string(time.time()-t1), time_to_string(remaining)))
if scheduler:
if type(scheduler) == optim.lr_scheduler.ReduceLROnPlateau:
scheduler.step(train_loss)
elif type(scheduler) in [optim.lr_scheduler.MultiStepLR, optim.lr_scheduler.ExponentialLR]:
scheduler.step()
if save_epochs:
save_model(model, save_path(current_run, "latest"))
if (epoch+1) % checkpoint_epoch == 0:
save_model(model, save_path(current_run, f"check{epoch+1}"))
except KeyboardInterrupt:
print("KeyboardInterrupt Exit")
pass
print('Finished Training. Duration={}. {} iterations'.format(time_to_string(time.time()-t1), iterations))
""" Training Stage 1 - Classification """
save_model(model, save_path(current_run, "final"))
print("saving to: {}".format(save_path(current_run, "final")))
# Graph Metrics
line_colors = [
'#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'
]
fig, ax = plt.subplots(2, 2, figsize=(10,8)) # w, h
font_size = 14
train_steps = len(run_stats)
# loss, top 0
ax[0][0].set_xlabel('epochs', fontsize=font_size)
ax[0][0].set_ylabel('loss', fontsize=font_size)
ax[0][0].set_yscale('log')
ax[0][0].tick_params(axis='y')
train_loss_list = ["train_seg_loss", "train_cla_loss"]
val_loss_list = ["val_seg_loss", "val_cla_loss"]
for i in range(len(train_loss_list)):
ax[0][0].plot(range(1, train_steps+1), [x[train_loss_list[i]] for x in run_stats], color=line_colors[i], label=train_loss_list[i])
ax[0][0].plot(range(1, train_steps+1), [x[val_loss_list[i]] for x in run_stats], color=line_colors[i], label=val_loss_list[i], linestyle='dashed')
ax[0][0].legend()
# acc, top 1
ax[0][1].set_xlabel('epochs', fontsize=font_size)
ax[0][1].set_ylabel('accuracy', fontsize=font_size)
# ax[0][1].set_yscale('log')
ax[0][1].tick_params(axis='y')
train_acc_list = ["train_seg_acc", "train_cla_acc"]
val_acc_list = ["val_seg_acc", "val_cla_acc"]
for i in range(len(train_acc_list)):
ax[0][1].plot(range(1, train_steps+1), [x[train_acc_list[i]] for x in run_stats], color=line_colors[i], label=train_acc_list[i])
ax[0][1].plot(range(1, train_steps+1), [x[val_acc_list[i]] for x in run_stats], color=line_colors[i], label=val_acc_list[i], linestyle='dashed')
ax[0][1].legend()
# lr, bot 0
ax[1][0].set_xlabel('epochs', fontsize=font_size)
ax[1][0].set_ylabel('lr', fontsize=font_size)
ax[1][0].set_yscale('log')
ax[1][0].tick_params(axis='y')
ax[1][0].plot(range(1, train_steps+1), [x["lr"] for x in run_stats], color=line_colors[0], label="lr")
# coef, bot 1
ax[1][1].set_xlabel('epochs', fontsize=font_size)
ax[1][1].set_ylabel('loss', fontsize=font_size)
ax[1][1].tick_params(axis='y')
ax[1][1].plot(range(1, train_steps+1), [x["train_loss"] for x in run_stats], color=line_colors[0], label="train_loss")
ax[1][1].plot(range(1, train_steps+1), [x["val_loss"] for x in run_stats], color=line_colors[0], label="val_loss", linestyle='dashed')
ax[1][1].legend()
fig.tight_layout() # otherwise the right y-label is slightly clipped
fig.savefig("{}/run{:05d}_metrics.png".format(base_save, current_run), bbox_inches='tight')
if graph_metrics:
plt.show()
# View validation predictions
rows = 4
scale = 2.2
fig, ax = plt.subplots(rows, 1+model.out_channels, figsize=(scale*(1+model.out_channels), scale*rows))
idxs = np.random.choice(range(len(x_val)), rows, replace=False)
for i in range(rows):
img = pil_loader(x_val[idxs[i]])
width, height = img.size[:2]
scale = input_size/width
img = img.resize((int(width*scale), int(height*scale)))
img = prepare_image(img, model.input_size, square=True)
y = y_val[idxs[i]]
ymask, yclass = model.predict(img.to(device))
yprob, yhat = torch.max(yclass, dim=1)
ax[i][0].imshow(img.squeeze().numpy().transpose((1,2,0)))
color = "g" if int(y)==int(yhat) else "r"
title_str = "{} ({:.02f} % {})".format(num_to_cat[int(y)], float(100*yprob), num_to_cat[int(yhat)])
ax[i][0].set_title(title_str, color=color)
for j in range(model.out_channels):
ax[i][1+j].imshow(ymask.detach().cpu().numpy().squeeze().transpose((1,2,0))[:,:,j])
ax[i][1+j].set_title("{}".format(num_to_cat[int(j)]))
fig.tight_layout()
fig.savefig("{}/run{:05d}_samples.png".format(base_save, current_run), bbox_inches='tight')
if view_results:
plt.show()
|
import torch
import torch.nn as nn
import torchvision.models as models
class MyModelA(nn.Module):
def __init__(self):
super(MyModelA, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 6, 5, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(6, 16, 5, 1),
nn.ReLU(),
nn.MaxPool2d(2))
self.fc = nn.Linear(16*4*4, 10)
def forward(self, x):
x = self.features(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
class MyModelB(nn.Module):
def __init__(self):
super(MyModelB, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 8, 5, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(8, 12, 5, 1),
nn.ReLU(),
nn.MaxPool2d(2))
self.fc = nn.Sequential(
nn.Linear(12*4*4, 50),
nn.ReLU(),
nn.Linear(50, 10))
def forward(self, x):
x = self.features(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
class MyDeepModel(nn.Module):
def __init__(self):
super(MyDeepModel, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 5, 3, 1),
nn.ReLU(),
nn.Conv2d(5, 8, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(8, 12, 3, 1),
nn.ReLU(),
nn.Conv2d(12, 16, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2))
self.fc = nn.Sequential(
nn.Linear(16*4*4, 50),
nn.ReLU(),
nn.Linear(50, 10))
def forward(self, x):
x = self.features(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
class GrayResnet18(models.resnet.ResNet):
def __init__(self):
super(GrayResnet18, self).__init__(models.resnet.BasicBlock, [2, 2, 2, 2])
self.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
self.fc = nn.Linear(in_features=512, out_features=10, bias=True)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
|
# Generated by Django 3.0.7 on 2020-08-20 22:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('qualification', '0003_staffdocument_docname'),
]
operations = [
migrations.AddField(
model_name='maindocument',
name='documents',
field=models.IntegerField(default=0),
),
]
|
import asyncio
from azure.eventhub.aio import EventHubProducerClient
from azure.eventhub import EventData
async def run():
# Connection string for namespace and name for event hub
VAR_CONN_STR = "Connection string for namespace"
VAR_EVENTHUB_NAME = "Name for event hub"
# Create a producer client to send messages to the event hub.
# Specify a connection string to your event hubs namespace and
# the event hub name.
producer = EventHubProducerClient.from_connection_string(conn_str=VAR_CONN_STR, eventhub_name=VAR_EVENTHUB_NAME)
async with producer:
# Create a batch.
event_data_batch = await producer.create_batch()
# Add events to the batch.
event_data_batch.add(EventData('First event '))
event_data_batch.add(EventData('Second event'))
event_data_batch.add(EventData('Third event'))
# Send the batch of events to the event hub.
await producer.send_batch(event_data_batch)
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
|
# -*- coding: utf-8 -*-
"""
We are going to use a simple form with an edit action to edit a comment.
Monkeypatch i18n
>>> import zope.i18n
>>> import zope.i18n.config
>>> old_1, old_2 = zope.i18n.negotiate, zope.i18n.config.ALLOWED_LANGUAGES
>>> zope.i18n.negotiate = lambda context: 'en'
>>> zope.i18n.config.ALLOWED_LANGUAGES = ['en']
Let's grok our example:
>>> from zeam.form.ztk.testing import grok
>>> grok('zeam.form.ztk.ftests.forms.editform_fixture')
Let's add a comment and try to edit it with our form:
>>> from zeam.form.ztk.ftests.forms.editform_fixture import Comment
>>> root = getRootFolder()
>>> root['comment'] = Comment('zeam.form', 'Is great')
>>> root['comment'].title
'zeam.form'
>>> root['comment'].comment
'Is great'
>>> root['comment'].name
''
>>> from zope.testbrowser.wsgi import Browser
>>> browser = Browser()
>>> browser.handleErrors = False
Now access the edit form:
>>> browser.open('http://localhost/comment/edit')
>>> browser.addHeader('Authorization', 'Basic mgr:mgrpw')
>>> 'Modify your comment' in browser.contents
True
>>> title_field = browser.getControl('Title')
>>> title_field
<Control name='form.field.title' type='text'>
>>> title_field.value
'zeam.form'
>>> comment_field = browser.getControl('Comment')
>>> comment_field
<Control name='form.field.comment' type='textarea'>
>>> comment_field.value
'Is great'
>>> name_field = browser.getControl('Name')
>>> name_field
<Control name='form.field.name' type='text'>
>>> name_field.value
''
We can now edit the content, and so it gets modified:
>>> title_field.value = 'zeam.form.ztk'
>>> comment_field.value = 'Is far cooler than not ztk'
>>> name_field.value = 'Arthur de la contee d`or'
>>> change_button = browser.getControl('Change')
>>> change_button
<SubmitControl name='form.action.change' type='submit'>
>>> change_button.click()
>>> 'Modification saved' in browser.contents
True
Modifications are saved, we have the new value if we reload the page:
>>> browser.open('http://localhost/comment/edit')
>>> title_field = browser.getControl('Title')
>>> title_field.value
'zeam.form.ztk'
>>> comment_field = browser.getControl('Comment')
>>> comment_field.value
'Is far cooler than not ztk'
>>> name_field = browser.getControl('Name')
>>> name_field.value
'Arthur de la contee d`or'
We can remove name, it will work as it is not required:
>>> name_field.value = ''
>>> change_button = browser.getControl('Change')
>>> change_button.click()
>>> 'Modification saved' in browser.contents
True
And name is gone:
>>> browser.open('http://localhost/comment/edit')
>>> title_field = browser.getControl('Title')
>>> title_field.value
'zeam.form.ztk'
>>> comment_field = browser.getControl('Comment')
>>> comment_field.value
'Is far cooler than not ztk'
>>> name_field = browser.getControl('Name')
>>> name_field.value
''
However comment is required:
>>> comment_field.value = ''
>>> change_button = browser.getControl('Change')
>>> change_button.click()
>>> 'Modification saved' in browser.contents
False
>>> 'There were errors' in browser.contents
True
So no changes happened:
>>> browser.open('http://localhost/comment/edit')
>>> title_field = browser.getControl('Title')
>>> title_field.value
'zeam.form.ztk'
>>> comment_field = browser.getControl('Comment')
>>> comment_field.value
'Is far cooler than not ztk'
>>> name_field = browser.getControl('Name')
>>> name_field.value
''
"""
|
import PIL.Image
import math
class MapHelper(object):
@staticmethod
def new_image(width, height, alpha=False):
"""
Generates a new image using PIL.Image module
returns PIL.IMAGE OBJECT
"""
if alpha is True:
return PIL.Image.new('RGBA', (width, height), (0, 0, 0, 0))
else:
return PIL.Image.new('RGBA', (width, height))
@staticmethod
def fast_round(value, precision):
"""
Round values by truncation instead of using Python's built-in round()
returns FLOAT
"""
return int(value * 10 ** precision) / 10. ** precision
@staticmethod
def pixels_to_degrees(pixels, zoom):
"""
Generates pixels to be expected at zoom levels
returns INT
"""
return pixels * 2 ** (21-zoom)
@staticmethod
def pixels_to_meters(latitude, zoom):
"""
Compute how many pixels per meter the projection
yields at the given latitude and zoom
returns FLOAT
"""
# https://groups.google.com/forum/#!topic/google-maps-js-api-v3/hDRO4oHVSeM
return 2 ** zoom / (156543.03392 * math.cos(math.radians(latitude)))
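# --- Usage sketch (added for illustration; numbers in the comments are approximate) ---
if __name__ == '__main__':
    print(MapHelper.fast_round(3.14159, 2))     # 3.14 (truncating round)
    print(MapHelper.pixels_to_degrees(10, 19))  # 40, i.e. 10 px scaled by 2**(21 - 19)
    print(MapHelper.pixels_to_meters(0.0, 21))  # ~13.4 pixels per meter at the equator, zoom 21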
|
class Employee:
num_of_emps=0
raise_amt=1.04
def __init__(self,first,last,pay):
self.first=first
self.last=last
self.email=first+'.'+last+'@gmail.com'
self.pay=pay
Employee.num_of_emps += 1
def apply_raise(self):
self.pay=self.pay*self.raise_amt
return self.pay
@classmethod
def set_raise_amt(cls,amount):
cls.raise_amt=amount
@classmethod
def from_string(cls, emp_str):
first,last,pay = emp_str.split('-')
return cls(first,last,pay)
@staticmethod
def is_workday(day):
if day.weekday()==5 or day.weekday()==6:
print('Holiday')
else:
print('workday')
emp_1=Employee('Corey','Schafer',50000)
emp_2=Employee('Tim','Gordon',60000)
print(emp_1.email)
print(emp_2.email)
print(emp_1.apply_raise())
print(Employee.num_of_emps)
emp_str='John-Doe-70000'
emp_3=Employee.from_string(emp_str)
Employee.set_raise_amt(1.05)
print(Employee.raise_amt)
print(emp_3.email)
print(Employee.num_of_emps)
import datetime
mydate=datetime.date(2021,2,22)
Employee.is_workday(mydate)
|
paisA = 80000
paisB = 200000
count = 0
while paisA < paisB:
A = paisA * 0.03
B = paisB * 0.015
paisA = paisA + A
paisB = paisB + B
count = count + 1
print(f'O país A ultrapassa o país B em {count} anos')
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from webservice.redirect import RedirectHandler
from webservice.redirect import RemoteCollectionMatcher
import tornado
class RedirectAppBuilder:
def __init__(self, remote_collection_matcher: RemoteCollectionMatcher):
redirected_collections = remote_collection_matcher.get_remote_collections()
self.redirect_handler = (r'/(.*)', RedirectHandler, {'redirected_collections': redirected_collections})
def build(self, host=None, debug=False):
return tornado.web.Application(
[self.redirect_handler],
default_host=host,
debug=debug
)
|
import requests
IPINFO_URL = 'http://ipinfo.io/{ip}/json'
def get_ip_country(ip_address):
"""Receives ip address string, use IPINFO_URL to get geo data,
parse the json response returning the country code of the IP"""
return requests.get(IPINFO_URL.format(ip = ip_address)).json()['country']
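# Usage sketch (performs a live HTTP request to ipinfo.io; the IP below is illustrative):
# >>> get_ip_country('8.8.8.8')
# 'US'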
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from pants.backend.docker.target_types import DockerImageInstructionsField, DockerImageSourceField
from pants.engine.fs import CreateDigest, FileContent, Snapshot
from pants.engine.rules import Get, collect_rules, rule
from pants.engine.target import GeneratedSources, GenerateSourcesRequest, InvalidFieldException
from pants.engine.unions import UnionRule
class GenerateDockerfileRequest(GenerateSourcesRequest):
# This will always run codegen when hydrating `docker_image`s, performing source validations but
# does not generate anything if there are no `instructions` defined on the target.
input = DockerImageSourceField
output = DockerImageSourceField
@rule
async def hydrate_dockerfile(request: GenerateDockerfileRequest) -> GeneratedSources:
target = request.protocol_target
address = target.address
instructions = target[DockerImageInstructionsField].value
if instructions and request.protocol_sources.files:
raise InvalidFieldException(
f"The `{target.alias}` {address} provides both a Dockerfile with the `source` field, "
"and Dockerfile contents with the `instructions` field, which is not supported.\n\n"
"To fix, please either set `source=None` or `instructions=None`."
)
if not (instructions or request.protocol_sources.files):
raise InvalidFieldException(
f"The `{target.alias}` {address} does not specify any Dockerfile.\n\n"
"Provide either the filename to a Dockerfile in your workspace as the `source` field "
"value, or the Dockerfile content to the `instructions` field."
)
def dockerfile_path():
name_parts = ["Dockerfile", address.target_name, address.generated_name]
return os.path.join(address.spec_path, ".".join(filter(bool, name_parts)))
output = (
await Get(
Snapshot,
CreateDigest(
(FileContent(dockerfile_path(), "\n".join([*instructions, ""]).encode()),)
),
)
if instructions
else request.protocol_sources
)
return GeneratedSources(output)
def rules():
return (
*collect_rules(),
UnionRule(GenerateSourcesRequest, GenerateDockerfileRequest),
)
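# Illustrative BUILD-file usage (hypothetical targets, reflecting the two modes the rule
# above accepts -- either a Dockerfile in the workspace or inline instructions):
#
#   docker_image(name="img_from_file", source="Dockerfile")
#   docker_image(name="img_inline", source=None, instructions=["FROM python:3.9", "RUN echo hello"])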
|
import sys
import os
f = open("C:/Users/user/Documents/python/atcoder/ABC118/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
n = int(input())
s = list(input())
x = 0
max_x = 0
for i in range(len(s)):
if s[i] == "I":
x += 1
elif s[i] == "D":
x -= 1
max_x = max(max_x,x)
print(max_x)
|
from script.base_api.service_descartes.lessons import *
from script.base_api.service_descartes.examine import *
|
from django.db import models
class Var(models.Model):
"""Persistent app variables."""
name = models.CharField(max_length=255, primary_key=True)
value = models.CharField(max_length=255)
|
from django.shortcuts import render
from django.utils import timezone
from .models import Post
def base(request):
return render(request, 'blog/front.html', {})
def blog_list(request):
posts = Post.objects.get(title="self_Improvement")
return render(request, 'blog/blog_list.html', {'posts': posts})
def blog_list1(request):
posts = Post.objects.get(title="way_To_Computer_Science")
return render(request, 'blog/blog_list1.html', {'posts': posts})
def about(request):
return render(request, 'blog/about.html', {})
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-29 11:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mastermind', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='game',
name='mode',
field=models.CharField(choices=[('initial', 'Ny'), ('open', 'Åben'), ('closed', 'Lukket')], default='initial', max_length=20),
),
]
|
__author__ = "Narwhale"
import string
s = string.ascii_lowercase
def alphabet_position(text):
string_l = []
for i in text:
if i.lower() in s:
result = s.index(i.lower()) + 1
string_l.append(str(result))
date_str = " ".join(string_l)
return date_str
print(alphabet_position("The sunset sets at twelve o' clock."))
|
def rotateArray(array, n, d):
for i in range(0, d):
x = array[0]
for j in range(0, n-1):
array[j] = array[j+1]
array[n-1]= x
def printarray(array):
for i in range(0, n):
print(array[i])
array= [12,10,5,6,52,36]
n= len(array)
rotateArray(array, n, 2)
printarray(array)
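# Expected output, one element per line: 5 6 52 36 12 10
# (rotateArray performs an in-place left rotation by d positions)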
|
'''
fib series: 0 1 1 2 3 5 8 13 21 34 55 89
Ex: 30
21+8+1
Ex: 10
8+2
algo approach:
Ex:30
21
30-21=9
8
9-8=1
1
'''
'''
def nearfib(n): #30,9
a,b=0,1
while n>=b:
if n==b:
return b
a,b=b,a+b
print(a) #21,8,1
return nearfib(n-a) #9,1
n=int(input()) #10
print(nearfib(n))
'''
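# Runnable sketch of the greedy decomposition described above (mirrors the
# commented-out nearfib(), but collects the chosen Fibonacci terms in a list):
def greedy_fib_terms(n):
    """Greedy Fibonacci decomposition, e.g. 30 -> [21, 8, 1] and 10 -> [8, 2]."""
    terms = []
    while n > 0:
        a, b = 0, 1
        while b <= n:   # advance to the largest Fibonacci number not exceeding n
            a, b = b, a + b
        terms.append(a)
        n -= a
    return terms

print(greedy_fib_terms(30))   # [21, 8, 1]
print(greedy_fib_terms(10))   # [8, 2]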
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 scmanjarrez. All rights reserved.
# This work is licensed under the terms of the MIT license.
from contextlib import closing
import sqlite3 as sql
import util as ut
DB = 'paimon.db'
def setup_db():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.executescript(
"""
CREATE TABLE IF NOT EXISTS users (
uid INTEGER PRIMARY KEY,
resin INTEGER DEFAULT 0,
warn INTEGER DEFAULT 150,
timezone TEXT DEFAULT 'null:null',
strikes INTEGER DEFAULT 0
);
CREATE TABLE IF NOT EXISTS banned (
uid INTEGER PRIMARY KEY
);
"""
)
def get_strikes(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT strikes FROM users '
'WHERE uid = ?',
[uid])
return cur.fetchone()[0] # (x,)
def inc_strikes(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('UPDATE users SET strikes = strikes + 1 '
'WHERE uid = ?',
[uid])
db.commit()
def dec_strikes(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('UPDATE users SET strikes = strikes - 1 '
'WHERE strikes > 0 AND uid = ?',
[uid])
db.commit()
def banned(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute(
'SELECT EXISTS ('
'SELECT 1 FROM banned WHERE uid = ?'
')',
[uid])
return cur.fetchone()[0]
def ban_user(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('INSERT INTO banned VALUES (?)',
[uid])
db.commit()
ut.blocked(uid)
def cached(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute(
'SELECT EXISTS ('
'SELECT 1 FROM users WHERE uid = ?'
')',
[uid])
return cur.fetchone()[0]
def add_user(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('INSERT INTO users (uid) VALUES (?)',
[uid])
db.commit()
def del_user(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('DELETE FROM users '
'WHERE uid = ?',
[uid])
db.commit()
def all_users():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT uid FROM users')
return cur.fetchall()
def all_users_notify():
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT uid FROM users WHERE notifications = 1')
return cur.fetchall()
def get_resin(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT resin FROM users '
'WHERE uid = ?',
[uid])
return cur.fetchone()[0]
def set_resin(uid, resin):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('UPDATE users SET resin = ? '
'WHERE uid = ?',
[resin, uid])
db.commit()
def inc_resin(uid, resin):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute(
f'UPDATE users SET resin = resin + ? '
f'WHERE resin < {ut.RESIN_MAX} '
f'AND uid = ?',
[resin, uid])
db.commit()
def dec_resin(uid, resin):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute(
'UPDATE users SET resin = resin - ? '
'WHERE resin > 0 '
'AND uid = ?',
[resin, uid])
db.commit()
def max_resin(uid, resin):
cur_resin = get_resin(uid)
hard_cap = (ut.RESIN_MAX - cur_resin) * ut.RESIN_REGEN
soft_cap = (resin - cur_resin) * ut.RESIN_REGEN
soft_cap = 0 if soft_cap < 0 else soft_cap
return (hard_cap // 60, hard_cap % 60), (soft_cap // 60, soft_cap % 60)
def unset_warn(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('UPDATE users SET warn = -1 '
'WHERE uid = ?',
[uid])
db.commit()
def get_warn(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT warn FROM users '
'WHERE uid = ?',
[uid])
return cur.fetchone()[0] # (x,)
def set_warn(uid, threshold):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('UPDATE users SET warn = ? '
'WHERE uid = ?',
[threshold, uid])
db.commit()
def unset_timezone(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('UPDATE users SET timezone = "null:null" '
'WHERE uid = ?',
[uid])
db.commit()
def get_timezone(uid):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('SELECT timezone FROM users '
'WHERE uid = ?',
[uid])
return cur.fetchone()[0] # (x,)
def set_timezone(uid, hour, minutes):
with closing(sql.connect(DB)) as db:
with closing(db.cursor()) as cur:
cur.execute('UPDATE users SET timezone = ? '
'WHERE uid = ?',
[f"{hour:02}:{minutes:02}", uid])
db.commit()
|
# -*- coding: utf-8 -*-
"""
Default model implementations. Custom database or OAuth backends need to
implement these models with fields and and methods to be compatible with the
views in :attr:`provider.views`.
"""
from __future__ import unicode_literals
import os
from django.conf import settings
from django.core.validators import RegexValidator
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from .. import constants, scope
from ..validators import validate_uris
from ..utils import (
now, short_token, long_token, get_code_expiry, get_token_expiry,
serialize_instance, deserialize_instance)
from .managers import AccessTokenManager
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class ClientStatus:
TEST = 1
LIVE = 2
DISABLED = 3
CHOICES = (
(TEST, 'TEST'),
(LIVE, 'LIVE'),
(DISABLED, 'DISABLED'),
)
class ScopeField(models.IntegerField):
initial = {}
def __init__(self, *args, **kwargs):
kwargs['choices'] = scope.SCOPE_CHOICES
super(ScopeField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
from .forms import ScopeChoiceField
defaults = {'choices_form_class': ScopeChoiceField}
defaults.update(kwargs)
return super(ScopeField, self).formfield(**defaults)
def validate(self, value, model_instance):
# all the bits in value must be present in list of all scopes
return value == (value & scope.to_int(*scope.SCOPE_NAME_DICT.values()))
def __str__(self):
return 'scope'
def client_logo_image_path(instance, filename):
filename_split = os.path.splitext(filename)
ext = filename_split[1]
if not ext:
ext = '.png'
return '/'.join([constants.LOGO_FOLDER, instance.client_id, 'icon' + ext])
@python_2_unicode_compatible
class Client(models.Model):
"""
Default client implementation.
Expected fields:
* :attr:`user`
* :attr:`name`
* :attr:`url`
* :attr:`redirect_url`
* :attr:`client_id`
* :attr:`client_secret`
* :attr:`client_type`
Clients are outlined in the :rfc:`2` and its subsections.
"""
user = models.ForeignKey(
AUTH_USER_MODEL,
related_name='oauth2_client',
blank=True, null=True)
name = models.CharField(
max_length=255,
blank=True)
url = models.URLField(
help_text=_("Your application's URL."))
redirect_uri = models.TextField(
max_length=1028,
help_text=_("Your application's callback URL"),
validators=[validate_uris])
webhook_uri = models.CharField(
max_length=1028,
help_text=_("Your application's webhook URL"),
null=True, blank=True,
validators=[RegexValidator(regex=r'^\S*//\S*$')])
logo = models.ImageField(
upload_to=client_logo_image_path,
null=True, blank=True,
storage=constants.IMAGE_STORAGE,
help_text=_("40x40 pixel logo of your application"))
status = models.PositiveSmallIntegerField(
max_length=2,
choices=ClientStatus.CHOICES,
default=1)
last_updated_date = models.DateTimeField(
auto_now=True)
created_date = models.DateTimeField(
auto_now_add=True)
client_id = models.CharField(
max_length=255,
default=short_token)
client_secret = models.CharField(
max_length=255,
default=long_token)
client_type = models.IntegerField(
choices=constants.CLIENT_TYPES,
default=constants.CONFIDENTIAL)
scope = ScopeField(default=0)
class Meta:
app_label = 'oauth2'
def __str__(self):
return self.redirect_uri
def get_default_token_expiry(self):
public = (self.client_type == 1)
return get_token_expiry(public)
def serialize(self):
return dict(user=serialize_instance(self.user) if self.user else None,
name=self.name,
url=self.url,
redirect_uri=self.redirect_uri,
client_id=self.client_id,
client_secret=self.client_secret,
client_type=self.client_type)
@classmethod
def deserialize(cls, data):
if not data:
return None
kwargs = {}
# extract values that we care about
for field in cls._meta.fields:
name = field.name
val = data.get(field.name, None)
# handle relations
if val and field.rel:
val = deserialize_instance(field.rel.to, val)
kwargs[name] = val
return cls(**kwargs)
@python_2_unicode_compatible
class Grant(models.Model):
"""
Default grant implementation. A grant is a code that can be swapped for an
access token. Grants have a limited lifetime as defined by
:attr:`provider.constants.EXPIRE_CODE_DELTA` and outlined in
:rfc:`4.1.2`
Expected fields:
* :attr:`user`
* :attr:`client` - :class:`Client`
* :attr:`code`
* :attr:`expires` - :attr:`datetime.datetime`
* :attr:`redirect_uri`
* :attr:`scope`
"""
user = models.ForeignKey(
AUTH_USER_MODEL,
blank=True, null=True)
client = models.ForeignKey(
Client)
code = models.CharField(
max_length=255,
default=long_token)
expires = models.DateTimeField(
default=get_code_expiry)
redirect_uri = models.CharField(
max_length=255,
blank=True)
scope = ScopeField(default=0)
created = models.DateTimeField(
auto_now_add=True)
modified = models.DateTimeField(
auto_now=True)
class Meta:
app_label = 'oauth2'
def __str__(self):
return self.code
@python_2_unicode_compatible
class AccessToken(models.Model):
"""
Default access token implementation. An access token is a time limited
token to access a user's resources.
Access tokens are outlined :rfc:`5`.
Expected fields:
* :attr:`user`
* :attr:`token`
* :attr:`client` - :class:`Client`
* :attr:`expires` - :attr:`datetime.datetime`
* :attr:`scope`
Expected methods:
* :meth:`get_expire_delta` - returns an integer representing seconds to
expiry
"""
user = models.ForeignKey(
AUTH_USER_MODEL,
null=True, blank=True)
token = models.CharField(
max_length=255,
default=long_token,
db_index=True)
client = models.ForeignKey(
Client)
expires = models.DateTimeField()
scope = ScopeField(
default=0)
type = models.IntegerField(
default=0)
is_deleted = models.BooleanField(
default=False)
created = models.DateTimeField(
auto_now_add=True)
modified = models.DateTimeField(
auto_now=True)
objects = AccessTokenManager()
class Meta:
app_label = 'oauth2'
def __str__(self):
return self.token
def save(self, *args, **kwargs):
if not self.expires:
self.expires = self.client.get_default_token_expiry()
return super(AccessToken, self).save(*args, **kwargs)
def get_expire_delta(self, reference=None):
"""
Return the number of seconds until this token expires.
"""
if reference is None:
reference = now()
expiration = self.expires
if timezone:
if timezone.is_aware(reference) and timezone.is_naive(expiration):
# MySQL doesn't support timezone for datetime fields
# so we assume that the date was stored in the UTC timezone
expiration = timezone.make_aware(expiration, timezone.utc)
elif timezone.is_naive(reference) and timezone.is_aware(expiration):
reference = timezone.make_aware(reference, timezone.utc)
timedelta = expiration - reference
return timedelta.days*86400 + timedelta.seconds
@python_2_unicode_compatible
class RefreshToken(models.Model):
"""
Default refresh token implementation. A refresh token can be swapped for a
new access token when said token expires.
Expected fields:
* :attr:`user`
* :attr:`token`
* :attr:`access_token` - :class:`AccessToken`
* :attr:`client` - :class:`Client`
* :attr:`expired` - ``boolean``
"""
user = models.ForeignKey(
AUTH_USER_MODEL,
blank=True, null=True)
token = models.CharField(
max_length=255,
default=long_token)
access_token = models.OneToOneField(
AccessToken,
related_name='refresh_token')
client = models.ForeignKey(
Client)
expired = models.BooleanField(
default=False)
created = models.DateTimeField(
auto_now_add=True)
modified = models.DateTimeField(
auto_now=True)
class Meta:
app_label = 'oauth2'
def __str__(self):
return self.token
|
#!/usr/bin/python3
def fizzbuzz():
for numbers in range(1, 101):
if (numbers % 15 == 0):
print("FizzBuzz ", end="")
elif (numbers % 3 == 0):
print("Fizz ", end="")
elif (numbers % 5 == 0):
print("Buzz ", end="")
else:
print("{:d} ".format(numbers), end="")
|
from bird_manager import BirdManager
import numpy as np
birdManager = BirdManager();
filenames, captions = birdManager.get_captions('/data1/BIRD/captions/train')
traindata = np.append(filenames[:, None], captions, axis = 1)
print filenames.shape
print captions.shape
|
""" I'm sorry.
"""
def walk_stack_for(var_name):
import inspect
frame = None
try:
for f in inspect.stack()[1:]:
frame = f[0]
code = frame.f_code
if code.co_varnames[:1] == (var_name, ):
return frame.f_locals[var_name]
elif code.co_varnames[:2] == ("self", var_name,):
return frame.f_locals[var_name]
elif code.co_varnames and code.co_varnames[0] == var_name:
return frame.f_locals[var_name]
finally:
del frame
def current_user():
""" Returns the current authenticated user, or None
"""
curr_request = current_request()
if curr_request:
return curr_request.user
def current_request():
""" Returns the current request, or None
"""
return walk_stack_for('request')
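# --- Usage sketch (not from the original module) ---
# current_request() walks up the call stack for a frame whose first local is
# literally named 'request' (e.g. a Django-style view) and returns that object.
def _example_view(request):
    # True: the stack walker finds this frame's local named 'request'
    return current_request() is request

if __name__ == '__main__':
    class _FakeRequest(object):
        user = 'anonymous'
    print(_example_view(_FakeRequest()))   # True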
|
'''
Find duplicate glyphs in selected font
'''
from collections import Counter
def find(font_glyphs):
'''Check if there are duplicate glyphs'''
print '***Find Duplicate glyphs in selected font***'
glyphs_count = Counter(font_glyphs)
if glyphs_count and max(glyphs_count.values()) >= 2:
for glyph in glyphs_count:
if glyphs_count[glyph] >= 2:
print 'ERROR: %s duplicated\n' % glyph
else:
print 'PASS: No duplicate glyphs\n'
if __name__ == '__main__':
font = Glyphs.font
find([g.name for g in font.glyphs])
|
__author__ = 'iceke'
class Util(object):
def a(self):
pass
'''
transform all kinds of time to minute
'''
@staticmethod
def format_time(time_str):
final_time = 0.0
time_array = time_str.split(' ')
unit = time_array[1]
value = float(time_array[0])
if unit == 's':
final_time = round(value/60.0, 2)
elif unit == 'min':
final_time = round(value+0.0, 2)
elif unit == 'h':
final_time = round(value*60.0, 2)
else:
raise ValueError('spark total time\'s unit is illegal')
return final_time
'''
transform all kinds of time to second
'''
@staticmethod
def format_second(time_str):
final_time = 0.0
time_array = time_str.split(' ')
unit = time_array[1]
if time_array[0].find(".") is False:
print time_array[0]
value = int(time_array[0], 10)
value = value * 1.0
else:
value = float(time_array[0])
if unit == 's':
final_time = value
elif unit == 'ms':
final_time = round(value/1000.0, 2)
elif unit == 'min':
final_time = round(value*60.0, 2)
elif unit == 'h':
final_time = round(value*3600.0, 2)
else:
raise ValueError('spark total time\'s unit is illegal')
return final_time
@staticmethod
def format_tasks_percent(percent_str):
final_percent = 0.0
percent_array = percent_str.strip().split('/')
print percent_array[0],percent_array[1]
final_percent = round((float(percent_array[0])/float(percent_array[1]))*100, 1)
return final_percent
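# Usage sketch (added for illustration):
#   Util.format_time('90 s')         -> 1.5    (minutes)
#   Util.format_time('2 h')          -> 120.0  (minutes)
#   Util.format_second('3 min')      -> 180.0  (seconds)
#   Util.format_tasks_percent('3/4') -> 75.0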
|
#!/usr/bin/python
# Copyright 2008-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
import sys
from os import path as osp
pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
sys.path.insert(0, pym_path)
import portage
from portage import os
from portage.output import *
from optparse import OptionGroup, OptionParser
__program__ = "glsa-check"
__author__ = "Marius Mauch <genone@gentoo.org>"
__version__ = "1.0"
def cb_version(*args, **kwargs):
"""Callback for --version"""
sys.stderr.write("\n"+ __program__ + ", version " + __version__ + "\n")
sys.stderr.write("Author: " + __author__ + "\n")
sys.stderr.write("This program is licensed under the GPL, version 2\n\n")
sys.exit(0)
# option parsing
parser = OptionParser(usage="%prog <option> [glsa-list]",
version="%prog "+ __version__)
parser.epilog = "glsa-list can contain an arbitrary number of GLSA ids," \
" filenames containing GLSAs or the special identifiers" \
" 'all', 'new' and 'affected'"
modes = OptionGroup(parser, "Modes")
modes.add_option("-l", "--list", action="store_const",
const="list", dest="mode",
help="List all unapplied GLSA")
modes.add_option("-d", "--dump", action="store_const",
const="dump", dest="mode",
help="Show all information about the given GLSA")
modes.add_option("", "--print", action="store_const",
const="dump", dest="mode",
help="Alias for --dump")
modes.add_option("-t", "--test", action="store_const",
const="test", dest="mode",
help="Test if this system is affected by the given GLSA")
modes.add_option("-p", "--pretend", action="store_const",
const="pretend", dest="mode",
help="Show the necessary commands to apply this GLSA")
modes.add_option("-f", "--fix", action="store_const",
const="fix", dest="mode",
help="Try to auto-apply this GLSA (experimental)")
modes.add_option("-i", "--inject", action="store_const", dest="mode",
help="Inject the given GLSA into the checkfile")
modes.add_option("-m", "--mail", action="store_const",
const="mail", dest="mode",
help="Send a mail with the given GLSAs to the administrator")
parser.add_option_group(modes)
parser.remove_option("--version")
parser.add_option("-V", "--version", action="callback",
callback=cb_version, help="Some information about this tool")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
help="Print more information")
parser.add_option("-n", "--nocolor", action="callback",
callback=lambda *args, **kwargs: nocolor(),
help="Disable colors")
parser.add_option("-e", "--emergelike", action="store_false", dest="least_change",
help="Do not use a least-change algorithm")
parser.add_option("-c", "--cve", action="store_true", dest="list_cve",
help="Show CAN ids in listing mode")
options, params = parser.parse_args()
mode = options.mode
least_change = options.least_change
list_cve = options.list_cve
verbose = options.verbose
# Sanity checking
if mode is None:
sys.stderr.write("No mode given: what should I do?\n")
parser.print_help()
sys.exit(1)
elif mode != "list" and not params:
sys.stderr.write("\nno GLSA given, so we'll do nothing for now. \n")
sys.stderr.write("If you want to run on all GLSA please tell me so \n")
sys.stderr.write("(specify \"all\" as parameter)\n\n")
parser.print_help()
sys.exit(1)
elif mode in ["fix", "inject"] and os.geteuid() != 0:
# we need root privileges for write access
sys.stderr.write("\nThis tool needs root access to "+options.mode+" this GLSA\n\n")
sys.exit(2)
elif mode == "list" and not params:
params.append("new")
# delay this for speed increase
from portage.glsa import *
eroot = portage.settings['EROOT']
vardb = portage.db[eroot]["vartree"].dbapi
portdb = portage.db[eroot]["porttree"].dbapi
# build glsa lists
completelist = get_glsa_list(portage.settings)
checklist = get_applied_glsas(portage.settings)
todolist = [e for e in completelist if e not in checklist]
glsalist = []
if "new" in params:
glsalist = todolist
params.remove("new")
if "all" in params:
glsalist = completelist
params.remove("all")
if "affected" in params:
# replaced completelist with todolist on request of wschlich
for x in todolist:
try:
myglsa = Glsa(x, portage.settings, vardb, portdb)
except (GlsaTypeException, GlsaFormatException) as e:
if verbose:
sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (x, e)))
continue
if myglsa.isVulnerable():
glsalist.append(x)
params.remove("affected")
# remove invalid parameters
for p in params[:]:
if not (p in completelist or os.path.exists(p)):
sys.stderr.write(("(removing %s from parameter list as it isn't a valid GLSA specification)\n" % p))
params.remove(p)
glsalist.extend([g for g in params if g not in glsalist])
def summarylist(myglsalist, fd1=sys.stdout, fd2=sys.stderr):
fd2.write(white("[A]")+" means this GLSA was already applied,\n")
fd2.write(green("[U]")+" means the system is not affected and\n")
fd2.write(red("[N]")+" indicates that the system might be affected.\n\n")
myglsalist.sort()
for myid in myglsalist:
try:
myglsa = Glsa(myid, portage.settings, vardb, portdb)
except (GlsaTypeException, GlsaFormatException) as e:
if verbose:
fd2.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
continue
if myglsa.isApplied():
status = "[A]"
color = white
elif myglsa.isVulnerable():
status = "[N]"
color = red
else:
status = "[U]"
color = green
if verbose:
access = ("[%-8s] " % myglsa.access)
else:
access=""
fd1.write(color(myglsa.nr) + " " + color(status) + " " + color(access) + myglsa.title + " (")
if not verbose:
for pkg in list(myglsa.packages)[:3]:
fd1.write(" " + pkg + " ")
if len(myglsa.packages) > 3:
fd1.write("... ")
else:
for pkg in myglsa.packages:
mylist = vardb.match(pkg)
if len(mylist) > 0:
pkg = color(" ".join(mylist))
fd1.write(" " + pkg + " ")
fd1.write(")")
if list_cve:
fd1.write(" "+(",".join([r[:13] for r in myglsa.references if r[:4] in ["CAN-", "CVE-"]])))
fd1.write("\n")
return 0
if mode == "list":
sys.exit(summarylist(glsalist))
# dump, fix, inject and pretend are nearly the same code, only the glsa method call differs
if mode in ["dump", "fix", "inject", "pretend"]:
for myid in glsalist:
try:
myglsa = Glsa(myid, portage.settings, vardb, portdb)
except (GlsaTypeException, GlsaFormatException) as e:
if verbose:
sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
continue
if mode == "dump":
myglsa.dump()
elif mode == "fix":
sys.stdout.write("fixing "+myid+"\n")
mergelist = myglsa.getMergeList(least_change=least_change)
for pkg in mergelist:
sys.stdout.write(">>> merging "+pkg+"\n")
# using emerge for the actual merging as it contains the dependency
# code and we want to be consistent in behaviour. Also this functionality
# will be integrated in emerge later, so it shouldn't hurt much.
emergecmd = "emerge --oneshot " + portage.settings["EMERGE_OPTS"] + " =" + pkg
if verbose:
sys.stderr.write(emergecmd+"\n")
exitcode = os.system(emergecmd)
# system() returns the exitcode in the high byte of a 16bit integer
if exitcode >= 1<<8:
exitcode >>= 8
if exitcode:
sys.exit(exitcode)
myglsa.inject()
elif mode == "pretend":
sys.stdout.write("Checking GLSA "+myid+"\n")
mergelist = myglsa.getMergeList(least_change=least_change)
if mergelist:
sys.stdout.write("The following updates will be performed for this GLSA:\n")
for pkg in mergelist:
oldver = None
for x in vardb.match(portage.cpv_getkey(pkg)):
if vardb._pkg_str(x, None).slot == portdb._pkg_str(pkg, None).slot:
oldver = x
if oldver == None:
raise ValueError("could not find old version for package %s" % pkg)
oldver = oldver[len(portage.cpv_getkey(oldver))+1:]
sys.stdout.write(" " + pkg + " (" + oldver + ")\n")
else:
sys.stdout.write("Nothing to do for this GLSA\n")
elif mode == "inject":
sys.stdout.write("injecting " + myid + "\n")
myglsa.inject()
sys.stdout.write("\n")
sys.exit(0)
# test is a bit different as Glsa.test() produces no output
if mode == "test":
outputlist = []
for myid in glsalist:
try:
myglsa = Glsa(myid, portage.settings, vardb, portdb)
except (GlsaTypeException, GlsaFormatException) as e:
if verbose:
sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
continue
if myglsa.isVulnerable():
outputlist.append(str(myglsa.nr))
if len(outputlist) > 0:
sys.stderr.write("This system is affected by the following GLSAs:\n")
if verbose:
summarylist(outputlist)
else:
sys.stdout.write("\n".join(outputlist)+"\n")
else:
sys.stderr.write("This system is not affected by any of the listed GLSAs\n")
sys.exit(0)
# mail mode as requested by solar
if mode == "mail":
import portage.mail, socket
from io import StringIO
from email.mime.text import MIMEText
# color doesn't make any sense for mail
nocolor()
if "PORTAGE_ELOG_MAILURI" in portage.settings:
myrecipient = portage.settings["PORTAGE_ELOG_MAILURI"].split()[0]
else:
myrecipient = "root@localhost"
if "PORTAGE_ELOG_MAILFROM" in portage.settings:
myfrom = portage.settings["PORTAGE_ELOG_MAILFROM"]
else:
myfrom = "glsa-check"
mysubject = "[glsa-check] Summary for %s" % socket.getfqdn()
# need a file object for summarylist()
myfd = StringIO()
myfd.write("GLSA Summary report for host %s\n" % socket.getfqdn())
myfd.write("(Command was: %s)\n\n" % " ".join(sys.argv))
summarylist(glsalist, fd1=myfd, fd2=myfd)
summary = str(myfd.getvalue())
myfd.close()
myattachments = []
for myid in glsalist:
try:
myglsa = Glsa(myid, portage.settings, vardb, portdb)
except (GlsaTypeException, GlsaFormatException) as e:
if verbose:
sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
continue
myfd = StringIO()
myglsa.dump(outstream=myfd)
myattachments.append(MIMEText(str(myfd.getvalue()), _charset="utf8"))
myfd.close()
mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, summary, myattachments)
portage.mail.send_mail(portage.settings, mymessage)
sys.exit(0)
# something wrong here, all valid paths are covered with sys.exit()
sys.stderr.write("nothing more to do\n")
sys.exit(2)
|
import DigitalMicrograph as DM
print("Initializing DigitalMicrograph environmnet...")
# the name of the tag is used, this is deleted so it shouldn't matter anyway
file_tag_name = "__python__file__"
# the dm-script to execute, double curly brackets are used because of the
# python format function
script = ("\n".join((
"DocumentWindow win = GetDocumentWindow(0);",
"if(win.WindowIsvalid()){{",
"if(win.WindowIsLinkedToFile()){{",
"TagGroup tg = GetPersistentTagGroup();",
"if(!tg.TagGroupDoesTagExist(\"{tag_name}\")){{",
"number index = tg.TagGroupCreateNewLabeledTag(\"{tag_name}\");",
"tg.TagGroupSetIndexedTagAsString(index, win.WindowGetCurrentFile());",
"}}",
"else{{",
"tg.TagGroupSetTagAsString(\"{tag_name}\", win.WindowGetCurrentFile());",
"}}",
"}}",
"}}"
))).format(tag_name=file_tag_name)
# execute the dm script
DM.ExecuteScriptString(script)
# read from the global tags to get the value to the python script
global_tags = DM.GetPersistentTagGroup()
if global_tags.IsValid():
s, __file__ = global_tags.GetTagAsString(file_tag_name);
if s:
# delete the created tag again
DM.ExecuteScriptString(
"GetPersistentTagGroup()." +
"TagGroupDeleteTagWithLabel(\"{}\");".format(file_tag_name)
)
else:
del __file__
try:
__file__
except NameError:
# set a default if the __file__ could not be received
__file__ = ""
if __file__ != "":
import os
import sys
base_path = str(os.path.dirname(os.path.dirname(__file__)))
if base_path not in sys.path:
sys.path.insert(0, base_path)
import pprint
import traceback
import matplotlib.pyplot as plt
try:
import pylo
view = pylo.CLIView()
configuration = pylo.AbstractConfiguration()
controller = pylo.Controller(view, configuration)
camera = pylo.loader.getDevice("Digital Micrograph Camera", controller)
camera.show_images = True
img = camera.recordImage()
plt.imshow(img.image_data)
pprint.pprint(img.tags)
except Exception as e:
# dm-script error messages are very bad, use this for getting the error text and the
# correct traceback
print("Exception: ", e)
traceback.print_exc()
|
# Submitter: loganw1(Wang, Logan)
import prompt
import goody
# Use these global variables to index the list associated with each name in the dictionary.
# e.g., if men is a dictionary, men['m1'][match] is the woman who matches man 'm1', and
# men['m1'][prefs] is the list of preference for man 'm1'.
# It would seem that this list might be better represented as a named tuple, but the
# preference list it contains is mutated, which is not allowed in a named tuple.
match = 0 # Index 0 of the list associated with a name is that person's match (str)
prefs = 1 # Index 1 of the list associated with a name is that person's preference list (list of str)
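# For example (hypothetical names, not from the course files), read_match_preferences()
# below would turn the line "m1;w2;w1" into the entry 'm1': [None, ['w2', 'w1']]:
# index 0 (match) starts as None and index 1 (prefs) holds that man's preference list,
# which make_match() then consumes.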
def read_match_preferences(open_file : open) -> {str:[str,[str]]}:
d = dict()
for text in open_file:
relationships = text.strip().split(sep=';')
l=[]
for i in range(1,len(relationships)):
l.append(relationships[i])
d[relationships[0]] = [None,l]
return d
def dict_as_str(d : {str:[str,[str]]}, key : callable=None, reverse : bool=False) -> str:
l = dict(d)
s = ''
l = sorted(l,key =key,reverse=reverse)
for person in l:
s += " " + person + ' -> ' + str(d[person]) + '\n'
return s
def who_prefer(order : [str], p1 : str, p2 : str) -> str:
for person in order:
if p1 == person or p2 == person:
return person
return None #TODO handle Null Reference
def extract_matches(men : {str:[str,[str]]}) -> {(str,str)}:
return {(man,woman[0])for man,woman in men.items()}
def make_match(men : {str:[str,[str]]}, women : {str:[str,[str]]}, trace : bool = False) -> {(str,str)}:
temp_men = {}
for key,relationship in men.items():
temp_men[key] = [str(relationship[0]),[str(potential_match) for potential_match in relationship[1]]]
temp_women = {}
for key, relationship in women.items():
temp_women[key] = [str(relationship[0]), [str(potential_match) for potential_match in relationship[1]]]
unmatched = {man for man in men.keys()}
if(trace):
print('Women Preferences (unchanging)')
print(dict_as_str(temp_women))
print()
print('Men Preferences (current)')
print(dict_as_str(temp_men))
print()
print('unmatched men = ' + str(unmatched))
while len(unmatched) > 0:
prospect = unmatched.pop()
woman_asked = temp_men[prospect][1].pop(0)
if(trace):
print(prospect + ' proposes to ' + woman_asked +', ', end='')
if temp_women[woman_asked][0] == 'None':
temp_men[prospect][0] = woman_asked
temp_women[woman_asked][0] = prospect
if(trace):
print('who is currently unmatched and accepts the proposal')
else:
if prospect == who_prefer(temp_women[woman_asked][1],prospect,temp_women[woman_asked][0]):
unmatched.add(temp_women[woman_asked][0])
temp_men[prospect][0] = woman_asked
temp_women[woman_asked][0] = prospect
if(trace):
print('who is currently matched and accepts the proposal (likes new match better)')
else:
unmatched.add(prospect)
if (trace):
print('who is currently matched and rejects the proposal (likes current match better)')
if(trace):
print('Men Preferences (current)')
print(dict_as_str(temp_men))
print()
print('unmatched men = ' + str(unmatched))
return extract_matches(temp_men)
if __name__ == '__main__':
# Write script here
men_file = goody.safe_open('Furnish any file name containing the preferences of the men:', 'r', 'Illegal file name',
default='men0.txt')
women_file = goody.safe_open('Furnish any file name containing the preferences of the women:', 'r', 'Illegal file name',
default='women0.txt')
men_dict = read_match_preferences(men_file)
women_dict = read_match_preferences(women_file)
print()
print('Men Preferences')
print(dict_as_str(men_dict))
print('Women Preferences')
print(dict_as_str(women_dict))
user = prompt.for_bool("Furnish Trace of Execution", default=True)
print()
print('matches = ' + str(make_match(men_dict,women_dict,user)))
# For running batch self-tests
print()
import driver
driver.default_file_name = "bsc2.txt"
# driver.default_show_traceback = True
# driver.default_show_exception = True
# driver.default_show_exception_message = True
driver.driver()
|
import sys
FIB = [0,1,1]
def fib(n):
if n < len(FIB):
return FIB[n]
else:
l = len(FIB)
for i in xrange(l,n+1):
FIB.append(FIB[-1]+FIB[-2])
return FIB[-1]
with open(sys.argv[1],'r') as f:
for line in f:
print fib(int(line.strip()))
|
from decimal import *
def golden():
    getcontext().prec = 100
    # use Decimal's own sqrt() so all 100 digits are meaningful;
    # math.sqrt(5) would round the intermediate result to a 64-bit float
    gr = (1 + Decimal(5).sqrt()) / 2
    return gr
print(golden())
|
import os
import numpy as np
import time
import argparse
# import logging
from mpi4py import MPI
from math import ceil
from random import Random
import networkx as nx
import torch
import torch.distributed as dist
import torch.utils.data.distributed
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.multiprocessing import Process
from torch.autograd import Variable
import torchvision
from torchvision import datasets, transforms
import torch.backends.cudnn as cudnn
import torchvision.models as IMG_models
from models import *
from models import LogisticRegression
# logging.basicConfig(level=logging.INFO)
class Partition(object):
""" Dataset-like object, but only access a subset of it. """
def __init__(self, data, index):
self.data = data
self.index = index
def __len__(self):
return len(self.index)
def __getitem__(self, index):
data_idx = self.index[index]
return self.data[data_idx]
class DataPartitioner(object):
""" Partitions a dataset into different chuncks. """
def __init__(self, data, sizes=[0.7, 0.2, 0.1], seed=1234, isNonIID=False, alpha=0, dataset=None):
self.data = data
self.dataset = dataset
if isNonIID:
self.partitions, self.ratio = self.__getDirichletData__(data, sizes, seed, alpha)
else:
self.partitions = []
self.ratio = sizes
rng = Random()
rng.seed(seed)
data_len = len(data)
indexes = [x for x in range(0, data_len)]
rng.shuffle(indexes)
for frac in sizes:
part_len = int(frac * data_len)
self.partitions.append(indexes[0:part_len])
indexes = indexes[part_len:]
def use(self, partition):
return Partition(self.data, self.partitions[partition])
def __getNonIIDdata__(self, data, sizes, seed, alpha):
labelList = data.train_labels
rng = Random()
rng.seed(seed)
a = [(label, idx) for idx, label in enumerate(labelList)]
# Same Part
labelIdxDict = dict()
for label, idx in a:
labelIdxDict.setdefault(label,[])
labelIdxDict[label].append(idx)
labelNum = len(labelIdxDict)
labelNameList = [key for key in labelIdxDict]
labelIdxPointer = [0] * labelNum
# sizes = number of nodes
partitions = [list() for i in range(len(sizes))]
eachPartitionLen= int(len(labelList)/len(sizes))
# majorLabelNumPerPartition = ceil(labelNum/len(partitions))
majorLabelNumPerPartition = 2
basicLabelRatio = alpha
interval = 1
labelPointer = 0
#basic part
for partPointer in range(len(partitions)):
requiredLabelList = list()
for _ in range(majorLabelNumPerPartition):
requiredLabelList.append(labelPointer)
labelPointer += interval
if labelPointer > labelNum - 1:
labelPointer = interval
interval += 1
for labelIdx in requiredLabelList:
start = labelIdxPointer[labelIdx]
idxIncrement = int(basicLabelRatio*len(labelIdxDict[labelNameList[labelIdx]]))
partitions[partPointer].extend(labelIdxDict[labelNameList[labelIdx]][start:start+ idxIncrement])
labelIdxPointer[labelIdx] += idxIncrement
#random part
remainLabels = list()
for labelIdx in range(labelNum):
remainLabels.extend(labelIdxDict[labelNameList[labelIdx]][labelIdxPointer[labelIdx]:])
rng.shuffle(remainLabels)
for partPointer in range(len(partitions)):
idxIncrement = eachPartitionLen - len(partitions[partPointer])
partitions[partPointer].extend(remainLabels[:idxIncrement])
rng.shuffle(partitions[partPointer])
remainLabels = remainLabels[idxIncrement:]
return partitions
def __getDirichletData__(self, data, psizes, seed, alpha):
n_nets = len(psizes)
K = 10
labelList = np.array(data.train_labels)
min_size = 0
N = len(labelList)
np.random.seed(2020)
net_dataidx_map = {}
while min_size < K:
idx_batch = [[] for _ in range(n_nets)]
# for each class in the dataset
for k in range(K):
idx_k = np.where(labelList == k)[0]
np.random.shuffle(idx_k)
proportions = np.random.dirichlet(np.repeat(alpha, n_nets))
## Balance
proportions = np.array([p*(len(idx_j)<N/n_nets) for p,idx_j in zip(proportions,idx_batch)])
proportions = proportions/proportions.sum()
proportions = (np.cumsum(proportions)*len(idx_k)).astype(int)[:-1]
idx_batch = [idx_j + idx.tolist() for idx_j,idx in zip(idx_batch,np.split(idx_k,proportions))]
min_size = min([len(idx_j) for idx_j in idx_batch])
for j in range(n_nets):
np.random.shuffle(idx_batch[j])
net_dataidx_map[j] = idx_batch[j]
net_cls_counts = {}
for net_i, dataidx in net_dataidx_map.items():
unq, unq_cnt = np.unique(labelList[dataidx], return_counts=True)
tmp = {unq[i]: unq_cnt[i] for i in range(len(unq))}
net_cls_counts[net_i] = tmp
print('Data statistics: %s' % str(net_cls_counts))
local_sizes = []
for i in range(n_nets):
local_sizes.append(len(net_dataidx_map[i]))
local_sizes = np.array(local_sizes)
weights = local_sizes/np.sum(local_sizes)
print(weights)
return idx_batch, weights
def partition_dataset(rank, size, args):
print('==> load train data')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root=args.datapath,
train=True,
download=True,
transform=transform_train)
partition_sizes = [1.0 / size for _ in range(size)]
partition = DataPartitioner(trainset, partition_sizes, isNonIID=args.NIID, alpha=args.alpha)
ratio = partition.ratio
partition = partition.use(rank)
train_loader = torch.utils.data.DataLoader(partition,
batch_size=args.bs,
shuffle=True,
pin_memory=True)
print('==> load test data')
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
testset = torchvision.datasets.CIFAR10(root=args.datapath,
train=False,
download=True,
transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset,
batch_size=64,
shuffle=False,
num_workers=size)
# You can add more datasets here
return train_loader, test_loader, ratio
def select_model(num_class, args):
if args.model == 'VGG':
model = vgg11()
# You can add more models here
return model
def comp_accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            # use reshape(): `correct` comes from a transposed tensor and may be non-contiguous
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class Meter(object):
""" Computes and stores the average, variance, and current value """
def __init__(self, init_dict=None, ptag='Time', stateful=False,
csv_format=True):
"""
:param init_dict: Dictionary to initialize meter values
:param ptag: Print tag used in __str__() to identify meter
:param stateful: Whether to store value history and compute MAD
"""
self.reset()
self.ptag = ptag
self.value_history = None
self.stateful = stateful
if self.stateful:
self.value_history = []
self.csv_format = csv_format
if init_dict is not None:
for key in init_dict:
try:
# TODO: add type checking to init_dict values
self.__dict__[key] = init_dict[key]
except Exception:
print('(Warning) Invalid key {} in init_dict'.format(key))
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.std = 0
self.sqsum = 0
self.mad = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
self.sqsum += (val ** 2) * n
if self.count > 1:
self.std = ((self.sqsum - (self.sum ** 2) / self.count)
/ (self.count - 1)
) ** 0.5
if self.stateful:
self.value_history.append(val)
mad = 0
for v in self.value_history:
mad += abs(v - self.avg)
self.mad = mad / len(self.value_history)
def __str__(self):
if self.csv_format:
if self.stateful:
return str('{dm.val:.3f},{dm.avg:.3f},{dm.mad:.3f}'
.format(dm=self))
else:
return str('{dm.val:.3f},{dm.avg:.3f},{dm.std:.3f}'
.format(dm=self))
else:
if self.stateful:
return str(self.ptag) + \
str(': {dm.val:.3f} ({dm.avg:.3f} +- {dm.mad:.3f})'
.format(dm=self))
else:
return str(self.ptag) + \
str(': {dm.val:.3f} ({dm.avg:.3f} +- {dm.std:.3f})'
.format(dm=self))
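# A minimal sketch (not part of the original training script) of how Meter
# accumulates running statistics; the values below are illustrative only.
if __name__ == "__main__":
    meter = Meter(ptag='Loss', csv_format=False)
    for v in [1.0, 2.0, 3.0]:
        meter.update(v)
    print(meter)  # Loss: 3.000 (2.000 +- 1.000)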
|
D = {
1: 5.6,
2: 7.8,
3: 6.6,
4: 8.7,
5: 7.7
}
# i
D[8] = 8.8
print(D)
# ii
D.pop(2)
print(D)
# iii
if 6 in D: print("yes")
else : print("no")
# iv
print(len(D))
# v
sum = 0
for v in D.values(): sum += v
print(sum)
# vi
D[3] = 7.1
print(D)
# vii
D.clear()
print(D)
|
from girder.models.setting import Setting
from girder.plugins.imagespace.settings import ImageSpaceSetting
class GeorgetownSetting(ImageSpaceSetting):
requiredSettings = ('IMAGE_SPACE_GEORGETOWN_DOMAIN_DYNAMICS_SEARCH',)
def validateImageSpaceGeorgetownDomainDynamicsSearch(self, doc):
return doc.rstrip('/')
|
import dataPuller
# import helpers
# this will get the acutal data once the url is figured out
dataPuller.getData(2006, 11, 1, 54)
# helpers.getUrl(2013, 10, 15)
|
def armstrong_num(num):
num_1 = num
res=0
while num != 0:
rem = num % 10
res = res + rem ** 3
num = num // 10
return num_1 == res
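# e.g. armstrong_num(153) -> True  (1**3 + 5**3 + 3**3 == 153)
#      armstrong_num(154) -> False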
input_num = int(input("enter a number"))
if(armstrong_num(input_num)):
print(f"given num {input_num} is armstrong")
else:
print(f"given num {input_num} is not armstrong")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__license__ = ''
__version__ = '1.0.1'
update_permission_element_query = """
UPDATE public.permission AS pmr
SET (module, action, active, updated_on) = (
$2::VARCHAR, $3::VARCHAR, $4::BOOLEAN, now())
WHERE pmr.id::BIGINT = $1
RETURNING *;
"""
|
from clpy.types.string import String
from clpy.space import Space
def test_string():
space = Space()
assert space.hash(String('a')) == space.hash(String('a'))
assert space.eq(String('a'), String('a'))
assert not space.eq(String('a'), String('b'))
|
from flask import Blueprint, render_template, abort, session, request, jsonify, url_for, redirect
from jinja2 import TemplateNotFound
from flaskConfiguration import monDB
from datetime import datetime, date, timedelta
import application.codechefAPI as helper
from flaskConfiguration import monDB
notifications_page = Blueprint('notifications_page', __name__, template_folder='templates')
@notifications_page.before_request
def tokenExpireCheck():
try:
if session['expires_in'] <= datetime.now():
status = helper.refreshAccessToken()
if status is not True:
abort(redirect(url_for('authenticate.logout')))
except:
abort(redirect(url_for('authenticate.logout')))
@notifications_page.route('/notifications')
def userNotifications():
if session.get('username') == None:
return redirect(url_for('login'))
notifications = monDB.suggestions.find({'username': session['username'], 'status' : "1"})
try:
return render_template('notifications.html', notifications=notifications)
except TemplateNotFound:
abort(404)
@notifications_page.route('/notifications/markasread', methods=['POST'])
def markAsRead():
if session.get('username') == None:
return jsonify({'status': 'failure', 'message': 'Something went wrong. Please try again later'})
if request.method == "POST":
problemCode = request.form.get('problemCode')
contestCode = request.form.get('contestCode')
if problemCode == None or contestCode == None:
return jsonify({'status': 'failure', 'message': 'Something went wrong. Please try again later'})
# update criteria
criteria = {
'username': session['username'],
'problemCode': problemCode,
'contestCode': contestCode,
'status': '1'
}
# change status to '0'
monDB.suggestions.update_one(criteria,
{
'$set': {
'status': '0'
}
}
)
notifications_count = monDB.suggestions.find({'status': '1', 'username': session['username']}).count()
session['notifications_count'] = notifications_count
return jsonify({'status': 'success', 'message': 'Marked as read successfully!'})
else:
return jsonify({'status': 'failure', 'message': 'Something went wrong. Please try again later'})
|
from equadratures import *
import numpy as np
import matplotlib.pyplot as plt
VALUE = 15
plt.rcParams.update({'font.size': VALUE})
order = 4
s1 = Parameter(lower=-1, upper=1, order=order, distribution='Uniform')
myBasis = Basis('univariate')
myPoly = Poly(s1, myBasis, method='numerical-integration')
points, weights = myPoly.get_points_and_weights()
def function(x):
return x[0]**7 - 3.0*x[0]**6 + x[0]**5 - 10.0*x[0]**4 +4.0
integral = float( 2 * np.dot(weights , evaluate_model(points, function) ) )
print(integral)
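# sanity check: the odd powers integrate to zero over [-1, 1], so the exact value is
# -3*(2/7) - 10*(2/5) + 4*2 = 22/7 ~= 3.142857, which the order-4 Gauss rule reproduces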
s2 = Parameter(lower=-1, upper=1, order=order, distribution='uniform', endpoints='lower')
s3 = Parameter(lower=-1, upper=1, order=order, distribution='uniform', endpoints='upper')
s4 = Parameter(lower=-1, upper=1, order=order, distribution='uniform', endpoints='both')
myPoly2 = Poly(s2, myBasis, method='numerical-integration')
myPoly3 = Poly(s3, myBasis, method='numerical-integration')
myPoly4 = Poly(s4, myBasis, method='numerical-integration')
zeros = np.zeros((order+1))
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.xlabel('$s$', fontsize=VALUE)
plt.ylabel('Quadrature points', fontsize=VALUE)
plt.plot(myPoly.get_points(), zeros, 'o', c='crimson', lw=4, ms=15)
plt.plot(myPoly2.get_points(), zeros-0.1, '<', c='orange', lw=4, ms=15)
plt.plot(myPoly3.get_points(), zeros+0.1, '>', c='navy', lw=4, ms=15)
plt.plot(myPoly4.get_points(), zeros+0.2, 's', c='limegreen', lw=4, ms=15)
plt.grid()
frame1 = plt.gca()
frame1.axes.yaxis.set_ticklabels([])
plt.savefig('../Figures/tutorial_2_fig_a.png', dpi=200, bbox_inches='tight')
|
from flask import request, Blueprint, json, Response
from ..models.VehicleModel import VehicleModel, VehicleSchema
vehicle_api = Blueprint('vehicle_api', __name__)
vehicle_schema = VehicleSchema()
@vehicle_api.route('/', methods=['POST'])
def create():
req_data = request.get_json()
data, error = vehicle_schema.load(req_data)
if error:
return custom_response(error, 400)
post = VehicleModel(data)
post.save()
data = vehicle_schema.dump(post).data
return custom_response(data, 201)
@vehicle_api.route('/truck_today', methods=['GET'])
def list_trucks_today():
vehicle = VehicleModel.get_trucks_today()
response = vehicle_schema.dump(vehicle, many=True).data
return custom_response(response, 200)
@vehicle_api.route('/truck_week', methods=['GET'])
def list_trucks_week():
vehicle = VehicleModel.get_trucks_week()
response = vehicle_schema.dump(vehicle, many=True).data
return custom_response(response, 200)
@vehicle_api.route('/truck_month', methods=['GET'])
def list_trucks_month():
vehicle = VehicleModel.get_trucks_month()
response = vehicle_schema.dump(vehicle, many=True).data
return custom_response(response, 200)
def custom_response(res, status_code):
return Response(
mimetype="application/json",
response=json.dumps(res),
status=status_code
)
|
x = "there are %d types of people." % 10
binary = "binary"
do_not = "don't"
y = "those who know %d and those who %d." % (binary, do_not)
print(x)
print(y)
print("i said: %r." % x)
print("i also said: '%s'." % y)# 如果删去‘’,则输出这句话中没有引号。但是上一句话仍旧有。%r 和 %s差异导致?
hilarious = False
joke_evaluatoin = "isn't that joke so funny? %r"
print(joke_evaluatoin % hilarious)# 等同于print("isn't that joke so funny? %r" % hilarious)
w = "this is the left side of ..."
e = "a string with a right side."
print (w+e)
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import OpenGL.GL as gl
import OpenGL.GLU as glu
#local imports
from common import COLORS
from screen import Screen
class CheckerBoard:
def __init__(self,
nrows,
check_width = 1.0,
check_height = None,
color1 = COLORS['white'],
color2 = COLORS['black'],
fixation_dot_color = None,
):
self.nrows = int(nrows)
if check_width is None:
check_width = 2.0/nrows #fill whole screen
self.check_width = check_width
if check_height is None:
check_height = check_width
self.check_height = check_height
self.board_width = check_width*nrows
#run colors through filter to catch names and convert to RGB
color1 = COLORS.get(color1, color1)
color2 = COLORS.get(color2, color2)
self.color1 = color1
self.color2 = color2
self.fixation_dot_color = fixation_dot_color
self.display_list_multi = None #for cached rendering of multiple display lists, leaving ability to change color
def render(self):
color1 = self.color1
color2 = self.color2
# create display lists if not yet done
if self.display_list_multi is None:
w = self.check_width
h = self.check_height
board_width = w * self.nrows
board_height = h * self.nrows
# get needed display list ints
if self.fixation_dot_color:
self.num_lists = 3 # include list for fixation dot
else:
self.num_lists = 2
self.display_list_multi = gl.glGenLists(self.num_lists)
# Create a display list for color 1
try:
gl.glNewList(self.display_list_multi, gl.GL_COMPILE)
# render the checkerboard's color1 checks
gl.glDisable(gl.GL_LIGHTING)
for x in range(0, self.nrows):
for y in range(0, self.nrows):
if (x + y) % 2 == 0:
gl.glRectf(w*x, h*y, w*(x + 1), h*(y + 1))
finally:
gl.glEnable(gl.GL_LIGHTING)
# End the display list
gl.glEndList()
# create a display list for color 2
try:
gl.glNewList(self.display_list_multi + 1, gl.GL_COMPILE)
# render the checkerboard's color2 checks
gl.glDisable(gl.GL_LIGHTING)
for x in range(0, self.nrows):
for y in range(0, self.nrows):
if (x + y) % 2 == 1:
gl.glRectf(w*x, h*y, w*(x + 1), h*(y + 1))
finally:
gl.glEnable(gl.GL_LIGHTING)
# End the display list
gl.glEndList()
# create list for fixation dot
if not self.fixation_dot_color is None:
gl.glNewList(self.display_list_multi + 2, gl.GL_COMPILE)
gl.glDisable(gl.GL_LIGHTING)
r, g, b = self.fixation_dot_color
gl.glColor3f(r, g, b)
gl.glTranslatef(board_width / 2.0, board_height / 2.0, 0)
glu.gluDisk(glu.gluNewQuadric(), 0, 0.005, 45, 1)
gl.glEnable(gl.GL_LIGHTING)
gl.glEndList()
self.show_display_lists(color1, color2)
else:
# render display lists
self.show_display_lists(color1, color2)
def show_display_lists(self, color1, color2):
# render the color1 list:
gl.glColor3f(*color1)
gl.glCallList(self.display_list_multi)
        # render the color2 list:
gl.glColor3f(*color2)
gl.glCallList(self.display_list_multi + 1)
# render fixation dot
if not self.fixation_dot_color is None:
gl.glCallList(self.display_list_multi + 2)
def __del__(self):
# __del__ gets called sometimes when render() hasn't yet been run and the OpenGL list doesn't yet exist
try:
gl.glDeleteLists(self.display_list_multi, self.num_lists)
except AttributeError:
pass
class CheckerBoardScreen(Screen):
def setup(self,
nrows,
check_width = None,
check_color1 = 'white',
check_color2 = 'black',
screen_background_color = 'neutral-gray',
fixation_dot_color = None,
pos_x = None,
pos_y = None,
vsync_value = None,
vsync_patch = "bottom-right",
):
Screen.setup(self,
background_color = screen_background_color,
vsync_value = vsync_value,
vsync_patch = vsync_patch,
)
self.CB = CheckerBoard(nrows = nrows,
check_width = check_width,
color1 = check_color1,
color2 = check_color2,
fixation_dot_color = fixation_dot_color
)
if pos_x is None:
pos_x = -0.5*self.CB.board_width
if pos_y is None:
pos_y = -0.5*self.CB.board_width
self.pos_x = pos_x
self.pos_y = pos_y
def render(self):
Screen.render(self)
#move so that board is centered and render
gl.glLoadIdentity()
gl.glTranslatef(self.pos_x,self.pos_y,0.0)
self.CB.render()
################################################################################
# TEST CODE
################################################################################
if __name__ == "__main__":
NROWS = 64
CBS = CheckerBoardScreen.with_pygame_display(
#debug = True
)
CBS.setup(nrows = NROWS,
screen_background_color = "neutral-gray",
vsync_value = 1
)
CBS.run(duration = 2)
|
# -*- coding: utf-8 -*-
import smtplib
mail_server = "smtp.rambler.ru"
mail_server_port = 465
from_addr = 'EMAIL_FROM'
to_addr = 'EMAIL_TO'
from_header = 'From: %s\r\n' % from_addr
to_header = 'To: %s\r\n' % to_addr
subject_header = 'Subject: Testing SMTP Authentication\r\n'
body = 'This mail tests SMTP Authentication'
# all headers first, then a blank line, then the body
email_message = '%s%s%s\r\n%s' % (from_header, to_header, subject_header, body)
s = smtplib.SMTP_SSL(mail_server, mail_server_port)
s.set_debuglevel(1)
s.login('EMAIL', 'PASSWORD')
s.sendmail(from_addr, to_addr, email_message)
s.quit()
|
def test_import():
from instapi import Client
from instapi import ClientCompatPatch
from instapi import (
ClientError,
ClientLoginError,
ClientLoginRequiredError,
ClientCookieExpiredError,
ClientThrottledError,
ClientConnectionError,
ClientCheckpointRequiredError,
ClientChallengeRequiredError,
ClientSentryBlockError,
ClientReqHeadersTooLargeError,
)
from instapi import MediaRatios
from instapi import MediaTypes
from instapi.utils import (
InstagramID,
gen_user_breadcrumb,
max_chunk_size_generator,
max_chunk_count_generator,
get_file_size,
ig_chunk_generator,
)
from instapi.constants import Constants
from instapi.compat import compat_urllib_parse
from instapi.compat import compat_urllib_error
assert True == True
|
import os
import yaml
curdir = os.path.dirname(__file__)
specs_dir = os.path.join(curdir, 'specs')
def load_spec(spec):
    with open(os.path.join(specs_dir, spec + '.yaml')) as f:
        return yaml.safe_load(f)
# Different specs
SPECS = [
load_spec('empty'),
load_spec('local'),
]
# Different configs
CONFIGS = [
dict(allowed_types=['derived/json']),
dict(allowed_types=['derived/csv'])
]
|
# Generated by Django 2.2.5 on 2020-02-24 08:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='students',
name='mobile',
field=models.IntegerField(default=0),
preserve_default=False,
),
]
|
#coding: utf-8
from scrapy.cmdline import execute
import os
import urllib
import requests
if __name__ == '__main__':
project_name = "zyh"
spider_name = "test"
s = "scrapy crawl dmoz"
# s = "scrapy crawl %s -o %s -t json" % (spider_name, results_name)
execute(s.split())
|
from django.contrib import admin
# Register your models here.
from .models import BlogMeta
class BlogMetaAdmin(admin.ModelAdmin):
list_display = ('key', "value")
admin.site.register(BlogMeta, BlogMetaAdmin)
|
from typing import List
class Solution:
def countCharacters(self, words: List[str], chars: str) -> int:
"""
https://leetcode.com/problems/find-words-that-can-be-formed-by-characters/
"""
        total = 0
        for w in words:
            n = len(w)
            i = 0
            while i < n:
                if w.count(w[i]) > chars.count(w[i]):
                    break
                i += 1
            # print(i)
            if i == n:
                total += n
        return total
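# Example from the problem statement: words = ["cat", "bt", "hat", "tree"], chars = "atach"
# returns 6, since only "cat" and "hat" (3 + 3 characters) can be formed from chars.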
|
"""
https://github.com/microsoft/DeepSpeed
deepspeed/ops/sparse_attention/sparsity_config.py
"""
import json
import random
import torch
MAX_SEQ_LENGTH = 512
def setup_layout(num_heads, max_position, block):
if max_position % block != 0:
raise ValueError(
f"Sequence Length, {max_position}, needs to be dividable by Block size {block}!"
)
num_blocks = max_position // block
layout = torch.zeros((num_heads, num_blocks, num_blocks), dtype=torch.int64)
return layout
def build_dense_pattern(num_heads, max_position, **unused_kwargs):
"""Initialize the Dense Sparsity Pattern Config.
In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and comprehension.
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
        max_position: required: an integer determining the maximum sequence length covered by the layout.
different_layout_per_head: optional: this is just for the sake of consistency with other sparsity formats; can ignore it for DenseSparsityConfig
"""
return torch.ones(num_heads, max_position, max_position)
def build_fixed_pattern(
num_heads,
max_position,
block=16,
num_local_blocks=4,
num_global_blocks=1,
attention="bidirectional",
horizontal_global_attention=False,
num_different_global_patterns=1,
**unused_kwargs,
):
"""Initialize `Fixed` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_local_blocks: optional: an integer determining the number of blocks in local attention window.
num_global_blocks: optional: an integer determining how many consecutive blocks in a local window is used as the representative of the window for global attention.
attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure.
horizontal_global_attention: optional: a boolean determining if blocks that are global representative of a local window, also attend to all other blocks. This is valid only if attention type is `bidirectional`. Looking at the attention matrix, that means global attention not only includes the vertical blocks, but also horizontal blocks.
num_different_global_patterns: optional: an integer determining number of different global attentions layouts. While global attention can be fixed by which block/s are representative of any local window, since there are multi-heads, each head can use a different global representative. For example, with 4 blocks local window and global attention size of 1 block, we can have 4 different versions in which the first, Second, third, or forth block of each local window can be global representative of that window. This parameter determines how many of such patterns we want. Of course, there is a limitation based on num_local_blocks and num_global_blocks.
"""
if num_local_blocks % num_global_blocks != 0:
raise ValueError(
f"Number of blocks in a local window, {num_local_blocks}, must be dividable by number of global blocks, {num_global_blocks}!"
)
if attention != "unidirectional" and attention != "bidirectional":
raise NotImplementedError(
'only "uni/bi-directional" attentions are supported for now!'
)
if attention != "bidirectional" and horizontal_global_attention:
raise ValueError(
'only "bi-directional" attentions can support horizontal global attention!'
)
if num_different_global_patterns > (num_local_blocks // num_global_blocks):
raise ValueError(
f"Number of layout versions (num_different_global_patterns), {num_different_global_patterns}, cannot be larger than number of local window blocks divided by number of global blocks, {num_local_blocks} / {num_global_blocks} = {num_local_blocks//num_global_blocks}!"
)
def set_local_layout(h, layout):
"""Sets local attantion layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completly set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local layout is set
"""
num_blocks = layout.shape[1]
for i in range(0, num_blocks, num_local_blocks):
end = min(i + num_local_blocks, num_blocks)
for row in range(i, end):
for col in range(
i, (row + 1 if attention == "unidirectional" else end)
):
layout[h, row, col] = 1
return layout
def set_global_layout(h, layout):
"""Sets global attantion layout used by the given head in the sparse attention.
Currently we set global blocks starting from the last block of a local window to the first one. That means if a local window consists of 4 blocks and global attention size is one block, we use block #4 in each local window as global. If we have different layout per head, then other heads will get #3, #2, and #1. And if we have more heads (and different layout has set) than num of global attentions, multiple head may have same global attentions.
Note) if horizontal_global_attention is set, global blocks will be set both horizontally and vertically.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completly set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
first_global_block_idx = (
num_local_blocks
- (1 + h % num_different_global_patterns) * num_global_blocks
)
# set all global blocks except the last one if (in last local window)
end = num_blocks - (num_blocks % num_local_blocks)
for i in range(first_global_block_idx, end, num_local_blocks):
# vertical global attention
first_row = 0 if attention == "bidirectional" else i
# (((i // self.num_local_blocks) + 1) * self.num_local_blocks)
# if (first_row < num_blocks):
layout[h, first_row:, i : i + num_global_blocks] = 1
# horizontal global attention; only in bidirectional attention
if horizontal_global_attention:
layout[h, i : i + num_global_blocks, :] = 1
# set last global blocks; handle possible short last local window
if end < num_blocks:
start = min(end + first_global_block_idx, num_blocks - num_global_blocks)
end = start + num_global_blocks
# vertical global attention
first_row = 0 if attention == "bidirectional" else start
# (((start // self.num_local_blocks) + 1) * self.num_local_blocks)
# if (first_row < num_blocks):
layout[h, first_row:, start:end] = 1
# horizontal global attention
if horizontal_global_attention:
layout[h, start:end, :] = 1
return layout
layout = setup_layout(num_heads, max_position, block)
for h in range(0, num_heads):
layout = set_local_layout(h, layout)
layout = set_global_layout(h, layout)
num_blocks = layout.shape[1]
full_layout = layout.new_zeros(num_heads, num_blocks, block, num_blocks, block)
full_layout[:, :, :, :, :] = layout[:, :, None, :, None]
full_layout = full_layout.reshape(num_heads, max_position, max_position)
return full_layout
def build_longformer_pattern(
num_heads,
max_position,
block=16,
num_sliding_window_blocks=3,
global_block_indices=[0],
global_block_end_indices=None,
**unused_kwargs,
):
"""Initialize the edited `Longformer` Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
global_block_indices: optional: a list of integers determining which blocks are considered as global attention. Given indices, determine the blocks that all other token blocks attend to and they attend to all other token blocks. Default value is only index 0. Notice that if global_block_end_indices parameter is set, this parameter is used as starting index of each global window.
global_block_end_indices: optional: a list of integers determining end indices of global window blocks. By default this is not used. But if it is set, it must have the same size of global_block_indices parameter, and combining this two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention.
"""
if global_block_end_indices is not None:
if len(global_block_indices) != len(global_block_end_indices):
raise ValueError(
f"Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!"
)
for _, (start_idx, end_idx) in enumerate(
zip(global_block_indices, global_block_end_indices)
):
if start_idx >= end_idx:
raise ValueError(
f"Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!"
)
def set_sliding_window_layout(h, layout):
num_blocks = layout.shape[1]
if num_blocks < num_sliding_window_blocks:
raise ValueError(
f"Number of sliding window blocks, {num_sliding_window_blocks}, must be smaller than overal number of blocks in a row, {num_blocks}!"
)
w = num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks)
layout[h, row, start:end] = 1
return layout
def set_global_layout(h, layout):
num_blocks = layout.shape[1]
if global_block_end_indices is None:
for idx in global_block_indices:
if idx < num_blocks:
layout[h, idx, :] = 1
layout[h, :, idx] = 1
else:
for _, (start_idx, end_idx) in enumerate(
zip(global_block_indices, global_block_end_indices)
):
if start_idx < num_blocks:
end_idx = min(end_idx, num_blocks)
layout[h, start_idx:end_idx, :] = 1
layout[h, :, start_idx:end_idx] = 1
return layout
layout = setup_layout(num_heads, max_position, block)
for h in range(0, num_heads):
layout = set_sliding_window_layout(h, layout)
layout = set_global_layout(h, layout)
num_blocks = layout.shape[1]
full_layout = layout.new_zeros(num_heads, num_blocks, block, num_blocks, block)
full_layout[:, :, :, :, :] = layout[:, :, None, :, None]
full_layout = full_layout.reshape(num_heads, max_position, max_position)
return full_layout
def build_bigbird_pattern(
num_heads,
max_position,
block=16,
num_random_blocks=1,
num_sliding_window_blocks=3,
num_global_blocks=1,
**unused_kwargs,
):
"""Initialize the BigBird Sparsity Pattern Config.
For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
Arguments:
num_heads: required: an integer determining number of attention heads of the layer.
block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
num_random_blocks: optional: an integer determining the number of random blocks in each block row.
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
num_global_blocks: optional: an integer determining how many consecutive blocks, starting from index 0, are considered as global attention. Global block tokens will be attended by all other block tokens and will attend to all other block tokens as well.
"""
def set_random_layout(h, layout):
"""Sets random attantion layout used by the given head in the sparse attention.
Note) By default, it assumes there will be a unique random block layout for all heads; unless `different_layout_per_head` parameter is set in which each head can have a different random layout.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completly set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which random layout is set
"""
num_blocks = layout.shape[1]
if num_blocks < num_random_blocks:
raise ValueError(
f"Number of random blocks, {num_random_blocks}, must be smaller than overal number of blocks in a row, {num_blocks}!"
)
for row in range(0, num_blocks):
rnd_cols = random.sample(range(0, num_blocks), num_random_blocks)
layout[h, row, rnd_cols] = 1
return layout
def set_sliding_window_layout(h, layout):
"""Sets sliding local attantion layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completly set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set
"""
num_blocks = layout.shape[1]
if num_blocks < num_sliding_window_blocks:
raise ValueError(
f"Number of sliding window blocks, {num_sliding_window_blocks}, must be smaller than overal number of blocks in a row, {num_blocks}!"
)
w = num_sliding_window_blocks // 2
for row in range(0, num_blocks):
start = max(0, row - w)
end = min(row + w + 1, num_blocks)
layout[h, row, start:end] = 1
return layout
def set_global_layout_itc(h, layout):
"""Sets global attantion layout used by the given head in the sparse attention.
Arguments:
h: required: an integer determining head index
layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completly set at this step
Return:
layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
"""
num_blocks = layout.shape[1]
if num_blocks < num_global_blocks:
raise ValueError(
f"Number of global blocks, {num_global_blocks}, must be smaller than overal number of blocks in a row, {num_blocks}!"
)
layout[h, 0:num_global_blocks, :] = 1
layout[h, :, 0:num_global_blocks] = 1
return layout
layout = setup_layout(num_heads, max_position, block)
for h in range(0, num_heads):
layout = set_random_layout(h, layout)
layout = set_sliding_window_layout(h, layout)
layout = set_global_layout_itc(h, layout)
num_blocks = layout.shape[1]
full_layout = layout.new_zeros(num_heads, num_blocks, block, num_blocks, block)
full_layout[:, :, :, :, :] = layout[:, :, None, :, None]
full_layout = full_layout.reshape(num_heads, max_position, max_position)
return full_layout
def build_block_structure_random_pattern(
num_heads,
max_position,
block_shape=(64, 128),
pe_array_shape=(32, 32),
different_layout_per_head=False,
**unused_kwargs,
):
if max_position % block_shape[0] != 0 or max_position % block_shape[1] != 0:
raise ValueError(
f"Sequence length, {max_position}, must be dividable by block size, {block_shape}!"
)
if pe_array_shape[0] > block_shape[0] or pe_array_shape[1] * 2 > block_shape[1]:
raise ValueError(
f"PE Array shape, {pe_array_shape}, must be smaller than half block, {(block_shape[0], block_shape[1] // 2)}!"
)
def set_block_layout(h, layout):
# layout: int64 [num_heads, max_pos, max_pos]
unstru_mask = torch.zeros(pe_array_shape[0], pe_array_shape[1] * 2, dtype=torch.int64)
unstru_mask[:, :pe_array_shape[1]] = 1
unstru_mask = unstru_mask[
torch.arange(unstru_mask.shape[0]).unsqueeze(-1),
torch.argsort(torch.rand(*unstru_mask.shape), dim=-1)
]
layout[h, :pe_array_shape[0], :pe_array_shape[1] * 2] = unstru_mask
layout[h, :, :] = layout[
h,
torch.argsort(torch.rand(layout.shape[1])).unsqueeze(-1),
torch.argsort(torch.rand(layout.shape[2])).unsqueeze(0)
]
return layout
layout = setup_layout(num_heads, max_position, block=1)
block_rows, block_cols = max_position//block_shape[0], max_position//block_shape[1]
layout = layout.reshape(num_heads, block_rows, block_cols, *block_shape)
if different_layout_per_head:
for h in range(0, num_heads):
for r in range(block_rows):
for c in range(block_cols):
set_block_layout(h, layout[:, r, c])
else:
for r in range(block_rows):
for c in range(block_cols):
set_block_layout(0, layout[:, r, c])
layout[1:, :, :] = layout[0, :, :]
layout = layout.permute(0, 1, 3, 2, 4).reshape(num_heads, max_position, max_position)
return layout
ATTN_MASK_BUILDERS = {
"DenseSparsityConfig": build_dense_pattern,
"FixedSparsityConfig": build_fixed_pattern,
"BSLongformerSparsityConfig": build_longformer_pattern,
"BigBirdSparsityConfig": build_bigbird_pattern,
"VariableSparsityConfig": None,
"BlockStructuredRandomSparsityConfig": build_block_structure_random_pattern,
}
def build_static_sparsity_mask(
json_file, num_heads, max_position=MAX_SEQ_LENGTH
) -> torch.Tensor:
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
cfg_json: dict = json.loads(text)
cfg_cls_name = cfg_json.pop("class")
print(f"Loaded {cfg_cls_name} from {json_file}:\n{cfg_json}")
sp_attn_builder = ATTN_MASK_BUILDERS.get(cfg_cls_name)
assert (
sp_attn_builder is not None
), f"Cannot find AttnMaskBuilder named {cfg_cls_name}"
sparsity_mask = sp_attn_builder(num_heads, max_position, **cfg_json)
return sparsity_mask
def apply_static_sparsity_mask(sparsity_mask, attention_scores):
# sparsity_mask: [num_attention_heads, max_pos, max_pos] (max_pos >= tgt_len, src_len)
# attention_scores: [batch_size, num_attention_heads, tgt_len, src_len]
sparsity_mask = (1.0 - sparsity_mask) * -10000.0
sparsity_mask = sparsity_mask.type_as(attention_scores)
tgt_len, src_len = attention_scores.shape[-2:]
attention_scores += sparsity_mask[:, :tgt_len, :src_len]
if __name__ == "__main__":
# attn_mask = build_static_sparsity_mask('big_bird_sparsity_config.json', max_position=8)
# print(attn_mask.shape)
# print(attn_mask[0])
# attn_mask = build_static_sparsity_mask('longformer_sparsity_config.json', max_position=8)
# print(attn_mask.shape)
# print(attn_mask[0])
# attn_mask = build_static_sparsity_mask('fixed_sparsity_config.json', max_position=8)
# print(attn_mask.shape)
# print(attn_mask[0])
# # config: "block_shape": [4, 4], "pe_array_shape": [2, 2]
# attn_mask = build_static_sparsity_mask('block_structured_random_sparsity_config.json', max_position=16)
# print(attn_mask.shape)
# print(attn_mask[0])
pass
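    # A small hand-rolled check (not from the DeepSpeed source; all sizes are made up):
    # build a tiny "fixed" layout and mask a dummy score tensor with it.
    demo_mask = build_fixed_pattern(num_heads=2, max_position=8, block=2,
                                    num_local_blocks=2, num_global_blocks=1)
    print(demo_mask.shape)  # torch.Size([2, 8, 8])
    demo_scores = torch.zeros(1, 2, 8, 8)
    apply_static_sparsity_mask(demo_mask, demo_scores)
    print((demo_scores == 0).float().mean())  # fraction of positions that remain attendable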
|
import random
from enum import Enum
class Suit(Enum):
"""
Suit start from 0 to 3
"""
Spades = 0
Hearts = 1
Clubs = 2
Diamonds = 3
class Value(Enum):
"""
Value start from 1 to 13
"""
Ace, Two, Three, Four = 1, 2, 3, 4
Five, Six, Seven, Eight = 5, 6, 7, 8
Nine, Ten, Jack, Queen, King = 9, 10, 11, 12, 13
class Card:
"""
Card with 2 parameters `suit` and `value`
"""
def __init__(self, suit, value):
self.suit = Suit(suit)
self.value = Value(value)
def __lt__(self, other):
return self.value.value < other.value.value
class deck:
def __init__(self, randomDeck):
self.facedown = [Card(i, j) for i in range(0, 4) for j in range(1, 14)]
if randomDeck:
random.shuffle(self.facedown)
self.faceup = []
def shuffleCurrentDeck(self):
random.shuffle(self.facedown)
def drawCardFromTopOfDeck(self):
if len(self.facedown) == 0:
random.shuffle(self.faceup)
self.facedown = self.faceup
self.faceup = []
return self.facedown.pop()
def placeTopCardOfDeckOnDiscardPile(self):
if len(self.facedown) == 0:
random.shuffle(self.faceup)
self.facedown = self.faceup
self.faceup = []
self.faceup.append(self.facedown.pop())
def getTopCardofDiscardPile(self):
        return self.faceup[-1]
def placeCardOnDiscardPile(self, card):
self.faceup.append(card)
def isEmpty(self):
if len(self.facedown) == 0:
return True
return False
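# Hypothetical usage sketch (not part of the original module): shuffle, draw a card,
# discard it, and check whether the draw pile is empty.
if __name__ == "__main__":
    d = deck(randomDeck=True)
    card = d.drawCardFromTopOfDeck()
    print(card.suit.name, card.value.name)
    d.placeCardOnDiscardPile(card)
    print(d.isEmpty())  # False: 51 cards remain face down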
|
import sys
import os
f = open("C:/Users/user/Documents/python/atcoder/ABC118/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
import math
n = int(input())
def factorize(n):
fct = [] # prime factor
b, e = 2, 0 # base, exponent
while b * b <= n:
while n % b == 0:
n = n // b
e = e + 1
if e > 0:
fct.append((b, e))
b, e = b + 1, 0
if n > 1:
fct.append((n, 1))
return fct
def divisor(fac):
temp = 1
for i in range(len(fac)):
temp *= (fac[i][1] + 1)
return temp
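# e.g. factorize(12) == [(2, 2), (3, 1)], so divisor(...) == (2+1)*(1+1) == 6,
# matching the six divisors 1, 2, 3, 4, 6, 12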
print(divisor(factorize(math.factorial(n))) % 1000000007)
|
# -*- coding: utf-8 -*-
import numpy as np
# from DataProcessor.QASent import QASentdataPreprocess
from DataProcessor.QASentForServer import QASentForServerdataPreprocess
from DataProcessor.WikiQA import WikiQAdataPreprocess
from DataProcessor.insuranceQA import insuranceQAPreprocess
from NeuralModel.IAGRU_WORD import IAGRU_WORD
from NeuralModel.OAGRU import OAGRU, OAGRU_small
from TaskBase import TaskBases
from public_functions import *
__author__ = 'benywon'
insuranceQA = 'insuranceQA'
WikiQA = 'WikiQA'
QASent = 'QASent'
IAGru = 'IAGru'
OAGru = 'OAGru'
OAGru_SMALL = 'OAGru_small'
class AnswerSelection(TaskBases):
def __init__(self, MODEL=IAGru, DATASET=WikiQA, **kwargs):
TaskBases.__init__(self)
if DATASET == insuranceQA:
self.Data = insuranceQAPreprocess(**kwargs)
elif DATASET == WikiQA:
self.Data = WikiQAdataPreprocess(**kwargs)
else:
self.Data = QASentForServerdataPreprocess(**kwargs)
if MODEL == IAGru:
self.Model = IAGRU_WORD(data=self.Data, **kwargs)
elif MODEL == OAGru_SMALL:
self.Model = OAGRU_small(data=self.Data, **kwargs)
else:
self.Model = OAGRU(data=self.Data, **kwargs)
@TaskBases.Train
def Train(self):
MAP, MRR = self.Test()
append_name = self.Data.dataset_name + '_MAP_' + str(MAP) + '_MRR_' + str(MRR)
self.Model.save_model(append_name)
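    # Test() scores every candidate answer in a test pack, sorts the pack by the
    # model score, then accumulates MAP (average precision over the relevant
    # answers) and MRR (reciprocal rank of the first relevant answer) per pack
    # and reports the mean of each across all packs.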
def Test(self):
print '\nstart testing...'
final_result_MAP = []
final_result_MRR = []
for one_pack in self.Data.TEST:
batch_result = []
for one in one_pack:
out = self.Model.test_function(one[0], one[1])
batch_result.append([out, one[2]])
batch_result.sort(key=lambda x: x[0], reverse=True)
result = 0.
right_index = 0.
first_one_position = -1.0
for i, value in enumerate(batch_result):
if value[1] == 1:
if first_one_position == -1.0:
first_one_position = (i + 1)
right_index += 1
result += right_index / (i + 1)
final_result_MAP.append(result / right_index)
final_result_MRR.append(1.0 / first_one_position)
MAP = np.mean(np.asarray(final_result_MAP))
MRR = np.mean(np.asarray(final_result_MRR))
print 'final-result-MAP:' + str(MAP)
print 'final-result-MRR:' + str(MRR)
return MAP, MRR
def testACC(self):
pass
def output_softmax(self,number=0,path=None):
print 'start output softmax'
self.Model.load_model(path)
length = len(self.Data.TRAIN[0])
pool_list = list()
tran = lambda x: '_'.join(map(str, x.tolist()))
softmax_pool = []
for i in xrange(length):
question = self.Data.TRAIN[0][i]
pool_list.append(tran(question))
samples = []
answer_yes = self.Data.TRAIN[1][i]
answer_no = self.Data.TRAIN[2][i]
if tran(answer_yes) not in pool_list:
samples.append(answer_yes)
if tran(answer_no) not in pool_list:
samples.append(answer_no)
for sample in samples:
pool_list.append(tran(sample))
softmax = self.Model.test_function(question, sample)
softmax_pool.append(softmax)
dump_file(obj=softmax_pool, filepath=str(number)+'softmax_result_i.pickle')
def getO(self):
fileList=get_dir_files('/home/benywon/PycharmProjects/bAbi/model/IAGRU/test')
for i,filename in enumerate(fileList):
self.output_softmax(number=i,path=filename)
if __name__ == '__main__':
c = AnswerSelection(optmizer='adadelta', MODEL=IAGru, DATASET=WikiQA, batch_training=False, sampling=2,
reload=False,
output_softmax=False,
Margin=0.12,
use_the_last_hidden_variable=False, use_clean=True, epochs=50, Max_length=50,
N_hidden=150)
c.Train()
|
def validate_ean(code):
checksum = int(code[-1])
total = 0
for i, a in enumerate(code[:-1]):
number = int(a)
if i % 2 != 0:
number *= 3
total += number
total = 0 if total % 10 == 0 else 10 - (total % 10)
return total == checksum
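# Hedged sanity check: "4006381333931" is a commonly cited valid EAN-13 sample,
# so it should pass, while the same digits with a wrong check digit should not.
if __name__ == "__main__":
    assert validate_ean("4006381333931")
    assert not validate_ean("4006381333932")
    print("validate_ean sanity checks passed")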
|
import random
import numpy as np
import tensorflow as tf
import scipy.sparse as sp
from .base_sequence import Sequence
class MiniBatchSequence(Sequence):
def __init__(
self,
x,
y,
out_weight=None,
shuffle=False,
batch_size=1,
*args, **kwargs
):
super().__init__(*args, **kwargs)
assert batch_size == 1
self.n_batches = len(x)
self.shuffle = shuffle
self.indices = list(range(self.n_batches))
self.batch_size = batch_size
self.x, self.y, self.out_weight = self.astensors(x, y, out_weight)
def __len__(self):
return self.n_batches
def __getitem__(self, index):
idx = self.indices[index]
return self.x[idx], self.y[idx], self.out_weight[idx]
def on_epoch_end(self):
if self.shuffle:
self._shuffle_batches()
def _shuffle_batches(self):
"""
Shuffle all nodes at the end of each epoch
"""
random.shuffle(self.indices)
class SAGEMiniBatchSequence(Sequence):
def __init__(
self,
x,
y=None,
out_weight=None,
sizes=[5, 5],
shuffle=False,
batch_size=512,
*args, **kwargs
):
super().__init__(*args, **kwargs)
self.node_attr, self.adj_matrix, self.batch_nodes = x
self.y = y
self.n_batches = int(np.ceil(len(self.batch_nodes) / batch_size))
self.shuffle = shuffle
self.batch_size = batch_size
self.indices = np.arange(len(self.batch_nodes))
self.sizes = sizes
self.node_attr = self.astensor(self.node_attr)
def __len__(self):
return self.n_batches
def __getitem__(self, index):
if self.shuffle:
idx = self.indices[index *
self.batch_size:(index + 1) * self.batch_size]
else:
idx = slice(index * self.batch_size, (index + 1) * self.batch_size)
nodes_input = [self.batch_nodes[idx]]
for num_sample in self.sizes:
neighbors = sample_neighbors(
self.adj_matrix, nodes_input[-1], num_sample).ravel()
nodes_input.append(neighbors)
y = self.y[idx] if self.y is not None else None
return self.astensors([self.node_attr, *nodes_input], y)
    def on_epoch_end(self):
        if self.shuffle:
            self._shuffle_batches()
def _shuffle_batches(self):
"""
Shuffle all nodes at the end of each epoch
"""
random.shuffle(self.indices)
def sample_neighbors(adj_matrix, nodes, num_neighbors):
np.random.shuffle(adj_matrix.T)
return adj_matrix[nodes, :num_neighbors]
class FastGCNBatchSequence(Sequence):
def __init__(
self,
x,
y=None,
shuffle=False,
batch_size=None,
rank=None,
*args, **kwargs
):
super().__init__(*args, **kwargs)
node_attr, adj_matrix = x
self.y = y
self.n_batches = int(
np.ceil(adj_matrix.shape[0] / batch_size)) if batch_size else 1
self.shuffle = shuffle
self.batch_size = batch_size
self.indices = np.arange(adj_matrix.shape[0])
self.rank = rank
if rank:
self.p = column_prop(adj_matrix)
self.node_attr, self.adj_matrix = node_attr, adj_matrix
def __len__(self):
return self.n_batches
def __getitem__(self, index):
if not self.batch_size:
(node_attr, adj_matrix), y = self.full_batch()
else:
(node_attr, adj_matrix), y = self.mini_batch(index)
if self.rank:
p = self.p
rank = self.rank
distr = adj_matrix.sum(0).A1.nonzero()[0]
if rank > distr.size:
q = distr
else:
q = np.random.choice(
distr, rank, replace=False, p=p[distr] / p[distr].sum())
adj_matrix = adj_matrix[:, q].dot(sp.diags(1.0 / (p[q] * rank)))
if tf.is_tensor(node_attr):
node_attr = tf.gather(node_attr, q)
else:
node_attr = node_attr[q]
return self.astensors((node_attr, adj_matrix), y)
def full_batch(self):
return (self.node_attr, self.adj_matrix), self.y
def mini_batch(self, index):
if self.shuffle:
idx = self.indices[index *
self.batch_size:(index + 1) * self.batch_size]
else:
idx = slice(index * self.batch_size, (index + 1) * self.batch_size)
y = self.y[idx]
adj_matrix = self.adj_matrix[idx]
node_attr = self.node_attr
return (node_attr, adj_matrix), y
    def on_epoch_end(self):
        if self.shuffle:
            self._shuffle_batches()
def _shuffle_batches(self):
"""
Shuffle all nodes at the end of each epoch
"""
random.shuffle(self.indices)
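# column_prop returns, for each column of the sparse adjacency matrix, its share
# of the total column norm; FastGCNBatchSequence uses it above to build self.p,
# the importance-sampling distribution over columns when a rank is given.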
def column_prop(adj):
column_norm = sp.linalg.norm(adj, axis=0)
norm_sum = column_norm.sum()
return column_norm / norm_sum
|
#lib import
import sys, os
sys.path.append('..\\..\\')
import derpapi
import pygame
RUN = False
y = 30
c = 0
optpos = {}
optkeys = []
set_stor = [{'name':'Airplane Mode', 'type':'switch', 'default':False}, {'name':'WiFi','type':'custom','default':None}]
#screen init
try:
pygame.init()
except:
pygame.display.init()
screen = pygame.display.set_mode([480, 320], pygame.NOFRAME)
try:
font = pygame.font.Font('..\\..\\font_main.otf', 32)
except:
font = pygame.font.Font('font_main.otf', 32)
#load option sprites
sprites = {}
for i in os.listdir('optionsprites'):
for n in os.listdir('optionsprites\\' + i):
sprites[i + '.' + n.split('.')[0]] = pygame.image.load('optionsprites\\' + i + '\\' + n)
print(sprites)
def cusfunc(cname):
if cname == 'WiFi':
print('NO CONNECTION SRY SCRUB')
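# render() draws one settings row: it blits the sprite named by `opt`, draws the
# row label from set_stor[c], and records the sprite's on-screen position and
# size in optpos/optkeys so the click handler in main() can hit-test it.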
def render(opt, custom=False):
global screen, sprites, y, optpos, set_stor, c, font, optkeys
screen.blit(sprites[opt], (10,y))
if custom:
screen.blit(font.render(set_stor[c]['name'], True, (0,0,0)), (25, y + 5))
else:
screen.blit(font.render(set_stor[c]['name'], True, (0,0,0)), (sprites[opt].get_width() + 15, y + 5))
optpos[sprites[opt]] = ((10, y), sprites[opt].get_size())
optkeys.append(sprites[opt])
def main():
global RUN, derpapi, cusfunc, screen, render, y, optpos, set_stor, c, font, optkeys
#place main code here
EXIT = pygame.image.load('exit.png')
settings = derpapi.retrieve()
for i in set_stor:
if not i['name'] in settings.keys():
settings[i['name']] = i['default']
RUN = True
while RUN:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if derpapi.collision((430, 0), (50, 50), event.pos):
RUN = False
else:
cc = 0
for i in optkeys:
if derpapi.collision(optpos[i][0], optpos[i][1], event.pos):
if set_stor[cc]['type'] == 'switch':
if settings[set_stor[cc]['name']] == True:
settings[set_stor[cc]['name']] = False
else:
settings[set_stor[cc]['name']] = True
elif set_stor[cc]['type'] == 'custom':
cusfunc(set_stor[cc]['name'])
cc += 1
screen.fill((230,230,230))
screen.blit(font.render('SETTINGS', True, (0,0,0)), (10,10))
screen.blit(EXIT, (430, 0))
y = 45
optpos = {}
optkeys = []
c = 0
for i in set_stor:
if i['type'] == 'switch':
if settings[i['name']] == True:
render('switch.on')
else:
render('switch.off')
elif i['type'] == 'custom':
render('custom.custom', custom=True)
y += 42
c += 1
pygame.display.flip()
derpapi.store(data=settings)
main()
pygame.display.quit()
|
import json
from . import utils
from .post_request import post_request
from .config_param import config_param
def upload_sector(filename, sector_name):
"""
Upload sector to the simulator host.
Parameters
----------
filename : str
A string indicating path to sector geojson file on the local machine.
sector_name : str
A string indicating name to store sector under on the simulator host.
Returns
-------
TRUE if successful. Otherwise an exception is thrown.
Examples
--------
>>> pydodo.upload_sector(filename = "~/Documents/test_sector.geojson", sector_name = "test_sector")
"""
utils._validate_string(filename, "filename")
utils._validate_string(sector_name, "sector_name")
with open(filename, "r") as f:
content = json.load(f)
body = {"name": sector_name, "content": content}
return post_request(config_param("endpoint_upload_sector"), body)
|
import numpy as np
from signum import sign
################################################################################
# class Perceptron
################################################################################
class Perceptron(object):
def __init__(self, dim = 0, avg_flag = False):
self.dim = dim + 1 # #dimensions of perceptron + bias
self.w = np.zeros(self.dim) # weights
self.w_avg = np.zeros(self.dim) # averaged weights
self.avg_flag = avg_flag # This flag counts the avg flag
self.cnt = 0 # Counter for number of examples
def get_weight(self):
return self.w
def init_random(self):
self.w = 0.02*np.random.rand(self.dim) - 0.01 # small number between -0.01 and 0.01
self.w_avg = self.w.copy()
def update(self, lr, x, y):
self.w = self.w + lr * y * np.append(x,1)
def update_avg(self):
self.cnt += 1
if(self.avg_flag == True):
alpha = float(1)/self.cnt
self.w_avg = (1-alpha)*self.w_avg + alpha*self.w
def predict(self, x):
if(self.avg_flag == False):
if(x.ndim == 1):
return sign(np.array([self.w.dot(np.append(x,1))]))
else:
return sign(np.append(x,np.ones([len(x), 1]),1).dot(self.w.T))
else:
if(x.ndim == 1):
return sign(np.array([self.w_avg.dot(np.append(x,1))]))
else:
return sign(np.append(x,np.ones([len(x), 1]),1).dot(self.w_avg.T))
def predict_train(self,x):
if(x.ndim == 1):
return sign(np.array([self.w.dot(np.append(x,1))]))
else:
return sign(np.append(x,np.ones([len(x), 1]),1).dot(self.w.T))
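# Hedged usage sketch: train on a tiny linearly separable set with labels in
# {-1, +1}. It assumes the local `signum.sign` helper imported above behaves
# like np.sign on arrays; nothing here comes from the original training script.
if __name__ == "__main__":
    X = np.array([[2.0, 1.0], [1.0, 2.0], [-1.0, -2.0], [-2.0, -1.0]])
    y = np.array([1, 1, -1, -1])
    p = Perceptron(dim=2)
    for _ in range(10):
        for xi, yi in zip(X, y):
            pred = np.asarray(p.predict_train(xi)).ravel()[0]
            if pred != yi:
                p.update(lr=0.1, x=xi, y=yi)
            p.update_avg()
    print("weights:", p.get_weight())
    print("predictions:", p.predict(X))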
|
def crc16(data: str, poly: int = 0xA001) -> str:
'''
CRC-16 MODBUS HASHING ALGORITHM
'''
crc = 0xFFFF
for byte in data:
crc ^= ord(byte)
for _ in range(8):
crc = ((crc >> 1) ^ poly
if (crc & 0x0001)
else crc >> 1)
hv = hex(crc).upper()[2:]
blueprint = '0000'
return (blueprint if len(hv) == 0 else blueprint[:-len(hv)] + hv)
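# Hedged sanity check: the conventional CRC-16/MODBUS check value for the ASCII
# string "123456789" is 0x4B37, so this routine should return the string '4B37'.
if __name__ == '__main__':
    assert crc16('123456789') == '4B37'
    print(crc16('123456789'))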
|
"""
This script implements a pH calibration protocol that involves mixing Phosphoric Acid and water, and then adaptively adding NaOH until the solution reaches a target pH. The protocol includes several cases for expected conditions, such as requiring pH meter re-calibration, failures in Provision steps, failures in the calibration process (i.e. unrecoverably exceeding the target pH), and failures in cleanup activities. The primitives require custom implementations to support calculation of volumes and quantities, as well as defining decision point predicates.
"""
import importlib
import logging
import os
from os.path import basename
from typing import Tuple
# import labop_time as labopt
import rdflib as rdfl
import sbol3
import tyto
from sbol3 import Document
import examples.pH_calibration.ph_calibration_utils as util
import labop
import uml
from labop.execution_engine import ExecutionEngine
logger: logging.Logger = logging.Logger("pH_calibration")
CONT_NS = rdfl.Namespace("https://sift.net/container-ontology/container-ontology#")
OM_NS = rdfl.Namespace("http://www.ontology-of-units-of-measure.org/resource/om-2/")
LIBRARY_NAME = "pH_calibration"
DOCSTRING = "This protocol implements a pH calibration protocol with decision nodes."
def prepare_document() -> Document:
logger.info("Setting up document")
doc = sbol3.Document()
sbol3.set_namespace("https://bbn.com/scratch/")
return doc
def import_labop_libraries() -> None:
logger.info("Importing libraries")
labop.import_library("liquid_handling")
logger.info("... Imported liquid handling")
labop.import_library("plate_handling")
logger.info("... Imported plate handling")
labop.import_library("spectrophotometry")
logger.info("... Imported spectrophotometry")
labop.import_library("sample_arrays")
logger.info("... Imported sample arrays")
def create_protocol() -> labop.Protocol:
logger.info("Creating protocol")
protocol: labop.Protocol = labop.Protocol("pH_calibration_protocol")
protocol.name = "pH calibration protocol"
protocol.description = DOCSTRING
return protocol
def create_subprotocol(doc) -> labop.Protocol:
logger.info("Creating subprotocol")
protocol: labop.Protocol = labop.Protocol("pH_adjustment_protocol")
protocol.name = "pH adjustment protocol"
protocol.description = "pH adjustment protocol"
doc.add(protocol)
############################################################################
# Protocol Input Parameters
############################################################################
(
reaction_vessel,
naoh_container,
measurement_delay,
initial_transfer_amount,
) = util.get_ph_adjustment_protocol_inputs(protocol)
############################################################################
# Define Custom Primitives needed for protocol
############################################################################
(
measure_pH_primitive,
measure_temperature_primitive,
at_target_primitive,
calculate_naoh_addition,
error_message,
) = util.define_pH_adjustment_protocol_primitives(protocol.document, LIBRARY_NAME)
############################################################################
# Protocol Steps
############################################################################
# 7.1 Transfer NaOH into reaction vessel
transfer = protocol.execute_primitive(
"Transfer",
source=naoh_container,
destination=reaction_vessel,
amount=initial_transfer_amount,
)
protocol.order(protocol.initial(), transfer)
# 7.2 Wait X seconds (FIXME, need to implement)
# 7.3 Measure pH
measure_pH = protocol.execute_primitive(
measure_pH_primitive, samples=reaction_vessel
)
# 7.4 Measure Temp
measure_temp = protocol.execute_primitive(
measure_temperature_primitive, samples=reaction_vessel
)
# Delay measurement
# FIXME measurement_delay is an input parameter, but temporal constraints are instantiated at author time, rather than runtime.
# wait_pH = labopt.precedes(transfer, measurement_delay, measure_pH, units=measurement_delay.unit)
protocol.order(transfer, measure_pH)
# wait_temp = labopt.precedes(transfer, measurement_delay.value, measure_temp, units=measurement_delay.unit)
protocol.order(transfer, measure_temp)
join_node = uml.JoinNode()
protocol.nodes.append(join_node)
protocol.order(measure_pH, join_node)
protocol.order(measure_temp, join_node)
# 7.5 At Target?
at_target_decision = protocol.make_decision_node(
join_node,
decision_input_behavior=at_target_primitive,
decision_input_source=measure_pH.output_pin("measurement"),
)
# At Target -> Yes: exit
at_target_decision.add_decision_output(protocol, True, protocol.final())
# At Target -> No: 7.6 calc next, goto Transfer
calculate_naoh_addition_invocation = protocol.execute_primitive(
calculate_naoh_addition,
resource=naoh_container,
temperature=measure_temp.output_pin("measurement"),
pH=measure_pH.output_pin("measurement"),
reaction_vessel=reaction_vessel,
)
# Edge into calculation
at_target_decision.add_decision_output(
protocol, False, calculate_naoh_addition_invocation
)
# Edge out of calculation
protocol.edges.append(
uml.ObjectFlow(
source=calculate_naoh_addition_invocation.output_pin("return"),
target=transfer.input_pin("amount"),
)
)
# At Target -> Exception: No change, overshoot, overflow
at_target_error = protocol.execute_primitive(error_message, message="Exception")
at_target_decision.add_decision_output(protocol, "Exception", at_target_error)
protocol.edges.append(
uml.ControlFlow(source=at_target_error, target=protocol.final())
)
# time_constraints = labopt.TimeConstraints("pH Adjustment Timing",
# constraints=labopt.And([wait_pH, wait_temp]),
# protocols=[protocol])
# doc.add(time_constraints)
return protocol
def create_setup_subprotocol(doc):
logger.info("Creating setup_subprotocol")
protocol: labop.Protocol = labop.Protocol("pH_adjustment_setup_protocol")
protocol.name = "pH adjustment setup protocol"
protocol.description = "pH adjustment setup protocol"
doc.add(protocol)
############################################################################
# Protocol Input Parameters
############################################################################
(reaction_volume) = util.get_setup_protocol_inputs(protocol)
############################################################################
# Protocol Materials
############################################################################
(h3po4, ddh2o, naoh) = util.define_setup_protocol_materials(doc)
############################################################################
# Define Custom Primitives needed for protocol
############################################################################
calculate_volume_primitive = util.define_setup_protocol_primitives(
protocol.document, LIBRARY_NAME
)
############################################################################
# Protocol Steps
############################################################################
# 3.0
reaction_vessel = protocol.execute_primitive(
"EmptyContainer", specification="reaction_vessel"
)
reaction_vessel.name = "reaction_vessel"
protocol.order(protocol.initial(), reaction_vessel)
naoh_container = protocol.execute_primitive("EmptyContainer", specification="vial")
protocol.order(protocol.initial(), naoh_container)
naoh_provision = protocol.execute_primitive(
"Provision",
resource=naoh,
amount=sbol3.Measure(100, tyto.OM.milligram),
destination=naoh_container.output_pin("samples"),
)
# 20% weight
volume_phosphoric_acid = protocol.execute_primitive(
calculate_volume_primitive,
resource=h3po4,
total_volume=reaction_volume,
percentage=20,
)
(
provision_phosphoric_acid,
provision_phosphoric_acid_error_handler,
) = util.wrap_with_error_message(
protocol,
LIBRARY_NAME,
labop.loaded_libraries["liquid_handling"].find("Provision"),
resource=h3po4,
destination=reaction_vessel.output_pin("samples"),
amount=volume_phosphoric_acid.output_pin("volume"),
)
# 80% weight
volume_h2o = protocol.execute_primitive(
calculate_volume_primitive,
resource=h3po4,
total_volume=reaction_volume,
percentage=80,
)
(provision_h2o, provision_h2o_error_handler) = util.wrap_with_error_message(
protocol,
LIBRARY_NAME,
labop.loaded_libraries["liquid_handling"].find("Provision"),
resource=ddh2o,
destination=reaction_vessel.output_pin("samples"),
amount=volume_h2o.output_pin("volume"),
)
# Join all tokens before the final node
final_join = uml.JoinNode()
protocol.nodes.append(final_join)
protocol.order(final_join, protocol.final())
provision_phosphoric_acid_error_handler.add_decision_output(
protocol, None, final_join
)
provision_h2o_error_handler.add_decision_output(protocol, None, final_join)
protocol.designate_output(
"naoh_container",
labop.SampleArray,
naoh_container.output_pin("samples"),
)
rv_output = protocol.designate_output(
"reaction_vessel",
labop.SampleArray,
reaction_vessel.output_pin("samples"),
)
protocol.order(rv_output, protocol.final())
protocol.designate_output(
"volume_phosphoric_acid",
sbol3.OM_MEASURE,
volume_phosphoric_acid.output_pin("volume"),
)
protocol.designate_output(
"volume_h2o", sbol3.OM_MEASURE, volume_h2o.output_pin("volume")
)
return protocol
def pH_calibration_protocol() -> Tuple[labop.Protocol, Document]:
#############################################
# set up the document
doc: Document = prepare_document()
#############################################
# Import the primitive libraries
import_labop_libraries()
#############################################
# Create the protocol
protocol: labop.Protocol = create_protocol()
doc.add(protocol)
############################################################################
# Protocol Input Parameters
############################################################################
(reaction_volume, rpm) = util.get_ph_calibration_protocol_inputs(protocol)
############################################################################
# Define Custom Primitives needed for protocol
############################################################################
(
pH_meter_calibrated_primitive,
calibrate_pH_meter_primitive,
mix_primitive,
stop_mix_primitive,
clean_electrode_primitive,
) = util.define_pH_calibration_protocol_primitives(protocol.document, LIBRARY_NAME)
############################################################################
# Protocol Steps
############################################################################
# 1. Decide whether to calibrate pH meter, and connect to initial node
pH_meter_calibrated = protocol.make_decision_node(
protocol.initial(),
decision_input_behavior=pH_meter_calibrated_primitive,
)
# 2. If not pH_meter_calibrated, then Calibrate the pH meter if needed
calibrate_pH_meter = protocol.execute_primitive(calibrate_pH_meter_primitive)
# Link 1 -> 2 (False)
pH_meter_calibrated.add_decision_output(protocol, False, calibrate_pH_meter)
# 3. If pH_meter_calibrated, then inventorize and confirm materials
# (
# reaction_vessel,
# provision_h2o_error_handler,
# naoh_container,
# ) = make_inventorise_and_confirm_materials(protocol, reaction_volume)
# 3. Setup Reagents and Labware subprotocol
setup_subprotocol: labop.Protocol = create_setup_subprotocol(doc)
setup_subprotocol_invocation = protocol.execute_primitive(
setup_subprotocol,
reaction_volume=reaction_volume,
)
protocol.order(protocol.initial(), setup_subprotocol_invocation)
# 4. Decide whether calibration was successful
is_calibration_successful = protocol.make_decision_node(
calibrate_pH_meter.output_pin("return")
)
# 6. Decide if ready to adjust (Before 3.)
ready_to_adjust1 = uml.MergeNode()
protocol.nodes.append(ready_to_adjust1)
protocol.order(setup_subprotocol_invocation, ready_to_adjust1)
# Link 4 -> ready_to_adjust (True)
is_calibration_successful.add_decision_output(protocol, True, ready_to_adjust1)
ready_to_adjust2 = uml.MergeNode()
protocol.nodes.append(ready_to_adjust2)
protocol.order(setup_subprotocol_invocation, ready_to_adjust2)
# Link 1 -> 3 (True)
pH_meter_calibrated.add_decision_output(protocol, True, ready_to_adjust2)
# Error Message Activity
error_message_primitive = util.define_error_message(protocol.document, LIBRARY_NAME)
calibration_error = protocol.execute_primitive(
error_message_primitive, message="Calibration Failed!"
)
# Link 4 -> Error (False)
is_calibration_successful.add_decision_output(protocol, False, calibration_error)
# 5. Start Mix
mix_vessel = protocol.execute_primitive(
mix_primitive,
samples=setup_subprotocol_invocation.output_pin("reaction_vessel"),
rpm=rpm,
)
protocol.order(ready_to_adjust1, mix_vessel)
protocol.order(ready_to_adjust2, mix_vessel)
# 7. Adjustment subprotocol
adjust_subprotocol: labop.Protocol = create_subprotocol(doc)
adjust_subprotocol_invocation = protocol.execute_primitive(
adjust_subprotocol,
reaction_vessel=setup_subprotocol_invocation.output_pin("reaction_vessel"),
naoh_container=setup_subprotocol_invocation.output_pin("naoh_container"),
measurement_delay=sbol3.Measure(20, tyto.OM.second),
initial_transfer_amount=sbol3.Measure(100, tyto.OM.milligram),
)
protocol.order(mix_vessel, adjust_subprotocol_invocation)
# 8. Stop Mix
stop_mix_vessel = protocol.execute_primitive(
stop_mix_primitive,
samples=setup_subprotocol_invocation.output_pin("reaction_vessel"),
)
protocol.order(adjust_subprotocol_invocation, stop_mix_vessel)
(
clean_electrode_invocation,
clean_electrode_error_handler,
) = util.wrap_with_error_message(
protocol,
LIBRARY_NAME,
clean_electrode_primitive,
)
protocol.order(stop_mix_vessel, clean_electrode_invocation)
clean_electrode_error_handler.add_decision_output(protocol, None, protocol.final())
protocol.designate_output(
"rpm",
sbol3.OM_MEASURE,
rpm,
)
# protocol.to_dot().view()
return protocol, doc
def reload():
mainmodname = basename(__file__)[:-3]
module = importlib.import_module(mainmodname)
# reload the module to check for changes
importlib.reload(module)
# update the globals of __main__ with the any new or changed
# functions or classes in the reloaded module
globals().update(vars(module))
main()
def main():
new_protocol: labop.Protocol
new_protocol, doc = pH_calibration_protocol()
agent = sbol3.Agent("test_agent")
ee = ExecutionEngine()
parameter_values = [
labop.ParameterValue(
parameter=new_protocol.get_input("reaction_volume"),
value=sbol3.Measure(10, tyto.OM.milliliter),
),
]
try:
execution = ee.execute(
new_protocol, agent, id="test_execution", parameter_values=parameter_values
)
except Exception as e:
logger.exception(e)
print("Validating and writing protocol")
v = doc.validate()
assert len(v) == 0, "".join(f"\n {e}" for e in v)
rdf_filename = os.path.join(os.path.dirname(__file__), "pH_calibration_protocol.nt")
doc.write(rdf_filename, sbol3.SORTED_NTRIPLES)
print(f"Wrote file as {rdf_filename}")
# render and view the dot
dot = new_protocol.to_dot()
dot.render(f"{new_protocol.name}.gv")
dot.view()
if __name__ == "__main__":
main()
|
from collections import Counter
from matplotlib import pyplot as plt
grades = [83, 95, 91, 87, 70, 0, 85, 82, 100, 67, 73, 77, 0]
histogram = Counter(min(grade // 10 * 10, 90) for grade in grades)  # bucket each grade into its decile; 100 is folded into the 90s bucket
plt.bar([x+5 for x in histogram.keys()],histogram.values(), 10, edgecolor=(0,0,0))
plt.axis([-5, 105, 0, 5])
plt.xticks([10*i for i in range(11)])
plt.xlabel("Decile")
plt.ylabel("# of students")
plt.title("Grades for exam 1")
plt.show()
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from reddituser.models import RedditUser
class SignUpForm(UserCreationForm):
class Meta:
model = RedditUser
fields = UserCreationForm.Meta.fields + ('email', 'bio')
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
|
from panda3d.core import ModelNode, NodePath, Vec4, CKeyValues, Vec3
from panda3d.bsp import BSPMaterialAttrib
from .MapHelper import MapHelper
class ModelHelper(MapHelper):
ChangeWith = [
"model"
]
def __init__(self, mapObject):
MapHelper.__init__(self, mapObject)
self.modelRoot = NodePath(ModelNode("modelHelper"))
self.modelRoot.setScale(16.0)
self.modelRoot.reparentTo(self.mapObject.helperRoot)
self.vpRoots = []
def setSelectedState(self):
for vp, vpRoot in self.vpRoots:
if vp.is2D():
                # Selected: show filled, semi-transparent white in 2D
vpRoot.setRenderModeFilled()
vpRoot.setLightOff(1)
vpRoot.setFogOff(1)
vpRoot.clearAttrib(BSPMaterialAttrib)
vpRoot.setTransparency(1)
vpRoot.setColor(Vec4(1, 1, 1, 0.75), 1)
else:
vpRoot.setColorScale(Vec4(1, 1, 1, 1))
def setUnselectedState(self):
for vp, vpRoot in self.vpRoots:
if vp.is2D():
# Show unlit, untextured, blue wireframe in 2D
vpRoot.setRenderModeWireframe()
vpRoot.setLightOff(1)
vpRoot.setFogOff(1)
vpRoot.setBSPMaterial("phase_14/materials/unlit.mat", 1)
vpRoot.setColor(Vec4(0.016, 1, 1, 1), 1)
vpRoot.clearTransparency()
else:
vpRoot.setColorScale(Vec4(1, 1, 1, 1))
def select(self):
self.setSelectedState()
def deselect(self):
self.setUnselectedState()
def generate(self, helperInfo):
MapHelper.generate(self)
args = helperInfo['args']
modelPath = args[0] if len(args) > 0 else None
if not modelPath:
# Model wasn't specified in the class definition,
# check for a property called "model"
modelPath = self.mapObject.getPropertyValue("model", default = "models/smiley.egg.pz")
else:
# For some reason the fgd parser doesn't remove the quotes around the
# model path string in the game class definition
modelPath = modelPath.replace("\"", "")
if not modelPath:
return
modelNp = base.loader.loadModel(modelPath, okMissing = True)
if not modelNp:
return
# Create a representation in each viewport
for vp in base.viewportMgr.viewports:
vpRoot = self.modelRoot.attachNewNode("vpRepr")
vpRoot.hide(~vp.getViewportMask())
self.vpRoots.append((vp, vpRoot))
vpModel = modelNp.instanceTo(vpRoot)
if self.mapObject.selected:
self.setSelectedState()
else:
self.setUnselectedState()
def cleanup(self):
self.vpRoots = []
if self.modelRoot:
self.modelRoot.removeNode()
self.modelRoot = None
MapHelper.cleanup(self)
|
#! -*- coding: utf-8 -*-
from datetime import datetime
from main import db, ma
from marshmallow import fields
author_abstracts = db.Table('author_abstracts',
db.Column('author_id', db.Integer,
db.ForeignKey('scopus_authors.id')),
db.Column('abstract_id', db.Integer,
db.ForeignKey('scopus_abstracts.id')))
funding_abstracts = db.Table('funding_abstracts',
db.Column('abstract_id', db.Integer,
db.ForeignKey('scopus_abstracts.id')),
db.Column('funding_id', db.Integer,
db.ForeignKey('fundings.id')))
# area_abstracts = db.Table('area_abstracts',
# db.Column('area_id', db.Integer,
# db.ForeignKey('scopus_areas.id')),
# db.Column('abstract_id', db.Integer,
# db.ForeignKey('scopus_abstracts.id')))
class ScopusAffiliation(db.Model):
__tablename__ = 'scopus_affiliations'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.UnicodeText(), index=True)
city = db.Column(db.UnicodeText())
country = db.Column(db.UnicodeText())
scopus_affil_id = db.Column(db.UnicodeText())
def __repr__(self):
return "<ScopusAffiliation id=%s>" % self.affil_id
class ScopusAuthor(db.Model):
__tablename__ = 'scopus_authors'
id = db.Column(db.Integer(), primary_key=True)
affil_id = db.Column(db.Integer(),
db.ForeignKey('scopus_affiliations.id'))
initials = db.Column(db.String(8))
#indexed_name = db.Column(db.String(255))
surname = db.Column(db.UnicodeText())
given_name = db.Column(db.UnicodeText())
preferred_name = db.Column(db.UnicodeText())
url = db.Column(db.Text())
affiliation = db.relationship('ScopusAffiliation',
backref=db.backref('authors', lazy='dynamic'))
def __repr__(self):
return "<ScopusAuthor name=%s>" % \
(self.indexed_name.encode('utf8'))
class ScopusAbstract(db.Model):
__tablename__ = 'scopus_abstracts'
id = db.Column(db.Integer(), primary_key=True)
url = db.Column(db.Text())
identifier = db.Column(db.UnicodeText())
pii = db.Column(db.UnicodeText())
doi = db.Column(db.UnicodeText())
eid = db.Column(db.UnicodeText())
title = db.Column(db.Text())
publication_name = db.Column(db.UnicodeText())
citedby_count = db.Column(db.Integer())
cover_date = db.Column(db.DateTime())
description = db.Column(db.UnicodeText())
authors = db.relationship('ScopusAuthor',
secondary=author_abstracts,
backref=db.backref('abstracts', lazy='dynamic'))
fundings = db.relationship('Funding',
secondary=funding_abstracts,
backref=db.backref('abstracts', lazy='dynamic'))
def __repr__(self):
return "<ScopusAbstract title=%s, doi=%s>" % \
(self.title[:20], self.doi)
class ScopusSubjArea(db.Model):
__tablename__ = 'scopus_subj_areas'
id = db.Column(db.Integer(), primary_key=True)
affil_abbr = db.Column(db.String(32))
year = db.Column(db.String(8))
area = db.Column(db.String(8))
articles = db.Column(db.Integer)
citations = db.Column(db.Integer)
# class ScopusArea(db.Model):
# __tablename__ = 'scopus_areas'
# id = db.Column(db.Integer(), primary_key=True)
# abstract_id = db.Column(db.Integer(),
# db.ForeignKey('scopus_abstracts.id'))
# area = db.Column(db.String(255))
# abstracts = db.relationship('ScopusAbstract',
# secondary=area_abstracts,
# backref=db.backref('areas', lazy='dynamic'))
#
# def __repr__(self):
# return "<ScopusArea area=%s>" % (self.area)
class Funding(db.Model):
__tablename__ = 'fundings'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.UnicodeText())
year = db.Column(db.UnicodeText())
amount = db.Column(db.Float())
class AuthorSchema(ma.Schema):
id = fields.Integer(dump_only=True)
given_name = fields.String(required=True)
surname = fields.String(required=True)
abstracts = fields.Nested('AbstractSchema', many=True)
url = ma.URLFor('api.researcherresource', id='<id>', _external=True)
class AbstractSchema(ma.Schema):
id = fields.Integer(dump_only=True)
title = fields.String(required=True)
description = fields.String(required=False)
doi = fields.String()
citedby_count = fields.Integer()
cover_date = fields.DateTime()
url = ma.URLFor('api.abstractresource', id='<id>', _external=True)
class ScopusAbstractCount(db.Model):
__tablename__ = 'abstract_count'
id = db.Column(db.Integer(), primary_key=True)
institute = db.Column(db.String(128))
year = db.Column(db.Integer())
articles = db.Column(db.Integer())
citations = db.Column(db.Integer())
|