from unittest import mock
from transiter import parse
from transiter.parse import utils
def test_clean_all_good():
trip_cleaners = [mock.MagicMock() for __ in range(3)]
for cleaner in trip_cleaners:
cleaner.return_value = True
stop_event_cleaners = [mock.MagicMock() for __ in range(3)]
gtfs_cleaner = utils.TripDataCleaner(trip_cleaners, stop_event_cleaners)
trip = parse.Trip(
id="trip",
route_id="L",
direction_id=True,
stop_times=[parse.TripStopTime(stop_id="L03")],
)
clean_trips = gtfs_cleaner.clean([trip])
assert [trip] == clean_trips
for cleaner in trip_cleaners:
cleaner.assert_called_once_with(trip)
for cleaner in stop_event_cleaners:
cleaner.assert_called_once_with(trip, trip.stop_times[0])
def test_clean_buggy_trip():
trip_cleaners = [mock.MagicMock() for __ in range(3)]
for cleaner in trip_cleaners:
cleaner.return_value = True
trip_cleaners[1].return_value = False
stop_event_cleaners = [mock.MagicMock() for __ in range(3)]
gtfs_cleaner = utils.TripDataCleaner(trip_cleaners, stop_event_cleaners)
trip = parse.Trip(
id="trip",
route_id="L",
direction_id=True,
stop_times=[parse.TripStopTime(stop_id="L03")],
)
clean_trips = gtfs_cleaner.clean([trip])
assert [] == clean_trips
trip_cleaners[0].assert_called_once_with(trip)
trip_cleaners[1].assert_called_once_with(trip)
trip_cleaners[2].assert_not_called()
for cleaner in stop_event_cleaners:
cleaner.assert_not_called()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 09:43:24 2019
Script for merging TNO and NAEI emission files
;
; This is based on Doug Lowe's NCL script to do the same thing
; but with TNO/NAEI whereas we will use EDGAR/NAEI
; - treatment of the emission variables will be hardcoded,
; so that we can check for existence of NAEI emissions.
;
; Rules for merging:
; 1) EDGAR emissions will only be taken where there are no
; NAEI emissions (this will be based on a total summation of
; NAEI emissions, rather than done on a species by species basis).
; 2) A multiplying factor of 1.6 is applied to OC_DOM, OC_TRA, and OC_25_10
; emissions (to roughly account for the Carbon to "everything" mass ratio for OM)
; 4) OIN 2.5 PM species will be either the difference between
; E_PM25 and the sum of BC_1 + EC_1_25 + OC_DOM + OC_TRA
; or 10% of the E_PM25 mass (whichever is smaller)
; (we'll make sure that all emissions are >0)
; 5) OIN 10 PM species will be the difference between
; E_PM_10 and the sum of OC_25_10 + EC_25_10
; (we'll make sure that all emissions are >0)
@author: ee15amg
"""
import numpy as np
from netCDF4 import Dataset
from netCDF4 import num2date, date2num
#import time
#from scipy.io.netcdf import netcdf_file
#import matplotlib.pyplot as plt
# define input and output files
filename_edgar = ('wrfchemi_edgar_00z_d01')
filename_naei = ('wrfchemi_naei_00z_d01')
filename_combined = ('wrfchemi_00z_d01')
# open the files for processing
F_NAEI = Dataset(filename_naei,"r")
F_EDGAR = Dataset(filename_edgar,"r")
F_OUT = Dataset(filename_combined, "w")
#F_OUT = netcdf_file(filename_combined, "w")
# full list of variables (for calculating the total NAEI emissions)
var_full = (['E_CH4','E_ECI','E_ECJ','E_CO','E_C2H2','E_NH3','E_NO',
'E_NO2','E_ORGI','E_ORGJ','E_PM_10','E_PM25I','E_PM25J',
'E_SO2','E_BIGALK','E_BIGENE','E_C2H4','E_C2H5OH','E_C2H6',
'E_CH2O','E_CH3CHO','E_CH3COCH3','E_CH3OH','E_MEK','E_TOLUENE',
'E_C3H6','E_C3H8','E_BENZENE','E_XYLENE'])
# list of variables for which we filter EDGAR emissions to avoid clashes with
# NAEI emissions
var_filter = (['E_CH4','E_ECI','E_ECJ','E_CO','E_C2H2','E_NH3','E_NO',
'E_NO2','E_ORGI','E_ORGJ','E_PM_10','E_PM25I','E_PM25J',
'E_SO2','E_BIGALK','E_BIGENE','E_C2H4','E_C2H5OH','E_C2H6',
'E_CH2O','E_CH3CHO','E_CH3COCH3','E_CH3OH','E_MEK','E_TOLUENE',
'E_C3H6','E_C3H8','E_BENZENE','E_XYLENE'])
#create output netcdf following same layout as input files
#;;;;;;;;;;;;;; operational section of script ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
# loop through variables to pull out for making full NAEI summation
# (units don't matter here - it's a purely binary check)
var_tot = np.zeros([12,1,139,139])
for i_var in range(len(var_full)):
var_temp = np.asarray(F_NAEI[var_full[i_var]])
var_tot[:,:,:,:]=(var_tot[:,:,:,:] + var_temp[:,:,:,:])
# create data mask to apply to TNO data
# initialise mask to correct dimensions
naei_empty = var_tot[:,:,:,:]*0.0
# set mask to zero where there's NAEI data - and 1.0 where there isn't
mask= np.where(var_tot==0)
naei_empty[mask] = naei_empty[mask]+1.0
#check mask works
#print np.max(var_tot[mask])
## create NETCDF file to put data into
##create Dimensions
#
#get number of hours from netcdf data
lenDateStr = len(np.asarray(F_NAEI['Times'])[0,:])
n_lons = np.size(np.asarray(F_NAEI['XLONG'])[0,:])
n_lats = np.size(np.asarray(F_NAEI['XLAT'])[0,:])
n_emis = np.size(np.asarray(F_NAEI['E_CO'])[0,:,0,0])
n_times = np.size(np.asarray(F_NAEI['Times'])[:,0])
# copy all global attributes from old file to new one
# also copy old dimensions from old to new netcdf
F_OUT.setncatts(F_EDGAR.__dict__)
for name, dimension in F_EDGAR.dimensions.items():
F_OUT.createDimension(
name, (len(dimension) if not dimension.isunlimited() else None))
# add dimensions into netcdf
#Time = F_OUT.createDimension("Time",n_times)
#emissions_zdim_stag = F_OUT.createDimension("emissions_zdim_stag",n_emis)
#south_north = F_OUT.createDimension("south_north",n_lats)
#west_east = F_OUT.createDimension("west_east",n_lons)
#DateStrLen = F_OUT.createDimension("DateStrLen",lenDateStr)
#create Variables
Times = F_OUT.createVariable("Times","S1",("Time","DateStrLen"))
XLONG = F_OUT.createVariable("XLONG","f4",("south_north","west_east"))
XLAT = F_OUT.createVariable("XLAT","f4",("south_north","west_east"))
for i_var in range(np.size(var_full)):
var_filter[i_var] = F_OUT.createVariable((var_filter[i_var]),"f4",
("Time","emissions_zdim_stag","south_north","west_east"))
# fill basic variables manually
lat = np.asarray(F_NAEI['XLAT'])
lon = np.asarray(F_NAEI['XLONG'])
datetime = np.asarray(F_NAEI['Times'])
XLAT[:] = lat
XLONG[:] = lon
Times[:] = datetime
# then loop through chem species to add them into file
#loop through the variables to be combined (in a straightforward manner)
for i_var in range(len(var_filter)):
# load data
var_edgar = np.asarray(F_EDGAR[var_full[i_var]])
var_naei= np.asarray(F_NAEI[var_full[i_var]])
# merge data files - applying the filter
# where NAEI data exists we won't input EDGAR vars
#(multiplying them by zero takes care of this)
var_naei_new = var_naei + naei_empty*var_edgar
# save data
#print var_full[i_var]
var_filter[i_var][:,:,:,:] = var_naei_new[:,:,:,:]
F_OUT.close()
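# Hedged sketch of header rules 4 and 5 (the OIN species), kept as comments because the
# species involved (E_PM25, BC_1, EC_1_25, OC_DOM, OC_TRA, OC_25_10, EC_25_10) belong to
# the TNO/NAEI workflow this script was adapted from, not to the merge performed above:
# e_oin_25 = np.maximum(np.minimum(e_pm25 - (bc_1 + ec_1_25 + oc_dom + oc_tra), 0.1 * e_pm25), 0.0)
# e_oin_10 = np.maximum(e_pm_10 - (oc_25_10 + ec_25_10), 0.0)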
|
'''
Created on Jun 21, 2018
@author: moffat
'''
from django import forms
from edc_form_validators import FormValidator
class IneligibleSubjectFormValidator(FormValidator):
def clean(self):
self.screening_identifier = self.cleaned_data.get('screening_identifier')
self.reasons_ineligible = self.cleaned_data.get('reasons_ineligible')
self.required_if_true(
True, field_required='screening_identifier')
self.required_if_true(
True, field_required='reasons_ineligible')
|
import math
import numpy as np
from discharge_data import *
from gumbel_reduce import gumbel_reduce_data
from plot_discharge import PlotDischarge
from log import *
time_list = [50, 100, 150, 200, 500, 1000]
def verify_gumbel(func):
"""
    A wrapper decorator; place @verify_gumbel directly above the
    function that is to be wrapped.
:param func: function to be wrapped
:return: the wrapped function's return value
"""
def wrapper(args):
try:
result = func(args)
return result
except KeyError:
            logging.error('ERROR: a numeric value must be passed to '
                          'extract the reduce value')
return np.nan
return wrapper
@verify_gumbel
def get_reduce_mean(index_):
"""
Indexing the Gumbel reduce data and determining reduce mean
:param index_: no. of discharge data in the DataFrame
:return: indexed reduced mean value
"""
if index_ in range(100, 150):
index_ = 150
elif index_ in range(150, 200):
index_ = 200
value = gumbel_reduce_data(csv_file=gumbel_reduce_path)
return value['Reduced mean'][index_]
@verify_gumbel
def get_reduce_std(index_):
"""
Indexing the Gumbel reduce data and determining reduce std
:param index_: no. of discharge data in the DataFrame
:return: indexed reduced standard deviation value
"""
if index_ in range(100, 150):
index_ = 150
elif index_ in range(150, 200):
index_ = 200
value = gumbel_reduce_data(csv_file=gumbel_reduce_path)
return value['Reduced std'][index_]
def gumbel_distribution(discharge_data):
"""
Function determines the Gumbel Distribution statistical parameters
by calling 2 functions and performs mathematical operations to
return the Extrapolated discharges
:param discharge_data: pandas DataFrame of Annual maxima
:return: a list of extrapolated discharges
"""
number_of_years = discharge_data.shape[0]
logging.info(
'Annual Max is calculated for {} years'.format(number_of_years))
mean_discharge = discharge_data['Discharge [CMS]'].mean()
std_dev = discharge_data['Discharge [CMS]'].std()
reduce_mean = get_reduce_mean(number_of_years)
reduce_std = get_reduce_std(number_of_years)
main_discharge = []
for year in time_list:
reduced_variate = -math.log(math.log(year / (year - 1)))
try:
frequency_factor = (reduced_variate - reduce_mean) / reduce_std
        except ZeroDivisionError:
            logging.error("ERROR: Could not divide by zero")
            # skip this return period rather than use an undefined frequency factor
            continue
discharge_value = mean_discharge + frequency_factor * std_dev
main_discharge.append(discharge_value)
return main_discharge
def main(d_data, scatter_size=0.0):
"""
:param d_data: raw discharge data file
:param scatter_size: Specifying the scatter size for Magic Methods
:return: results as a Dictionary
"""
# instantiation of the DischargeDataHandler Class
raw_discharge_data = DischargeDataHandler(data_csv_file=d_data)
# calling method to get discharge data
data = raw_discharge_data.get_discharge_data()
logging.info("\nThe Annual Maximum Discharge Data is: \n{}".format(data))
# function call to estimate extrapolated discharges
output_list = gumbel_distribution(data)
# instantiation of the PlotDischarge Class
plotter = PlotDischarge()
    # Adjusting the scatter size via the plotter's magic methods
plotter * scatter_size
plotter > 150
plotter < 5
# changing the extrapolated discharge plotting arguments to numpy
t_series = np.array(time_list)
q_series = np.array(output_list)
# appending extrapolated discharges to a dict
flood_discharge_dict = {}
for index, time in enumerate(time_list):
flood_discharge_dict[time] = output_list[index]
# returning all the values as a dictionary
results = {"return_periods": flood_discharge_dict, "t_series": t_series,
"q_series": q_series, "d_data": data, "obj": plotter}
return results
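# Minimal usage sketch (the CSV path is hypothetical; main() expects the raw annual-maxima
# discharge file handled by DischargeDataHandler):
if __name__ == "__main__":
    simulation_results = main("annual_max_discharge.csv", scatter_size=30.0)
    print(simulation_results["return_periods"])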
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Import
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Import Standard libraries
from abc import ABC, abstractmethod
import streamlit as st
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Classes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CBaseComponent(ABC):
def __init__(self
, compOpts
):
self.opts = compOpts
@abstractmethod
def getOptDflt(self, optName):
raise NotImplementedError('This method is not implemented in the Base Class: it must be implemented in the child class.')
@abstractmethod
def render(self):
pass
def getOpt(self, optName):
if optName in self.opts.keys():
return self.opts.get(optName)
return self.getOptDflt(optName)
def showTitle(self, title):
# Display the component title
if title:
st.subheader(title)
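# Hedged example of a concrete component (illustrative names only, not part of the original
# module): a child class has to supply the option defaults and a render method.
class CExampleComponent(CBaseComponent):
    def getOptDflt(self, optName):
        # Hypothetical defaults table used when an option is missing from compOpts
        return {'title': 'Example component'}.get(optName)

    def render(self):
        self.showTitle(self.getOpt('title'))
        st.write('Rendered by CExampleComponent.')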
|
import cv2
import os
import pickle
import imutils
import numpy as np
import time
# IU - Concert Live Clip 2018 Tour.mp4
image_path = "D:/Github/FR-AttSys/test_imgs/IU-2.png"
detector_path = "./face_detection_model"
# Path to OpenCV's deep learning face embedding model
embedding_model = "./face_detection_model/openface_nn4.small2.v1.t7"
# Path to the trained face recognition model
recognizer_path = "./saved_weights/recognizer.pickle"
# Path to the label encoder
le_path = "./saved_weights/le.pickle"
faces = ["iu", "pch", "chopin"]
COLORS = np.random.uniform(0, 255, size=(len(faces), 3))
def face_recognition(image_path):
    # cv2.imread does not raise on a missing file; it returns None instead
    image = cv2.imread(image_path)
    if image is None:
        raise FileNotFoundError("Could not read image: {}".format(image_path))
    # confidence threshold
    confidence_default = 0.5
    # Load the serialized face detector from disk
proto_path = os.path.sep.join([detector_path, "deploy.prototxt"])
model_path = os.path.sep.join([detector_path, "res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(proto_path, model_path)
    # Load the serialized face embedding model from disk
try:
embedded = cv2.dnn.readNetFromTorch(embedding_model)
except IOError:
        print("The path to the face embedding model is incorrect!")
    # Load the actual face recognition model and the label encoder
try:
recognizer = pickle.loads(open(recognizer_path, "rb").read())
le = pickle.loads(open(le_path, "rb").read())
except IOError:
        print("The save path of the face recognition model is incorrect!")
image = imutils.resize(image, width=600)
(h, w) = image.shape[:2]
# construct a blob from the image
image_blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300),
(104.0, 177.0, 123.0), swapRB=False, crop=False)
detector.setInput(image_blob)
detections = detector.forward()
    # Loop over the detections
for i in range(0, detections.shape[2]):
        # Extract the confidence (i.e. probability) associated with the prediction
confidence = detections[0, 0, i, 2]
        # Filter out weak detections
if confidence > confidence_default:
            # Compute the (x, y) coordinates of the face bounding box
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
            # Extract the face ROI
face = image[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
            # Make sure the face width and height are sufficiently large
if fW < 20 or fH < 20:
continue
            # Construct a blob for the face ROI, then pass it through the face embedding model to obtain the 128-d quantification of the face
face_blob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
embedded.setInput(face_blob)
vec = embedded.forward()
            # Perform classification to recognize the face
predicts = recognizer.predict_proba(vec)[0]
j = np.argmax(predicts)
probability = predicts[j]
name = le.classes_[j]
            # Draw the face's bounding box along with the associated probability
text = "{}: {:.2f}%".format(name, probability * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(image, (startX, startY), (endX, endY), COLORS[j], 1)
image = cv2.putText(image, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.60, COLORS[j], 1)
cv2.imshow("Face Recognition on Image", image)
cv2.waitKey(0)
if __name__ == "__main__":
face_recognition(image_path)
|
from mmfutils import optimize
import numpy as np
from uncertainties import ufloat
class Test(object):
def test_usolve(self):
n = ufloat(2.0, 0.1, 'n')
c = ufloat(1.0, 0.1, 'c')
a = ufloat(3.0, 0.1, 'a')
def f(x):
return x**n - a*c
ans = optimize.ubrentq(f, 0, max(1, a))
exact = (a*c)**(1./n)
res = ans - exact
assert np.allclose(0, [res.nominal_value, res.std_dev])
def test_usolve_1(self):
"""Should also work with regular numbers (which is faster)."""
n = 2.0
c = 1.0
a = 3.0
def f(x):
return x**n - a*c
ans = optimize.ubrentq(f, 0, max(1, a))
exact = (a*c)**(1./n)
assert np.allclose(ans, exact)
|
from JumpScale import j
from .Code import Code
j.code = Code()
|
###############################################################################
# Imports
import sys
###############################################################################
# General utility
# Exit the program
def terminate_app(code, message=None):
if message:
print("Exiting program with code {}: {}".format(code, message))
else:
print("Exiting program with code {}.".format(code))
sys.exit(code)
# Print section
def print_section(title):
print("\n\n###############################################################################")
print(title)
print("###############################################################################")
###############################################################################
|
# -*- coding: utf-8 -*-
# @Time : 2021/8/17 21:41
# @Author : lovemefan
# @Email : lovemefan@outlook.com
# @File : client.py
import logging
import grpc
from tzrpc.proto.py.Server_pb2_grpc import toObjectStub
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] - %(levelname)s - %(threadName)s - %(module)s.%(funcName)s - %(message)s')
logger = logging.getLogger(__name__)
class TZPRC_Client:
__type = ["String", "Integer", "Float", "Double", "Boolean", "Numpy", "Tensor"]
def __init__(self, server_address: str):
self.server_address = server_address
self.channel = grpc.insecure_channel(server_address)
def register(self, func):
"""
        :param func: the function to wrap so that it is invoked with a gRPC stub
        :return: the wrapped function
"""
# if return_type not in self.__type:
# raise ValueError(f"TZRPC return type only support {self.__type}")
def wrapper(*args, **kwargs):
stub = toObjectStub(self.channel)
            return func(stub=stub, *args, **kwargs)
return wrapper
class Listener:
def __init__(self):
pass
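# Hedged usage sketch (the address and function are hypothetical; the stub's methods depend
# on the generated tzrpc proto, so the remote call itself is left as a placeholder):
# client = TZPRC_Client("localhost:50051")
#
# @client.register
# def remote_task(payload, stub=None):
#     ...  # invoke the desired method on the injected stub here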
|
from django.contrib import admin
from .models import RedirectUrl, RedirectUrlsEntry
# Register your models here.
admin.site.register(RedirectUrl)
admin.site.register(RedirectUrlsEntry)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Create by Albert_Chen
# Copyright (c) 2020 Chao Chen. All rights reserved by Chao.Chen.
# Create on 2020-03-22
from __future__ import absolute_import
import json
import logging
import tornado
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from gamecenter.api import game_handler
from gamecenter.api import room_handler
from gamecenter.api import user_handler
from gamecenter.api import error_handler
from gamecenter.api import main_handler
LOG = logging.getLogger(__name__)
# pattern part
URL_PREFIX = r"/v1/gamecenter"
main_pattern = [
(r"/v1", main_handler.MainHandler),
]
user_patterns = [
(URL_PREFIX + r"/user_info", user_handler.UserHandler),
(URL_PREFIX + r"/user_logout", user_handler.UserLogoutHandler),
]
game_patterns = [
(URL_PREFIX + r"/game_logs", game_handler.GameLogsHandler),
(URL_PREFIX + r"/game_current_logs", game_handler.GameCurrentLogsHandler),
(URL_PREFIX + r"/game_list", game_handler.GameListHandler)
]
room_patterns = [
(URL_PREFIX + r"/room_list", room_handler.RoomListHandler),
(URL_PREFIX + r"/room", room_handler.RoomHandler),
(URL_PREFIX + r"/room/(\d+)", room_handler.RoomHandler),
(URL_PREFIX + r"/join_room", room_handler.RoomJoinHandler),
(URL_PREFIX + r"/quit_room", room_handler.RoomQuitHandler),
(URL_PREFIX + r"/game_over", room_handler.GameOverHandler),
(URL_PREFIX + r"/game_start", room_handler.GameStartHandler),
]
url_patterns = user_patterns +\
    game_patterns + \
main_pattern +\
room_patterns
class WebApp(tornado.web.Application):
def __init__(self, debug=False):
handlers = url_patterns
setting = dict(debug=debug, default_handler_class=error_handler.NotFoundHandler)
tornado.web.Application.__init__(
self, handlers, autoreload=True, **setting
)
def run_api(mode, port=8000, address="127.0.0.1"):
LOG.info(mode)
http_server = tornado.httpserver.HTTPServer(WebApp(debug=mode))
http_server.listen(port, address)
LOG.info('start run at %s:%s ' % (address, port))
tornado.ioloop.IOLoop.current().start()
|
import mimetypes
from typing import List, Union
from pathlib import Path
import magic
from haystack.nodes.base import BaseComponent
DEFAULT_TYPES = ["txt", "pdf", "md", "docx", "html"]
class FileTypeClassifier(BaseComponent):
"""
Route files in an Indexing Pipeline to corresponding file converters.
"""
outgoing_edges = 10
def __init__(self, supported_types: List[str] = DEFAULT_TYPES):
"""
Node that sends out files on a different output edge depending on their extension.
:param supported_types: the file types that this node can distinguish.
            Note that it's limited to a maximum of 10 outgoing edges, each of
            which corresponds to a file extension. Such extensions are, by default,
            `txt`, `pdf`, `md`, `docx`, `html`. Lists containing more than 10
elements will not be allowed. Lists with duplicate elements will
also be rejected.
"""
if len(supported_types) > 10:
raise ValueError("supported_types can't have more than 10 values.")
if len(set(supported_types)) != len(supported_types):
duplicates = supported_types
for item in set(supported_types):
duplicates.remove(item)
raise ValueError(f"supported_types can't contain duplicate values ({duplicates}).")
super().__init__()
self.supported_types = supported_types
def _estimate_extension(self, file_path: Path) -> str:
"""
Return the extension found based on the contents of the given file
:param file_path: the path to extract the extension from
"""
extension = magic.from_file(str(file_path), mime=True)
return mimetypes.guess_extension(extension) or ""
def _get_extension(self, file_paths: List[Path]) -> str:
"""
Return the extension found in the given list of files.
Also makes sure that all files have the same extension.
If this is not true, it throws an exception.
:param file_paths: the paths to extract the extension from
        :return: the extension shared by all the given files; if a file has no suffix, the extension is guessed from its content
"""
extension = file_paths[0].suffix.lower()
if extension == "":
extension = self._estimate_extension(file_paths[0])
for path in file_paths:
path_suffix = path.suffix.lower()
if path_suffix == "":
path_suffix = self._estimate_extension(path)
if path_suffix != extension:
raise ValueError(f"Multiple file types are not allowed at once.")
return extension.lstrip(".")
def run(self, file_paths: Union[Path, List[Path], str, List[str], List[Union[Path, str]]]): # type: ignore
"""
Sends out files on a different output edge depending on their extension.
:param file_paths: paths to route on different edges.
"""
if not isinstance(file_paths, list):
file_paths = [file_paths]
paths = [Path(path) for path in file_paths]
output = {"file_paths": paths}
extension = self._get_extension(paths)
try:
index = self.supported_types.index(extension) + 1
except ValueError:
raise ValueError(
f"Files of type '{extension}' ({paths[0]}) are not supported. "
f"The supported types are: {self.supported_types}. "
"Consider using the 'supported_types' parameter to "
"change the types accepted by this node."
)
return output, f"output_{index}"
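# Hedged usage sketch (illustrative file name; edges are numbered from 1 in the order of
# supported_types, so a "sample.txt" path is routed to edge "output_1"):
# classifier = FileTypeClassifier()
# output, edge = classifier.run(file_paths=["sample.txt"])
# assert edge == "output_1"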
|
import numpy as np
import pandas as pd
import math
import random
import re
simulationDuration = 100
minFeatureDuration = 1
maxFeatureDuration = 20
minFeatureValue = 0
maxFeatureValue = 10000
maxDuration = 5
numRuns = 1000
class QDisc:
def sort(self, anArray):
return anArray
def name(self):
return "NoOp"
class QDiscRandom(QDisc):
def sort(self, anArray):
random.shuffle(anArray)
return anArray
def name(self):
return "Random"
class QDiscCD3(QDisc):
def sort(self, anArray):
anArray.sort(key=lambda feature: feature['cd3value'], reverse=True)
return anArray
def name(self):
return "CD3"
class QDiscShortestJobFirst(QDisc):
def sort(self, anArray):
anArray.sort(key=lambda feature: feature['estimatedDuration'], reverse=False)
return anArray
def name(self):
return "Shortest First"
def generateRandomFeatureDuration():
return np.random.randint(minFeatureDuration, maxFeatureDuration+1)
def generateRandomFeatureValue():
return np.random.randint(minFeatureValue, maxFeatureValue+1)
def createFeature(estimatedDuration=None, estimatedValue=None):
if not estimatedDuration: estimatedDuration = generateRandomFeatureDuration()
if not estimatedValue: estimatedValue = generateRandomFeatureValue()
cd3value = estimatedValue / estimatedDuration
return {'estimatedDuration': estimatedDuration, 'estimatedValue': estimatedValue, 'cd3value': cd3value }
def calcPercentage(base, candidate):
result = {
"Queueing Discipline": "",
"Value Reassessed": "",
"Duration Reassessed": "",
"Resized": ""
}
for k,v in candidate.items():
if re.search("percentile", k):
result[k] = '{:.0f}%'.format((v / base[k] - 1) * 100)
return result
def decompose(number, numParts):
if number < numParts:
while number > 0:
n = random.randint(1, number)
yield n
number -= n
numParts -= 1
for i in range(numParts):
yield 0
else:
while number > 0 and numParts > 1:
n = random.randint(1, number-numParts+1)
yield n
number -= n
numParts -= 1
yield number
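# For example, list(decompose(10, 4)) might yield [5, 2, 2, 1] (random each run); the parts
# always sum back to the original number, and when number < numParts the tail is padded with zeros.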
def performSimulation(qdisc, reassessDuration=False, reassessValue=True, resize=False):
results = []
i = 0
while i < numRuns:
features = []
for x in range(10):
features.append(createFeature())
elapsedWeeks = 0
accruedValue = 0
numDeliveredFeatures = 0
while elapsedWeeks < simulationDuration:
features = qdisc.sort(features)
currentFeature = features.pop(0)
if resize and currentFeature['estimatedDuration'] > maxDuration:
splittedDurations = list(decompose(currentFeature['estimatedDuration'], 4))
random.shuffle(splittedDurations)
splittedValues = list(decompose(currentFeature['estimatedValue'], 4))
random.shuffle(splittedValues)
for x in range(3):
features.append(createFeature(splittedDurations.pop(0), splittedValues.pop(0)))
continue
if reassessDuration:
currentFeatureDuration = generateRandomFeatureDuration()
else:
currentFeatureDuration = currentFeature['estimatedDuration']
if reassessValue:
currentFeatureValue = generateRandomFeatureValue()
else:
currentFeatureValue = currentFeature['estimatedValue']
if (elapsedWeeks+currentFeatureDuration) > simulationDuration:
break
numberNewFeatures = math.floor((elapsedWeeks+currentFeatureDuration)/2) - math.floor(elapsedWeeks/2)
for x in range(numberNewFeatures):
features.append(createFeature())
elapsedWeeks += currentFeatureDuration
accruedValue += currentFeatureValue
results.append(accruedValue)
i += 1
result = {
"Queueing Discipline": qdisc.name(),
"Value Reassessed": reassessValue,
"Duration Reassessed": reassessDuration,
"Resized": resize
}
i = 1
for percentile in np.percentile(results,[10,20,30,40,50,60,70,80,90]):
key = "%i0th percentile" % i
result[key] = percentile
i += 1
return result
def runSimulationSet(title, set, out):
simulationResults = []
columns = ["Queueing Discipline", "Value Reassessed", "Duration Reassessed", "Resized", "10th percentile", "20th percentile", "30th percentile", "40th percentile", "50th percentile", "60th percentile", "70th percentile", "80th percentile", "90th percentile"]
print(title)
    baseSim = set.pop(0)
base = performSimulation(baseSim[0], reassessDuration=baseSim[1], reassessValue=baseSim[2], resize=baseSim[3])
simulationResults.append(base)
for sim in set:
result = performSimulation(sim[0], reassessDuration=sim[1], reassessValue=sim[2], resize=sim[3])
simulationResults.append(result)
simulationResults.append(calcPercentage(base, result))
df = pd.DataFrame(simulationResults, columns=columns)
    df.T.to_csv(out, sep="\t", header=False)
print(df.T, "\n")
set = [
# Array format: qdisc, reassessDuration, reassessValue, resize
# see performSimulation
[QDiscCD3(), False, False, False],
[QDiscShortestJobFirst(), False, False, False],
[QDiscRandom(), False, False, False]
]
runSimulationSet("Case I: CD3 Assumptions Are Met", set, "report-case-i.csv")
set = [
[QDiscCD3(), False, True, False],
[QDiscShortestJobFirst(), False, True, False],
[QDiscRandom(), False, True, False]
]
runSimulationSet("Case II: CD3 Assumptions Not Met", set, "report-case-ii.csv")
set = [
[QDiscCD3(), True, True, False],
[QDiscShortestJobFirst(), True, True, False],
[QDiscRandom(), True, True, False]
]
runSimulationSet("Case II: CD3 Assumptions Not Met (Version 2)", set, "report-case-ii_1.csv")
set = [
[QDiscCD3(), False, True, True],
[QDiscShortestJobFirst(), False, True, True],
[QDiscRandom(), False, True, True]
]
runSimulationSet("Case III: Right Sizing of Items", set, "report-case-iii.csv")
|
#!/usr/bin/python
import yaml
from pprint import pprint
from netmiko import ConnectHandler
from ciscoconfparse import CiscoConfParse
with open('netmiko.yml', 'r') as file:
    devices = yaml.safe_load(file)
cisco4 = devices.get('cisco4')
ssh = ConnectHandler(host=cisco4.get('host'), username=cisco4.get('username'), password=cisco4.get('password'), device_type='cisco_ios', session_log='ex6_cisco4_log.txt')
with open('ex6_cisco4_conf.txt', 'w') as file:
file.write(ssh.send_command('show run'))
config = CiscoConfParse('ex6_cisco4_conf.txt')
intf = config.find_objects_w_child(parentspec = r'^interface', childspec = r'^[\s]+ip address')
ipadd = config.find_objects_w_parents(parentspec = r'^interface', childspec = r'^[\s]+ip address')
table = list(zip([i.text for i in intf], [j.text for j in ipadd]))
for i in table:
pprint('Interface Line: {0}'.format(i[0]))
pprint('IP Address Line: {0}'.format(i[1]))
|
import pandas as pd
from comvest.utilities.io import files, read_from_db, write_result
from comvest.utilities.logging import progresslog, resultlog
def cleandata(df,date,keepcolumns=['ano_vest','cod_curso','desc_curso','he','mod_isencao']):
df.insert(loc=0, column='ano_vest', value=date)
df.drop(columns=['area'], errors='ignore', inplace=True)
df.rename(columns={df.columns[1]:'cod_curso', df.columns[2]:'desc_curso'}, inplace=True)
df['desc_curso'] = df['desc_curso'].map(
lambda desc: desc.replace('\r','').replace('\n','').replace('_x000D_','')
)
return df.reindex(columns=keepcolumns)
def extraction():
courses_frames = []
for path, date in files.items():
courses = read_from_db(path, sheet_name='cursos')
progresslog('cursos', date)
courses = cleandata(courses, date)
courses_frames.append(courses)
# Export CSV
all_courses = pd.concat(courses_frames)
all_courses.sort_values(by='ano_vest', ascending=False, inplace=True)
FILE_NAME = 'cursos_comvest.csv'
write_result(all_courses, FILE_NAME)
resultlog(FILE_NAME)
|
# %%
#######################################
def registryget_regkey_summary(key_object: "RegistryKey"):
    from Registry.Registry import RegistryKey
#
if isinstance(key_object, RegistryKey):
results = {}
results['values_num'] = key_object.values_number()
results['subkeys_num'] = key_object.subkeys_number()
return results
|
from typing import Dict
from typing import List
from treasury.session import FederalTreasurySession
class RevenueAndPayments():
"""
## Overview:
----
Revenue:
Daily overview of federal revenue collections such as income tax
deposits, customs duties, fees for government service, fines, and
loan repayments.
Payments:
Listing of payments made through the Judgment Fund including the
amount paid out, judgment type, legal representatives, agency,
and associated costs.
"""
def __init__(self, session: FederalTreasurySession) -> None:
"""Initializes the `RevenueAndPayments` object.
### Parameters
----
        session : `FederalTreasurySession`
            An initialized session of the `FederalTreasurySession`.
### Usage
----
>>> treasury_client = FederalTreasuryClient()
>>> revenue_and_payments_service = treasury_client.revenue_and_payments()
"""
# Set the session.
self.treasury_session: FederalTreasurySession = session
def __repr__(self) -> str:
"""String representation of the `FederalTreasuryClient.RevenueAndPayments` object."""
# define the string representation
str_representation = '<FederalTreasuryClient.RevenueAndPayments (active=True, connected=True)>'
return str_representation
def judgement_fund_congress(
self,
fields: List[str] = None,
sort: List[str] = None,
filters: List[str] = None,
page_number: int = 1,
page_size: int = 100
) -> Dict:
"""Judgment Fund: Annual Report to Congress.
### Overview
----
This table provides a listing of payments made through the Judgment
Fund including the amount paid out, judgment type, legal representatives,
agencies involved, and associated costs.
### Parameters
----
fields : List[str] (optional, Default=None)
The fields parameter allows you to select which field(s) should be
included in the response. If desired fields are not specified, all
fields will be returned.
sort : List[str] (optional, Default=None)
The sort parameter allows a user to sort a field in ascending (least
to greatest) or descending (greatest to least) order. When no sort parameter
is specified, the default is to sort by the first column listed. Most API
endpoints are thus sorted by date in ascending order (historical to most
current).
filters : List[str] (optional, Default=None)
Filters are used to view a subset of the data based on specific
criteria. For example, you may want to find data that falls within
a certain date range, or only show records which contain a value
larger than a certain threshold. When no filters are provided,
the default response will return all fields and all data.
page_number : int (optional, Default=1)
The page number will set the index for the pagination, starting
at 1. This allows the user to paginate through the records
            returned from an API request.
page_size : int (optional, Default=100)
The page size will set the number of rows that are returned
on a request.
### Returns
----
Dict
A collection of `Records` resources.
### Usage
----
>>> treasury_client = FederalTreasuryClient()
>>> revenue_and_payments_service = treasury_client.revenue_and_payments()
>>> revenue_and_payments_service.judgement_fund_congress()
"""
if fields:
fields = ','.join(fields)
if filters:
filters = ','.join(filters)
if sort:
sort = ','.join(sort)
content = self.treasury_session.make_request(
method='get',
endpoint='/v2/payments/jfics/jfics_congress_report',
params={
'format': 'json',
'page[number]': page_number,
'page[size]': page_size,
'fields': fields,
'sort': sort,
'filters': filters
}
)
return content
def revenue_collection(
self,
fields: List[str] = None,
sort: List[str] = None,
filters: List[str] = None,
page_number: int = 1,
page_size: int = 100
) -> Dict:
"""U.S. Government Revenue Collections.
### Overview
----
A daily overview of federal revenue collections such as individual
and corporate income tax deposits, customs duties, fees for government
service, fines, and loan repayments. These collections can be made through
either electronic or non-electronic transactions by mail, internet, bank,
or over-the-counter (OTC) channels.
### Parameters
----
fields : List[str] (optional, Default=None)
The fields parameter allows you to select which field(s) should be
included in the response. If desired fields are not specified, all
fields will be returned.
sort : List[str] (optional, Default=None)
The sort parameter allows a user to sort a field in ascending (least
to greatest) or descending (greatest to least) order. When no sort parameter
is specified, the default is to sort by the first column listed. Most API
endpoints are thus sorted by date in ascending order (historical to most
current).
filters : List[str] (optional, Default=None)
Filters are used to view a subset of the data based on specific
criteria. For example, you may want to find data that falls within
a certain date range, or only show records which contain a value
larger than a certain threshold. When no filters are provided,
the default response will return all fields and all data.
page_number : int (optional, Default=1)
The page number will set the index for the pagination, starting
at 1. This allows the user to paginate through the records
            returned from an API request.
page_size : int (optional, Default=100)
The page size will set the number of rows that are returned
on a request.
### Returns
----
Dict
A collection of `Records` resources.
### Usage
----
>>> treasury_client = FederalTreasuryClient()
>>> revenue_and_payments_service = treasury_client.revenue_and_payments()
>>> revenue_and_payments_service.revenue_collection()
"""
if fields:
fields = ','.join(fields)
if filters:
filters = ','.join(filters)
if sort:
sort = ','.join(sort)
content = self.treasury_session.make_request(
method='get',
endpoint='/v2/revenue/rcm',
params={
'format': 'json',
'page[number]': page_number,
'page[size]': page_size,
'fields': fields,
'sort': sort,
'filters': filters
}
)
return content
|
from train_QNet import *
import copy
import random
import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F
from tqdm import tqdm as _tqdm
import utils
def repeat_trajectory(trajectory, seed, env_name):
# Note that step is from the end
env = utils.create_env(env_name)
assert seed == int(seed)
seed = int(seed)
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
env.seed(seed)
env.reset()
environment_states = []
states = []
rewards = []
for observation in trajectory:
environment_states.append(copy.deepcopy(env))
obs_s, obs_a, obs_r, obs_ns, obs_done = observation
states.append(obs_s)
rewards.append(obs_r)
next_state, r, done, _ = env.step(obs_a)
        if np.any(np.abs(obs_ns - next_state) > 1e-12) or r != obs_r or done != obs_done:
raise ValueError("Trajectory not copied during repeat_trajectory function!")
# total_reward = 0
# for i, reward in enumerate(rewards):
# total_reward += reward
# rewards[i] = total_reward
# rewards = [np.abs(total_reward - reward) for reward in rewards]
rewards = np.array(rewards)
returns = np.cumsum(rewards[::-1])[::-1]
# partial_returns = np.cumsum(rewards)
return environment_states, states, returns #, partial_returns
def backward_train(train, model, memory, trajectory, seed, env_name, stop_coeff, smoothing_num,
num_splits, max_num_episodes, batch_size, discount_factor, learn_rate,
get_epsilon, use_target_qnet=None, render=False, testing_seed=None, verbose=True):
optimizer = optim.Adam(model.parameters(), learn_rate)
if use_target_qnet is not None:
target_model = copy.deepcopy(model)
else:
target_model, target_optim = None, None
# Count the steps (do not reset at episode start, to compute epsilon)
global_steps = 0
episode_durations = []
returns_trends = []
disc_rewards = []
losses = []
trajectories = []
splits = utils.chunks(len(trajectory), num_splits)
environment_states, states, real_returns = repeat_trajectory(trajectory, seed, env_name)
if testing_seed is not None:
seed = testing_seed
random.seed(testing_seed)
torch.manual_seed(testing_seed)
np.random.seed(testing_seed)
for s, split in enumerate(splits):
if verbose:
print("Split", s)
victories = []
for i in range(max_num_episodes):
starting_state_idx = np.random.choice(split)
# print("\t{}".format(starting_state_idx))
env = copy.deepcopy(environment_states[starting_state_idx])
exp_seed = int(seed + 1000 * s + 7 * i)
env.seed(exp_seed)
state = states[starting_state_idx]
starting_return = real_returns[0] - real_returns[starting_state_idx]
duration = 0
episode_return = starting_return
disc_reward = 0
current_trajectory = trajectory[:starting_state_idx]
if use_target_qnet is not None and i % 5 == 0:
target_model = copy.deepcopy(model)
env.render() if render and i % 1 == 0 else None
while True:
# if duration < len(split):
# epsilon = get_epsilon(i)
# else:
# epsilon = get_epsilon(10000)
epsilon = get_epsilon(i)
a = select_action(model, state, epsilon)
next_state, r, done, _ = env.step(a)
# env.render() if render and i % 1 == 0 else None
if (render and i % 1 == 0) or (i > 180 and np.mean(victories[-smoothing_num:]) < 0.001):
env.render()
duration += 1
episode_return += r
disc_reward += (discount_factor ** duration) * r
current_trajectory.append((state, a, r, next_state, done))
memory.push((state, a, r, next_state, done))
loss = train(model, memory, optimizer, batch_size, discount_factor,
target_model=target_model)
global_steps += 1
if done:
break
state = next_state
env.close()
# print("\t\teps = {}; return = {}; expected return = {}".format(epsilon, episode_return, real_returns[starting_state_idx]))
# print("\t{}: {}; {}/{}".format(i, starting_state_idx, episode_return, real_returns[starting_state_idx]))
# print(epsilon)
if verbose:
print("\t{}: {}; {}/{}".format(i, starting_state_idx, episode_return, real_returns[0]))
# TODO: save it in a dictionary (for example, based on reward or duration) or do it in post process
# saving the seed(i) is necessary for replaying the episode later
trajectories.append((current_trajectory, exp_seed))
losses.append(loss)
episode_durations.append(duration)
returns_trends.append(episode_return)
dr = episode_return - real_returns[0]
victory = dr >= - 0.1*abs(real_returns[starting_state_idx])
victories.append(int(victory))
# TODO - multiply it by gamma**len(trajectory till the starting point)
disc_rewards.append(disc_reward)
num_recent_victories = np.sum(victories[-smoothing_num:])
if verbose:
print("\t\tNumber of Recent Victories ", num_recent_victories)
# if len(victories) > smoothing_num and num_recent_victories >= stop_coeff:
if num_recent_victories >= stop_coeff:
break
if verbose:
print("Split", s, "finished in", i+1, "episodes out of ", max_num_episodes, ";", len(episode_durations), " episodes so far")
return model, episode_durations, returns_trends, disc_rewards, losses, trajectories
|
# Copyright (C) 2011-2012 Andy Balaam and The Pepper Developers
# Released under the MIT License. See the file COPYING.txt for details.
from itertools import imap
from buildstep import BuildStep
from parse.pepperstatements import PepperStatements
# We would like to do these imports inside _parse_tree_string_to_values,
# but Python doesn't like us to do that.
from libpepper.functionvalues import *
from libpepper.languagevalues import *
from libpepper.values import *
def _parse_tree_string_to_values( string ):
# The parse tree is actually a valid Python file
return eval( string )
def _remove_comments( ln ):
i = ln.find( "#" )
if i != -1:
return ln[:i]
else:
return ln
def _non_empty_line( ln ):
return ( ln.strip() != "" )
class ParseBuildStep( BuildStep ):
def read_from_file( self, fl ):
return ( _parse_tree_string_to_values( ln ) for ln in
filter( _non_empty_line,
imap( _remove_comments, fl ) ) )
def process( self, val ):
return PepperStatements( val )
def write_to_file( self, val, fl ):
for v in val:
fl.write( repr( v ) )
fl.write( "\n" )
|
"""
Instapush notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.instapush/
"""
import json
import logging
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
_LOGGER = logging.getLogger(__name__)
_RESOURCE = 'https://api.instapush.im/v1/'
CONF_APP_SECRET = 'app_secret'
CONF_EVENT = 'event'
CONF_TRACKER = 'tracker'
DEFAULT_TIMEOUT = 10
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_APP_SECRET): cv.string,
vol.Required(CONF_EVENT): cv.string,
vol.Required(CONF_TRACKER): cv.string,
})
def get_service(hass, config):
"""Get the Instapush notification service."""
headers = {'x-instapush-appid': config[CONF_API_KEY],
'x-instapush-appsecret': config[CONF_APP_SECRET]}
try:
response = requests.get(
'{}{}'.format(_RESOURCE, 'events/list'), headers=headers,
timeout=DEFAULT_TIMEOUT).json()
except ValueError:
_LOGGER.error('Unexpected answer from Instapush API.')
return None
if 'error' in response:
_LOGGER.error(response['msg'])
return None
if len([app for app in response
if app['title'] == config[CONF_EVENT]]) == 0:
_LOGGER.error("No app match your given value. "
"Please create an app at https://instapush.im")
return None
return InstapushNotificationService(
config.get(CONF_API_KEY), config.get(CONF_APP_SECRET),
config.get(CONF_EVENT), config.get(CONF_TRACKER))
class InstapushNotificationService(BaseNotificationService):
"""Implementation of the notification service for Instapush."""
def __init__(self, api_key, app_secret, event, tracker):
"""Initialize the service."""
self._api_key = api_key
self._app_secret = app_secret
self._event = event
self._tracker = tracker
self._headers = {
'x-instapush-appid': self._api_key,
'x-instapush-appsecret': self._app_secret,
'Content-Type': 'application/json'}
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = {
'event': self._event,
'trackers': {self._tracker: title + ' : ' + message}
}
response = requests.post(
'{}{}'.format(_RESOURCE, 'post'), data=json.dumps(data),
headers=self._headers, timeout=DEFAULT_TIMEOUT)
if response.json()['status'] == 401:
            _LOGGER.error("%s. Please check your Instapush settings",
                          response.json()['msg'])
|
{
"targets": [
{
"target_name": "tree_sitter_markdown_binding",
"include_dirs": [
"<!(node -e \"require('nan')\")",
"src"
],
"sources": [
"src/parser.c",
"bindings/node/binding.cc",
"src/scanner.cc"
],
"cflags_c": [
"-std=c99",
"-fexceptions"
],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ],
"defines": [
"TREE_SITTER_MARKDOWN_AVOID_CRASH"
],
"conditions": [
["OS=='mac'", { "xcode_settings": { "GCC_ENABLE_CPP_EXCEPTIONS": "YES" } }]
]
}
]
}
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import googlemaps
import base64
from cartodb_services.google.exceptions import InvalidGoogleCredentials
class GoogleMapsClientFactory():
clients = {}
@classmethod
def get(cls, client_id, client_secret, channel=None):
cache_key = "{}:{}:{}".format(client_id, client_secret, channel)
client = cls.clients.get(cache_key)
if not client:
if client_id:
cls.assert_valid_crendentials(client_secret)
client = googlemaps.Client(
client_id=client_id,
client_secret=client_secret,
channel=channel)
else:
client = googlemaps.Client(key=client_secret)
cls.clients[cache_key] = client
return client
@classmethod
def assert_valid_crendentials(cls, client_secret):
if not cls.valid_credentials(client_secret):
raise InvalidGoogleCredentials
@staticmethod
def valid_credentials(client_secret):
try:
            # b64decode only fails if the string does not have correct base64
            # padding, but this way we can provide a clearer error than
            # "TypeError: Incorrect padding"
b64_secret = client_secret.replace('-', '+').replace('_', '/')
base64.b64decode(b64_secret)
return True
except TypeError:
return False
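# Hedged usage sketch (the key is a placeholder): passing client_id=None selects the plain
# API-key client, while a client_id/client_secret pair selects the channelled client.
# gmaps = GoogleMapsClientFactory.get(client_id=None, client_secret='MY_API_KEY')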
|
"""Bosch regular sensor."""
from ..const import SIGNAL_SENSOR_UPDATE_BOSCH
from .base import BoschBaseSensor
class BoschSensor(BoschBaseSensor):
"""Representation of a Bosch sensor."""
signal = SIGNAL_SENSOR_UPDATE_BOSCH
_domain_name = "Sensors"
@property
def device_name(self):
return "Bosch sensors"
|
from __future__ import absolute_import
import deployer.logger
from celery.signals import setup_logging
__version__ = '0.5.2'
__author__ = 'sukrit'
deployer.logger.init_logging()
setup_logging.connect(deployer.logger.init_celery_logging)
|
from . import TestStdoutReader
import pyprogress
class TestCounter(TestStdoutReader):
def tearDown(self):
self.c.stop()
self.c.join()
TestStdoutReader.tearDown(self)
def test_counter_no_total(self):
output = ['0', '\b1', '\b2', '\b3', '\b4', '\b5']
self.c = pyprogress.Counter()
self.c.start()
assert self.stdout.getvalue().strip() == output[0]
self.stdout.truncate(0)
for x in range(1, 6):
self.c.inc()
self.c.write() # force write output
assert self.stdout.getvalue().strip('\x00').strip() == output[x]
self.stdout.truncate(0)
def test_counter_with_total(self):
output = ['0/5', '\b\b\b1/5', '\b\b\b2/5', '\b\b\b3/5', '\b\b\b4/5', '\b\b\b5/5']
self.c = pyprogress.Counter(total=5)
self.c.start()
assert self.stdout.getvalue().strip() == output[0]
self.stdout.truncate(0)
for x in range(1, 6):
self.c.inc()
self.c.write() # force write output
assert self.stdout.getvalue().strip('\x00').strip() == output[x]
self.stdout.truncate(0)
def test_counter_initial(self):
output = ['2', '\b3', '\b4', '\b5']
self.c = pyprogress.Counter(initial=2)
self.c.start()
assert self.stdout.getvalue().strip() == output[0]
self.stdout.truncate(0)
for x in range(1, 4):
self.c.inc()
self.c.write() # force write output
assert self.stdout.getvalue().strip('\x00').strip() == output[x]
self.stdout.truncate(0)
def test_counter_inc_2(self):
output = ['0/10',
'\b\b\b\b2/10',
'\b\b\b\b4/10',
'\b\b\b\b6/10',
'\b\b\b\b8/10',
'\b\b\b\b10/10']
self.c = pyprogress.Counter(total=10)
self.c.start()
assert self.stdout.getvalue().strip() == output[0]
self.stdout.truncate(0)
for x in range(1, 6):
self.c.inc(2)
self.c.write()
assert self.stdout.getvalue().strip('\x00').strip() == output[x]
self.stdout.truncate(0)
class TestCounterContextManager(TestStdoutReader):
def test_counter_no_total(self):
output = ['0', '\b1', '\b2', '\b3', '\b4', '\b5']
with pyprogress.Counter() as c:
assert self.stdout.getvalue().strip() == output[0]
self.stdout.truncate(0)
for x in range(1, 6):
c.inc()
c.write() # force write output
assert self.stdout.getvalue().strip('\x00').strip() == output[x]
self.stdout.truncate(0)
def test_counter_with_total(self):
output = ['0/5', '\b\b\b1/5', '\b\b\b2/5', '\b\b\b3/5', '\b\b\b4/5', '\b\b\b5/5']
with pyprogress.Counter(total=5) as c:
assert self.stdout.getvalue().strip() == output[0]
self.stdout.truncate(0)
for x in range(1, 6):
c.inc()
c.write() # force write output
assert self.stdout.getvalue().strip('\x00').strip() == output[x]
self.stdout.truncate(0)
def test_counter_initial(self):
output = ['2', '\b3', '\b4', '\b5']
with pyprogress.Counter(initial=2) as c:
assert self.stdout.getvalue().strip() == output[0]
self.stdout.truncate(0)
for x in range(1, 4):
c.inc()
c.write() # force write output
assert self.stdout.getvalue().strip('\x00').strip() == output[x]
self.stdout.truncate(0)
def test_counter_inc_2(self):
output = ['0/10',
'\b\b\b\b2/10',
'\b\b\b\b4/10',
'\b\b\b\b6/10',
'\b\b\b\b8/10',
'\b\b\b\b10/10']
with pyprogress.Counter(total=10) as c:
assert self.stdout.getvalue().strip() == output[0]
self.stdout.truncate(0)
for x in range(1, 6):
c.inc(2)
c.write()
assert self.stdout.getvalue().strip('\x00').strip() == output[x]
self.stdout.truncate(0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/1/17 9:17 AM
# @Author : Insomnia
# @Desc    : Josephus problem: n people stand in a circle, every m-th one is eliminated; find the last one remaining
# @File : LastInCircle.py
# @Software: PyCharm
class Solution:
def lastInCircle(self, n, m):
if n < 1 or m < 1:
return -1
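        # Iteratively apply the Josephus recurrence f(1) = 0, f(i) = (f(i - 1) + m) % i;
        # the 0-based position of the survivor among n people is f(n).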
last = 0
for i in range(2, n + 1):
last = (last + m) % i
return last
if __name__ == '__main__':
print("5 people 3 gap")
sol = Solution()
print(sol.lastInCircle(5, 3))
print(sol.lastInCircle(12, 3))
|
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from uai.arch_conf.base_conf import ArchJsonConf
from uai.arch_conf.base_conf import ArchJsonConfLoader
class XGBoostJsonConf(ArchJsonConf):
""" XGBoost Json Config class
"""
def __init__(self, parser):
""" XGBoost Json Config Class, Use the super to init
"""
super(XGBoostJsonConf, self).__init__('xgboost', parser)
def _add_args(self):
super(XGBoostJsonConf, self)._add_args()
def load_params(self):
super(XGBoostJsonConf, self).load_params()
def _load_conf_params(self):
""" Config the conf_params from the CMD
"""
super(XGBoostJsonConf, self)._load_conf_params()
self.conf_params['http_server'] = {
'exec': {
'main_file': self.params['main_file'],
'main_class': self.params['main_class']
},
'xgboost': {
'model_name': self.params['model_name']
}
}
def _load_args(self):
super(XGBoostJsonConf, self)._load_args()
def get_conf_params(self):
self._load_args()
return self.conf_params
def get_arg_params(self):
return self.params
class XGBoostJsonConfLoader(ArchJsonConfLoader):
def __init__(self, conf):
super(XGBoostJsonConfLoader, self).__init__(conf)
def _load(self):
super(XGBoostJsonConfLoader, self)._load()
self.model_name = self.server_conf['xgboost']['model_name']
def get_model_name(self):
return self.model_name
|
"""
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
Class to make/read/write k-fold x-validation lists
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
from six import string_types
import os.path as path
import logging
from collections import OrderedDict
from copy import deepcopy
import numpy as np
from .list_utils import *
class FoldList(object):
"""Class to contain folds for cross-validation.
Attributes:
key: String List with the names of the dataset/recording/i-vector
folds: Int numpy array with the number of the fold of each key.
mask: Boolean numpy array to mask elements in the key
"""
def __init__(self, fold, key, mask=None):
self.fold = fold
self.key = key
self.mask = mask
self.validate()
    def validate(self):
        """Validates the class attributes.
        """
self.key = list2ndarray(self.key)
self.fold = list2ndarray(self.fold)
if self.fold.dtype != int:
self.fold = self.fold.astype(int)
assert len(self.key) == len(self.fold)
assert len(np.unique(self.fold[self.fold>=0])) == np.max(self.fold)+1
if self.mask is not None:
assert len(self.mask) == len(self.fold)
def copy(self):
"""Returns a copy of the object.
"""
return deepcopy(self)
def __len__(self):
"""Returns number of folds.
"""
        return self.num_folds
@property
def num_folds(self):
"""Returns number of folds.
"""
return np.max(self.fold)+1
def align_with_key(self, key, raise_missing=True):
"""Aligns the fold list with a given key
Args:
key: Key to align the fold and key variables of the object.
raise_missing: if True, raises exception when an element of key is
not found in the object.
"""
f, idx = ismember(key, self.key)
if np.all(f):
self.key = self.key[idx]
self.fold = self.fold[idx]
if self.mask is not None:
self.mask = self.mask[idx]
else:
for i in (f==0).nonzero()[0]:
logging.warning('segment %s not found' % key[i])
if raise_missing:
raise Exception('some scores were not computed')
def get_fold_idx(self, fold):
"""Returns a fold boolean indices
Args:
fold: Fold number to return
Returns:
train_idx: Indices of the elements used for training
test_idx: Indices of the elements used for test
"""
test_idx = self.fold == fold
train_idx = np.logical_not(test_idx)
if self.mask is not None:
train_idx = np.logical_and(train_idx, self.mask)
test_idx = np.logical_and(test_idx, self.mask)
return train_idx, test_idx
def get_fold(self, fold):
"""Returns a fold keys
Args:
fold: Fold number to return
Returns:
train_key: Keys of the elements used for training
test_key: Keys of the elements used for test
"""
train_idx, test_idx = self.get_fold_idx(fold)
return self.key[train_idx], self.key[test_idx]
def __getitem__(self, fold):
"""Returns a fold keys
Args:
fold: Fold number to return
Returns:
train_key: Keys of the elements used for training
test_key: Keys of the elements used for test
"""
return self.get_fold(fold)
def save(self, file_path, sep=' '):
"""Saves object to txt file
Args:
file_path: File path
sep: Separator between fold field and key field
"""
        with open(file_path, 'w') as f:
            for fold, key in zip(self.fold, self.key):
                f.write('%s%s%s\n' % (fold, sep, key))
@classmethod
def load(cls, file_path, sep=' '):
"""Loads object from txt file
Args:
file_path: File path
sep: Separator between fold field and key field
Returns:
FoldList object
"""
with open(file_path, 'r') as f:
fields = [line.rstrip().split(sep=sep, maxsplit=1) for line in f]
fold = np.asarray([int(f[0]) for f in fields], dtype=int)
key = np.asarray([f[1] for f in fields])
return cls(fold, key)
@classmethod
def create(cls, segment_key, num_folds, balance_by_key=None, group_by_key=None, mask=None, shuffle=False, seed=1024):
"""Creates a FoldList object.
Args:
segment_key: String List of recordings/speech segments
num_folds: Number of folds that we want to obtain.
balance_by_key: String List of keys indicating a property of the segment to make all folds to
have the same number of elements of each class. E.g. for language ID this would be the language
of the recording.
group_by_key: String List of keys indicating a property of the segment to make all the elements
of the same class to be in the same fold. E.g. for language ID this would be the speaker ID
of the recording.
mask: Boolean numpy array to mask elements of segment_key out.
shuffle: Shuffles the segment list so that folds are not grouped in alphabetical order.
seed : Seed for shuffling
Returns:
FoldList object.
"""
if shuffle:
rng = np.random.RandomState(seed=seed)
if group_by_key is None:
group_by_key = segment_key
if balance_by_key is None:
balance_by_key = np.zeros((len(segment_key),), dtype=int)
else:
_, balance_by_key = np.unique(balance_by_key, return_inverse=True)
if mask is not None:
balance_by_key[mask==False] = -1
folds = - np.ones((len(segment_key),), dtype=int)
num_classes = np.max(balance_by_key) + 1
for i in xrange(num_classes):
idx_i = (balance_by_key == i).nonzero()[0]
group_key_i = group_by_key[idx_i]
_, group_key_i = np.unique(group_key_i, return_inverse=True)
num_groups_i = np.max(group_key_i) + 1
delta = float(num_groups_i)/num_folds
if shuffle:
shuffle_idx = np.arange(num_groups_i)
rng.shuffle(shuffle_idx)
group_key_tmp = np.zeros_like(group_key_i)
for j in xrange(num_groups_i):
group_key_tmp[group_key_i==j] = shuffle_idx[j]
group_key_i = group_key_tmp
for j in xrange(num_folds):
k1 = int(np.round(j*delta))
k2 = int(np.round((j+1)*delta))
idx_ij = np.logical_and(group_key_i>=k1, group_key_i<k2)
idx_fold = idx_i[idx_ij]
folds[idx_fold] = j
if mask is None:
assert np.all(folds>=0)
else:
assert np.all(folds[mask]>=0)
return cls(folds, segment_key, mask)
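# Hedged usage sketch (segment and language keys are made up for illustration):
# segs = np.asarray(['seg1', 'seg2', 'seg3', 'seg4'])
# langs = np.asarray(['eng', 'eng', 'spa', 'spa'])
# fold_list = FoldList.create(segs, num_folds=2, balance_by_key=langs, shuffle=True)
# train_key, test_key = fold_list[0]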
|
try:
import ConfigParser as configparser
except Exception:
import configparser
def getvalues(filepath, section, keys):
parser = configparser.ConfigParser()
if not parser.read(filepath):
raise ValueError('read() failed -- "{}"'.format(filepath))
values = []
for key in keys:
values.append(parser.get(section, key))
return values
|
import re
import time
import logging
import operator
import pandas as pd
import numpy as np
from datetime import datetime
from datetime import timedelta
from collections import Counter
from pycoingecko import CoinGeckoAPI
from expiringdict import ExpiringDict
start = datetime.now()
logger = logging.getLogger()
def get_coin_update():
"""Input: N/A
Returns: List of names and list of tickers of top 50 cryptoccurrencies from Coingecko API ordered by market cap.
Removes popular stable coins from DataFrame."""
try:
cg = CoinGeckoAPI()
coinupdate = cg.get_coins_markets(vs_currency="usd") #get all coins listed (default ordered by Mcap)
coinupdate = coinupdate[:50]
toptickers = []
topnames = []
counter = 0
for row in coinupdate: #get tickers + names
topnames.append(coinupdate[counter]["name"])
toptickers.append(coinupdate[counter]["symbol"])
counter += 1
disregardnames = ["Tether","USD Coin","Binance USD","cDAI","cUSDC","Dai"]
disregardtickers = ["usdt","usdc","busd","dai","cusdc","cdai"]
toptickers = [x.lower() for x in toptickers if x not in disregardtickers] #remove stable-coins from list and force lowercase
mystring = " "
toptickers = [mystring + s + mystring for s in toptickers] #add space-string at start of ticker string, to avoid false positives (example: Huobi Coin (HT) is a string present in every link (HTtps://..))
topnames = [x.lower() for x in topnames if x not in disregardnames] #remove stable-coins from list
return toptickers, topnames
except Exception as e:
logger.error(f"Exception in get_coin_update(): {e}", exc_info=True)
def init_dicts():
"""Initializes ExpiringDict structures for temp storage of message data
max_len = Maximum number of keys in Dict, max_age_seconds= Maximum age of keys in Dict
If value > max: key is deleted from dict """
try:
#Optimal TTL would be 30720*60 (8 days) or 15360*60 (4 days), but that exceeds the hardware limitations
#With TTL 60min, we get ~13% attribution rate instead of ~34%
redd = ExpiringDict(max_len=100000,max_age_seconds=((60*60)))
twitd = ExpiringDict(max_len=100000,max_age_seconds=((120*60))) #Optimal TTL is 120min
teled = ExpiringDict(max_len=100000,max_age_seconds=((120*60))) #Optimal TTL is 120min
return redd,twitd,teled
except Exception as e:
logger.error(f"Exception in init_dicts(): {e}", exc_info=True)
def coin_to_text_assigner(data,belongs_to,redd,twitd,teled,toptickers,topnames):
"""Input: "data" - text & metadata (JSON), "belongs_to" - data source identifier,
"redd","twitd","teled" - temporary msg storage in form dicts, "toptickers","topnames" - top 50 cryptocurrencies by Mcap
Returns: Assigned coin to current message as string or returns NaN."""
try:
#Extract relevant information from JSON
""" for Twitter API & Reddit API integration
if "Twitter" in belongs_to:
text = data["text"]
msgid = float(data["id"]) #float because parent ID may be imported as a float due to existence of NaNs
parent_id = data["reply_to_tweet_id"]
timestamp = data["created_at"]
if "Reddit" in belongs_to:
text = data["text"]#data["text"]
try:
text = data["title"] + " " + data["text"] #if message is original post, get also the title
except KeyError: #continue if not
pass
msgid = data["id"]
try:
parent_id = data["submission_id"]
except KeyError:
parent_id = np.nan #if reddit message is original post, it has no parent_id field
timestamp = data["created_at"]
"""
if "Telegram" in belongs_to:
text = data["text"]
msgid = data["id"] #float because parent ID may be imported as a float due to existence of NaNs
parent_id = data["reply_to_msg_id"]
timestamp = data["date"]
def assigner(l,text):
"""Input: list of target items, text
Returns: list of items found in text that are part of list of target items (toptickers or topnames).
-> Disregards strings /BTC,/btc to exclude irrelevant price updates (price updates in the form of ALT/BTC)."""
try:
itemlist = []
black_list = ["/BTC","/btc","etc"," ht","amp"," ht "," etc ","tron","dot","algo","link"," link ", " dot ", " maker ", " tron ", " amp "]#cant handle ETC,HT string - too many unrelated occurences
for item in l:
if item in str(text):
if str(item) not in black_list:
itemlist.append(item)
return itemlist
except Exception as e:
logger.error(f"Exception in assigner(): {e}", exc_info=True)
def to_names(toptickers,topnames, assignedtickers, assignednames):
"""Input: toptickers,topnames - tickers/names of top 50 cryptoccurrencies,
assingedtickers - top50 tickers found in message, assignednames - top50 names found in message
Translates top50 tickers into full names.
Returns: assignednames """
try:
topcryptodict = dict(zip(toptickers,topnames))
namesfromtickers = []
for ticker in assignedtickers:
if "$" in ticker:
ticker = ticker.replace("$", "")
ticker = " "+ticker+" "
if "#" in ticker:
ticker = ticker.replace("#","")
ticker = " "+ticker+" "
namesfromtickers.append(topcryptodict[ticker]) #if tickers are present, get full names of tickers as a list
assignednames += namesfromtickers #add names from tickers to assignednames list
return assignednames
except Exception as e:
logger.error(f"Exception in assigner(): {e}", exc_info=True)
def get_messagecoin_recursive(message_id,d):
"""Input: message_id - ID of a message, d - temporary storage dictionary
Implements get_messagecoin as a recursive function
Returns:coin assigned to message or NaN"""
#If the message has no parent, we get Exception and return messagecoin = nan
#If we find a parent and the parent has a coin, we return messagecoin
#If we find a parent and the parent has no coin, we call parent_coin = get_messagecoin_recursive(parent_id,teled)
try:
try:
result = d[message_id] #get message-info for current id as dict
messagecoin = result#["assigned_coin"]
if type(messagecoin) != float: #if var is not nan
return messagecoin #if a coin has been attributed, return
else:
message_id = result["parent_id"] #get parent id of current message
get_messagecoin_recursive(message_id,d) #if no coin found, look up parent message
except (KeyError,IndexError,ValueError): #if ID not present, return NaN
messagecoin = np.nan
return messagecoin
except Exception as e:
logger.error(f"Exception in get_messagecoin_recursive(): {e}", exc_info=True)
def replaceUsernameTwitter(text):
""" Replaces "@user" with "username_placeholder" """
text = re.sub('@[^\s]+','username_placeholder',text)
return text
def replaceUsernameReddit(text):
""" Replaces "u/user" with "username_placeholder" """
text = re.sub('u\/[^\s]+','username_placeholder',text)
return text
###process flow
toptickersallcaps = [x.upper() for x in toptickers]
tmp = []
for ticker in toptickersallcaps:
ticker = ticker.strip() #remove added whitespaces -> explanation on line 223-224
tmp.append(ticker)
toptickersallcaps = tmp
assignedtickersallcaps = assigner(toptickersallcaps,text) #Get all target tickers that are in CAPS (we assume that tickers in caps always refer to the target and not something else)
if assignedtickersallcaps: #if allcap tickers are found, replace them with placeholders to avoid redundant counts later on
for ticker in assignedtickersallcaps:
text = text.replace(ticker, "allcaps_ticker_placeholder")
assignedtickersallcaps = [" " + s + " " for s in assignedtickersallcaps]
if type(text) == str:
text = text.lower() #transform capital letters to lowercase letters
""" for Twitter API & Reddit API integration
if "Twitter" in belongs_to:
text = replaceUsernameTwitter(text) #replace twitter @USERNAME with placeholder
if "Reddit" in belongs_to:
text = replaceUsernameReddit(text) #replace Reddit /U/USERNAME with placeholder
#List of strings posted by reddit-bots
bot_messages_list = ["i am a bot, and this action was performed automatically","^^i ^^am ^^a ^^bot","i am just a simple bot","remindmebot","crypto_bot","this summary is auto generated by a bot","lntipbot","i'm a bot"]
if any(ext in text for ext in bot_messages_list):
text = np.nan #if text posted by bot, remove text
"""
parent_coin = np.nan
pmessage = np.nan
if "nan" not in str(parent_id): #get coin assigned to parent message if parent_id is not "nan"
if "Telegram" in belongs_to:
parent_coin = get_messagecoin_recursive(parent_id,teled)
if type(parent_coin) != float:
pmessage = parent_coin["text"]
parent_coin = parent_coin["assigned_coin"]
"""
for Twitter API & Reddit API integration
if "Twitter" in belongs_to:
parent_coin = get_messagecoin_recursive(parent_id,twitd)
if type(parent_coin) != float:
pmessage = parent_coin["text"]
parent_coin = parent_coin["assigned_coin"]
if "Reddit" in belongs_to:
parent_coin = get_messagecoin_recursive(parent_id,redd)
if type(parent_coin) != float:
pmessage = parent_coin["text"]
parent_coin = parent_coin["assigned_coin"]
"""
assignednames = assigner(topnames, text) #search target names in message
if assignednames:
#if full names are found, replace them with placeholders to avoid redundant counts (e.g. name="solana", ticker="sol")
for name in assignednames:
text = text.replace(name, "target_name_placeholder")
assignedtickers = assigner(toptickers,text) #search target tickers in message
tmp = []
for ticker in toptickers:
ticker = ticker.strip() #remove added whitespaces
tmp.append(ticker)
#the reason for not simply removing $ and # chars in text is that tickers are only attributed if there is
#a space before and after (" "BTC" ") to avoid FPs (line 42) - we will miss many cash/hashtags if just remove the $,# chars
toptickercashtags = [ "$"+ s for s in tmp] #create ticker cashtags ("$BTC")
toptickerhashtags = [ "#"+ s for s in tmp] #create ticker hashtags ("#BTC")
toptickerctht = toptickercashtags + toptickerhashtags
assignedctht = assigner(toptickerctht,text) #search target ticker cashtags and hashtags
assignedtickers = assignedtickers + assignedctht + assignedtickersallcaps
assignedtickers = [x.lower() for x in assignedtickers]
if assignedtickers:
assignednames = to_names(toptickers,topnames, assignedtickers, assignednames)
if len(assignednames) == 1:
assigned_coin = assignednames[0] #if only 1 target coin mentioned, set it as assigned coin
if len(assignednames) > 1: #if multiple target names present in text:
occurences = Counter(assignednames) #returns a dict with occurences of items in names list
highest = max(occurences.values()) #get highest value in Dict
maxoccur = [k for k, v in occurences.items() if v == highest] #Count keys that have highest value
if len(maxoccur) == 1: #If exactly one key has the highest value, set key as assigned coin
assigned_coin = maxoccur[0]
else:
if type(parent_coin) != float:
assigned_coin = parent_coin #set assigned coin of parent message as current assigned coin (if applicable)
else:
assigned_coin = np.nan
if len(assignednames) < 1: #if no target names present in text
if type(parent_coin) != float:
assigned_coin = parent_coin #set assigned coin of parent message as current assigned coin (if applicable)
else:
assigned_coin = np.nan
updatelist = [msgid,timestamp,parent_id,assigned_coin,text]
updatedict = {"msg_id":updatelist[0],"timestamp":updatelist[1],"parent_id":updatelist[2],"assigned_coin":updatelist[3],"text":updatelist[4]}
if type(assigned_coin) == str:
assigned_coin = assigned_coin.lower() #set assigned coin to lowercase for uniform representation
"""
for Twitter API & Reddit API integration
if "Twitter" in belongs_to:
twitd[msgid] = updatedict
if "Reddit" in belongs_to:
redd[msgid] = updatedict
"""
if "Telegram" in belongs_to:
teled[msgid] = updatedict
if "None" in str(assigned_coin):
assigned_coin = np.nan
return assigned_coin
except Exception as e:
logger.error(f"Exception in coin_to_text_assigner(): {e}", exc_info=True)
|
import os,sys
import json
import facebook
if __name__ == '__main__':
token = "EAANXc609TdkBAO3HmSoswBZCTIbmZBMOcdzvLa8c97fdDZBzCjZCL2vAhJYPhyKt5sURY5VlozyHOZABZB6lxrPU5Bb8jM0PLFHh0xCj376nqu6EQZA6PoGbnI1cKyGYiOtrNNyLUebm55GGjNGI5VL6Tj1R9IstsIUSQHBbW7WVP7ZBUbZAn4occ"
graph = facebook.GraphAPI(access_token=token, version = 3.0)
#profile = graph.get_object('974146599436745_974147879436617',fields='get_connections')
likes = graph.get_object(id='974146599436745_974530109398394', fields='shares,likes.summary(true),comments.summary(true)')
##GENERAL INFO##
about = graph.get_object(id='974146599436745', fields='about')
country_page_likes = graph.get_object(id='974146599436745', fields='country_page_likes')
fan_count = graph.get_object(id='974146599436745', fields='fan_count')
location = graph.get_object(id='974146599436745', fields='location')
new_like_count = graph.get_object(id='974146599436745', fields='new_like_count')
page_token = graph.get_object(id='974146599436745', fields='page_token')
notifications = graph.get_object(id='974146599436745', fields='notifications')
posts = graph.get_object(id='974146599436745', fields='posts')
##PAGE INSIGHTS
page_content_activity_by_action_type_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_content_activity_by_action_type_unique).period(week)')
page_content_activity_by_age_gender_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_content_activity_by_age_gender_unique).period(week)')
page_content_activity_by_country_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_content_activity_by_country_unique).period(week)')
page_content_activity_by_locale_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_content_activity_by_locale_unique).period(week)')
page_content_activity = graph.get_object(id='974146599436745', fields='insights.metric(page_content_activity).period(week)')
post_activity_unique = graph.get_object(id='974146599436745_974147879436617', fields='insights.metric(post_activity_unique)')
page_impressions = graph.get_object(id='974146599436745', fields='insights.metric(page_impressions).period(day)')
page_impressions_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_impressions_unique).period(week)')
page_impressions_by_story_type =graph.get_object(id='974146599436745', fields='insights.metric(page_impressions_by_story_type).period(day)')
page_impressions_frequency_distribution = graph.get_object(id='974146599436745', fields='insights.metric(page_impressions_frequency_distribution).period(week)')
page_impressions_by_age_gender_unique=graph.get_object(id='974146599436745', fields='insights.metric(page_impressions_by_age_gender_unique).period(week)')
page_engaged_users = graph.get_object(id='974146599436745', fields='insights.metric(page_engaged_users).period(week)')
page_post_engagements = graph.get_object(id='974146599436745', fields='insights.metric(page_post_engagements).period(week)')
page_consumptions = graph.get_object(id='974146599436745', fields='insights.metric(page_post_engagements).period(week)')
page_consumptions_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_consumptions_unique).period(week)')
##
page_negative_feedback = graph.get_object(id='974146599436745', fields='insights.metric(page_negative_feedback).period(week)')
page_positive_feedback_by_type=graph.get_object(id='974146599436745', fields='insights.metric(page_positive_feedback_by_type).period(week)')
page_fans_online = graph.get_object(id='974146599436745', fields='insights.metric(page_fans_online)')
page_fans_online_per_day = graph.get_object(id='974146599436745', fields='insights.metric(page_fans_online_per_day)')
page_fan_adds_by_paid_non_paid_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_fan_adds_by_paid_non_paid_unique)')
page_actions_post_reactions_like_total = graph.get_object(id='974146599436745', fields='insights.metric(page_actions_post_reactions_like_total)')
page_total_actions = graph.get_object(id='974146599436745', fields='insights.metric(page_total_actions)')
##DEMOGRAPHICS
page_fans_locale = graph.get_object(id='974146599436745', fields='insights.metric(page_fans_locale)')
page_fans_city = graph.get_object(id='974146599436745', fields='insights.metric(page_fans_city)')
page_fans_country = graph.get_object(id='974146599436745', fields='insights.metric(page_fans_country)')
page_fans_gender_age = graph.get_object(id='974146599436745', fields='insights.metric(page_fans_gender_age)')
page_fan_adds = graph.get_object(id='974146599436745', fields='insights.metric(page_fan_adds)')
page_fan_removes = graph.get_object(id='974146599436745', fields='insights.metric(page_fan_removes)')
page_fans_by_unlike_source_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_fans_by_unlike_source_unique)')
##
page_tab_views_login_top_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_tab_views_login_top_unique)')
page_views_total = graph.get_object(id='974146599436745', fields='insights.metric(page_views_total)')
page_views_external_referrals = graph.get_object(id='974146599436745', fields='insights.metric(page_views_external_referrals)')
page_views_by_profile_tab_logged_in_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_views_by_profile_tab_logged_in_unique)')
page_views_by_internal_referer_logged_in_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_views_by_internal_referer_logged_in_unique)')
#page_views_by_referers_logged_in_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_views_by_referers_logged_in_unique)')
##PAGE POST
page_posts_impressions = graph.get_object(id='974146599436745', fields='insights.metric(page_posts_impressions).period(week)')
page_posts_impressions_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_posts_impressions_unique)')
page_posts_impressions_frequency_distribution = graph.get_object(id='974146599436745', fields='insights.metric(page_posts_impressions_frequency_distribution)')
##POSTS
post_impressions = graph.get_object(id='974146599436745_974147879436617', fields='insights.metric(post_impressions)')
post_impressions_fan = graph.get_object(id='974146599436745_974147879436617', fields='insights.metric(post_impressions_fan)')
post_reactions_by_type_total = graph.get_object(id='974146599436745_974147879436617', fields='insights.metric(post_reactions_by_type_total)')
page_content_activity_by_city_unique =graph.get_object(id='974146599436745', fields='insights.metric(page_content_activity_by_city_unique)')
page_impressions_by_country_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_impressions_by_country_unique)')
page_impressions_by_city_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_impressions_by_city_unique)')
page_fans_by_like_source = graph.get_object(id='974146599436745', fields='insights.metric(page_fans_by_like_source)')
page_content_activity_by_city_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_content_activity_by_city_unique)')
page_impressions_by_city_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_impressions_by_city_unique)')
page_fans_city= graph.get_object(id='974146599436745', fields='insights.metric(page_fans_city)')
page_fans = graph.get_object(id='974146599436745', fields='insights.metric(page_fans)')
page_content_activity_by_action_type = graph.get_object(id='974146599436745', fields='insights.metric(page_content_activity_by_action_type)')
page_impressions_by_country_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_impressions_by_country_unique).date_preset(last_7d)')
#ad_campaign = graph.get_object(id='974146599436745', fields='ad_campaign')
page_content_activity_by_country_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_content_activity_by_country_unique).date_preset(last_7d)')
page_content_activity_by_city_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_content_activity_by_city_unique).date_preset(last_7d)')
#content_temp_end_time=page_content_activity_by_city_unique["insights"]["data"][0]['values'][0]['end_time']
#content_temp_value=page_content_activity_by_city_unique["insights"]["data"][0]['values'][0]['value']
page_impressions_by_locale_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_impressions_by_locale_unique).date_preset(last_7d)')
page_content_activity_by_locale_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_content_activity_by_locale_unique).date_preset(last_7d)')
##latest
page_posts_impressions_viral = graph.get_object(id='974146599436745', fields='insights.metric(page_posts_impressions_viral).period(week)')
post_clicks = graph.get_object(id='974146599436745_974147879436617', fields='insights.metric(post_clicks)')
post_reactions_by_type_total = graph.get_object(id='974146599436745_974147879436617', fields='insights.metric(post_reactions_by_type_total)')
page_fans_by_like_source = graph.get_object(id='974146599436745', fields='insights.metric(page_fans_by_like_source)')
page_positive_feedback_by_type = graph.get_object(id='974146599436745', fields='insights.metric(page_positive_feedback_by_type)')
page_consumptions = graph.get_object(id='974146599436745', fields='insights.metric(page_consumptions).period(week)')
page_actions_post_reactions_total= graph.get_object(id='974146599436745', fields='insights.metric(page_actions_post_reactions_total)')
page_fan_adds_by_paid_non_paid_unique = graph.get_object(id='974146599436745', fields='insights.metric(page_fan_adds_by_paid_non_paid_unique)')
post_reactions_by_type_total = graph.get_object(id='974146599436745_974147879436617', fields='insights.metric(post_reactions_by_type_total)')
print (json.dumps(page_fans, indent=4))
|
from .. import math
import numpy as np
def TM(t, C_a, K_trans, k_ep=None, v_e=None, v_p=None):
if k_ep is None:
k_ep = Conversion.k_ep(K_trans=K_trans, v_e=v_e)
tofts = K_trans*np.exp(-t*k_ep)
return math.NP.convolve(tofts, C_a, t)
def ETM(t, C_a, K_trans, k_ep=None, v_p=None, v_e=None):
if k_ep is None:
k_ep = Conversion.k_ep(K_trans=K_trans, v_e=v_e)
return TM(t, C_a, K_trans, k_ep) + C_a*v_p
def twoCXM(t, C_a, PS=None, F_p=None, v_e=None, v_p=None, K_trans=None, k_ep=None):
if 0 in [K_trans, PS] and not 0 in [F_p, v_e, v_p]:
two_compartment_model = F_p*np.exp(-t*F_p/v_p)
else:
if PS is None:
PS = Conversion.PS(F_p=F_p, K_trans=K_trans)
E = PS/float(PS + F_p)
e = v_e/float(v_e + v_p)
Ee = E*e
tau_pluss = (E - Ee + e)/(2.*E)*(1 + np.sqrt(1 - 4*(Ee*(1-E)*(1-e))/(E - Ee + e)**2 ) )
tau_minus = (E - Ee + e)/(2.*E)*(1 - np.sqrt(1 - 4*(Ee*(1-E)*(1-e))/(E - Ee + e)**2 ) )
F_pluss = F_p*(tau_pluss - 1.)/(tau_pluss - tau_minus)
F_minus = -F_p*(tau_minus - 1.)/(tau_pluss - tau_minus)
K_pluss = F_p/((v_p + v_e) * tau_minus)
K_minus = F_p/((v_p + v_e) * tau_pluss)
two_compartment_model = F_pluss*np.exp(-t*K_pluss) + F_minus*np.exp(-t*K_minus)
return math.NP.convolve(two_compartment_model, C_a, t)
class Conversion:
def __init__(self):
pass
def raiseError(self):
raise TypeError('Invalid argument')
@staticmethod
def k_ep(K_trans=None, v_e=None, PS=None, F_p=None):
'''
Needs one of the following combinations of paramaters:
[K_trans, v_e]
[PS, F_p, v_e]
'''
try:
return K_trans/v_e
except TypeError:
pass
try:
return PS*F_p/(PS + F_p)/v_e
except TypeError:
pass
# if not None in [K_trans, v_e]:
# return K_trans/v_e
# if not None in [v_e, PS, F_p]:
# return self.K_trans(PS=PS, F_p=F_p)/v_e
@staticmethod
def K_trans(PS=None, F_p=None, k_ep=None, v_e=None):
'''
Needs one of the following combinations of paramaters:
[PS, F_p]
[k_ep, v_e]
'''
try:
return PS*F_p/(PS + F_p)
except TypeError:
raise TypeError('Invalid argument')
try:
return k_ep*v_e
except TypeError:
raise TypeError('Invalid argument')
# if not None in [PS, F_p]:
# return PS*F_p/(PS + F_p)
# if not None in [k_ep, v_e]:
# return k_ep*v_e
@staticmethod
def v_e(K_trans=None, k_ep=None):
return K_trans/k_ep
@staticmethod
def PS(F_p, K_trans):
return F_p*K_trans/(F_p - K_trans)
@staticmethod
def F_p(K_trans, PS):
return K_trans*PS/(PS - K_trans)
|
from sqlalchemy_continuum_vendored import make_versioned
# make_versioned(user_cls=None, options={'strategy' : 'subquery'})
# Import the DB things.
from common.main_archive_db import WebPages
from common.main_archive_db import WebFiles
from common.main_archive_db import PluginStatus
from common.main_archive_db import NuReleaseItem
from common.main_archive_db import NuResolvedOutbound
from common.raw_archive_db import RawWebPages
from common.rss_func_db import Tags
from common.rss_func_db import Author
from common.rss_func_db import RssFeedPost
from common.rss_func_db import RssFeedUrlMapper
from common.rss_func_db import RssFeedEntry
from common.rss_func_db import QidianFeedPostMeta
from common.misc_db import KeyValueStore
from common.misc_db import get_from_db_key_value_store
from common.misc_db import set_in_db_key_value_store
from common.misc_db import get_from_version_check_table
from common.misc_db import set_in_version_check_table
from common.cookie_db import WebCookieDb
from common.db_engine import get_engine
from common.db_engine import get_db_session
from common.db_engine import delete_db_session
from common.db_engine import session_context
from common.db_constants import DB_REALTIME_PRIORITY
from common.db_constants import DB_HIGH_PRIORITY
from common.db_constants import DB_MED_PRIORITY
from common.db_constants import DB_LOW_PRIORITY
from common.db_constants import DB_IDLE_PRIORITY
from common.db_constants import DB_DEFAULT_DIST
from common.db_constants import MAX_DISTANCE
from common.db_base import Base
from common.redis import redis_session_context
import sqlalchemy as sa
sa.orm.configure_mappers()
# from sqlalchemy_searchable import make_searchable
# make_searchable()
|
from ...primitives import Int
from .. import List, range as wf_range
def test_range():
assert isinstance(wf_range(10), List[Int])
assert isinstance(wf_range(0, 10), List[Int])
assert isinstance(wf_range(0, 10, 2), List[Int])
|
#!/usr/bin/env python
"""
Pyomo Solver Performance Benchmarking Library
"""
import sys
from setuptools import setup, find_packages
def warn(s):
sys.stderr.write('*** WARNING *** {}\n'.format(s))
kwargs = dict(
name='pysperf',
packages=find_packages(),
install_requires=[],
extras_require={},
package_data={
# If any package contains *.template or *.json files, include them:
'': ['*.template', '*.json']
},
scripts=[],
author='Qi Chen',
author_email='qichen@andrew.cmu.edu',
maintainer='Qi Chen',
url="https://github.com/grossmann-group/pysperf",
license='BSD 2-clause',
description="Pyomo Solver Performance Benchmarking Library",
long_description=__doc__,
data_files=[],
keywords=["pyomo", "generalized disjunctive programming"],
classifiers=[
"Programming Language :: Python :: 3.6",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules"
],
entry_points="""\
[console_scripts]
pysperf=pysperf.__main__:main
"""
)
try:
setup(setup_requires=['setuptools_scm'], use_scm_version=True, **kwargs)
except (ImportError, LookupError):
default_version = '1.0.0'
warn('Cannot use .git version: package setuptools_scm not installed '
'or .git directory not present.')
print('Defaulting to version: {}'.format(default_version))
setup(**kwargs)
|
from pyecg.annotations import ECGAnnotationSample
def test_equality():
annotation1 = ECGAnnotationSample("N", 1)
annotation2 = ECGAnnotationSample("N", 1)
assert annotation1 == annotation2
def test_inequality_0():
annotation1 = ECGAnnotationSample("A", 1)
annotation2 = ECGAnnotationSample("N", 1)
assert annotation1 != annotation2
def test_inequality_1():
annotation1 = ECGAnnotationSample("N", 1)
annotation2 = ECGAnnotationSample("N", 2)
assert annotation1 != annotation2
def test_inequality_2():
annotation1 = ECGAnnotationSample("A", 1)
annotation2 = ECGAnnotationSample("N", 2)
assert annotation1 != annotation2
|
import smtplib
# bring in the by fault email module
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
# turned into a function to import into scrape.py
# put your own values in the from & to email fields
def send(filename):
from_add = "from_email@gmail.com"
to_add = "to_email@gmail.com"
subject = "Finance Stock Report"
msg = MIMEMultipart()
msg["From"] = from_add
msg["To"] = to_add
msg["Subject"] = subject
body = "<b>Today's Report Attached</b>"
msg.attach(MIMEText(body, "html"))
my_file = open(filename, "rb")
part = MIMEBase("application", "octet-stream")
part.set_payload((my_file).read())
encoders.encode_base64(part)
part.add_header("Content-Disposition", "attachment; filename= " + filename)
msg.attach(part)
message = msg.as_string()
# start a mail server
server = smtplib.SMTP("smtp.gmail.com", 587)
# make the server secure
server.starttls()
# login info & from , could use a .env file but trying to keep lines of code at minimum
# instead used gmail to simply generate a custom password for this app
# add your own server login password
server.login(from_add, "server_password_here")
server.sendmail(from_add, to_add, message)
# quit the server once sent
server.quit()
|
from django.db.models.signals import pre_save
from django.dispatch import receiver
from periodic_tasks.models import PeriodicTask
@receiver(pre_save, sender=PeriodicTask)
def set_next_run_timestamp(sender, instance=None, **kwargs):
"""
Signal to set next run before PeriodicTask instance saving
"""
instance.set_next_run_timestamp()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import logging
import tensorflow as tf
from invoke import run, exceptions
log = logging.getLogger('biomedbert')
log.setLevel(logging.INFO)
def fine_tune_squad(v1: bool, model_type: str, bucket_name: str, model_dir: str, train_file: str, predict_file: str,
tpu_name: str, tpu_zone: str, gcp_project: str, tpu_cores: str):
"""fine tune squad"""
use_tpu = True
sub_folder = 'v2.0'
output_dir = 'squad_v2/'
config = 'large_bert_config.json'
version_2_with_negative = True
num_tpu_cores = 8
if tpu_cores is not None:
num_tpu_cores = int(tpu_cores)
if tpu_name is None:
tpu_name = 'false'
use_tpu = False
if model_type == 'base':
# bert base
config = 'base_bert_config.json'
elif model_type == 'large':
# bert large
config = 'large_bert_config.json'
else:
log.info('No config file')
sys.exit(1)
if v1:
version_2_with_negative = False
output_dir = 'squad_v1/'
sub_folder = 'v1.1'
init_checkpoint = tf.train.latest_checkpoint('gs://{}/{}'.format(bucket_name, model_dir))
vocab_file = 'gs://{}/{}/vocab.txt'.format(bucket_name, model_dir)
bert_config_file = 'gs://{}/{}/{}'.format(bucket_name, model_dir, config)
output_dirs = 'gs://{}/{}/{}'.format(bucket_name, model_dir, output_dir)
train_file_path = 'gs://{}/squad_data/{}/{}'.format(bucket_name, sub_folder, train_file)
predict_file_path = 'gs://{}/squad_data/{}/{}'.format(bucket_name, sub_folder, predict_file)
try:
run('python3 bert/run_squad.py --vocab_file={} '
'--bert_config_file={} '
'--init_checkpoint={} --do_train=true --train_file={} '
'--do_predict=True --predict_file={} --train_batch_size=16 '
'--predict_batch_size=16 --learning_rate=3e-5 --num_train_epochs=2.0 '
'--max_seq_length=384 --doc_stride=128 --output_dir={} '
'--num_tpu_cores=128 --use_tpu={} --tpu_name={} --tpu_zone={} '
'--gcp_project={} --version_2_with_negative={}'.format(
vocab_file, bert_config_file, init_checkpoint, train_file_path,
predict_file_path, output_dirs, use_tpu, tpu_name, tpu_zone, gcp_project,
version_2_with_negative))
except exceptions.UnexpectedExit:
print('Cannot fine tune SQuAD')
def evaluate_squad(v1: bool, bucket_name: str, model_dir: str, evaluate_file: str,
predict_file: str):
"""evaluate squad"""
sub_folder = 'v2.0'
output_dir = 'squad_v2'
if v1:
output_dir = 'squad_v1'
sub_folder = 'v1.1'
try:
if not os.path.exists('squad_evaluate'):
run('mkdir squad_evaluate')
run('gsutil cp gs://{}/squad_data/{} ./squad_evaluate/'.format(bucket_name, evaluate_file))
run('gsutil cp gs://{}/squad_data/{}/{} ./squad_evaluate/'.format(bucket_name, sub_folder, predict_file))
run('gsutil cp gs://{}/{}/{}/predictions.json ./squad_evaluate/'.format(bucket_name, model_dir, output_dir))
if v1:
run('python3 ./squad_evaluate/{} ./squad_evaluate/{} ./squad_evaluate/predictions.json'.format(
evaluate_file, predict_file))
else:
run('gsutil cp gs://{}/{}/{}/null_odds.json ./squad_evaluate/'.format(bucket_name, model_dir, output_dir))
run('python3 ./squad_evaluate/{} ./squad_evaluate/{} ./squad_evaluate/predictions.json --na-prob-file '
'./squad_evaluate/null_odds.json'.format(evaluate_file, predict_file))
except exceptions.UnexpectedExit:
print('Cannot evaluate SQuAD')
|
from __future__ import absolute_import
import blinker
from collections import deque
from functools import wraps, partial
from threading import local
import sys
from .compat import reraise, iteritems, is_nextable
def noop(*_, **dummy):
pass
class StopIterationWithValue(StopIteration):
value = None
def __init__(self, value):
super(StopIterationWithValue, self).__init__()
self.value = value
class _PendingRunnable(object):
def __init__(self, it, parent=None, key=None, callback=None, callback_exc=None):
self.iterable = it
self.iteration = 0
self.parent = parent
self.key = key
self.callback = callback
self.callback_exc = callback_exc
self.dependency_results = None
self.dependencies_remaining = 0
self.exception_to_raise = None
self.result = None
self.result_exception = None
def step(self):
assert self.iteration >= 0
self.iteration += 1
if self.iteration == 1:
assert self.dependency_results is None and self.exception_to_raise is None
run_fn = partial(next, self.iterable)
elif self.exception_to_raise is not None:
exc, self.exception_to_raise = self.exception_to_raise, None
run_fn = partial(self.iterable.throw, *exc)
else:
run_fn = partial(self.iterable.send, self.dependency_results)
try:
requirements = run_fn()
except StopIteration as e:
self.result = getattr(e, 'value', None)
self.iteration = -1
return None
except Exception:
self.result_exception = sys.exc_info()
self.iteration = -1
return None
if requirements is None:
requirements = []
dependencies = None
if isinstance(requirements, dict):
dependencies = requirements
self.dependency_results = {}
self.dependency_completed = partial(self._depencency_completed_list_or_dict, self.iteration)
elif isinstance(requirements, (list, set, frozenset, tuple)):
dependencies = dict(enumerate(requirements))
self.dependency_results = [None] * len(dependencies)
self.dependency_completed = partial(self._depencency_completed_list_or_dict, self.iteration)
else:
dependencies = {'': requirements}
self.dependency_results = None
self.dependency_completed = partial(self._dependency_completed_single, self.iteration)
self.dependency_threw = partial(self._dependency_threw, self.iteration)
self.dependencies_remaining = len(dependencies)
return dependencies
def _depencency_completed_list_or_dict(self, iteration, loop, k, v):
if self.iteration != iteration:
return
self.dependency_results[k] = v
self.dependencies_remaining -= 1
if self.ready:
loop.runnable(self)
def _dependency_completed_single(self, iteration, loop, _, v):
if self.iteration != iteration:
return
self.dependency_results = v
self.dependencies_remaining -= 1
if self.ready:
loop.runnable(self)
def _dependency_threw(self, iteration, loop, _, type_, value, traceback):
if self.iteration != iteration:
return
self.exception_to_raise = (type_, value, traceback)
self.iteration += 1
self.dependencies_remaining = 0
if self.ready:
loop.runnable(self)
dependency_completed = None # dynamically changed.
dependency_threw = None
@property
def ready(self):
return self.dependencies_remaining == 0 and getattr(self.iterable, 'ready', True)
LOCAL_ID = 0
def new_local_id():
global LOCAL_ID
LOCAL_ID += 1
return LOCAL_ID
class RunLoop(object):
def __init__(self):
self.locals = dict()
self.run_queue = deque()
self.total_pending = 0
self.main_runnable = None
self.on_queue_exhausted = blinker.Signal()
self.on_runnable_added = blinker.Signal()
self.on_iteration = blinker.Signal()
def run(self, iterable):
self.main_runnable = self.add(iterable)
while self.total_pending:
assert self.run_queue
self.on_iteration.send()
self._run_all_runnables()
if self.total_pending:
self.on_queue_exhausted.send()
if self.main_runnable.result_exception:
reraise(*self.main_runnable.result_exception)
return self.main_runnable.result
def add(self, iterable, callback_ok=None, callback_exc=None):
callback_ok = callback_ok or noop
callback_exc = callback_exc or noop
obj = _PendingRunnable(iterable, callback=callback_ok, callback_exc=callback_exc)
self.total_pending += 1
if obj.ready:
self.run_queue.append(obj)
if hasattr(iterable, 'on_add_to_loop'):
iterable.on_add_to_loop(self, obj)
self.on_runnable_added.send(runnable=obj)
return obj
def runnable(self, runnable):
"""Notify the context that routine is runnable. This assumes that
.add() was already called with this iterable."""
assert isinstance(runnable, _PendingRunnable)
self.run_queue.append(runnable)
def _run_all_runnables(self):
while self.run_queue:
runnable = self.run_queue.popleft()
deps = runnable.step()
if deps is None:
if runnable.result_exception:
runnable.callback_exc(*runnable.result_exception)
elif runnable.callback is not None:
runnable.callback(runnable.result)
self.total_pending -= 1
continue
for k, v in iteritems(deps):
self.add(v,
partial(runnable.dependency_completed, self, k),
partial(runnable.dependency_threw, self, k))
if runnable.ready:
self.run_queue.append(runnable)
class _ThreadingLocalRunLoop(local):
loop = None
_CURRENT_RUN_LOOP = _ThreadingLocalRunLoop()
def current_run_loop():
return _CURRENT_RUN_LOOP.loop
def use_threading_local():
assert current_run_loop() is None
global _CURRENT_RUN_LOOP
_CURRENT_RUN_LOOP = _ThreadingLocalRunLoop()
try:
from gevent.local import local as gevent_local
except ImportError as ex:
def use_gevent_local():
raise ImportError("Gevent not present")
else:
class _GeventLocalRunLoop(gevent_local):
loop = None
def use_gevent_local():
assert current_run_loop() is None
global _CURRENT_RUN_LOOP
_CURRENT_RUN_LOOP = _GeventLocalRunLoop()
def runloop_coroutine():
"""Creates a coroutine that gets run in a run loop.
The run loop will be created if necessary."""
def wrap(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if _CURRENT_RUN_LOOP.loop:
it = fn(*args, **kwargs)
assert is_nextable(it), '%s did not return an iterator' % (fn)
return it
else:
_CURRENT_RUN_LOOP.loop = loop = RunLoop()
try:
it = fn(*args, **kwargs)
assert is_nextable(it), '%s did not return an iterator' % (fn)
return loop.run(it)
finally:
_CURRENT_RUN_LOOP.loop = None
return wrapper
return wrap
def requires_runloop():
"""Same as @runloop_coroutine, but refuses to create a loop if one is not present."""
def wrap(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
assert current_run_loop()
return fn(*args, **kwargs)
return wrapper
return wrap
def coro_return(value):
raise StopIterationWithValue(value)
class _DeferredIterable(object):
def __init__(self):
self.value = None
self.exception = None
self.ready = False
self.batch_context = None
self.runnable = None
self.on_ready = blinker.Signal()
def on_add_to_loop(self, context, runnable):
assert self.batch_context is None
self.batch_context = context
self.runnable = runnable
def set_value(self, value):
assert not self.ready
self.ready = True
self.value = value
if self.batch_context:
self.batch_context.runnable(self.runnable)
self.on_ready.send()
def set_exception(self, type_, value=None, traceback=None):
assert not self.ready
self.ready = True
self.exception = (type_, value, traceback)
if self.batch_context:
self.batch_context.runnable(self.runnable)
self.on_ready.send()
def __next__(self):
coro_return(self.get())
next = __next__
def get(self):
if __debug__:
if not self.ready:
raise ValueError(".get() on non-ready deferred.")
if self.exception is not None:
reraise(*self.exception)
return self.value
@requires_runloop()
def deferred():
assert current_run_loop()
coro_return(_DeferredIterable())
yield # pragma: no cover
@requires_runloop()
def future(iterable):
"""Given an iterable, this returns an object that can be yielded again once
you want to use it's value. This is useful to "front-load" some expensive
calls that you don't need the results of immediately.
Usage:
thing_later = yield future(thing_resolver())
... Do things ...
thing = yield thing_later
In addition, this may be used to catch exceptions when doing several actions in parallel:
a, b, c = yield future(get_a()), future(get_b()), future(get_c())
try:
a_thing = yield a
except ValueError:
a_thing = None # it's ok we don't need it anyway
b_thing, c_thing = yield b, c
"""
result = yield deferred()
current_run_loop().add(iterable, result.set_value, result.set_exception)
coro_return(result)
@requires_runloop()
def wait(deferreds, count=None):
"""iwait(deferreds_or_futures, count=None).
Waits until up to `count` (or all, if count is None) deferreds to complete. Returns
the objects that completed. Example:
a, b, c = yield future(get_a()), future(get_b()), future(get_c())
first, second = yield wait([a, b, c], count=2)
# At this point 2/3 of the above futures are complete."""
if count is None:
count = len(deferreds)
assert count <= len(deferreds), 'Waiting on too many deferreds: %s' % (count)
ready_list = [d for d in deferreds if d.ready]
# Check if any of the deferreds are ready.
if len(ready_list) < count:
wait_deferred = yield deferred()
for d in deferreds:
def on_ready(_):
if wait_deferred.ready:
return # This is mostly necessary for PyPy because weak refs
# aren't immediately removed there.
ready_list.append(d)
if len(ready_list) >= count:
wait_deferred.set_value(True)
d.on_ready.connect(on_ready, weak=True)
yield wait_deferred
assert len(ready_list) == count
coro_return(ready_list)
|
from setuptools import find_packages, setup
setup(
name='museum_app',
version='1.0.0',
packages=find_packages(),
include_package_data=False,
zip_safe=False,
install_requires=[
'flask', 'graphene', 'mongoengine', 'werkzeug'
],
)
|
import augeas
from jadi import interface
class AugeasError(Exception):
def __init__(self, aug):
self.message = None
self.data = {}
aug.dump('/')
for ep in aug.match('/augeas//error'):
self.message = aug.get(ep + '/message')
for p in aug.match(ep + '/*'):
self.data[p.split('/')[-1]] = aug.get(p)
def __str__(self):
return self.message
class Augeas(augeas.Augeas):
"""
A smarter and faster wrapper around :class:`augeas.Augeas`.augeas
For faster startup, no modules and lenses are preloaded::
aug = Augeas(modules=[{
'name': 'Interfaces', # module name
'lens': 'Interfaces.lns', # lens name
'incl': [ # included files list
self.path,
self.path + '.d/*',
]
}])
Don't forget to call :func:`.load()` afterwards.
"""
def __init__(self, modules=[], loadpath=None):
augeas.Augeas.__init__(self, loadpath=loadpath, flags=augeas.Augeas.NO_MODL_AUTOLOAD | augeas.Augeas.NO_LOAD)
for module in modules:
path = '/augeas/load/%s' % module['name']
self.set(path + '/lens', module['lens'])
for index, incl in enumerate(module['incl']):
self.set(path + '/incl[%i]' % (index + 1), incl)
def __enc(self, v):
if v:
return v.encode('utf8')
def match(self, path):
return augeas.Augeas.match(self, self.__enc(path))
def get(self, path):
return augeas.Augeas.get(self, self.__enc(path))
def set(self, path, value):
augeas.Augeas.set(self, self.__enc(path), self.__enc(value))
def setd(self, path, value, default=None):
"""
Sets `path` to `value`, or removes `path` if `value == default`
"""
if value is not None:
self.set(path, value)
if value == default:
self.remove(path)
def raise_error(self):
"""
Extracts error information from Augeas tree and raises :exc:`AugeasError`
"""
raise AugeasError(self)
def save(self):
try:
augeas.Augeas.save(self)
except IOError:
self.raise_error()
def dump(self, path):
"""
Dumps contents under `path` to stdout.
"""
for sp in self.match(path + '/*'):
print(sp, '=', self.get(sp))
@interface
class AugeasEndpoint(object):
"""
Implement this to provide Augeas trees to the frontend.
"""
id = None
def __init__(self, context):
self.context = context
def get_augeas(self):
"""
Should return a ready-to-use :class:`Augeas`
"""
raise NotImplementedError
def get_root_path(self):
"""
Should return an Augeas path of the root node to be provided to the frontend.
"""
raise NotImplementedError
|
import tensorflow as tf
from metrics.chamfer.nearest_neighbour_cuda import nn_distance as nn_distance_cpu
def bidirectionalchamfer(pointCloud1, pointCloud2):
with tf.name_scope('bidirectionalchamfer'):
shape1 = pointCloud1.shape.as_list()
shape2 = pointCloud2.shape.as_list()
pointCloud1 = tf.reshape(pointCloud1, [-1] + shape1[-2:])
pointCloud2 = tf.reshape(pointCloud2, [-1] + shape2[-2:])
dist1, _, dist2, __ = nn_distance_cpu(pointCloud1, pointCloud2)
loss1 = tf.reduce_sum(dist1, axis=-1)
loss2 = tf.reduce_sum(dist2, axis=-1)
if len(shape1) > 3:
loss1 = tf.reshape(loss1, shape1[:-2])
if len(shape2) > 3:
loss2 = tf.reshape(loss2, shape2[:-2])
return loss1 + loss2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import sys
from .exc import SpaceTrackEntityNotSupported
if sys.version_info >= (3, 0, 0):
basestring = str
unicode = str
SUPPORTABLE_ENTITIES = (
'tle_latest',
'tle_publish',
'omm',
'boxscore',
'satcat',
'launch_site',
'satcat_change',
'satcat_debut',
'decay',
'tip',
'tle',
)
class SpaceTrackQueryBuilder(object):
__slots__ = (
'_filters',
'_entity',
'_order_by',
'_limit',
'_format',
'_metadata',
'_distinct',
'_predicate',
)
def __init__(self, entity=None, order_by=None, limit=None,
fmt=None, metadata=False, distinct=True, predicate=None,
*args, **filters):
self.entity = entity
self.filters = filters
self.predicate = predicate
self.order_by = order_by
self.limit = limit
self.format = fmt
self.metadata = metadata
self.distinct = distinct
@property
def entity(self):
return self._entity
@entity.setter
def entity(self, value):
if isinstance(value, collections.Iterable) and not isinstance(value, basestring):
value = tuple(value)
value = value and value[0]
if value is None:
value = 'tle'
if not isinstance(value, basestring):
raise TypeError('Attribute `entity` must be basestring')
elif value not in SUPPORTABLE_ENTITIES:
raise SpaceTrackEntityNotSupported(self.entity)
self._entity = value
@property
def order_by(self):
return self._order_by
@order_by.setter
def order_by(self, value):
if value is None:
value = tuple()
if isinstance(value, collections.Iterable) and not isinstance(value, basestring):
value = tuple(value)
if not isinstance(value, (basestring, collections.Iterable)):
raise TypeError('Attribute `order_by` must be basestring or collections.Iterable')
self._order_by = value
@property
def predicate(self):
return self._predicate
@predicate.setter
def predicate(self, value):
if value is None:
value = tuple()
if isinstance(value, collections.Iterable) and not isinstance(value, basestring):
value = tuple(value)
if not isinstance(value, (basestring, collections.Iterable)):
raise TypeError('Attribute `predicate` must be basestring or collections.Iterable')
self._predicate = value
@property
def limit(self):
return self._limit
@limit.setter
def limit(self, value):
if isinstance(value, collections.Iterable) and not isinstance(value, basestring):
value = tuple(value)
value = value and value[0]
if value is not None:
value = int(value)
self._limit = value
@property
def format(self):
return self._format
@format.setter
def format(self, value):
if isinstance(value, collections.Iterable) and not isinstance(value, basestring):
value = tuple(value)
value = value and value[0]
if value is None:
value = 'json'
if not isinstance(value, basestring):
raise TypeError('Attribute `format` must be basestring')
self._format = value
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, value):
if isinstance(value, collections.Iterable) and not isinstance(value, basestring):
value = tuple(value)
value = value and value[0]
self._metadata = bool(value)
@property
def distinct(self):
return self._distinct
@distinct.setter
def distinct(self, value):
if isinstance(value, collections.Iterable) and not isinstance(value, basestring):
value = tuple(value)
value = value and value[0]
self._distinct = bool(value)
@property
def filters(self):
return self._filters
@filters.setter
def filters(self, dictionary):
_filters = collections.defaultdict(list)
if isinstance(dictionary, dict):
for key, value in dictionary.items():
if isinstance(value, collections.Iterable) and not isinstance(value, (basestring, bytes)):
_filters[key].extend(value)
else:
_filters[key].append(value)
self._filters = _filters
@property
def query_params(self):
return dict(entity=self.entity,
filters=self.serialize_multivalue(self.filters),
format=self.format,
limit=self.limit,
metadata="true" if self.metadata else "false",
order_by=self.serialize_multivalue(self.order_by),
predicates=self.serialize_multivalue(self.predicate))
def query(self):
q = ('basicspacedata/query/'
'class/{entity}/'
'{filters}/'
'format/{format}/'
'metadata/{metadata}/')
if self.order_by:
q += 'orderby/{order_by}/'
if self.predicate:
q += 'predicates/{predicates}/'
if self.limit:
q += 'limit/{limit}/'
return q.format(**self.query_params)
@staticmethod
def serialize_multivalue(multivalue):
if isinstance(multivalue, dict):
return "/".join('{}/{}'.format(key, ",".join(unicode(value) for value in values))
for key, values in multivalue.items())
elif isinstance(multivalue, collections.Iterable) and not isinstance(multivalue, basestring):
return ",".join(unicode(value) for value in multivalue)
return multivalue
def __repr__(self):
return '<{}("{}")>'.format(self.__class__.__name__, unicode(self))
def __str__(self):
return self()
def __call__(self):
return self.query()
|
import json
from typing import Optional
import zipcodes
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_validarizonazip(zip: str):
list_of_dicts_ofarizonazips = zipcodes.filter_by(state="AZ")
list_ofarizonazips = [d["zip_code"] for d in list_of_dicts_ofarizonazips]
if len(zip) > 10:
return False
elif type(zip) != str:
return False
elif zip in list_ofarizonazips:
return True
else:
return False
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidArizonaZip(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.validarizonazip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_validarizonazip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidArizonaZip(ColumnMapExpectation):
"""Expect values in this column to be valid Arizona zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"validarizonazip": ["86511", "85920", "85308", "85001"],
"invalidarizonazip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "validarizonazip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalidarizonazip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.validarizonazip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidArizonaZip().print_diagnostic_checklist()
|
import turtle
# fill_box(x, y, dx, dy, [color=None])
# box((x1,y1), (x2,y2), [color=None], [width=None])
# box(x1,y1,x2,y2, [color=None], [width=None])
# fill_box_array(x, y, dx, dy, cnt, [add_x=0], [add_y=0], [color=None])
# line(x1, y1, x2, y2, [color=None], [width=None])
# multiline((x1,y1), (x2,y2), ...., (xn,yn) , params)
# set_color([color='black'])
# circle(x, y, r, [color=None], [width=None], [fill_color=None], [fill=False])
# circle_array(x, y, r, cnt, [dx=0], [dy=0], [color=None], [width=None], [fill_color=None], [fill=False]):
def fill_box(x, y, dx, dy, color=None):
turtle.up()
turtle.goto(x,y)
if color:
turtle.color(color, color)
turtle.down()
turtle.begin_fill()
turtle.setx(x + dx)
turtle.sety(y + dy)
turtle.setx(x)
turtle.sety(y)
turtle.end_fill()
def box1(x1, y1, x2, y2, color=None, width=None):
turtle.up()
turtle.goto(x1, y1)
if width is not None:
turtle.pen(pensize=width)
if color:
turtle.color(color, color)
turtle.down()
turtle.setx(x2)
turtle.sety(y2)
turtle.setx(x1)
turtle.sety(y1)
def box(start, end, color=None, width=None):
turtle.up()
turtle.goto(start)
if width is not None:
turtle.pen(pensize=width)
if color:
turtle.color(color, color)
x1, y1 = start
x2, y2 = end
turtle.down()
turtle.setx(x2)
turtle.sety(y2)
turtle.setx(x1)
turtle.sety(y1)
def fill_box_array(x, y, dx, dy, cnt, add_x=0, add_y=0, color=None):
for i in range(cnt):
fill_box(x, y, dx, dy, color=color)
x += add_x
y += add_y
def line(x1, y1, x2, y2, color=None, width=None):
turtle.up()
turtle.goto(x1, y1)
if color:
turtle.color(color, color)
if width is not None:
turtle.pen(pensize=width)
turtle.down()
turtle.goto(x2, y2)
def multiline(*points, **params):
if 'color' in params:
turtle.color(params['color'])
if 'width' in params:
turtle.pen(pensize=params['width'])
turtle.up()
turtle.goto(points[0])
turtle.down()
for x in points[1:]:
turtle.goto(x)
def set_color(color='black'):
turtle.color(color)
def circle(x, y, r, color=None, width=None, fill_color=None, fill=False):
turtle.up()
turtle.goto(x, y-r)
turtle.setheading(0)
if color:
turtle.pen(pencolor=color)
if fill_color:
turtle.pen(fillcolor=fill_color)
if width is not None:
turtle.pen(pensize=width)
if fill:
turtle.begin_fill()
turtle.down()
turtle.circle(r)
if fill:
turtle.end_fill()
def circle_array(x, y, r, cnt, dx=0, dy=0, color=None, width=None, fill_color=None, fill=False):
for i in range(cnt):
circle(x, y, r, color=color, width=width, fill_color=fill_color, fill=fill)
x += dx
y += dy
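# Illustrative demo (not part of the original helpers): a minimal sketch showing
# how the drawing functions above can be combined. Runs only when executed directly.
if __name__ == '__main__':
    set_color('black')
    box((-100, -100), (100, 100), color='blue', width=2)
    fill_box(-50, -50, 100, 100, color='yellow')
    circle(0, 0, 30, color='red', fill_color='red', fill=True)
    line(-100, 0, 100, 0, color='green', width=1)
    turtle.done()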
|
import mechanize
import cookielib
import urllib
import os,time,datetime,re
from dateutil.relativedelta import relativedelta
from BeautifulSoup import BeautifulSoup
svc_domain = r'http://jps-amiprod2.jps.net:9090/'
user_name = os.environ.get('SVC_USER')
user_pass = os.environ.get('SVC_PASS')
start_date = datetime.datetime(2011, 1, 1)
stop_date = datetime.datetime(2012,1,1)
incr_date = relativedelta(months=1)
# Parameters
MAX_METER_IX = 8871
METER_IX_INCR = 60
OUTFILE_NAME = 'meter_list.csv'
DUMP_SLEEP_TIME = 5.0
svc_home = svc_domain + 'serview.jsp'
svc_login = svc_domain + 'home_D.jsp'
meter_names = svc_domain + 'secure/selectDevice/selectResult_DT.jsp?deviceIdPattern=%25&execute=search+all+devices&cachedRequestId=_cachedDataSetRequestID_0&currentPosition=60'
# Browser
br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-agent', r'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko')]
# The site doesn't seem to like it when we try to log in directly; if we do, it gives
# "Invalid direct reference to form login page"
# So we request the home page first, then try to log in.
resp = br.open(svc_home)
time.sleep(10)
# Now open the login page
resp = br.open(svc_login)
# Select the first form on the login page
br.select_form(nr=0)
# User credentials
br.form['j_username'] = user_name
br.form['j_password'] = user_pass
# Login
resp = br.submit()
time.sleep(10)
current_meter_ix = 0
meter_list = []
while current_meter_ix + METER_IX_INCR < MAX_METER_IX:
    meter_names = svc_domain + 'secure/selectDevice/selectResult_DT.jsp?deviceIdPattern=%25&execute=search+all+devices&cachedRequestId=_cachedDataSetRequestID_0&currentPosition=' \
+ str(current_meter_ix)
'''
post_fields = {'deviceid': '%'
}
#Encode the parameters
post_data = urllib.urlencode(post_fields)
#Submit the form (POST request). You get the post_url and the request type(POST/GET) the same way with the parameters.
resp = br.open(search_button_url,post_data)
time.sleep(0.2)
'''
try:
resp = br.open(meter_names)
time.sleep(10)
except:
time.sleep(30)
resp = br.open(meter_names)
time.sleep(10)
meter_list = meter_list + re.findall(r'\?deviceId=(.*?)&',resp.read(),re.DOTALL)
current_meter_ix = current_meter_ix + METER_IX_INCR
with open(OUTFILE_NAME, 'wb') as outfile:
outfile.write(','.join(meter_list) + '\n')
|
import datetime
import os.path
import re
import string
import sys
from rust import RustHelperBackend
from stone import ir
from stone.backends.python_helpers import fmt_class as fmt_py_class
class Permissions(object):
@property
def permissions(self):
# For generating tests, make sure we include any internal
# fields/structs if we're using internal specs. If we're not using
# internal specs, this is a no-op, so just do it all the time. Note
# that this only needs to be done for json serialization, the struct
# definitions will include all fields, all the time.
return ['internal']
class TestBackend(RustHelperBackend):
def __init__(self, target_folder_path, args):
super(TestBackend, self).__init__(target_folder_path, args)
# Don't import other generators until here, otherwise stone.cli will
# call them with its own arguments, in addition to the TestBackend.
from stone.backends.python_types import PythonTypesBackend
self.target_path = target_folder_path
self.ref_path = os.path.join(target_folder_path, 'reference')
self.reference = PythonTypesBackend(self.ref_path, args)
self.reference_impls = {}
def generate(self, api):
print(u'Generating Python reference code')
self.reference.generate(api)
with self.output_to_relative_path('reference/__init__.py'):
self.emit(u'# this is the Stone-generated reference Python SDK')
print(u'Loading reference code:')
sys.path.insert(0, self.target_path)
from reference.stone_serializers import json_encode
for ns in api.namespaces:
print('\t' + ns)
python_ns = ns
if ns == 'async':
# hack to work around 'async' being a Python3 keyword
python_ns = 'async_'
self.reference_impls[ns] = __import__('reference.'+python_ns).__dict__[python_ns]
print(u'Generating test code')
for ns in api.namespaces.values():
ns_name = self.namespace_name(ns)
with self.output_to_relative_path(ns_name + '.rs'):
self._emit_header()
for typ in ns.data_types:
type_name = self.struct_name(typ)
# the general idea here is to instantiate each type using
# the reference Python code, put some random data in the
# fields, serialize it to JSON, emit the JSON into the Rust
# test, have Rust deserialize it, and emit assertions that
# the fields match. Then have Rust re-serialize to JSON and
                    # deserialize it again, then check the fields of the
# newly-deserialized struct. This verifies Rust's
# serializer.
is_serializable = True
test_value = None
if ir.is_struct_type(typ):
if typ.has_enumerated_subtypes():
# TODO: generate tests for all variants
# for now, just pick the first variant
variant = typ.get_enumerated_subtypes()[0]
test_value = TestPolymorphicStruct(
self, typ, self.reference_impls, variant)
else:
test_value = TestStruct(self, typ, self.reference_impls)
elif ir.is_union_type(typ):
# TODO: generate tests for all variants
# for now, just pick the first variant
# prefer choosing from this type and not the parent if we can
variants = [field for field in typ.fields if not field.catch_all]
if len(variants) == 0:
# fall back to parent type's variants
variants = [field for field in typ.all_fields if not field.catch_all]
if not variants:
# Rust code will refuse to serialize a type with no variants (or only
# the catch-all variant), so don't bother testing that
is_serializable = False
variant = typ.all_fields[0] # this assumes there's at least one
else:
variant = variants[0]
test_value = TestUnion(self, typ, self.reference_impls, variant)
else:
raise RuntimeError(u'ERROR: type {} is neither struct nor union'
.format(typ))
pyname = fmt_py_class(typ.name)
json = json_encode(
self.reference_impls[ns.name].__dict__[pyname + '_validator'],
test_value.value,
Permissions())
with self._test_fn(type_name):
self.emit(u'let json = r#"{}"#;'.format(json))
self.emit(u'let x = ::serde_json::from_str::<::dropbox_sdk::{}::{}>(json).unwrap();'
.format(ns_name,
self.struct_name(typ)))
test_value.emit_asserts(self, 'x')
if is_serializable:
# now serialize it back to JSON, deserialize it again, and test
# it again.
self.emit()
self.emit(u'let json2 = ::serde_json::to_string(&x).unwrap();')
de = u'::serde_json::from_str::<::dropbox_sdk::{}::{}>(&json2).unwrap()' \
.format(ns_name,
self.struct_name(typ))
if typ.all_fields:
self.emit(u'let x2 = {};'.format(de))
test_value.emit_asserts(self, 'x2')
else:
self.emit(u'{};'.format(de))
else:
# assert that serializing it returns an error
self.emit(u'assert!(::serde_json::to_string(&x).is_err());')
self.emit()
# for typ
# .rs test file
# for ns
with self.output_to_relative_path('mod.rs'):
self._emit_header()
for ns in api.namespaces:
self.emit(u'#[cfg(feature = "dbx_{}")]'.format(ns))
self.emit(u'mod {};'.format(self.namespace_name_raw(ns)))
self.emit()
def _emit_header(self):
self.emit(u'// DO NOT EDIT')
self.emit(u'// This file was generated by Stone')
self.emit()
self.emit(u'#![allow(bad_style)]')
self.emit()
self.emit(u'#![allow(')
self.emit(u' clippy::float_cmp,')
self.emit(u' clippy::unreadable_literal,')
self.emit(u' clippy::cyclomatic_complexity,')
self.emit(u')]')
self.emit()
def _test_fn(self, name):
self.emit(u'#[test]')
return self.emit_rust_function_def(u'test_' + name)
class TestField(object):
def __init__(self, name, python_value, test_value, stone_type, option):
self.name = name
self.value = python_value
self.test_value = test_value
self.typ = stone_type
self.option = option
def emit_assert(self, codegen, expression_path):
extra = ('.' + self.name) if self.name else ''
if self.option:
expression = '(*' + expression_path + extra + '.as_ref().unwrap())'
else:
expression = expression_path + extra
if isinstance(self.test_value, TestValue):
self.test_value.emit_asserts(codegen, expression)
elif ir.is_string_type(self.typ):
codegen.emit(u'assert_eq!({}.as_str(), r#"{}"#);'.format(
expression, self.value))
elif ir.is_numeric_type(self.typ):
codegen.emit(u'assert_eq!({}, {});'.format(
expression, self.value))
elif ir.is_boolean_type(self.typ):
codegen.emit(u'assert_eq!({}, {});'.format(
expression, 'true' if self.value else 'false'))
elif ir.is_timestamp_type(self.typ):
codegen.emit(u'assert_eq!({}.as_str(), "{}");'.format(
expression, self.value.strftime(self.typ.format)))
elif ir.is_bytes_type(self.typ):
codegen.emit(u'assert_eq!(&{}, &[{}]);'.format(
expression, ",".join(str(x) for x in self.value)))
else:
            raise RuntimeError(u'Error: assertion unhandled for type {} of field {} with value {}'
.format(self.typ, self.name, self.value))
class TestValue(object):
def __init__(self, rust_generator):
self.rust_generator = rust_generator
self.fields = []
self.value = None
def emit_asserts(self, codegen, expression_path):
raise NotImplementedError('you\'re supposed to implement TestValue.emit_asserts')
class TestStruct(TestValue):
def __init__(self, rust_generator, stone_type, reference_impls):
super(TestStruct, self).__init__(rust_generator)
if stone_type.has_enumerated_subtypes():
stone_type = stone_type.get_enumerated_subtypes()[0].data_type
self._stone_type = stone_type
self._reference_impls = reference_impls
py_name = fmt_py_class(stone_type.name)
try:
self.value = reference_impls[stone_type.namespace.name].__dict__[py_name]()
except Exception as e:
raise RuntimeError(u'Error instantiating value for {}: {}'.format(stone_type.name, e))
for field in stone_type.all_fields:
field_value = make_test_field(
field.name, field.data_type, rust_generator, reference_impls)
if field_value is None:
raise RuntimeError(u'Error: incomplete type generated: {}'.format(stone_type.name))
self.fields.append(field_value)
try:
setattr(self.value, field.name, field_value.value)
except Exception as e:
raise RuntimeError(u'Error generating value for {}.{}: {}'
.format(stone_type.name, field.name, e))
def emit_asserts(self, codegen, expression_path):
for field in self.fields:
field.emit_assert(codegen, expression_path)
class TestUnion(TestValue):
def __init__(self, rust_generator, stone_type, reference_impls, variant):
super(TestUnion, self).__init__(rust_generator)
self._stone_type = stone_type
self._reference_impls = reference_impls
self._rust_name = rust_generator.enum_name(stone_type)
self._rust_variant_name = rust_generator.enum_variant_name_raw(variant.name)
self._rust_namespace_name = rust_generator.namespace_name(stone_type.namespace)
self._variant_type = variant.data_type
self._inner_value = make_test_field(
None, self._variant_type, rust_generator, reference_impls)
if self._inner_value is None:
raise RuntimeError(u'Error generating union variant value for {}.{}'
.format(stone_type.name, variant.name))
self.value = self.get_from_inner_value(variant.name, self._inner_value)
def get_from_inner_value(self, variant_name, generated_field):
pyname = fmt_py_class(self._stone_type.name)
try:
return self._reference_impls[self._stone_type.namespace.name] \
.__dict__[pyname](variant_name, generated_field.value)
except Exception as e:
raise RuntimeError(u'Error generating value for {}.{}: {}'
.format(self._stone_type.name, variant_name, e))
def is_open(self):
return len(self._stone_type.all_fields) > 1
def emit_asserts(self, codegen, expression_path):
if expression_path[0] == '(' and expression_path[-1] == ')':
expression_path = expression_path[1:-1] # strip off superfluous parens
with codegen.block(u'match {}'.format(expression_path)):
if ir.is_void_type(self._variant_type):
codegen.emit(u'::dropbox_sdk::{}::{}::{} => (),'.format(
self._rust_namespace_name,
self._rust_name,
self._rust_variant_name))
else:
with codegen.block(u'::dropbox_sdk::{}::{}::{}(ref v) =>'.format(
self._rust_namespace_name,
self._rust_name,
self._rust_variant_name)):
self._inner_value.emit_assert(codegen, '(*v)')
if self.is_open():
codegen.emit(u'_ => panic!("wrong variant")')
class TestPolymorphicStruct(TestUnion):
def get_from_inner_value(self, variant_name, generated_field):
return generated_field.value
def is_open(self):
return len(self._stone_type.get_enumerated_subtypes()) > 1
class TestList(TestValue):
def __init__(self, rust_generator, stone_type, reference_impls):
super(TestList, self).__init__(rust_generator)
self._stone_type = stone_type
self._reference_impls = reference_impls
self._inner_value = make_test_field(None, stone_type, rust_generator, reference_impls)
if self._inner_value is None:
raise RuntimeError(u'Error generating value for list of {}'.format(stone_type.name))
self.value = self._inner_value.value
def emit_asserts(self, codegen, expression_path):
self._inner_value.emit_assert(codegen, expression_path + '[0]')
class TestMap(TestValue):
def __init__(self, rust_generator, stone_type, reference_impls):
super(TestMap, self).__init__(rust_generator)
self._stone_type = stone_type
self._reference_impls = reference_impls
self._key_value = make_test_field(None, stone_type.key_data_type, rust_generator,
reference_impls)
self._val_value = make_test_field(None, stone_type.value_data_type, rust_generator,
reference_impls)
self.value = {self._key_value.value: self._val_value.value}
def emit_asserts(self, codegen, expression_path):
key_str = u'["{}"]'.format(self._key_value.value)
self._val_value.emit_assert(codegen, expression_path + key_str)
def make_test_field(field_name, stone_type, rust_generator, reference_impls):
rust_name = rust_generator.field_name_raw(field_name) if field_name is not None else None
typ, option = ir.unwrap_nullable(stone_type)
inner = None
value = None
if ir.is_struct_type(typ):
if typ.has_enumerated_subtypes():
variant = typ.get_enumerated_subtypes()[0]
inner = TestPolymorphicStruct(rust_generator, typ, reference_impls, variant)
else:
inner = TestStruct(rust_generator, typ, reference_impls)
value = inner.value
elif ir.is_union_type(typ):
# pick the first tag
# TODO(wfraser) generate tests for them ALL!
if len(typ.fields) == 0:
# there must be a parent type; go for it
variant = typ.all_fields[0]
else:
variant = typ.fields[0]
inner = TestUnion(rust_generator, typ, reference_impls, variant)
value = inner.value
elif ir.is_list_type(typ):
inner = TestList(rust_generator, typ.data_type, reference_impls)
value = [inner.value]
elif ir.is_map_type(typ):
inner = TestMap(rust_generator, typ, reference_impls)
value = inner.value
elif ir.is_string_type(typ):
if typ.pattern:
value = Unregex(typ.pattern, typ.min_length).generate()
elif typ.min_length:
value = 'a' * typ.min_length
else:
value = 'something'
elif ir.is_numeric_type(typ):
value = typ.max_value or typ.maximum or 1e307
elif ir.is_boolean_type(typ):
value = True
elif ir.is_timestamp_type(typ):
value = datetime.datetime.utcfromtimestamp(2**33 - 1)
elif ir.is_bytes_type(typ):
value = bytes([0,1,2,3,4,5])
elif not ir.is_void_type(typ):
raise RuntimeError(u'Error: unhandled field type of {}: {}'.format(field_name, typ))
return TestField(rust_name, value, inner, typ, option)
class Unregex(object):
"""
Generate a minimal string that passes a regex and optionally is of a given
minimum length.
"""
def __init__(self, regex_string, min_len=None):
self._min_len = min_len
self._group_refs = {}
self._tokens = re.sre_parse.parse(regex_string)
def generate(self):
return self._generate(self._tokens)
def _generate(self, tokens):
result = ''
for (opcode, argument) in tokens:
opcode = str(opcode).lower()
if opcode == 'literal':
result += chr(argument)
elif opcode == 'at':
pass # start or end anchor; nothing to add
elif opcode == 'in':
if argument[0][0] == 'negate':
rejects = []
for opcode, reject in argument[1:]:
if opcode == 'literal':
rejects.append(chr(reject))
elif opcode == 'range':
                            for i in range(reject[0], reject[1] + 1):  # regex char ranges are inclusive
rejects.append(chr(i))
choices = list(set(string.printable)
.difference(string.whitespace)
.difference(rejects))
result += choices[0]
else:
result += self._generate([argument[0]])
elif opcode == 'any':
result += '*'
elif opcode == 'range':
result += chr(argument[0])
elif opcode == 'branch':
result += self._generate(argument[1][0])
elif opcode == 'subpattern':
group_number, add_flags, del_flags, sub_tokens = argument
sub_result = self._generate(sub_tokens)
self._group_refs[group_number] = sub_result
result += sub_result
elif opcode == 'groupref':
result += self._group_refs[argument]
elif opcode == 'min_repeat' or opcode == 'max_repeat':
min_repeat, max_repeat, sub_tokens = argument
if self._min_len:
n = max(min_repeat, min(self._min_len, max_repeat))
else:
n = min_repeat
sub_result = self._generate(sub_tokens) if n != 0 else ''
result += str(sub_result) * n
elif opcode == 'category':
if argument == 'category_digit':
result += '0'
elif argument == 'category_not_space':
result += '!'
else:
raise NotImplementedError('category {}'.format(argument))
elif opcode == 'assert_not':
# let's just hope for the best...
pass
elif opcode == 'assert' or opcode == 'negate':
# note: 'negate' is handled in the 'in' opcode
raise NotImplementedError('regex opcode {} not implemented'.format(opcode))
else:
raise NotImplementedError('unknown regex opcode: {}'.format(opcode))
return result
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2013-2014, NYU-Poly.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
try:
import simplejson as json
except ImportError:
import json
from .convert import DictToTable, ListToTable
class JSONTable(object):
def compute(self):
json_file = self.get_input('file').name
with open(json_file, 'rb') as fp:
obj = json.load(fp)
self.convert_to_table(obj)
class JSONObject(JSONTable, DictToTable):
"""Loads a JSON file and build a table from an object.
In JSON, an object is written with `{}`. It is essentially an associative
array. A column will contain the keys in this array.
Example::
{
"John": {"lastname": "Smith", "age": 25, "city": "New York"},
"Ashley": {"lastname": "Crofts", "age": 21, "city": "Fort Worth"},
"Michael": {"lastname": "Buck", "age": 78, "city": "Goodman"}
}
key | lastname | age | city
-------+----------+-----+-----------
John | Smith | 25 | New York
Ashley | Crofts | 21 | Fort Worth
Michal | Buck | 78 | Goodman
Rows can also be lists (but they still all have to be in the same format).
In this case, columns will not be named.
To read a list of rows, use the JSONList module instead.
"""
_input_ports = [('file', '(org.vistrails.vistrails.basic:File)')]
class JSONList(JSONTable, ListToTable):
"""Loads a JSON file and build a table from a list.
In JSON, a list is written with `[]`.
Example::
[[ 4, 14, 15, 1],
[ 9, 7, 6, 12],
[ 5, 11, 10, 8],
[16, 2, 3, 13]]
gives a 4x4 unnamed table.
"""
_input_ports = [('file', '(org.vistrails.vistrails.basic:File)')]
_modules = [JSONObject, JSONList]
###############################################################################
import unittest
from vistrails.tests.utils import execute, intercept_results
from ..identifiers import identifier
class TestJSON(unittest.TestCase):
def test_object(self):
"""Reads an object with object or list rows.
"""
json_files = [
("""
{
"John": {"lastname": "Smith", "age": 25, "city": "New York"},
"Lara": {"lastname": "Croft", "age": 21, "city": "Nashville"},
"Michael": {"lastname": "Buck", "age": 78, "city": "Goodman"}
}
""", True),
("""
{
"John": ["Smith", 25, "New York"],
"Lara": ["Croft", 21, "Nashville"],
"Michael": ["Buck", 78, "Goodman"]
}
""", False),
]
for json_file, has_names in json_files:
with intercept_results(JSONObject, 'value', 'column_count',
'column_names') as results:
self.assertFalse(execute([
('WriteFile', 'org.vistrails.vistrails.basic', [
('in_value', [('String', json_file)]),
]),
('read|JSONObject', identifier, []),
],
[
(0, 'out_value', 1, 'file'),
]))
self.assertTrue(all((len(r) == 1) for r in results[:2]))
(table,), (count,), names = results
self.assertEqual(count, 4)
import numpy
if has_names:
self.assertEqual(names, [table.names])
self.assertEqual(table.names[0], 'key')
self.assertEqual(set(table.names[1:]),
set(['lastname', 'age', 'city']))
f_city = table.names.index('city')
f_age = table.names.index('age')
else:
self.assertEqual(names, [])
self.assertIsNone(table.names)
f_city = 3
f_age = 2
self.assertEqual(set(table.get_column(f_city)),
set(["New York", "Nashville", "Goodman"]))
l = table.get_column(f_age, True)
self.assertIsInstance(l, numpy.ndarray)
self.assertEqual(set(l), set([21, 25, 78]))
def test_list(self):
"""Reads a list of object or list rows.
"""
json_files = [
"""
[
{"firstname": "John", "lastname": "Smith", "age": 25},
{"firstname": "Lara", "lastname": "Croft", "age": 21},
{"firstname": "Michael", "lastname": "Buck", "age": 78}
]
""",
"""
[[2, 7, 6],
[9, 5, 1],
[4, 3, 8]]
""",
]
for nb, json_file in enumerate(json_files):
with intercept_results(JSONList, 'value', 'column_count',
'column_names') as results:
self.assertFalse(execute([
('WriteFile', 'org.vistrails.vistrails.basic', [
('in_value', [('String', json_file)]),
]),
('read|JSONList', identifier, []),
],
[
(0, 'out_value', 1, 'file'),
]))
self.assertTrue(all((len(r) == 1) for r in results[:2]))
(table,), (count,), names = results
self.assertEqual(count, 3)
import numpy
if nb == 0:
self.assertEqual(names, [table.names])
self.assertEqual(set(table.names),
set(['firstname', 'lastname', 'age']))
self.assertEqual(set(table.get_column_by_name('firstname')),
set(["John", "Lara", "Michael"]))
l = table.get_column_by_name('age', True)
self.assertIsInstance(l, numpy.ndarray)
self.assertEqual(set(l), set([21, 25, 78]))
else:
self.assertEqual(names, [])
self.assertIsNone(table.names)
self.assertEqual([table.get_column(col) for col in xrange(3)],
[[2, 9, 4],
[7, 5, 3],
[6, 1, 8]])
|
""" This process performs a backup of all the application entities for the given
app ID to the local filesystem.
"""
import argparse
import cPickle
import errno
import logging
import multiprocessing
import os
import random
import re
import shutil
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
import appscale_datastore_batch
import dbconstants
import datastore_server
import entity_utils
from zkappscale import zktransaction as zk
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../lib/"))
import appscale_info
# The location to look at in order to verify that an app is deployed.
_SOURCE_LOCATION = '/opt/appscale/apps/'
class DatastoreBackup(multiprocessing.Process):
""" Backs up all the entities for a set application ID. """
# The amount of seconds between polling to get the backup lock.
LOCK_POLL_PERIOD = 60
# The location where the backup files will be stored.
BACKUP_FILE_LOCATION = "/opt/appscale/backups/"
# The backup filename suffix.
BACKUP_FILE_SUFFIX = ".backup"
# The number of entities retrieved in a datastore request.
BATCH_SIZE = 100
# Blob entity regular expressions.
BLOB_CHUNK_REGEX = '(.*)__BlobChunk__(.*)'
BLOB_INFO_REGEX = '(.*)__BlobInfo__(.*)'
# Retry sleep on datastore error in seconds.
DB_ERROR_PERIOD = 30
# Max backup file size in bytes.
MAX_FILE_SIZE = 100000000 # <- 100 MB
# Any kind that is of __*__ is private.
PRIVATE_KINDS = '(.*)__(.*)__(.*)'
# Any kind that is of _*_ is protected.
PROTECTED_KINDS = '(.*)_(.*)_(.*)'
def __init__(self, app_id, zoo_keeper, table_name, source_code=False,
skip_list=[]):
""" Constructor.
Args:
app_id: The application ID.
      zoo_keeper: ZooKeeper client.
table_name: The database used (e.g. cassandra).
source_code: True when a backup of the source code is requested,
False otherwise.
skip_list: A list of Kinds to be skipped during backup; empty list if
none.
"""
multiprocessing.Process.__init__(self)
self.app_id = app_id
self.zoo_keeper = zoo_keeper
self.table = table_name
self.source_code = source_code
self.skip_kinds = skip_list
self.last_key = self.app_id + '\0' + dbconstants.TERMINATING_STRING
self.backup_timestamp = time.strftime("%Y%m%d-%H%M%S")
self.backup_dir = None
self.current_fileno = 0
self.current_file_size = 0
self.entities_backed_up = 0
self.db_access = None
def stop(self):
""" Stops the backup thread. """
pass
def set_filename(self):
""" Creates a new backup filename. Also creates the backup folder if it
doesn't exist.
Returns:
True on success, False otherwise.
"""
if not self.backup_dir:
self.backup_dir = '{0}{1}-{2}/'.format(self.BACKUP_FILE_LOCATION,
self.app_id, self.backup_timestamp)
try:
os.makedirs(self.backup_dir)
logging.info("Backup dir created: {0}".format(self.backup_dir))
except OSError, os_error:
if os_error.errno == errno.EEXIST:
logging.warn("OSError: Backup directory already exists.")
logging.error(os_error.message)
elif os_error.errno == errno.ENOSPC:
logging.error("OSError: No space left to create backup directory.")
logging.error(os_error.message)
return False
elif os_error.errno == errno.EROFS:
logging.error("OSError: READ-ONLY filesystem detected.")
logging.error(os_error.message)
return False
except IOError, io_error:
logging.error("IOError while creating backup dir.")
logging.error(io_error.message)
return False
file_name = '{0}-{1}-{2}{3}'.format(self.app_id, self.backup_timestamp,
self.current_fileno, self.BACKUP_FILE_SUFFIX)
self.filename = '{0}{1}'.format(self.backup_dir, file_name)
logging.info("Backup file: {0}".format(self.filename))
return True
def backup_source_code(self):
""" Copies the source code of the app into the backup directory.
Skips this step if the file is not found.
"""
sourcefile = '{0}{1}.tar.gz'.format(_SOURCE_LOCATION, self.app_id)
if os.path.isfile(sourcefile):
try:
shutil.copy(sourcefile, self.backup_dir)
logging.info("Source code has been successfully backed up.")
except shutil.Error, error:
logging.error("Error: {0} while backing up source code. Skipping...".\
format(error))
else:
logging.error("Couldn't find the source code for this app. Skipping...")
def run(self):
""" Starts the main loop of the backup thread. """
while True:
logging.debug("Trying to get backup lock.")
if self.get_backup_lock():
logging.info("Got the backup lock.")
self.db_access = appscale_datastore_batch.DatastoreFactory.\
getDatastore(self.table)
self.set_filename()
if self.source_code:
self.backup_source_code()
self.run_backup()
try:
self.zoo_keeper.release_lock_with_path(zk.DS_BACKUP_LOCK_PATH)
except zk.ZKTransactionException, zk_exception:
logging.error("Unable to release zk lock {0}.".\
format(str(zk_exception)))
break
else:
logging.info("Did not get the backup lock. Another instance may be "
"running.")
time.sleep(random.randint(1, self.LOCK_POLL_PERIOD))
def get_backup_lock(self):
""" Tries to acquire the lock for a datastore backup.
Returns:
True on success, False otherwise.
"""
return self.zoo_keeper.get_lock_with_path(zk.DS_BACKUP_LOCK_PATH)
def get_entity_batch(self, first_key, batch_size, start_inclusive):
""" Gets a batch of entities to operate on.
Args:
first_key: The last key from a previous query.
batch_size: The number of entities to fetch.
start_inclusive: True if first row should be included, False otherwise.
Returns:
A list of entities.
"""
batch = self.db_access.range_query(dbconstants.APP_ENTITY_TABLE,
dbconstants.APP_ENTITY_SCHEMA, first_key, self.last_key,
batch_size, start_inclusive=start_inclusive)
if batch:
logging.debug("Retrieved entities from {0} to {1}".
format(batch[0].keys()[0], batch[-1].keys()[0]))
return batch
def verify_entity(self, key, txn_id):
""" Verify that the entity is not blacklisted.
Args:
key: The key to the entity table.
txn_id: An int, a transaction ID.
Returns:
True on success, False otherwise.
"""
app_prefix = entity_utils.get_prefix_from_entity_key(key)
try:
if self.zoo_keeper.is_blacklisted(app_prefix, txn_id):
logging.warn("Found a blacklisted item for version {0} on key {1}".\
format(txn_id, key))
return False
except zk.ZKTransactionException, zk_exception:
logging.error("Caught exception {0}, backing off!".format(zk_exception))
time.sleep(self.DB_ERROR_PERIOD)
except zk.ZKInternalException, zk_exception:
logging.error("Caught exception: {0}, backing off!".format(
zk_exception))
time.sleep(self.DB_ERROR_PERIOD)
return True
def dump_entity(self, entity):
""" Dumps the entity content into a backup file.
Args:
entity: The entity to be backed up.
Returns:
True on success, False otherwise.
"""
# Open file and write pickled batch.
if self.current_file_size + len(entity) > self.MAX_FILE_SIZE:
self.current_fileno += 1
self.set_filename()
self.current_file_size = 0
try:
with open(self.filename, 'ab+') as file_object:
cPickle.dump(entity, file_object, cPickle.HIGHEST_PROTOCOL)
self.entities_backed_up += 1
self.current_file_size += len(entity)
except IOError as io_error:
logging.error(
"Encountered IOError while accessing backup file {0}".
format(self.filename))
logging.error(io_error.message)
return False
except OSError as os_error:
logging.error(
"Encountered OSError while accessing backup file {0}".
format(self.filename))
logging.error(os_error.message)
return False
except Exception as exception:
logging.error(
"Encountered an unexpected error while accessing backup file {0}".
format(self.filename))
logging.error(exception.message)
return False
return True
def process_entity(self, entity):
""" Verifies entity, fetches from journal if necessary and calls
dump_entity.
Args:
entity: The entity to be backed up.
Returns:
True on success, False otherwise.
"""
key = entity.keys()[0]
kind = entity_utils.get_kind_from_entity_key(key)
# Skip protected and private entities.
if re.match(self.PROTECTED_KINDS, kind) or\
re.match(self.PRIVATE_KINDS, kind):
# Do not skip blob entities.
if not re.match(self.BLOB_CHUNK_REGEX, kind) and\
not re.match(self.BLOB_INFO_REGEX, kind):
logging.debug("Skipping key: {0}".format(key))
return False
one_entity = entity[key][dbconstants.APP_ENTITY_SCHEMA[0]]
if one_entity == datastore_server.TOMBSTONE:
return False
app_prefix = entity_utils.get_prefix_from_entity_key(key)
root_key = entity_utils.get_root_key_from_entity_key(key)
success = True
while True:
# Acquire lock.
txn_id = self.zoo_keeper.get_transaction_id(app_prefix)
try:
if self.zoo_keeper.acquire_lock(app_prefix, txn_id, root_key):
version = entity[key][dbconstants.APP_ENTITY_SCHEMA[1]]
if not self.verify_entity(key, version):
# Fetch from the journal.
entity = entity_utils.fetch_journal_entry(self.db_access, key)
if not entity:
logging.error("Bad journal entry for key: {0} and result: {1}".
format(key, entity))
success = False
else:
one_entity = entity[key][dbconstants.APP_ENTITY_SCHEMA[0]]
if self.dump_entity(one_entity):
logging.debug("Backed up key: {0}".format(key))
success = True
else:
success = False
else:
logging.warn("Entity with key: {0} not found".format(key))
success = False
except zk.ZKTransactionException, zk_exception:
logging.error("Zookeeper exception {0} while requesting entity lock".
format(zk_exception))
success = False
except zk.ZKInternalException, zk_exception:
logging.error("Zookeeper exception {0} while requesting entity lock".
format(zk_exception))
success = False
except dbconstants.AppScaleDBConnectionError, db_exception:
logging.error("Database exception {0} while requesting entity lock".
format(db_exception))
success = False
finally:
if not success:
if not self.zoo_keeper.notify_failed_transaction(app_prefix, txn_id):
logging.error("Unable to invalidate txn for {0} with txnid: {1}"\
.format(app_prefix, txn_id))
logging.error("Failed to backup entity. Retrying shortly...")
try:
self.zoo_keeper.release_lock(app_prefix, txn_id)
except zk.ZKTransactionException, zk_exception:
logging.error(
"Zookeeper exception {0} while releasing entity lock.".
format(zk_exception))
except zk.ZKInternalException, zk_exception:
logging.error(
"Zookeeper exception {0} while releasing entity lock.".
format(zk_exception))
if success:
break
else:
time.sleep(self.DB_ERROR_PERIOD)
return success
def run_backup(self):
""" Runs the backup process. Loops on the entire dataset and dumps it into
a file.
"""
logging.info("Backup started")
start = time.time()
first_key = '{0}\x00'.format(self.app_id)
start_inclusive = True
entities_remaining = []
while True:
try:
# Fetch batch.
entities = entities_remaining + self.get_entity_batch(first_key,
self.BATCH_SIZE, start_inclusive)
logging.info("Processing {0} entities".format(self.BATCH_SIZE))
if not entities:
break
# Loop through entities retrieved and if not to be skipped, process.
skip = False
for entity in entities:
first_key = entity.keys()[0]
kind = entity_utils.get_kind_from_entity_key(first_key)
logging.debug("Processing key: {0}".format(first_key))
index = 1
for skip_kind in self.skip_kinds:
if re.match(skip_kind, kind):
logging.warn("Skipping entities of kind: {0}".format(skip_kind))
skip = True
first_key = first_key[:first_key.find(skip_kind)+
len(skip_kind)+1] + dbconstants.TERMINATING_STRING
self.skip_kinds = self.skip_kinds[index:]
break
index += 1
if skip:
break
self.process_entity(entity)
if not skip:
first_key = entities[-1].keys()[0]
start_inclusive = False
except dbconstants.AppScaleDBConnectionError, connection_error:
logging.error("Error getting a batch: {0}".format(connection_error))
time.sleep(self.DB_ERROR_PERIOD)
del self.db_access
time_taken = time.time() - start
logging.info("Backed up {0} entities".format(self.entities_backed_up))
logging.info("Backup took {0} seconds".format(str(time_taken)))
def init_parser():
""" Initializes the command line argument parser.
Returns:
A parser object.
"""
parser = argparse.ArgumentParser(
description='Backup application code and data.')
parser.add_argument('-a', '--app-id', required=True,
help='the application ID to run the backup for')
parser.add_argument('--source-code', action='store_true',
default=False, help='backup the source code too. Disabled by default.')
parser.add_argument('-d', '--debug', required=False, action="store_true",
default=False, help='display debug messages')
parser.add_argument('--skip', required=False, nargs="+",
help='skip the following kinds, separated by spaces')
return parser
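# Example invocation (illustrative; the script filename and kind names are hypothetical):
#   python datastore_backup.py -a myapp --source-code --skip PrivateKind TempKind -d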
def main():
""" This main function allows you to run the backup manually. """
parser = init_parser()
args = parser.parse_args()
# Set up logging.
level = logging.INFO
if args.debug:
level = logging.DEBUG
logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \
'%(lineno)s %(message)s ', level=level)
logging.info("Logging started")
message = "Backing up "
if args.source_code:
message += "source and "
message += "data for: {0}".format(args.app_id)
logging.info(message)
zk_connection_locations = appscale_info.get_zk_locations_string()
zookeeper = zk.ZKTransaction(host=zk_connection_locations)
db_info = appscale_info.get_db_info()
table = db_info[':table']
skip_list = args.skip
if not skip_list:
skip_list = []
logging.info("Will skip the following kinds: {0}".format(sorted(skip_list)))
ds_backup = DatastoreBackup(args.app_id, zookeeper, table,
source_code=args.source_code, skip_list=sorted(skip_list))
try:
ds_backup.run()
finally:
zookeeper.close()
if __name__ == "__main__":
main()
|
from qqbot import QQBotSlot as qqbotslot, QQBot
from qqbot.qcontactdb import QContact
from tenhoubot import main as starttenhou
import threading
import re
import random
import time
is_playing = False
qq_group = None
class BotConnector(object):
def __init__(self, qqbot):
self.qbot = qqbot
self.stop_wait = False
self.first_time = True
def on_receive_message(self, message):
if 'WAIT_READY' in message:
self.qbot.SendTo(qq_group, '我排好了,你们赶紧的')
if 'WAIT_TO_END' in message:
self.qbot.SendTo(qq_group, '操你们妈啊,喊3缺1叫我,然后又没人来')
if 'FINAL_RESULT' in message:
result_list = re.search(r'FINAL_RESULT\s*(.*)', message).group(1)
self.qbot.SendTo(qq_group, '刚刚跟你们这群菜鸡打了一局,结果感人')
result = re.search(r'\[(.*)\((.*)\) (.*), (.*)\((.*)\) (.*), (.*)\((.*)\) (.*), (.*)\((.*)\) (.*)\]',result_list)
for i in [1, 4, 7, 10]:
name = result.group(i)
point = result.group(i+1)
score = result.group(i+2)
if name == 'Lattish':
name = '老子我'
formatted_result = '%s: %s (%s)' % (name, score, point)
time.sleep(0.3)
self.qbot.SendTo(qq_group, formatted_result)
if result.group(10) == 'Lattish' and float(result.group(12).replace(' ', '')) < 0:
time.sleep(1)
self.qbot.SendTo(qq_group, '你们竟然敢打飞我???烟了,全都烟了!!!')
members = [x for x in self.qbot.List(qq_group) if x.role == '成员']
self.qbot.GroupShut(qq_group, members, t=60)
class TenhouThread (threading.Thread):
def __init__(self, connector):
threading.Thread.__init__(self)
self.connector = connector
def run(self):
global is_playing
starttenhou(self.connector)
is_playing = False
botConnector.stop_wait = False
my_names = ["@ME", "Lattish", "yunini", "lattish"]
@qqbotslot
def onQQMessage(bot, contact, member, content):
global qq_group
global botConnector
global is_playing
if contact.qq == '625219436':
qq_group = contact
if '烟' in content or '🚬' in content:
if member.role == '普通成员':
if '烟呢' in content:
shut_seconds = random.randint(1, 10) * 60
bot.GroupShut(contact, [member], t=shut_seconds)
bot.SendTo(contact, '这呢,成全你这个抖 M')
else:
num = random.random()
if num < 0.2:
bot.GroupShut(contact, [member], t=60)
bot.SendTo(contact, '还真当我不懂啊,智障')
elif num < 0.22:
bot.GroupShut(contact, [member], t=3600)
bot.SendTo(contact, '今天试试这电子烟怎么样?')
else:
bot.SendTo(contact, "烟?什么意思?完全不懂啊")
else:
bot.SendTo(contact, '渣渣管理员别跟我提烟')
if '麻吗' in content or "麻?" in content or "棍吗" in content or "棍?" in content:
num = random.random()
if num < 0.5:
bot.SendTo(contact, '搞事搞事搞事')
else:
bot.SendTo(contact, '来啊,来屁胡啊')
elif '机器人' in content or 'AI' in content or 'bot' in content:
bot.SendTo(contact, '操你妈别以为我不知道你在说我')
elif 'latish' in content or 'Latish' in content:
bot.SendTo(contact, '智障能把我名字打对吗???')
elif any([x in content for x in my_names]): # being mentioned
if "在吗" in content or "zaima" in content:
if not is_playing:
num = random.random()
if num < 0.5:
bot.SendTo(contact, '摸了')
else:
bot.SendTo(contact, 'buzai cmn')
else:
bot.SendTo(contact, '我正堇业着呢,叫也没用')
elif "缺人" in content:
if not is_playing:
bot.SendTo(contact, '3缺1再叫我,谢谢,你说缺人谁他妈知道你缺几个')
else:
bot.SendTo(contact, '我正在跟别人干着呢,叫也没用')
elif "3缺1" in content or "三缺一" in content:
if not is_playing:
is_playing = True
bot.SendTo(contact, '你群打个麻将都贵阳,知道了,这就上线')
tenhou_thread = TenhouThread(botConnector)
tenhou_thread.start()
else:
bot.SendTo(contact, '我正在跟别人干着呢,叫也没用')
elif "别排" in content:
if is_playing:
bot.SendTo(contact, '你他妈遛我玩呢?下回缺人别JB找我')
botConnector.stop_wait = True
else:
bot.SendTo(contact, '你他妈是不是傻,老子本来也没排啊')
elif "地址" in content or "链接" in content:
bot.SendTo(contact, '不会自己看群公告啊,傻逼')
bot.SendTo(contact, '网页版:http://tenhou.net/3/?L2587')
bot.SendTo(contact, 'Flash 版:http://tenhou.net/0/?L2587')
elif "傻逼" in content or "真蠢" in content:
bot.SendTo(contact, '信不信我烟你')
elif "疯了" in content or "可爱" in content:
bot.SendTo(contact, '嘻嘻')
elif "闭嘴" in content or "好吵" in content:
bot.SendTo(contact, '哦,那你可以烟我啊')
elif "吃" in content:
bot.SendTo(contact, '不吃')
elif "飞了" in content:
bot.SendTo(contact, '丢人,你退群吧')
else:
num = random.random()
if num < 0.3:
bot.SendTo(contact, '操你妈要求真多')
elif num < 0.66:
bot.SendTo(contact, '人家不懂,不然先抽烟?')
else:
bot.SendTo(contact, '哎呀人家不懂了啦')
elif random.random() > 0.98:
bot.SendTo(contact, content)
if __name__ == '__main__':
bot = QQBot()
botConnector = BotConnector(bot)
bot.Login(user='Lattish')
bot.Run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import socket
import logging
import dateutil.parser
from constants import MSG_DATE_SEP, END_MSG
from moves import initialize_keyboard, press_keys
logging.basicConfig(filename='jagger_server.log', level=logging.DEBUG)
logger = logging.getLogger("jagger_server")
class JaggerServer(object):
def __init__(self, host, port=8765):
super(JaggerServer, self).__init__()
self.host = host
self.port = port
self.total_msgs = 0
self.total_delay = 0
self.avg_delay = 0
self.max_delay = 0
self.min_delay = 1000000000
self.max_msg_count = 1000
def get_msg(self, client_msg):
if MSG_DATE_SEP in client_msg:
client_time, msg = client_msg.split(MSG_DATE_SEP)
client_time = dateutil.parser.parse(client_time)
return client_time, msg
else:
pass
    def process_msg(self, client_msg):
now = datetime.datetime.utcnow()
client_time, msg = self.get_msg(client_msg)
time_delay = now - client_time
        # time delay in milliseconds
time_delay = time_delay.total_seconds() * 1000
keys = [key for key in msg]
press_keys(keys)
return time_delay
def calculate_stats_on_delay(self, time_delay):
self.min_delay = min(self.min_delay, time_delay)
self.max_delay = max(self.max_delay, time_delay)
self.total_delay += time_delay
self.total_msgs += 1
        # calculate the avg once the max msg count is reached
if self.total_msgs >= self.max_msg_count:
new_avg = self.total_delay / self.total_msgs
self.avg_delay = (self.avg_delay + new_avg) / 2
self.total_delay = 0
self.total_msgs = 0
logger.debug("Delay: current: {} min: {} max: {} avg: {}".format(
time_delay,
self.min_delay,
self.max_delay,
self.avg_delay
))
def main_loop_controller(self, udp):
while True:
client_msg_binary, client = udp.recvfrom(1024)
client_msg = client_msg_binary.decode('UTF-8')
if END_MSG in client_msg:
break
            time_delay = self.process_msg(client_msg)
self.calculate_stats_on_delay(time_delay)
def run(self):
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
orig = (self.host, self.port)
udp.bind(orig)
initialize_keyboard()
try:
self.main_loop_controller(udp)
except Exception as e:
logger.exception(e)
finally:
logger.debug("close")
udp.close()
if __name__ == '__main__':
server = JaggerServer(host='', port=8865)
server.run()
|
"""
69. Sqrt(x)
Implement int sqrt(int x).
Compute and return the square root of x, where x is guaranteed to be a non-negative integer.
Since the return type is an integer, the decimal digits are truncated and only the integer part of the result is returned.
Example 1:
Input: 4
Output: 2
Example 2:
Input: 8
Output: 2
Explanation: The square root of 8 is 2.82842..., and since the decimal part is truncated, 2 is returned.
"""
def mySqrt(x):
if x == 0:
return 0
left = 1
right = x
while True:
mid = left + (right - left) // 2
if mid > x / mid:
right = mid - 1
else:
if mid + 1 > x / (mid + 1):
return mid
else:
left = mid + 1
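# Quick checks based on the examples in the docstring above (illustrative only,
# not part of the original solution).
if __name__ == "__main__":
    assert mySqrt(0) == 0
    assert mySqrt(4) == 2   # Example 1
    assert mySqrt(8) == 2   # Example 2: sqrt(8) = 2.828..., truncated to 2
    print("all sqrt checks passed")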
|
import requests
def fetchFromAPI(query):
'''Fetches response data from API'''
URL = f"https://the-words.herokuapp.com/api/v2/definitions/en-US/entries/{query}"
HEADERS={'Accept': 'application/json'}
response = requests.get(URL, headers=HEADERS)
print(response.status_code)
#print(response.headers)
print(response.text)
def fetchAudioFromAPI(query):
'''Fetches pronunciation data from API'''
URL = f"https://the-words.herokuapp.com/api/v2/audio/en-US/entries/{query}"
HEADERS={'Accept': 'application/json'}
response = requests.get(URL, headers=HEADERS)
print(response.status_code)
#print(response.headers)
print(response.text)
fetchFromAPI("hello")
fetchAudioFromAPI("hello")
|
from __future__ import unicode_literals
import frappe, sys
import erpnext
import frappe.utils
from erpnext.demo.user import hr, sales, purchase, manufacturing, stock, accounts, projects, fixed_asset, education
from erpnext.demo.setup import education, manufacture, setup_data, healthcare
"""
Make a demo
1. Start with a fresh account
bench --site demo.erpnext.dev reinstall
2. Install Demo
bench --site demo.erpnext.dev execute erpnext.demo.demo.make
3. If Demo breaks, to continue
bench --site demo.erpnext.dev execute erpnext.demo.demo.simulate
"""
def make(domain='Manufacturing', days=100):
frappe.flags.domain = domain
frappe.flags.mute_emails = True
setup_data.setup(domain)
if domain== 'Manufacturing':
manufacture.setup_data()
elif domain== 'Education':
education.setup_data()
elif domain== 'Healthcare':
healthcare.setup_data()
site = frappe.local.site
frappe.destroy()
frappe.init(site)
frappe.connect()
simulate(domain, days)
def simulate(domain='Manufacturing', days=100):
runs_for = frappe.flags.runs_for or days
frappe.flags.company = erpnext.get_default_company()
frappe.flags.mute_emails = True
if not frappe.flags.start_date:
# start date = 100 days back
frappe.flags.start_date = frappe.utils.add_days(frappe.utils.nowdate(),
-1 * runs_for)
current_date = frappe.utils.getdate(frappe.flags.start_date)
# continue?
demo_last_date = frappe.db.get_global('demo_last_date')
if demo_last_date:
current_date = frappe.utils.add_days(frappe.utils.getdate(demo_last_date), 1)
# run till today
if not runs_for:
runs_for = frappe.utils.date_diff(frappe.utils.nowdate(), current_date)
# runs_for = 100
fixed_asset.work()
for i in range(runs_for):
sys.stdout.write("\rSimulating {0}: Day {1}".format(
current_date.strftime("%Y-%m-%d"), i))
sys.stdout.flush()
frappe.flags.current_date = current_date
if current_date.weekday() in (5, 6):
current_date = frappe.utils.add_days(current_date, 1)
continue
try:
hr.work()
purchase.work()
stock.work()
accounts.work()
projects.run_projects(current_date)
# run_messages()
if domain=='Manufacturing':
sales.work()
manufacturing.work()
elif domain=='Education':
education.work()
except:
frappe.db.set_global('demo_last_date', current_date)
raise
finally:
current_date = frappe.utils.add_days(current_date, 1)
frappe.db.commit()
|
#!/usr/bin/env python3
def show_banner(value: str, times: int):
print(value * times)
def show_strings_demo1():
first_name = "Mohd"
last_name = 'Azim'
person_description = """
My Name is
Mohd Azim
"""
show_banner('=', 50)
print("Strings Demo 1")
show_banner('=', 50)
print(first_name)
print(last_name)
print(person_description)
show_banner('-', 50)
def show_strings_demo2():
first_name = "Mohd"
last_name = 'Azim'
person_description = """
My Name is
Mohd Azim
"""
show_banner('=', 50)
print("Strings Demo 2")
show_banner('=', 50)
print(first_name, last_name, person_description, sep='\n')
show_banner('-', 50)
def show_strings_demo3():
first = "First's"
last = 'Last"s'
show_banner('=', 50)
print("Strings Demo 3")
show_banner('=', 50)
print(first, last)
print(first + " " + last)
print(f"Contains both Single and Double Quotes {first} {last}")
show_banner('-', 50)
def main():
show_strings_demo1()
show_strings_demo2()
show_strings_demo3()
# Program Execution Starts here
main()
|
import TestHelperSuperClass
import unittest
import json
import pytz
import datetime
import copy
serverInfo = {
'Server': {
'Version': TestHelperSuperClass.env['APIAPP_VERSION'],
"APIAPP_APIDOCSURL": TestHelperSuperClass.env['APIAPP_APIDOCSURL'],
"APIAPP_FRONTENDURL": TestHelperSuperClass.env['APIAPP_FRONTENDURL']
},
'Derived': None
}
#@TestHelperSuperClass.wipd
class test_api(TestHelperSuperClass.testHelperAPIClient):
def test_getServerInfo(self):
result = self.testClient.get('/api/info/serverinfo')
self.assertEqual(result.status_code, 200, msg="Wrong response when calling /api/info/serverinfo")
resultJSON = json.loads(result.get_data(as_text=True))
self.assertJSONStringsEqual(resultJSON, serverInfo)
def test_getServerInfoSpecialData(self):
expected = copy.deepcopy(serverInfo)
expected["Derived"] = {
"test": "123"
}
TestHelperSuperClass.serverInfoExtra["ret"] = { "test": "123" }
result = self.testClient.get('/api/info/serverinfo')
self.assertEqual(result.status_code, 200, msg="Wrong response when calling /api/info/serverinfo")
resultJSON = json.loads(result.get_data(as_text=True))
self.assertJSONStringsEqual(resultJSON, expected)
|
import numpy as np
import cv2 as cv
import argparse
from deep_text_detect import text_detect, text_spotter
from read_license import read
parser = argparse.ArgumentParser()
parser.add_argument("image", help="Path to car image")
args = parser.parse_args()
image_path = args.image
image = cv.imread(image_path)
original = image.copy()
cur, coords = text_detect(image, text_spotter) ##SSD text spotter, works for wide shots
max_plate = None
max_dims = None
if cur is not None:
cur = cv.resize(cur, (600, 200), None, interpolation = cv.INTER_LINEAR)
plate, pic = read(cur)
if len(plate) > 4:
left, top, width, height = coords
cv.rectangle(image, (left, top), (left + width, top + height), (0, 128, 0), 2)
cv.imshow("plate", image)
cv.waitKey(0)
print(plate)
quit()
else:
max_plate = plate
max_dims = coords
image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
kernel = cv.getStructuringElement(cv.MORPH_RECT, (20, 4)) # larger than license plate
opening = cv.morphologyEx(image, cv.MORPH_OPEN, kernel) #remove salt and increase dark spots
image_sub = cv.subtract(image, opening) #with open - same as Tophat
retval, thresh1 = cv.threshold(image_sub, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
output = cv.connectedComponentsWithStats(thresh1, 4, cv.CV_32S)
stats = output[2]
stats = sorted(stats, key = lambda x : x[4], reverse = True)[1:16] #largest components
stats = sorted(stats, key = lambda x : x[1], reverse = True) # sort from bottom to the top
for i in range(len(stats)):
left, top, width, height, area = stats[i]
if area > 50 and width / height > 1.5 and width/height < 6:
box = original[top : top + height, left :left + width]
box = cv.resize(box, (600, 200), None, interpolation = cv.INTER_LINEAR)
cv.imshow("plate", box)
cv.waitKey(0)
plate, pic = read(box)
if len(plate) == 7:
cv.rectangle(original, (left, top), (left + width, top + height), (0, 128, 0), 2)
cv.imshow("plate", original)
cv.waitKey(0)
print(plate)
quit()
elif len(plate) > 0 and (max_plate is None or len(plate) > len(max_plate)):
max_plate = plate
max_dims = (left, top, width, height)
if max_plate:
left, top, width, height = max_dims
cv.rectangle(original, (left, top), (left + width, top + height), (0, 128, 0), 2)
cv.imshow("plate", original)
cv.waitKey(0)
print(max_plate)
else:
print("Nothing found")
|
import torch.nn as nn
from typing import Dict
from yolact_edge.yolact import Yolact
class YolactWrapper(nn.Module):
def __init__(self, yolact_module: Yolact, extras: Dict):
super().__init__()
self.yolact_module = yolact_module
self.extras = extras
def forward(self, x):
out_dict = self.yolact_module(x, extras=self.extras)['pred_outs'][0]
# return order class, score, box, mask, proto
return (out_dict['class'], out_dict['score'], out_dict['box'], out_dict['mask'], out_dict['proto'])
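# Illustrative usage (hypothetical names; the required extras keys depend on the
# yolact_edge configuration in use):
#   wrapper = YolactWrapper(yolact_model, extras={...})
#   classes, scores, boxes, masks, proto = wrapper(images)   # images: (N, 3, H, W) tensor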
|
from violas_client.canoser.int_type import Uint8
from violas_client.canoser.tuple_t import TupleT
from violas_client.canoser.map_t import MapT
from violas_client.canoser.str_t import StrT
from violas_client.canoser.bytes_t import BytesT, ByteArrayT
from violas_client.canoser.bool_t import BoolT
from violas_client.canoser.array_t import ArrayT
def my_import(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def type_mapping(field_type):
"""
Mapping python types to canoser types
"""
if field_type == str:
return StrT
elif field_type == bytes:
return BytesT()
elif field_type == bytearray:
return ByteArrayT()
elif field_type == bool:
return BoolT
elif type(field_type) == list:
if len(field_type) == 0:
return ArrayT(Uint8)
elif len(field_type) == 1:
item = field_type[0]
return ArrayT(type_mapping(item))
elif len(field_type) == 2:
item = field_type[0]
size = field_type[1]
return ArrayT(type_mapping(item), size)
elif len(field_type) == 3:
item = field_type[0]
size = field_type[1]
encode_len = field_type[2]
return ArrayT(type_mapping(item), size, encode_len)
else:
raise TypeError("Array has one item type, no more.")
        raise AssertionError("unreachable")
elif type(field_type) == dict:
if len(field_type) == 0:
ktype = BytesT()
vtype = [Uint8]
elif len(field_type) == 1:
ktype = next(iter(field_type.keys()))
vtype = next(iter(field_type.values()))
else:
raise TypeError("Map type has one item mapping key type to value type.")
return MapT(type_mapping(ktype), type_mapping(vtype))
elif type(field_type) == tuple:
arr = []
for item in field_type:
arr.append(type_mapping(item))
return TupleT(*arr)
elif type(field_type) == str:
return my_import(field_type)
else:
return field_type
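# Illustrative checks (not part of the original module) showing a few mappings
# the function above produces; assumes violas_client is importable.
if __name__ == "__main__":
    assert type_mapping(str) is StrT
    assert isinstance(type_mapping(bytes), BytesT)
    assert isinstance(type_mapping([Uint8]), ArrayT)      # variable-length array of Uint8
    assert isinstance(type_mapping((bool, str)), TupleT)  # tuple -> TupleT(BoolT, StrT)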
|
# V1.43 messages
# PID advanced
# does not include feedforward data or vbat sag comp or thrust linearization
# pid_advanced = b"$M>2^\x00\x00\x00\x00x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x007\x00\xfa\x00\xd8\x0e\x00\x00\x00\x00\x01\x01\x00\n\x14H\x00H\x00H\x00\x00\x15\x1a\x00(\x14\x00\xc8\x0fd\x04\x00\xb1"
pid_advanced = b'$M>2^\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x007\x00\xfa\x00\xd8\x0e\x00\x00\x00\x00\x01\x01\x00\n\x14H\x00H\x00H\x00\x00\x15\x1a\x00(\x14\x00\xc8\x0fd\x04\x00\xb1'
# PID coefficient
pid = b"$M>\x0fp\x16D\x1f\x1aD\x1f\x1dL\x0457K(\x00\x00G"
fc_version = b"$M>\x03\x03\x01\x0c\x15\x18"
fc_version_2 = b"$M>\x03\x01\x00\x01\x15\x16"
api_version = b"$M>\x03\x01\x00\x01\x15\x16"
status_response = b"$M>\x16e}\x00\x00\x00!\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x1a\x04\x01\x01\x00\x004"
status_ex_response = (
b"$M>\x16\x96}\x00\x00\x00!\x00\x00\x00\x00\x00\x00\x05\x00\x03\x00\x00\x1a\x04\x01\x01\x00\x00\xc4"
)
sensor_alignment = b"$M>\x07~\x01\x01\x00\x01\x00\x01\x01x"
# status_ex_response = (
# b'$M>\x16\x96}\x00\x00\x00!\x00\x00\x00\x00\x00\x00\x05\x00\x03\x04\x00\x1a\x04\x00\x00\x00\x00\xc0'
# )
rc_tuning = b"$M>\x17od\x00FFFA2\x00F\x05\x00dd\x00\x00d\xce\x07\xce\x07\xce\x07\x00\xc7"
rx_tuning2 = (
b"$M>\x02l\x07\xdc\x05\x1a\x04\x00u\x03C\x08\x02\x13\xe2\x04\x00\x00\x00\x00\x00\x00(\x02\x01\x00\x00\x01\x03\x00"
)
board_info = b"$M>J\x04S405\x00\x00\x027\tSTM32F405\nCLRACINGF4\x04CLRA\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x02@\x1f\x00\x00\x00\x00\r"
# MSP.ATTITUDE
attitude_response = b"$M>\x06l`\x02\xaa\xff\x0e\x00S"
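# Illustrative decode of the frame above, assuming the standard MSP v1 layout
# ('$M>' + size + cmd + payload + XOR checksum over size, cmd and payload bytes):
#   size=0x06, cmd=0x6c (108 = MSP_ATTITUDE), payload = 60 02 aa ff 0e 00
#   roll  = 0x0260 = 608  -> 60.8 deg (units of 0.1 deg)
#   pitch = 0xffaa = -86  -> -8.6 deg
#   yaw   = 0x000e = 14 deg
#   checksum = 0x53 ('S')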
# MSP.BOXNAMES
box_names_response = (
b"$M>\xfft\x0b\x01ARM;ANGLE;HORIZON;HEADFREE;FAILSAFE;HEADADJ;BEEPER;"
b"OSD DISABLE SW;BLACKBOX;FPV ANGLE MIX;BLACKBOX ERASE (>30s);CAMERA CONTROL 1;"
b"CAMERA CONTROL 2;CAMERA CONTROL 3;FLIP OVER AFTER CRASH;PREARM;VTX PIT MODE;"
b"PARALYZE;USER1;ACRO TRAINER;DISABLE VTX CONTROL;LA"
)
# MSP.BOXIDS
box_ids_response = b'$M>\x16w\x00\x01\x02\x06\x1b\x07\r\x13\x1a\x1e\x1f !"#$\'-(/01U'
# MSP.FEATURE_CONFIG
feature_config_response = b'$M>\x04$\x00 D0t'
|
import os
import re
class Interpreter:
def __init__(self, file_path):
self._data_path = file_path
self._file_list = []
for root, dirs, files in os.walk(self._data_path):
            # all the files under this directory
            all_files = files
            # pick out the .txt files
for i in range(0, len(all_files)):
if '.txt' in all_files[i]:
self._file_list.append(all_files[i])
print(self._file_list)
        # number of files
        self._file_num = len(self._file_list)
        # method name
        self._method_name = ''
        # stores the paths that will be returned
        self._ret_vec = []
        # stores the hash codes
        self._hash_code = []
        # dict storing the tree index
        self._hash_tree = {}
        # file index used for iteration
        self._file_index = 0
def file_iterator(self):
        # clear data from any previous file
        self._ret_vec = []
        self._hash_code = []
        self._hash_tree = {}
        # process the current file
self.data_handler()
self._file_index += 1
def data_handler(self):
is_tree = False
filename = os.path.join(self._data_path, self._file_list[self._file_index])
f = open(filename)
self._method_name = f.readline()
line = f.readline()
while line:
if len(line.strip()) == 0:
is_tree = True
line = f.readline()
continue
if is_tree:
self.tree_interpreter(line)
else:
self.path_interpreter(line)
line = f.readline()
        # at this point the parsed data is stored in the instance attributes
f.close()
def path_interpreter(self, path):
path = path.strip()
        # extract the hash codes embedded in the path string
hash_pattern = re.compile(r'(?<=[,\|])\d+(?=[\|\(])')
it = hash_pattern.finditer(path)
tmp_hash_unit = []
        for match_unit in it:
            tmp_hash_unit.append(match_unit.group())
self._hash_code.append(tmp_hash_unit)
repl_pattern = re.compile(r'(?<=,)[\d\|]+(?=\()')
res_path = repl_pattern.sub('', path)
self._ret_vec.append(res_path)
def tree_interpreter(self, tree_seg):
tree_seg = tree_seg.strip()
split_list = tree_seg.split(', ')
parent_node = split_list[0]
child_node = split_list[1]
if len(child_node.split(':')[1]):
self._hash_tree[parent_node.split(': ')[1]] = child_node.split(': ')[1].strip('|').split('|')
else:
self._hash_tree[parent_node.split(': ')[1]] = []
@property
def ret_vec(self):
return self._ret_vec
@property
def hash_code(self):
return self._hash_code
@property
def hash_tree(self):
return self._hash_tree
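# A minimal usage sketch (assumption: "./data" is a hypothetical directory holding
# .txt files in the path/tree format parsed above; _file_num is accessed directly
# because the class exposes no public counter).
if __name__ == '__main__':
    interp = Interpreter('./data')
    for _ in range(interp._file_num):
        interp.file_iterator()
        print(interp.ret_vec, interp.hash_code, interp.hash_tree)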
|
import pickle
import bz2
import io
import torch
from torch import nn
from torch.utils import data
from torch.nn import functional as F
# model
class Attention(nn.Module):
def __init__(self, feature_dim, max_seq_len=70):
super().__init__()
self.attention_fc = nn.Linear(feature_dim, 1)
self.bias = nn.Parameter(torch.zeros(1, max_seq_len, 1, requires_grad=True))
def forward(self, rnn_output):
"""
        Compute attention weights over the sequence and return the attended vectors
:param rnn_output: (#batch,#seq_len,#feature)
:return: attended_outputs (#batch,#feature)
"""
attention_weights = self.attention_fc(rnn_output)
seq_len = rnn_output.size(1)
attention_weights = self.bias[:, :seq_len, :] + attention_weights
attention_weights = torch.tanh(attention_weights)
attention_weights = torch.exp(attention_weights)
attention_weights_sum = torch.sum(attention_weights, dim=1, keepdim=True) + 1e-7
attention_weights = attention_weights / attention_weights_sum
attended = torch.sum(attention_weights * rnn_output, dim=1)
return attended
class GaussianNoise(nn.Module):
def __init__(self, stddev):
super(GaussianNoise, self).__init__()
self.stddev = stddev
def forward(self, x):
if self.training:
noise = torch.empty_like(x)
noise.normal_(0, self.stddev)
return x + noise
else:
return x
class SpatialDropout(nn.Dropout2d):
def forward(self, x):
x = x.unsqueeze(2) # (N, T, 1, K)
x = x.permute(0, 3, 2, 1) # (N, K, 1, T)
x = super(SpatialDropout, self).forward(x) # (N, K, 1, T), some features are masked
x = x.permute(0, 3, 2, 1) # (N, T, 1, K)
x = x.squeeze(2) # (N, T, K)
return x
class NeuralNet(nn.Module):
def __init__(self, model_factor, num_units1, num_units2, embedding_matrix, max_features, num_aux_targets, max_seq_len, num_feats):
super(NeuralNet, self).__init__()
self.model_factor = model_factor if model_factor is not None else [0,3,1,0,1,1]
self.num_feats = num_feats
embed_size = embedding_matrix.shape[1]
self.embedding = nn.Embedding(max_features, embed_size)
self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
self.embedding.weight.requires_grad = False
self.embedding_dropout = SpatialDropout(self.model_factor[4]/10.0)
if self.model_factor[2] == 0:
self.layer1 = nn.LSTM(embed_size, num_units1, bidirectional=True, batch_first=True)
self.layer2 = nn.LSTM(num_units1 * 2, num_units2, bidirectional=True, batch_first=True)
elif self.model_factor[2] == 1:
self.layer1 = nn.LSTM(embed_size, num_units1, bidirectional=True, batch_first=True)
self.layer2 = nn.GRU(num_units1 * 2, num_units2, bidirectional=True, batch_first=True)
elif self.model_factor[2] == 2:
self.layer1 = nn.LSTM(embed_size, num_units1, bidirectional=True, batch_first=True)
self.layer2 = nn.Conv1d(num_units1 * 2, num_units2 * 2, kernel_size=3, stride=1, padding=2)
elif self.model_factor[2] == 3:
self.layer1 = nn.Conv1d(embed_size, num_units1 * 2, kernel_size=3, stride=1, padding=2)
self.layer2 = nn.LSTM(num_units1 * 2, num_units2, bidirectional=True, batch_first=True)
if self.model_factor[1] == 0:
num_dense_units = num_units2 * 4
elif self.model_factor[1] == 1:
self.attention1 = Attention(num_units1*2, max_seq_len)
num_dense_units = num_units1 * 2 + num_units2 * 4
elif self.model_factor[1] == 2:
self.attention2 = Attention(num_units2*2, max_seq_len)
num_dense_units = num_units2 * 6
elif self.model_factor[1] == 3:
self.attention1 = Attention(num_units1*2, max_seq_len)
self.attention2 = Attention(num_units2*2, max_seq_len)
num_dense_units = num_units1 * 2 + num_units2 * 6
if self.model_factor[0] == 0:
self.dropout = nn.Dropout(self.model_factor[5]/10.0)
elif self.model_factor[0] == 1:
self.noise = GaussianNoise(self.model_factor[5]/10.0)
elif self.model_factor[0] == 2:
self.dropout = nn.Dropout(self.model_factor[5]/10.0)
self.bn = nn.BatchNorm1d(num_dense_units, momentum=0.5)
elif self.model_factor[0] == 3:
self.noise = GaussianNoise(self.model_factor[5]/10.0)
self.bn = nn.BatchNorm1d(num_dense_units, momentum=0.5)
num_dense_units = num_dense_units + self.num_feats
if self.model_factor[3] == 1:
self.hidden1 = nn.Linear(num_dense_units, num_dense_units)
elif self.model_factor[3] == 2:
self.hidden1 = nn.Linear(num_dense_units, num_dense_units)
self.hidden2 = nn.Linear(num_dense_units, num_dense_units)
elif self.model_factor[3] == 3:
self.hidden1 = nn.Linear(num_dense_units, num_dense_units)
self.hidden2 = nn.Linear(num_dense_units, num_dense_units)
self.hidden3 = nn.Linear(num_dense_units, num_dense_units)
self.linear_out = nn.Linear(num_dense_units, 1)
self.linear_aux_out = nn.Linear(num_dense_units, num_aux_targets)
def forward(self, x):
if self.num_feats > 0:
sent = x[:,self.num_feats:]
feat = x[:,:self.num_feats].to(torch.float)
else:
sent = x
h_embedding = self.embedding(sent)
h_embedding = self.embedding_dropout(h_embedding)
if self.model_factor[2] == 0 or self.model_factor[2] == 1:
h_layer_1, _ = self.layer1(h_embedding)
h_layer_2, _ = self.layer2(h_layer_1)
elif self.model_factor[2] == 2:
h_layer_1, _ = self.layer1(h_embedding)
h_layer_2 = F.relu(torch.transpose(self.layer2(torch.transpose(h_layer_1,1,2)),2,1))
elif self.model_factor[2] == 3:
h_layer_1 = F.relu(torch.transpose(self.layer1(torch.transpose(h_embedding,1,2)),2,1))
h_layer_2, _ = self.layer2(h_layer_1)
avg_pool = torch.mean(h_layer_2, 1)
max_pool, _ = torch.max(h_layer_2, 1)
if self.model_factor[1] == 0:
h_conc = torch.cat((avg_pool, max_pool), 1)
elif self.model_factor[1] == 1:
h_atten_1 = self.attention1(h_layer_1)
h_conc = torch.cat((h_atten_1, avg_pool, max_pool), 1)
elif self.model_factor[1] == 2:
h_atten_2 = self.attention2(h_layer_2)
h_conc = torch.cat((h_atten_2, avg_pool, max_pool), 1)
elif self.model_factor[1] == 3:
h_atten_1 = self.attention1(h_layer_1)
h_atten_2 = self.attention2(h_layer_2)
h_conc = torch.cat((h_atten_1, h_atten_2, avg_pool, max_pool), 1)
if self.model_factor[0] == 0:
h_conc = self.dropout(h_conc)
elif self.model_factor[0] == 1:
h_conc = self.noise(h_conc)
elif self.model_factor[0] == 2:
h_conc = self.dropout(h_conc)
h_conc = self.bn(h_conc)
elif self.model_factor[0] == 3:
h_conc = self.noise(h_conc)
h_conc = self.bn(h_conc)
if self.num_feats > 0:
h_conc = torch.cat((h_conc, feat), 1)
if self.model_factor[3] == 0:
hidden = h_conc
elif self.model_factor[3] == 1:
h_conc_linear1 = F.relu(self.hidden1(h_conc))
hidden = h_conc + h_conc_linear1
elif self.model_factor[3] == 2:
h_conc_linear1 = F.relu(self.hidden1(h_conc))
h_conc_linear2 = F.relu(self.hidden2(h_conc))
hidden = h_conc + h_conc_linear1 + h_conc_linear2
elif self.model_factor[3] == 3:
h_conc_linear1 = F.relu(self.hidden1(h_conc))
h_conc_linear2 = F.relu(self.hidden2(h_conc))
h_conc_linear3 = F.relu(self.hidden3(h_conc))
hidden = h_conc + h_conc_linear1 + h_conc_linear2 + h_conc_linear3
result = self.linear_out(hidden)
aux_result = self.linear_aux_out(hidden)
out = torch.cat([result, aux_result], 1)
return out
def save_model(self, filename):
model = self
params = [
model.layer1.state_dict(),
model.layer2.state_dict(),
model.linear_out.state_dict(),
model.linear_aux_out.state_dict() ]
if model.model_factor[1] == 1:
params.append(model.attention1.state_dict())
elif model.model_factor[1] == 2:
params.append(model.attention2.state_dict())
        elif model.model_factor[1] == 3:
params.append(model.attention1.state_dict())
params.append(model.attention2.state_dict())
if model.model_factor[0] >= 2:
params.append(model.bn.state_dict())
if model.model_factor[3] == 1:
params.append(model.hidden1.state_dict())
elif model.model_factor[3] == 2:
params.append(model.hidden1.state_dict())
params.append(model.hidden2.state_dict())
elif model.model_factor[3] == 3:
params.append(model.hidden1.state_dict())
params.append(model.hidden2.state_dict())
params.append(model.hidden3.state_dict())
with bz2.open(filename, 'wb') as fout:
buffer = io.BytesIO()
torch.save(params, buffer)
fout.write(buffer.getbuffer())
def load_model(self, filename, device=torch.device('cuda')):
with bz2.open(filename, 'rb') as fin:
buffer = io.BytesIO(fin.read())
params = torch.load(buffer, map_location=device)
self.layer1.load_state_dict(params.pop(0))
self.layer1.to(device)
self.layer2.load_state_dict(params.pop(0))
self.layer2.to(device)
self.linear_out.load_state_dict(params.pop(0))
self.linear_out.to(device)
self.linear_aux_out.load_state_dict(params.pop(0))
self.linear_aux_out.to(device)
if self.model_factor[1] == 1:
self.attention1.load_state_dict(params.pop(0))
self.attention1.to(device)
elif self.model_factor[1] == 2:
self.attention2.load_state_dict(params.pop(0))
self.attention2.to(device)
        elif self.model_factor[1] == 3:
self.attention1.load_state_dict(params.pop(0))
self.attention1.to(device)
self.attention2.load_state_dict(params.pop(0))
self.attention2.to(device)
if self.model_factor[0] >= 2:
self.bn.load_state_dict(params.pop(0))
self.bn.to(device)
if self.model_factor[3] == 1:
self.hidden1.load_state_dict(params.pop(0))
self.hidden1.to(device)
elif self.model_factor[3] == 2:
self.hidden1.load_state_dict(params.pop(0))
self.hidden1.to(device)
self.hidden2.load_state_dict(params.pop(0))
self.hidden2.to(device)
elif self.model_factor[3] == 3:
self.hidden1.load_state_dict(params.pop(0))
self.hidden1.to(device)
self.hidden2.load_state_dict(params.pop(0))
self.hidden2.to(device)
self.hidden3.load_state_dict(params.pop(0))
self.hidden3.to(device)
def get_model(model_factor, num_units1, num_units2, embedding_matrix, max_features, num_aux_targets, max_seq_len, num_feats):
return NeuralNet(model_factor, num_units1, num_units2, embedding_matrix, max_features, num_aux_targets, max_seq_len, num_feats)
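# A minimal smoke-test sketch. The embedding matrix, vocabulary size, sequence length and
# unit counts below are made-up placeholders (not values from any training pipeline); with
# model_factor=None the default factors defined in NeuralNet.__init__ are used.
if __name__ == '__main__':
    max_features, embed_size, max_seq_len = 100, 8, 12
    dummy_embedding = torch.randn(max_features, embed_size).numpy()
    model = get_model(model_factor=None, num_units1=16, num_units2=16,
                      embedding_matrix=dummy_embedding, max_features=max_features,
                      num_aux_targets=3, max_seq_len=max_seq_len, num_feats=0)
    tokens = torch.randint(0, max_features, (4, max_seq_len))
    out = model(tokens)
    print(out.shape)  # expected: torch.Size([4, 4]) -> 1 main target + 3 aux targets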
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible.cli.arguments import option_helpers as opt_help
from ansible.utils import context_objects as co
@pytest.fixture
def parser():
parser = opt_help.create_base_parser('testparser')
opt_help.add_runas_options(parser)
opt_help.add_meta_options(parser)
opt_help.add_runtask_options(parser)
opt_help.add_vault_options(parser)
opt_help.add_async_options(parser)
opt_help.add_connect_options(parser)
opt_help.add_subset_options(parser)
opt_help.add_check_options(parser)
opt_help.add_inventory_options(parser)
return parser
@pytest.fixture
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
yield
co.GlobalCLIArgs._Singleton__instance = None
|
# Generated by Django 2.1.4 on 2018-12-16 18:11
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Extractor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file_path', models.FilePathField(match='.*\\.htm(l)?', path='/code/fileBucket', unique=True)),
('pagination', models.IntegerField(default=1, verbose_name='Paginations')),
('is_parsed', models.BooleanField(default=False, verbose_name='Is Parsed')),
('created_at', models.DateTimeField(verbose_name='created at')),
('modified_at', models.DateTimeField(verbose_name='modified at')),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=255, unique=True)),
('post_age', models.DurationField(default=datetime.timedelta, verbose_name='post age')),
('created_at', models.DateTimeField(verbose_name='created at')),
('modified_at', models.DateTimeField(verbose_name='modified at')),
('hacker_news_url', models.URLField(verbose_name='hacker news url')),
('upvotes', models.IntegerField(verbose_name='number of upvotes')),
('comments', models.IntegerField(verbose_name='number of comments')),
('is_read', models.BooleanField(default=False, verbose_name='is read')),
('is_deleted', models.BooleanField(default=False, verbose_name='is deleted')),
],
),
]
|
from settings import *
import pygame
import math
class Player:
def __init__(self):
self.x, self.y = player_pos
self.angle = player_angle
self.sensitivity = 0.004
@property
def pos(self):
return (self.x, self.y)
def movement(self):
self.keys_control()
self.mouse_control()
self.angle %= DOUBLE_PI
def keys_control(self):
sin_a = math.sin(self.angle)
cos_a = math.cos(self.angle)
keys = pygame.key.get_pressed()
if keys[pygame.K_ESCAPE]:
exit()
if keys[pygame.K_w]:
self.x += player_speed * cos_a
self.y += player_speed * sin_a
if keys[pygame.K_s]:
self.x += -player_speed * cos_a
self.y += -player_speed * sin_a
if keys[pygame.K_a]:
self.x += player_speed * sin_a
self.y += -player_speed * cos_a
if keys[pygame.K_d]:
self.x += -player_speed * sin_a
self.y += player_speed * cos_a
if keys[pygame.K_LEFT]:
self.angle -= 0.02
if keys[pygame.K_RIGHT]:
self.angle += 0.02
def mouse_control(self):
if pygame.mouse.get_focused():
difference = pygame.mouse.get_pos()[0] - HALF_WIDTH
pygame.mouse.set_pos((HALF_WIDTH, HALF_HEIGHT))
self.angle += difference * self.sensitivity
|
import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app,db
#app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://genrichez:Kandra5moneR@localhost/Recommendations'
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
from machine import I2C, Pin,RTC,WDT,reset
from ssd1306 import SSD1306_I2C
from font import Font
import time
i2c = I2C(scl=Pin(0), sda=Pin(2))
display= SSD1306_I2C(128, 32, i2c)
f=Font(display)
f.text("sdada",0,0,24)
f.show()
|
try:
from ._version import version as __version__ # type: ignore[import]
except ImportError:
__version__ = "UNKNOWN"
from .core import *
from . import loss, enc, demo, data, image, pyramid, optim
import os
os.makedirs(home(), exist_ok=True)
|
import numpy as np
from ...sgmcmc_sampler import SGMCMCSampler, SeqSGMCMCSampler
from .parameters import SVMPrior, SVMParameters
from .helper import SVMHelper
class SVMSampler(SGMCMCSampler):
def __init__(self, n, m, observations=None, prior=None, parameters=None,
forward_message=None, name="SVMSampler", **kwargs):
self.options = kwargs
self.n = n
self.m = m
self.name = name
self.setup(
observations=observations,
prior=prior,
parameters=parameters,
forward_message=forward_message,
)
return
def setup(self, observations=None, prior=None,
parameters=None, forward_message=None):
""" Initialize the sampler
Args:
observations (ndarray): T by m ndarray of time series values
prior (SVMPrior): prior
forward_message (ndarray): prior probability for latent state
parameters (SVMParameters): initial parameters
(optional, will sample from prior by default)
"""
self.observations = observations
if prior is None:
prior = SVMPrior.generate_default_prior(n=self.n, m=self.m)
self.prior = prior
if parameters is None:
self.parameters = self.prior.sample_prior().project_parameters()
else:
if not isinstance(parameters, SVMParameters):
raise ValueError("parameters is not a SVMParameter")
self.parameters = parameters
if forward_message is None:
forward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.eye(self.n)/10,
}
self.forward_message = forward_message
self.backward_message = {
'log_constant': 0.0,
'mean_precision': np.zeros(self.n),
'precision': np.zeros((self.n, self.n)),
}
self.message_helper=SVMHelper(
n=self.n,
m=self.m,
forward_message=forward_message,
backward_message=self.backward_message,
)
return
def sample_x(self, parameters=None, observations=None, tqdm=None,
num_samples=None, **kwargs):
""" Sample X """
raise NotImplementedError()
def sample_gibbs(self, tqdm=None):
""" One Step of Blocked Gibbs Sampler
Returns:
parameters (SVMParameters): sampled parameters after one step
"""
raise NotImplementedError()
class SeqSVMSampler(SeqSGMCMCSampler, SVMSampler):
pass
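# A hedged construction sketch (the observation array is synthetic; the default prior and
# the sampled initial parameters come from whatever SVMPrior.generate_default_prior provides):
#
#   observations = np.random.randn(100, 1)
#   sampler = SVMSampler(n=1, m=1, observations=observations)
#   print(sampler.parameters)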
|
"""
API facade that allows interaction with the library with strings and vanilla Python objects.
Copyright 2021 InferStat Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created by: Thomas Oliver
Created date: 18th March 2021
"""
# Python standard library
from typing import List, Union
from copy import deepcopy
import pandas as pd
from infertrade.algos import algorithm_functions
from infertrade.utilities.operations import ReturnsFromPositions
from infertrade.PandasEnum import PandasEnum
class Api:
"""All public methods should input/output json-serialisable dictionaries."""
@staticmethod
def get_allocation_information() -> dict:
"""Provides information on algorithms that calculate positions."""
combined_data = {}
for ii_package in algorithm_functions:
combined_data.update(algorithm_functions[ii_package][PandasEnum.ALLOCATION.value])
return combined_data
@staticmethod
def get_signal_information() -> dict:
"""Provides information on algorithms that calculate signals."""
combined_data = {}
for ii_package in algorithm_functions:
combined_data.update(algorithm_functions[ii_package][PandasEnum.SIGNAL.value])
return combined_data
@staticmethod
def get_algorithm_information() -> dict:
"""Provides information on algorithms (signals and positions) as flat list (not nested by category)."""
combined_allocation_data = Api.get_allocation_information()
combined_signal_data = Api.get_signal_information()
combined_data = {}
combined_data.update(combined_allocation_data)
combined_data.update(combined_signal_data)
return combined_data
@staticmethod
def available_packages() -> List[str]:
"""Returns the list of supported packages."""
return list(algorithm_functions.keys())
@staticmethod
def return_algorithm_category(algorithm_name: str) -> str:
"""Returns the category of algorithm as a string."""
if algorithm_name in Api.get_signal_information():
algo_type = PandasEnum.SIGNAL.value
elif algorithm_name in Api.get_allocation_information():
algo_type = PandasEnum.ALLOCATION.value
else:
            raise NameError("Algorithm is not supported: " + algorithm_name)
return algo_type
@staticmethod
def algorithm_categories() -> List[str]:
"""Returns the list of supported packages."""
return [PandasEnum.ALLOCATION.value, "signal"]
@staticmethod
def available_algorithms(
filter_by_package: Union[str, List[str]] = None, filter_by_category: Union[str, List[str]] = None
) -> List[str]:
"""Returns a list of strings that are available strategies."""
if not filter_by_package:
filter_by_package = Api.available_packages()
elif isinstance(filter_by_package, str):
filter_by_package = [filter_by_package]
if not filter_by_category:
filter_by_category = Api.algorithm_categories()
elif isinstance(filter_by_category, str):
filter_by_category = [filter_by_category]
names = []
for ii_package in filter_by_package:
for jj_type in filter_by_category:
algorithms = list(algorithm_functions[ii_package][jj_type].keys())
names += algorithms
return names
@staticmethod
def determine_package_of_algorithm(name_of_algorithm: str) -> str:
"""Determines the original package of a strategy."""
category = Api.return_algorithm_category(name_of_algorithm)
package_name = "Unknown"
for ii_package in Api.available_packages():
algo_list = Api.available_algorithms(filter_by_package=ii_package, filter_by_category=category)
if name_of_algorithm in algo_list:
package_name = ii_package
return package_name
@staticmethod
def required_inputs_for_algorithm(name_of_strategy: str) -> List[str]:
"""Describes the input columns needed for the strategy."""
full_info = Api.get_algorithm_information()
required_inputs = full_info[name_of_strategy]["series"]
return required_inputs
@staticmethod
def required_parameters_for_algorithm(name_of_strategy: str) -> List[str]:
"""Describes the input columns needed for the strategy."""
full_info = Api.get_algorithm_information()
required_inputs = full_info[name_of_strategy]["parameters"]
return required_inputs
@staticmethod
def _get_raw_class(name_of_strategy_or_signal: str) -> callable:
"""Private method to return the raw class - should not be used externally."""
        info = Api.get_algorithm_information()
raw_class = info[name_of_strategy_or_signal]["function"]
return raw_class
@staticmethod
def calculate_allocations(
df: pd.DataFrame, name_of_strategy: str, name_of_price_series: str = "price"
) -> pd.DataFrame:
"""Calculates the allocations using the supplied strategy."""
        if name_of_price_series != "price":
df[PandasEnum.MID.value] = df[name_of_price_series]
class_of_rule = Api._get_raw_class(name_of_strategy)
df_with_positions = class_of_rule(df)
return df_with_positions
@staticmethod
def calculate_returns(df: pd.DataFrame) -> pd.DataFrame:
"""Calculates the returns from supplied positions."""
df_with_returns = ReturnsFromPositions().transform(df)
return df_with_returns
@staticmethod
def calculate_allocations_and_returns(
df: pd.DataFrame, name_of_strategy: str, name_of_price_series: str = "price"
) -> pd.DataFrame:
"""Calculates the returns using the supplied strategy."""
df_with_positions = Api.calculate_allocations(df, name_of_strategy, name_of_price_series)
df_with_returns = ReturnsFromPositions().transform(df_with_positions)
return df_with_returns
@staticmethod
def calculate_signal(
df: pd.DataFrame, name_of_signal: str
) -> pd.DataFrame:
"""Calculates the allocations using the supplied strategy."""
class_of_signal_generator = Api._get_raw_class(name_of_signal)
df_with_signal = class_of_signal_generator(df)
return df_with_signal
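if __name__ == "__main__":
    # Minimal usage sketch; "fifty_fifty" is assumed to be one of the registered allocation
    # rules and may differ between infertrade versions, so check Api.available_algorithms()
    # first when running this against a real installation.
    example_df = pd.DataFrame({"price": [1.0, 1.1, 1.05, 1.2, 1.3]})
    print(Api.available_algorithms(filter_by_category=PandasEnum.ALLOCATION.value))
    print(Api.calculate_allocations_and_returns(example_df, "fifty_fifty"))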
|
from typing import Any, Dict, Type, TypeVar
from injector import Injector, Provider, Scope, ScopeDecorator
from appunit import context
__all__ = ["RequestScope", "request"]
T = TypeVar("T")
class CachedProviderWrapper(Provider):
def __init__(self, old_provider: Provider) -> None:
self._old_provider = old_provider
self._cache: Dict[int, Any] = {}
def get(self, injector: Injector) -> Any:
key = id(injector)
try:
return self._cache[key]
except KeyError:
instance = self._cache[key] = self._old_provider.get(injector)
return instance
class RequestScope(Scope):
def get(self, key: Type[T], old_provider: Provider[T]) -> Provider[T]:
scope: dict = context.get_request_scope()
try:
return scope[key]
except KeyError:
new_provider = scope[key] = CachedProviderWrapper(old_provider)
context.set_request_scope(scope)
return new_provider
request = ScopeDecorator(RequestScope)
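# A hedged usage sketch (Service is a hypothetical class; appunit.context must have an
# active request scope, e.g. set up by the framework's request middleware, before the
# injector resolves anything):
#
#   from injector import Injector
#
#   @request
#   class Service:
#       pass
#
#   injector = Injector()
#   assert injector.get(Service) is injector.get(Service)  # cached within one request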
|
import math
import datetime
import Config
import Math
import TargetNone
class TargetAvoid(TargetNone.TargetNone):
def __init__(self):
if 'avoid' not in Config.save_json:
Config.save_json['avoid'] = []
self.avoid_coordinate = Config.save_json['avoid']
self.vehicle_width = 1.3
if 'vehicle_width' in Config.config_json:
self.vehicle_width = Config.config_json[
'vehicle_width']
print("[Target avoid] " + str(len(self.avoid_coordinate)) + " avoidance coordinates")
print("[Target avoid] vehicle width:", self.vehicle_width)
self.rot_diff = 0.0
self.go_up = False
self.timestamp = datetime.datetime.now()
self.obstacle_ahead = False
self.travel_duration_min_s = 2.0
if 'travel_time_min' in Config.config_json:
self.travel_duration_min_s = Config.config_json['travel_time_min']
self.travel_duration_max_s = 3.0
if 'travel_time_max' in Config.config_json:
self.travel_duration_max_s = Config.config_json['travel_time_max']
self.init_travel_duration_s = self.travel_duration_min_s
self.travel_duration_s = self.init_travel_duration_s
print("[Target avoid] travel_duration =", self.travel_duration_s)
self.is_give_up = False
self.previous_angle_sign = 1
def reset(self):
self.avoid_coordinate = Config.save_json['avoid']
print("[Target avoid] " + str(len(self.avoid_coordinate)) + " avoidance coordinates")
print("[Target avoid] vehicle width:", self.vehicle_width)
self.rot_diff = 0.0
self.go_up = False
self.timestamp = datetime.datetime.now()
self.obstacle_ahead = False
self.init_travel_duration_s = self.init_travel_duration_s + (
(self.travel_duration_max_s - self.travel_duration_min_s) / 10.0)
if self.init_travel_duration_s > self.travel_duration_max_s:
self.init_travel_duration_s = self.travel_duration_min_s
self.travel_duration_s = self.init_travel_duration_s
print("[Target avoid] travel_duration = ", self.travel_duration_s, "s")
self.is_give_up = False
self.previous_angle_sign = 1
def run(self, position, rotation, speed_ms, rot_diff, target_speed_ms, go_up):
current_timestamp = datetime.datetime.now()
if current_timestamp - self.timestamp > datetime.timedelta(seconds=0.0): # FIXME: hard coded value
            # Not sure why this is needed, but RoRBot and the trucks do not share the same orientation
rotation[1] = rotation[1] + 90
# rotation[1] = rotation[1] + rot_diff # rotation with regards results of previous "target" configuration
self.find_avoidance_parameters(speed_ms, position, rotation)
self.timestamp = current_timestamp
# print("rot_diff",rot_diff)
# print("self.rot_diff", self.rot_diff)
# print("rot_diff + self.rot_diff", rot_diff + self.rot_diff)
# print("")
if self.rot_diff != 0:
return self.rot_diff, target_speed_ms, go_up
else:
return rot_diff, target_speed_ms, go_up
def find_avoidance_parameters(self, speed_ms, position, rotation):
# speed_ms = 2.8 # FIXME hard coded value
travel_distance_m = speed_ms * self.travel_duration_s
current_rotation = rotation[1]
if self.is_no_obstacle_ahead(position, current_rotation, travel_distance_m) is True:
if self.obstacle_ahead is True:
print("[TargetAvoid] No more obstacle ahead within", travel_distance_m, "m,", self.travel_duration_s,
"s")
self.obstacle_ahead = False
self.rot_diff = 0
self.is_give_up = False
return
current_detection_width_m = self.vehicle_width
while current_detection_width_m >= self.vehicle_width / 2.0:
current_travel_duration_s = self.travel_duration_s
while current_travel_duration_s >= self.travel_duration_min_s - 0.1:
for try_rot_diff in range(5, 175, 5):
# print("trying rotation", current_rotation + try_rot_diff)
if self.is_no_obstacle_ahead(position, current_rotation + try_rot_diff, travel_distance_m) is True:
# print("rotation", -try_rot_diff, "OK")
self.rot_diff = -try_rot_diff
self.is_give_up = False
self.previous_angle_sign = -1
return
# print("trying rotation", current_rotation - rot_diff)
if self.is_no_obstacle_ahead(position, current_rotation - try_rot_diff, travel_distance_m) is True:
# print("rotation", try_rot_diff, "OK")
self.rot_diff = try_rot_diff
self.is_give_up = False
self.previous_angle_sign = 1
return
print("[TargetAvoid] dead end within", travel_distance_m, "m,", self.travel_duration_s, "s")
current_travel_duration_s = current_travel_duration_s / 2.0
travel_distance_m = speed_ms * current_travel_duration_s
print("[TargetAvoid] Lower travel distance to", travel_distance_m, "m,", current_travel_duration_s, "s")
print("[TargetAvoid] dead end for detection width = ", current_detection_width_m, "m")
current_detection_width_m = current_detection_width_m / 2.0
print("[TargetAvoid] Lower detection width to ", current_detection_width_m, "m")
# self.rot_diff = self.previous_angle_sign * 180
self.rot_diff = 0
if self.is_give_up is False:
print("[TargetAvoid] give-up, trying", self.rot_diff)
self.is_give_up = True
def is_no_obstacle_ahead(self, position, rotation, distance_m):
# print("Checking rotation", rotation)
# Build rectangle and check it avoids everything
rect = [
[
0,
- self.vehicle_width / 2.0
],
[
0,
self.vehicle_width / 2.0
],
[
-distance_m,
self.vehicle_width / 2.0
],
[
-distance_m,
- self.vehicle_width / 2.0
]
]
# print("rect", rect)
angle_rad = math.radians(rotation)
s = math.sin(angle_rad)
c = math.cos(angle_rad)
rotated_rect = [
[
c * rect[0][0] - s * rect[0][1],
s * rect[0][0] + c * rect[0][1],
],
[
c * rect[1][0] - s * rect[1][1],
s * rect[1][0] + c * rect[1][1],
],
[
c * rect[2][0] - s * rect[2][1],
s * rect[2][0] + c * rect[2][1],
],
[
c * rect[3][0] - s * rect[3][1],
s * rect[3][0] + c * rect[3][1],
]
]
# print("rotated_rect", rotated_rect)
final_rect = [
[
rotated_rect[0][0] + position[0],
rotated_rect[0][1] + position[2]
],
[
rotated_rect[1][0] + position[0],
rotated_rect[1][1] + position[2]
],
[
rotated_rect[2][0] + position[0],
rotated_rect[2][1] + position[2]
],
[
rotated_rect[3][0] + position[0],
rotated_rect[3][1] + position[2]
]
]
# print("final rect", final_rect)
for coord in self.avoid_coordinate:
test_point = [coord[0], coord[2]]
if Math.point_in_rectangle(test_point, final_rect) is True:
if self.obstacle_ahead is False:
print("[TargetAvoid] Obstacle within", distance_m, "m")
self.obstacle_ahead = True
return False
# print("distance=", distance_m, "angle=", angle, "free")
return True
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import sys
import datetime
message = "Hello, %s! Current time: %s." % (sys.argv[1], datetime.datetime.now())
# Print the message to stdout.
print(message)
# Append the message to the log file.
with open("/tmp/Greengrass_HelloWorld.log", "a") as f:
print(message, file=f)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Advent of Code, Day 12
======================
Author: hbldh <henrik.blidh@nedomkull.com>
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import re
with open('input_12.txt', 'r') as f:
instructions = f.read().strip().splitlines()
def solve(registers):
n = 0
while n < len(instructions):
parts = instructions[n].split(' ')
if parts[0] == 'cpy':
registers[parts[2]] = int(parts[1]) if parts[1].isdigit() else registers[parts[1]]
elif parts[0] == 'inc':
registers[parts[1]] += 1
elif parts[0] == 'dec':
registers[parts[1]] -= 1
elif parts[0] == 'jnz':
parts[1] = int(parts[1]) if parts[1].isdigit() else registers[parts[1]]
if parts[1] != 0:
n += int(parts[2])
continue
else:
raise ValueError(str(parts))
n += 1
return registers
print("[Part 1]: Register 'a': {a}".format(**solve({'a': 0, 'b': 0, 'c': 0, 'd': 0})))
print("[Part 2]: Register 'a': {a}".format(**solve({'a': 0, 'b': 0, 'c': 1, 'd': 0})))
|
"""Module that manages the testing states of the access ports"""
import threading
from forch.utils import get_logger
from forch.proto.shared_constants_pb2 import PortBehavior
from forch.proto.devices_state_pb2 import DeviceBehavior, DevicePlacement
from forch.proto.shared_constants_pb2 import DVAState
INVALID_VLAN = 0
STATE_HANDLERS = {}
def _register_state_handler(state_name):
def register(func):
STATE_HANDLERS[state_name] = func
return func
return register
class PortStateMachine:
"""State machine class that manages testing states of an access port"""
UNAUTHENTICATED = 'unauthenticated'
AUTHENTICATED = 'authenticated'
SEQUESTERED = 'sequestered'
OPERATIONAL = 'operational'
INFRACTED = 'infracted'
TRANSITIONS = {
UNAUTHENTICATED: {
PortBehavior.cleared: OPERATIONAL,
PortBehavior.sequestered: SEQUESTERED,
},
SEQUESTERED: {
PortBehavior.passed: OPERATIONAL,
PortBehavior.failed: INFRACTED,
PortBehavior.deauthenticated: UNAUTHENTICATED,
},
OPERATIONAL: {
PortBehavior.cleared: OPERATIONAL,
PortBehavior.deauthenticated: UNAUTHENTICATED,
},
}
# pylint: disable=too-many-arguments
def __init__(self, mac, initial_state, unauthenticated_state_callback, sequester_state_callback,
operational_state_callback, infracted_state_callback):
self._mac = mac
self._current_state = initial_state
self._unauthenticated_state_callback = unauthenticated_state_callback
self._sequester_state_callback = sequester_state_callback
self._operational_state_callback = operational_state_callback
self._infracted_state_callback = infracted_state_callback
self._logger = get_logger('portsm')
self._handle_current_state()
def handle_port_behavior(self, port_behavior):
"""Handle port behavior"""
next_state = self.TRANSITIONS.get(self._current_state, {}).get(port_behavior, {})
if not next_state:
self._logger.warning(
'Cannot find next state for device %s in state %s for port behavior %s',
self._mac, self._current_state, port_behavior)
return
self._logger.info(
'Device %s is entering %s state from %s state',
self._mac, next_state, self._current_state)
self._current_state = next_state
self._handle_current_state()
def get_current_state(self):
"""Get current state of the port"""
return self._current_state
def _handle_current_state(self):
if self._current_state in STATE_HANDLERS:
STATE_HANDLERS[self._current_state](self)
@_register_state_handler(state_name=UNAUTHENTICATED)
def _handle_unauthenticated_state(self):
self._logger.info('Handling unauthenticated state for device %s', self._mac)
self._unauthenticated_state_callback(self._mac)
@_register_state_handler(state_name=SEQUESTERED)
def _handle_sequestered_state(self):
self._logger.info('Handling sequestered state for device %s', self._mac)
self._sequester_state_callback(self._mac)
@_register_state_handler(state_name=OPERATIONAL)
def _handle_operational_state(self):
self._logger.info('Handling operational state for device %s', self._mac)
self._operational_state_callback(self._mac)
@_register_state_handler(state_name=INFRACTED)
def _handle_infracted_state(self):
self._logger.info('Handling infracted state for device %s', self._mac)
self._infracted_state_callback(self._mac)
class PortStateManager:
"""Manages the states of the access ports for orchestrated testing"""
# pylint: disable=too-many-arguments
def __init__(self, device_state_manager=None, varz_updater=None, testing_segment=None):
self._state_machines = {}
self._static_port_behaviors = {}
self._static_device_behaviors = {}
self._dynamic_device_behaviors = {}
self._device_state_manager = device_state_manager
self._varz_updater = varz_updater
self._placement_to_mac = {}
self._testing_segment = testing_segment
self._lock = threading.RLock()
self._logger = get_logger('portmgr')
def handle_static_device_behavior(self, mac, device_behavior):
"""Add static testing state for a device"""
with self._lock:
static_port_behavior = device_behavior.port_behavior
if static_port_behavior:
self._static_port_behaviors[mac] = static_port_behavior
if device_behavior.segment:
self.handle_device_behavior(mac, device_behavior, static=True)
def handle_device_behavior(self, mac, device_behavior, static=False):
"""Handle authentication result"""
if device_behavior.segment:
self._handle_authenticated_device(mac, device_behavior, static)
if static:
self._update_static_vlan_varz(
mac, vlan=self._get_vlan_from_segment(device_behavior.segment))
else:
self._handle_deauthenticated_device(mac, static)
def handle_device_placement(self, mac, device_placement, static=False):
"""Handle a learning or expired VLAN for a device"""
if device_placement.connected:
return self._handle_learned_device(mac, device_placement, static)
return self._handle_disconnected_device(device_placement)
def _handle_learned_device(self, mac, device_placement, static=False):
# if device is learned
old_mac = self._placement_to_mac.get((device_placement.switch, device_placement.port))
stale_mac = old_mac if old_mac and old_mac != mac else None
if stale_mac:
switch = device_placement.switch
port = device_placement.port
self._logger.warning(
'Cleaning stale device placement: %s, %s, %s', old_mac, switch, port)
stale_placement = DevicePlacement(switch=switch, port=port, connected=False)
self._handle_disconnected_device(stale_placement)
self._placement_to_mac[(device_placement.switch, device_placement.port)] = mac
self._process_device_placement(mac, device_placement, static=static)
if mac not in self._state_machines:
self._state_machines[mac] = PortStateMachine(
mac, PortStateMachine.UNAUTHENTICATED, self._handle_unauthenticated_state,
self._set_port_sequestered, self._set_port_operational,
self._handle_infracted_state)
device_behavior = (self._static_device_behaviors.get(mac) or
self._dynamic_device_behaviors.get(mac))
if device_behavior:
static = mac in self._static_device_behaviors
self.handle_device_behavior(mac, device_behavior, static=static)
return True, None, stale_mac
def _handle_disconnected_device(self, device_placement):
eth_src = self._placement_to_mac.pop((device_placement.switch, device_placement.port), None)
        # Don't propagate removal of placement if not in cache
if not eth_src:
return False, None, None
self._process_device_placement(eth_src, device_placement, static=False)
if eth_src in self._state_machines:
self._state_machines.pop(eth_src)
self._update_device_state_varz(eth_src, DVAState.initial)
return True, eth_src, None
def _handle_authenticated_device(self, mac, device_behavior, static):
"""Initialize or update the state machine for an authenticated device"""
if not self._process_device_behavior:
return
with self._lock:
device_behaviors = (
self._static_device_behaviors if static else self._dynamic_device_behaviors)
device_behaviors.setdefault(mac, DeviceBehavior()).CopyFrom(device_behavior)
static_port_behavior = self._static_port_behaviors.get(mac)
if not self._testing_segment or static_port_behavior == PortBehavior.cleared:
port_behavior = PortBehavior.cleared
else:
port_behavior = PortBehavior.sequestered
if mac in self._state_machines:
self._state_machines[mac].handle_port_behavior(port_behavior)
def _handle_deauthenticated_device(self, mac, static):
"""Handle an deauthenticated device"""
if not self._process_device_behavior:
return
with self._lock:
device_behaviors = (
self._static_device_behaviors if static else self._dynamic_device_behaviors)
if mac in device_behaviors:
device_behaviors.pop(mac)
else:
self._logger.warning(
'%s behavior does not exist for %s', 'static' if static else 'dynamic', mac)
# ignore dynamic behavior for device that has static behavior defined
if not static and mac in self._static_device_behaviors:
return
if mac in self._state_machines:
port_behavior = PortBehavior.deauthenticated
self._state_machines[mac].handle_port_behavior(port_behavior)
self._process_device_behavior(mac, DeviceBehavior(), static=static)
def handle_testing_result(self, testing_result):
"""Update the state machine for a device according to the testing result"""
for mac, device_behavior in testing_result.device_mac_behaviors.items():
self._handle_port_behavior(mac, device_behavior.port_behavior)
def _handle_port_behavior(self, mac, port_behavior):
with self._lock:
state_machine = self._state_machines.get(mac)
if not state_machine:
self._logger.error(
'No state machine defined for device %s before receiving testing result', mac)
return
state_machine.handle_port_behavior(port_behavior)
def _handle_unauthenticated_state(self, mac):
self._update_device_state_varz(mac, DVAState.unauthenticated)
def _set_port_sequestered(self, mac):
"""Set port to sequester vlan"""
if not self._process_device_behavior:
return
device_behavior = DeviceBehavior(segment=self._testing_segment)
self._process_device_behavior(mac, device_behavior, static=False)
self._update_device_state_varz(mac, DVAState.sequestered)
def _set_port_operational(self, mac):
"""Set port to operation vlan"""
if not self._process_device_behavior:
return
static = mac in self._static_device_behaviors
device_behavior = (
self._static_device_behaviors.get(mac) or self._dynamic_device_behaviors.get(mac))
assert device_behavior
self._process_device_behavior(mac, device_behavior, static=static)
self._update_device_state_varz(mac, DVAState.static if static else DVAState.operational)
def _handle_infracted_state(self, mac):
static = mac in self._static_device_behaviors
self._process_device_behavior(mac, DeviceBehavior(), static=static)
self._update_device_state_varz(mac, DVAState.infracted)
def clear_static_device_behaviors(self):
"""Remove all static device behaviors"""
with self._lock:
macs = list(self._static_device_behaviors.keys())
for mac in macs:
self._update_static_vlan_varz(mac, INVALID_VLAN)
self._handle_deauthenticated_device(mac, static=True)
def _process_device_placement(self, mac, device_placement, static=False):
if self._device_state_manager:
self._device_state_manager.process_device_placement(mac, device_placement, static)
def _process_device_behavior(self, mac, device_behavior, static=False):
if self._device_state_manager:
self._device_state_manager.process_device_behavior(mac, device_behavior, static)
def _get_vlan_from_segment(self, segment):
if self._device_state_manager:
return self._device_state_manager.get_vlan_from_segment(segment)
return None
def _update_device_state_varz(self, mac, device_state):
if self._varz_updater:
self._varz_updater.update_device_state_varz(mac, device_state)
def _update_static_vlan_varz(self, mac, vlan):
if self._varz_updater:
self._varz_updater.update_static_vlan_varz(mac, vlan)
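# A hedged sketch of driving the state machine in isolation (the callbacks are no-op
# placeholders; PortBehavior values come from forch's protobuf definitions and logging
# must already be configured for get_logger to work):
#
#   noop = lambda mac: None
#   sm = PortStateMachine('00:11:22:33:44:55', PortStateMachine.UNAUTHENTICATED,
#                         noop, noop, noop, noop)
#   sm.handle_port_behavior(PortBehavior.sequestered)  # UNAUTHENTICATED -> SEQUESTERED
#   sm.handle_port_behavior(PortBehavior.passed)       # SEQUESTERED -> OPERATIONAL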
|
#!/usr/bin/env python
#
# Copyright (c), 2021, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
from timeit import timeit
from memory_profiler import profile
from elementpath import XPath1Parser, XPath2Parser
from elementpath.xpath30 import XPath30Parser
def run_timeit(stmt='pass', setup='pass', number=1000):
seconds = timeit(stmt, setup=setup, number=number)
print("{}: {}s".format(stmt, seconds))
@profile
def xpath1_parser_objects():
return [XPath1Parser() for _ in range(10000)]
@profile
def xpath2_parser_objects():
return [XPath2Parser() for _ in range(10000)]
@profile
def xpath30_parser_objects():
return [XPath30Parser() for _ in range(10000)]
if __name__ == '__main__':
print('*' * 62)
print("*** Memory and timing profile of XPathParser1/2/3 classes ***")
print("***" + ' ' * 56 + "***")
print('*' * 62)
print()
xpath1_parser_objects()
xpath2_parser_objects()
xpath30_parser_objects()
NUMBER = 10000
SETUP = 'from __main__ import XPath1Parser'
run_timeit("XPath1Parser().parse('18 - 9 + 10')", SETUP, NUMBER)
run_timeit("XPath1Parser().parse('true()')", SETUP, NUMBER)
run_timeit("XPath1Parser().parse('contains(\"foobar\", \"bar\")')", SETUP, NUMBER)
run_timeit("XPath1Parser().parse('/A/B/C/D')", SETUP, NUMBER)
print()
SETUP = 'from __main__ import XPath2Parser'
run_timeit("XPath2Parser().parse('18 - 9 + 10')", SETUP, NUMBER)
run_timeit("XPath2Parser().parse('true()')", SETUP, NUMBER)
run_timeit("XPath2Parser().parse('contains(\"foobar\", \"bar\")')", SETUP, NUMBER)
run_timeit("XPath2Parser().parse('/A/B/C/D')", SETUP, NUMBER)
print()
SETUP = 'from __main__ import XPath30Parser'
run_timeit("XPath30Parser().parse('18 - 9 + 10')", SETUP, NUMBER)
run_timeit("XPath30Parser().parse('true()')", SETUP, NUMBER)
run_timeit("XPath30Parser().parse('contains(\"foobar\", \"bar\")')", SETUP, NUMBER)
run_timeit("XPath30Parser().parse('/A/B/C/D')", SETUP, NUMBER)
print()
|
import yaml
CONFIG_PATH = "./config.yml"
config = {}
with open(CONFIG_PATH, 'r') as f:
    config = yaml.safe_load(f)
|
from __future__ import print_function, division, absolute_import
import time
import multiprocessing
import pickle
from collections import defaultdict
import warnings
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import numpy as np
import six.moves as sm
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import imgaug as ia
import imgaug.multicore as multicore
from imgaug import augmenters as iaa
from imgaug.testutils import reseed
def main():
time_start = time.time()
test_BatchLoader()
# test_BackgroundAugmenter.get_batch()
test_BackgroundAugmenter__augment_images_worker()
# test_BackgroundAugmenter.terminate()
time_end = time.time()
print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
class TestPool(unittest.TestCase):
def setUp(self):
reseed()
def test_property_pool(self):
mock_Pool = mock.MagicMock()
mock_Pool.return_value = mock_Pool
mock_Pool.close.return_value = None
mock_Pool.join.return_value = None
with mock.patch("multiprocessing.Pool", mock_Pool):
augseq = iaa.Noop()
with multicore.Pool(augseq, processes=1, maxtasksperchild=4, seed=123) as pool:
assert pool.processes == 1
assert pool._pool is None
assert mock_Pool.call_count == 1
assert mock_Pool.close.call_count == 1
assert mock_Pool.join.call_count == 1
assert mock_Pool.call_args[0][0] == 1 # processes
assert mock_Pool.call_args[1]["initargs"] == (augseq, 123)
assert mock_Pool.call_args[1]["maxtasksperchild"] == 4
def test_processes(self):
augseq = iaa.Noop()
mock_Pool = mock.MagicMock()
mock_cpu_count = mock.Mock()
with mock.patch("multiprocessing.Pool", mock_Pool), mock.patch("multiprocessing.cpu_count", mock_cpu_count):
combos = [
(1, 1, 1),
(2, 1, 1),
(3, 1, 1),
(1, 2, 2),
(3, 2, 2),
(1, None, None),
(2, None, None),
(3, None, None),
(1, -1, 1),
(2, -1, 1),
(3, -1, 2),
(4, -2, 2)
]
for ret_val, inputs, expected in combos:
mock_cpu_count.return_value = ret_val
with multicore.Pool(augseq, processes=inputs) as _pool:
pass
if expected is None:
assert mock_Pool.call_args[0][0] is None
else:
assert mock_Pool.call_args[0][0] == expected
def _test_map_batches_both(self, call_async):
augseq = iaa.Noop()
mock_Pool = mock.MagicMock()
mock_Pool.return_value = mock_Pool
mock_Pool.map.return_value = "X"
mock_Pool.map_async.return_value = "X"
with mock.patch("multiprocessing.Pool", mock_Pool):
batches = [ia.Batch(images=[ia.quokka()]), ia.Batch(images=[ia.quokka()+1])]
with multicore.Pool(augseq, processes=1) as pool:
if call_async:
_ = pool.map_batches_async(batches)
else:
_ = pool.map_batches(batches)
if call_async:
to_check = mock_Pool.map_async
else:
to_check = mock_Pool.map
assert to_check.call_count == 1
# args, arg 0
assert to_check.call_args[0][0] == multicore._Pool_starworker
# args, arg 1 (batches with ids), tuple 0, entry 0 in tuple (=> batch id)
assert to_check.call_args[0][1][0][0] == 0
# args, arg 1 (batches with ids), tuple 0, entry 1 in tuple (=> batch)
assert np.array_equal(to_check.call_args[0][1][0][1].images_unaug, batches[0].images_unaug)
# args, arg 1 (batches with ids), tuple 1, entry 0 in tuple (=> batch id)
assert to_check.call_args[0][1][1][0] == 1
# args, arg 1 (batches with ids), tuple 1, entry 1 in tuple (=> batch)
assert np.array_equal(to_check.call_args[0][1][1][1].images_unaug, batches[1].images_unaug)
def test_map_batches(self):
self._test_map_batches_both(call_async=False)
def test_map_batches_async(self):
self._test_map_batches_both(call_async=True)
def _test_imap_batches_both(self, call_unordered):
batches = [ia.Batch(images=[ia.quokka()]), ia.Batch(images=[ia.quokka()+1])]
def _generate_batches():
for batch in batches:
yield batch
augseq = iaa.Noop()
mock_Pool = mock.MagicMock()
mock_Pool.return_value = mock_Pool
mock_Pool.imap.return_value = batches
mock_Pool.imap_unordered.return_value = batches
with mock.patch("multiprocessing.Pool", mock_Pool):
with multicore.Pool(augseq, processes=1) as pool:
gen = _generate_batches()
if call_unordered:
_ = list(pool.imap_batches_unordered(gen))
else:
_ = list(pool.imap_batches(gen))
if call_unordered:
to_check = mock_Pool.imap_unordered
else:
to_check = mock_Pool.imap
assert to_check.call_count == 1
assert to_check.call_args[0][0] == multicore._Pool_starworker
arg_batches = list(to_check.call_args[0][1]) # convert generator to list, make it subscriptable
# args, arg 1 (batches with ids), tuple 0, entry 0 in tuple (=> batch id)
assert arg_batches[0][0] == 0
# tuple 0, entry 1 in tuple (=> batch)
assert np.array_equal(arg_batches[0][1].images_unaug, batches[0].images_unaug)
# tuple 1, entry 0 in tuple (=> batch id)
assert arg_batches[1][0] == 1
# tuple 1, entry 1 in tuple (=> batch)
assert np.array_equal(arg_batches[1][1].images_unaug, batches[1].images_unaug)
def test_imap_batches(self):
self._test_imap_batches_both(call_unordered=False)
def test_imap_batches_unordered(self):
self._test_imap_batches_both(call_unordered=True)
def _assert_each_augmentation_not_more_than_once(self, batches_aug):
sum_to_vecs = defaultdict(list)
for batch in batches_aug:
assert not np.array_equal(batch.images_aug[0], batch.images_aug[1])
vec = batch.images_aug.flatten()
vecsum = int(np.sum(vec))
if vecsum in sum_to_vecs:
for other_vec in sum_to_vecs[vecsum]:
assert not np.array_equal(vec, other_vec)
else:
sum_to_vecs[vecsum].append(vec)
def test_augmentations_with_seed_match(self):
augseq = iaa.AddElementwise((0, 255))
image = np.zeros((10, 10, 1), dtype=np.uint8)
batch = ia.Batch(images=np.uint8([image, image]))
batches = [batch.deepcopy() for _ in sm.xrange(60)]
# seed=1
with multicore.Pool(augseq, processes=2, maxtasksperchild=30, seed=1) as pool:
batches_aug1 = pool.map_batches(batches, chunksize=2)
# seed=1
with multicore.Pool(augseq, processes=2, seed=1) as pool:
batches_aug2 = pool.map_batches(batches, chunksize=1)
# seed=2
with multicore.Pool(augseq, processes=2, seed=2) as pool:
batches_aug3 = pool.map_batches(batches, chunksize=1)
assert len(batches_aug1) == 60
assert len(batches_aug2) == 60
assert len(batches_aug3) == 60
for b1, b2, b3 in zip(batches_aug1, batches_aug2, batches_aug3):
# images were augmented
assert not np.array_equal(b1.images_unaug, b1.images_aug)
assert not np.array_equal(b2.images_unaug, b2.images_aug)
assert not np.array_equal(b3.images_unaug, b3.images_aug)
# original images still the same
assert np.array_equal(b1.images_unaug, batch.images_unaug)
assert np.array_equal(b2.images_unaug, batch.images_unaug)
assert np.array_equal(b3.images_unaug, batch.images_unaug)
# augmentations for same seed are the same
assert np.array_equal(b1.images_aug, b2.images_aug)
# augmentations for different seeds are different
assert not np.array_equal(b1.images_aug, b3.images_aug)
# make sure that batches for the two pools with same seed did not repeat within results (only between the
# results of the two pools)
for batches_aug in [batches_aug1, batches_aug2, batches_aug3]:
self._assert_each_augmentation_not_more_than_once(batches_aug)
def test_augmentations_with_seed_match_for_images_and_keypoints(self):
augseq = iaa.AddElementwise((0, 255))
image = np.zeros((10, 10, 1), dtype=np.uint8)
# keypoints here will not be changed by augseq, but they will induce deterministic mode to start in
# augment_batches() as each batch contains images AND keypoints
kps = ia.KeypointsOnImage([ia.Keypoint(x=2, y=0)], shape=(10, 10, 1))
batch = ia.Batch(images=np.uint8([image, image]), keypoints=[kps, kps])
batches = [batch.deepcopy() for _ in sm.xrange(60)]
# seed=1
with multicore.Pool(augseq, processes=2, maxtasksperchild=30, seed=1) as pool:
batches_aug1 = pool.map_batches(batches, chunksize=2)
# seed=1
with multicore.Pool(augseq, processes=2, seed=1) as pool:
batches_aug2 = pool.map_batches(batches, chunksize=1)
# seed=2
with multicore.Pool(augseq, processes=2, seed=2) as pool:
batches_aug3 = pool.map_batches(batches, chunksize=1)
assert len(batches_aug1) == 60
assert len(batches_aug2) == 60
assert len(batches_aug3) == 60
for batches_aug in [batches_aug1, batches_aug2, batches_aug3]:
for batch in batches_aug:
for keypoints_aug in batch.keypoints_aug:
assert keypoints_aug.keypoints[0].x == 2
assert keypoints_aug.keypoints[0].y == 0
for b1, b2, b3 in zip(batches_aug1, batches_aug2, batches_aug3):
# images were augmented
assert not np.array_equal(b1.images_unaug, b1.images_aug)
assert not np.array_equal(b2.images_unaug, b2.images_aug)
assert not np.array_equal(b3.images_unaug, b3.images_aug)
# original images still the same
assert np.array_equal(b1.images_unaug, batch.images_unaug)
assert np.array_equal(b2.images_unaug, batch.images_unaug)
assert np.array_equal(b3.images_unaug, batch.images_unaug)
# augmentations for same seed are the same
assert np.array_equal(b1.images_aug, b2.images_aug)
# augmentations for different seeds are different
assert not np.array_equal(b1.images_aug, b3.images_aug)
# make sure that batches for the two pools with same seed did not repeat within results (only between the
# results of the two pools)
for batches_aug in [batches_aug1, batches_aug2, batches_aug3]:
self._assert_each_augmentation_not_more_than_once(batches_aug)
def test_augmentations_without_seed_differ(self):
augseq = iaa.AddElementwise((0, 255))
image = np.zeros((10, 10, 1), dtype=np.uint8)
batch = ia.Batch(images=np.uint8([image, image]))
batches = [batch.deepcopy() for _ in sm.xrange(20)]
with multicore.Pool(augseq, processes=2, maxtasksperchild=5) as pool:
batches_aug = pool.map_batches(batches, chunksize=2)
with multicore.Pool(augseq, processes=2) as pool:
batches_aug.extend(pool.map_batches(batches, chunksize=1))
assert len(batches_aug) == 2*20
self._assert_each_augmentation_not_more_than_once(batches_aug)
def test_augmentations_without_seed_differ_for_images_and_keypoints(self):
augseq = iaa.AddElementwise((0, 255))
image = np.zeros((10, 10, 1), dtype=np.uint8)
# keypoints here will not be changed by augseq, but they will induce deterministic mode to start in
# augment_batches() as each batch contains images AND keypoints
kps = ia.KeypointsOnImage([ia.Keypoint(x=2, y=0)], shape=(10, 10, 1))
batch = ia.Batch(images=np.uint8([image, image]), keypoints=[kps, kps])
batches = [batch.deepcopy() for _ in sm.xrange(20)]
with multicore.Pool(augseq, processes=2, maxtasksperchild=5) as pool:
batches_aug = pool.map_batches(batches, chunksize=2)
with multicore.Pool(augseq, processes=2) as pool:
batches_aug.extend(pool.map_batches(batches, chunksize=1))
assert len(batches_aug) == 2*20
for batch in batches_aug:
for keypoints_aug in batch.keypoints_aug:
assert keypoints_aug.keypoints[0].x == 2
assert keypoints_aug.keypoints[0].y == 0
self._assert_each_augmentation_not_more_than_once(batches_aug)
def test_inputs_not_lost(self):
"""Test to make sure that inputs (e.g. images) are never lost."""
def _assert_contains_all_ids(batches_aug):
# batch.images_unaug
ids = set()
for batch_aug in batches_aug:
ids.add(int(batch_aug.images_unaug.flat[0]))
ids.add(int(batch_aug.images_unaug.flat[1]))
for idx in sm.xrange(2*100):
assert idx in ids
assert len(ids) == 200
# batch.images_aug
ids = set()
for batch_aug in batches_aug:
ids.add(int(batch_aug.images_aug.flat[0]))
ids.add(int(batch_aug.images_aug.flat[1]))
for idx in sm.xrange(2*100):
assert idx in ids
assert len(ids) == 200
augseq = iaa.Noop()
image = np.zeros((1, 1, 1), dtype=np.uint8)
# creates batches containing images with ids from 0 to 199 (one pair of consecutive ids per batch)
batches = [ia.Batch(images=np.uint8([image + b_idx*2, image + b_idx*2+1]))
for b_idx in sm.xrange(100)]
with multicore.Pool(augseq, processes=2, maxtasksperchild=25) as pool:
batches_aug = pool.map_batches(batches)
_assert_contains_all_ids(batches_aug)
with multicore.Pool(augseq, processes=2, maxtasksperchild=25, seed=1) as pool:
batches_aug = pool.map_batches(batches)
_assert_contains_all_ids(batches_aug)
with multicore.Pool(augseq, processes=3, seed=2) as pool:
batches_aug = pool.map_batches(batches)
_assert_contains_all_ids(batches_aug)
with multicore.Pool(augseq, processes=2, seed=None) as pool:
batches_aug = pool.map_batches(batches)
_assert_contains_all_ids(batches_aug)
batches_aug = pool.map_batches(batches)
_assert_contains_all_ids(batches_aug)
def test_close(self):
augseq = iaa.Noop()
with multicore.Pool(augseq, processes=2) as pool:
pool.close()
def test_terminate(self):
augseq = iaa.Noop()
with multicore.Pool(augseq, processes=2) as pool:
pool.terminate()
def test_join(self):
augseq = iaa.Noop()
with multicore.Pool(augseq, processes=2) as pool:
pool.close()
pool.join()
def test_BatchLoader():
reseed()
def _load_func():
for _ in sm.xrange(20):
yield ia.Batch(images=np.zeros((2, 4, 4, 3), dtype=np.uint8))
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as caught_warnings:
for nb_workers in [1, 2]:
# repeat these tests many times to catch rarer race conditions
for _ in sm.xrange(5):
loader = multicore.BatchLoader(_load_func, queue_size=2, nb_workers=nb_workers, threaded=True)
loaded = []
counter = 0
while (not loader.all_finished() or not loader.queue.empty()) and counter < 1000:
try:
batch = loader.queue.get(timeout=0.001)
loaded.append(batch)
except:
pass
counter += 1
assert len(loaded) == 20*nb_workers, \
"Expected %d to be loaded by threads, got %d for %d workers at counter %d." % (
20*nb_workers, len(loaded), nb_workers, counter
)
loader = multicore.BatchLoader(_load_func, queue_size=200, nb_workers=nb_workers, threaded=True)
loader.terminate()
assert loader.all_finished()
loader = multicore.BatchLoader(_load_func, queue_size=2, nb_workers=nb_workers, threaded=False)
loaded = []
counter = 0
while (not loader.all_finished() or not loader.queue.empty()) and counter < 1000:
try:
batch = loader.queue.get(timeout=0.001)
loaded.append(batch)
except:
pass
counter += 1
assert len(loaded) == 20*nb_workers, \
"Expected %d to be loaded by background processes, got %d for %d workers at counter %d." % (
20*nb_workers, len(loaded), nb_workers, counter
)
loader = multicore.BatchLoader(_load_func, queue_size=200, nb_workers=nb_workers, threaded=False)
loader.terminate()
assert loader.all_finished()
assert len(caught_warnings) > 0
for warning in caught_warnings:
assert "is deprecated" in str(warning.message)
def test_BackgroundAugmenter__augment_images_worker():
reseed()
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as caught_warnings:
def gen():
yield ia.Batch(images=np.zeros((1, 4, 4, 3), dtype=np.uint8))
bl = multicore.BatchLoader(gen(), queue_size=2)
bgaug = multicore.BackgroundAugmenter(bl, iaa.Noop(), queue_size=1, nb_workers=1)
queue_source = multiprocessing.Queue(2)
queue_target = multiprocessing.Queue(2)
queue_source.put(
pickle.dumps(
ia.Batch(images=np.zeros((1, 4, 8, 3), dtype=np.uint8)),
protocol=-1
)
)
queue_source.put(pickle.dumps(None, protocol=-1))
bgaug._augment_images_worker(iaa.Add(1), queue_source, queue_target, 1)
batch_aug = pickle.loads(queue_target.get())
assert isinstance(batch_aug, ia.Batch)
assert batch_aug.images_unaug is not None
assert batch_aug.images_unaug.dtype == np.uint8
assert batch_aug.images_unaug.shape == (1, 4, 8, 3)
assert np.array_equal(batch_aug.images_unaug, np.zeros((1, 4, 8, 3), dtype=np.uint8))
assert batch_aug.images_aug is not None
assert batch_aug.images_aug.dtype == np.uint8
assert batch_aug.images_aug.shape == (1, 4, 8, 3)
assert np.array_equal(batch_aug.images_aug, np.zeros((1, 4, 8, 3), dtype=np.uint8) + 1)
finished_signal = pickle.loads(queue_target.get())
assert finished_signal is None
source_finished_signal = pickle.loads(queue_source.get())
assert source_finished_signal is None
assert queue_source.empty()
assert queue_target.empty()
queue_source.close()
queue_target.close()
queue_source.join_thread()
queue_target.join_thread()
bl.terminate()
bgaug.terminate()
assert len(caught_warnings) > 0
for warning in caught_warnings:
assert "is deprecated" in str(warning.message)
if __name__ == "__main__":
main()
|
from lib.directives import DIRECTIVES
class Node:
def __init__(self, key=None):
self.key = key if key else ""
self.keysplit = []
self.raw = None
self.inner = None
self.directive = None
self.incontext = None
self.func = None
self.escape = None
def _finish(self):
self.keysplit = self.key.split(".")
class RootNode(Node):
def __init__(self):
super().__init__()
self.inner = []
class TextNode(Node):
def __init__(self, text):
super().__init__()
self.text = text
class TagNode(Node):
def __init__(self, raw, inner):
super().__init__()
self.raw = raw
self.inner = inner.strip()
self.directive = 0
self.incontext = False
self.func = None
self.format = ""
self.escape = False
# ignore empties (by marking as comment)
if not len(self.inner):
self.directive = DIRECTIVES.COMMENT
else:
self.key = self.inner
# leading directive
if self.inner[0] in DIRECTIVES.TO_VALUE:
self.directive = DIRECTIVES.TO_VALUE[self.inner[0]]
if not self.directive:
self.directive = 0
elif self.directive == DIRECTIVES.LIST and self.inner[1] in DIRECTIVES.TO_VALUE and \
DIRECTIVES.TO_VALUE[self.inner[1]] == DIRECTIVES.SECTION_INC:
# special case of list and section
self.directive = DIRECTIVES.LIST_SECTION
self.key = self.key[2:]
elif self.directive in (
DIRECTIVES.IN_CONTEXT, # handled separately as can be doubled-up
DIRECTIVES.PASS_CONTEXT, # here and below are not leading directives
DIRECTIVES.FORMAT,
DIRECTIVES.ESCAPE
):
self.directive = 0
# this one doubles as exclusive section, so special case
elif self.directive == DIRECTIVES.ROOT_PARTIAL:
self.directive = DIRECTIVES.SECTION_EXC
else:
self.key = self.key[1:]
# in-context-directive
if self.key[0] == DIRECTIVES.TO_SYMBOL[DIRECTIVES.IN_CONTEXT]:
self.incontext = True
self.key = self.key[1:]
if self.directive == DIRECTIVES.PARTIAL and self.incontext:
raise Exception("Invalid tag: cannot have partial directive as in-context at {0}".format(self.raw))
# context directive
sym = DIRECTIVES.TO_SYMBOL[DIRECTIVES.PASS_CONTEXT]
split = self.key.split(sym)
# note pure context tag {{.}} can be split with empty first {{.~tofunc}}
if len(split) > 1:
if len(split) > 2:
raise Exception("Invalid tag: multiple function context directives at {0}".format(self.raw))
if (not split[0] and not self.incontext) or not split[1] or split[1][0] == sym[0]:
raise Exception("Invalid tag: malformatted function context directive at {0}".format(self.raw))
self.key = split[0]
self.func = split[1]
# format directive
sym = DIRECTIVES.TO_SYMBOL[DIRECTIVES.FORMAT]
split = (self.func if self.func else self.key).split(sym)
# leading or ending with format directive, assume part of name
if len(split) == 2:
if not split[0] and not self.incontext:
split = [split[1]]
elif not split[1]:
split = [split[0]]
if len(split) > 1:
if len(split) > 2:
raise Exception("Invalid tag: multiple format directives at {0}".format(self.raw))
if (not split[0] and not self.incontext) or not split[1] or split[1][0] == sym[0]:
raise Exception("Invalid tag: malformatted format directive at {0}".format(self.raw))
self.format = split[1]
if self.func:
self.func = split[0]
else:
self.key = split[0]
# escape directive
sym = DIRECTIVES.TO_SYMBOL[DIRECTIVES.ESCAPE]
split = self.func if self.func else self.key
if split.endswith(sym):
self.escape = True
split = split[0:-1]
if self.func:
self.func = split
else:
self.key = split
if self.format.endswith(sym):
self.escape = True
self.format = self.format[0:-1]
# convert pass-to-function key to node
if self.func:
self.func = PassToFunctionNode(self.func, self)
# final key check
self.key = self.key.strip()
if not len(self.key) and not self.incontext:
# can't be empty except special case for pure context {{.}}
raise Exception("Invalid tag: empty evaluation at {0}".format(self.raw))
# this fills keysplit
self._finish()
class PassToFunctionNode(Node):
def __init__(self, key, context_node=None):
if isinstance(key, PassToFunctionNode):
super().__init__(key.key)
self.incontext = key.incontext
else:
super().__init__(key)
self.incontext = False
# function can have context directive, but can't be pure context -- e.g. {{data~.}}
if self.key[0] == DIRECTIVES.TO_SYMBOL[DIRECTIVES.IN_CONTEXT]:
self.key = self.key[1:]
self.incontext = True
if not len(self.key) and not self.incontext:
"Invalid tag: empty evaluation at {0}".format(context_node.raw)
self._finish()
class PartialNode(Node):
def __init__(self, tag):
super().__init__()
if tag.incontext:
raise Exception("Partial tag cannot be paired with in-context directive at {0}".format(tag.raw))
if tag.format:
raise Exception("Partial tag cannot be paired with format directive at {0}".format(tag.raw))
if tag.escape:
raise Exception("Partial tag cannot be paired with escape directive at {0}".format(tag.raw))
if tag.func:
raise Exception("Partial tag cannot be paired with pass-to-function directive at {0}".format(tag.raw))
self.directive = DIRECTIVES.PARTIAL
self.raw = tag.raw
self.inner = tag.inner
self.key = tag.key
self.incontext = True # partials default to in-context
if self.key.endswith(DIRECTIVES.TO_SYMBOL[DIRECTIVES.ROOT_PARTIAL]):
self.key = self.key[0:-1]
self.incontext = False
if not len(self.key):
raise Exception("Empty partial tag at {0}".format(tag.raw))
self._finish()
class SectionNode(Node):
def __init__(self, tag, parent):
super().__init__(tag.key)
self.raw = tag.raw
self.inner = []
self.incontext = tag.incontext
self.parent = parent
if isinstance(tag, SectionNode):
self.func = PassToFunctionNode(tag.func) if tag.func else None
self.inclusive = tag.inclusive
self.open = tag.open
self.list = tag.list
else:
self.func = tag.func
self.inclusive = tag.directive == DIRECTIVES.SECTION_INC or tag.directive == DIRECTIVES.LIST_SECTION
self.open = tag
self.list = tag.directive == DIRECTIVES.LIST_SECTION
if tag.format:
raise Exception("Invalid tag: format passed to section tag {0}".format(tag.raw))
if tag.escape:
raise Exception("Invalid tag: escape directive passed to section tag {0}".format(tag.raw))
if tag.directive not in (DIRECTIVES.SECTION_INC, DIRECTIVES.SECTION_EXC, DIRECTIVES.LIST_SECTION):
raise Exception("Template error: parsing invalid section tag {0}".format(tag.raw))
self._finish()
|
from enum import IntEnum
class OgreMeshChunkID(IntEnum):
"""
Definition of the OGRE .mesh file format
.mesh files are binary files (for read efficiency at runtime) and are arranged into chunks
of data, very like 3D Studio's format.
A chunk always consists of:
unsigned short CHUNK_ID : one of the following chunk ids identifying the chunk
unsigned long LENGTH : length of the chunk in bytes, including this header
void* DATA : the data, which may contain other sub-chunks (various data types)
A .mesh file can contain both the definition of the Mesh itself, and optionally the definitions
of the materials it uses (although these can be omitted; if so, the Mesh assumes that at runtime the
Materials referred to by name in the Mesh are loaded/created from another source)
A .mesh file only contains a single mesh, which can itself have multiple submeshes.
"""
M_HEADER = 0x1000
M_MESH = 0x3000
M_SUBMESH = 0x4000
M_SUBMESH_OPERATION = 0x4010
M_SUBMESH_BONE_ASSIGNMENT = 0x4100
M_SUBMESH_TEXTURE_ALIAS = 0x4200
M_GEOMETRY = 0x5000
M_GEOMETRY_VERTEX_DECLARATION = 0x5100
M_GEOMETRY_VERTEX_ELEMENT = 0x5110
M_GEOMETRY_VERTEX_BUFFER = 0x5200
M_GEOMETRY_VERTEX_BUFFER_DATA = 0x5210
M_MESH_SKELETON_LINK = 0x6000
M_MESH_BONE_ASSIGNMENT = 0x7000
M_MESH_LOD_LEVEL = 0x8000
M_MESH_LOD_USAGE = 0x8100
M_MESH_LOD_MANUAL = 0x8110
M_MESH_LOD_GENERATED = 0x8120
M_MESH_BOUNDS = 0x9000
M_SUBMESH_NAME_TABLE = 0xA000
M_SUBMESH_NAME_TABLE_ELEMENT = 0xA100
M_EDGE_LISTS = 0xB000
M_EDGE_LIST_LOD = 0xB100
M_EDGE_GROUP = 0xB110
M_POSES = 0xC000
M_POSE = 0xC100
M_POSE_VERTEX = 0xC111
M_ANIMATIONS = 0xD000
M_ANIMATION = 0xD100
M_ANIMATION_BASEINFO = 0xD105
M_ANIMATION_TRACK = 0xD110
M_ANIMATION_MORPH_KEYFRAME = 0xD111
M_ANIMATION_POSE_KEYFRAME = 0xD112
M_ANIMATION_POSE_REF = 0xD113
M_TABLE_EXTREMES = 0xE000
M_GEOMETRY_NORMALS = 0x5100
M_GEOMETRY_COLOURS = 0x5200
M_GEOMETRY_TEXCOORDS = 0x5300
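# Illustrative sketch (not part of the OGRE sources): reading one chunk header as
# described in the docstring above -- an unsigned short chunk id followed by an
# unsigned long length that includes the header itself. Little-endian byte order is
# assumed here purely for illustration.
import struct

def read_chunk_header(stream):
    """Return (chunk_id, length) from a binary .mesh stream, or None at end of file."""
    header = stream.read(6)  # 2-byte id + 4-byte length
    if len(header) < 6:
        return None
    chunk_id, length = struct.unpack('<HI', header)
    # known ids can be mapped onto the enum above, e.g. OgreMeshChunkID(chunk_id)
    return chunk_id, length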
|
# -*- coding: UTF-8 -*-
"""
A set of tests for the vmware.py module
"""
import unittest
from unittest.mock import patch, MagicMock
from vlab_inventory_api.lib.worker import vmware
class TestVMware(unittest.TestCase):
"""A suite of test cases for the vmware.py module"""
@patch.object(vmware.virtual_machine, 'get_info')
@patch.object(vmware, 'vCenter')
def test_show_inventory(self, fake_vCenter, fake_get_info):
"""``show_inventory`` returns a dictionary when the user has VMs"""
fake_get_info.return_value = {}
fake_vm = MagicMock()
fake_vm.name = 'myVM'
fake_folder = MagicMock()
fake_folder.childEntity = [fake_vm]
fake_vCenter.return_value.__enter__.return_value.get_by_name.return_value = fake_folder
result = vmware.show_inventory(username='bob')
expected = {'myVM' : {}}
self.assertEqual(result, expected)
@patch.object(vmware, 'vCenter')
def test_create_inventory(self, fake_vCenter):
"""``create_inventory`` returns None when everything works as expected"""
result = vmware.create_inventory(username='bob')
expected = None
self.assertEqual(result, expected)
@patch.object(vmware, 'nuke_folder')
@patch.object(vmware, 'vCenter')
def test_delete_inventory(self, fake_vCenter, fake_nuke_folder):
"""``delete_inventory`` returns None when everything works as expected"""
fake_logger = MagicMock()
fake_folder = MagicMock()
fake_vCenter.return_value.__enter__.return_value.get_by_name.return_value = fake_folder
result = vmware.delete_inventory(username='alice', logger=fake_logger)
expected = None
self.assertEqual(result, expected)
@patch.object(vmware, 'nuke_folder')
@patch.object(vmware, 'vCenter')
def test_delete_invalid_power_state(self, fake_vCenter, fake_nuke_folder):
"""``delete_inventory`` returns an error if a VM is not powered off"""
fake_logger = MagicMock()
fake_nuke_folder.side_effect = [vmware.vim.fault.InvalidState(msg='testing')]
fake_folder = MagicMock()
fake_vCenter.return_value.__enter__.return_value.get_by_name.return_value = fake_folder
result = vmware.delete_inventory(username='alice', logger=fake_logger)
expected = 'testing'
self.assertEqual(result, expected)
@patch.object(vmware, 'nuke_folder')
@patch.object(vmware, 'vCenter')
def test_delete_already_gone(self, fake_vCenter, fake_nuke_folder):
"""``delete_inventory`` returns an error if the user has no inventory records"""
fake_logger = MagicMock()
fake_nuke_folder.side_effect = [FileNotFoundError()]
fake_folder = MagicMock()
fake_vCenter.return_value.__enter__.return_value.get_by_name.return_value = fake_folder
result = vmware.delete_inventory(username='alice', logger=fake_logger)
expected = 'User alice has no folder'
self.assertEqual(result, expected)
@patch.object(vmware, 'nuke_folder')
@patch.object(vmware, 'vCenter')
def test_delete_failure(self, fake_vCenter, fake_nuke_folder):
"""``delete_inventory`` returns an error if there was a system failure"""
fake_logger = MagicMock()
fake_nuke_folder.side_effect = [RuntimeError('testing')]
fake_folder = MagicMock()
fake_vCenter.return_value.__enter__.return_value.get_by_name.return_value = fake_folder
result = vmware.delete_inventory(username='alice', logger=fake_logger)
expected = 'testing'
self.assertEqual(result, expected)
@patch.object(vmware, 'consume_task')
def test_nuke_folder(self, fake_consume_task):
"""``nuke_folder`` calls Destory on the folder object"""
fake_folder = MagicMock()
vmware.nuke_folder(fake_folder)
self.assertTrue(fake_folder.Destroy.called)
if __name__ == '__main__':
unittest.main()
|
import os, json, re
import discord, asyncio
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
from urllib import parse
from riot_api import *
app = discord.Client()
mstatus = 0
botid = ""
token = os.getenv("TOKEN")
lol_apikey = os.getenv("API_KEY")
if not token:
json_data = open(os.getcwd() + "/token/.config.json", encoding='utf-8').read()
config_json = json.loads(json_data)
token = config_json["token"]
@app.event
async def on_ready():
global botid
print('Logged in as')
print(app.user.name)
print(app.user.id)
print('------')
game = discord.Game("Game Helper | !helpme")
botid = await app.application_info()
await app.change_presence(status=discord.Status.online, activity=game)
@app.event
async def on_message(message):
global mstatus, botid
if message.author.bot and message.author.id == botid.id:
if mstatus == 1:
await message.add_reaction("\u2b55") # O
await message.add_reaction("\u274c") # X
mstatus = mstatus - 1
else:
return None
if message.content == "!helpme":
desc_text = "{0} \n{1} \n{2} \n{3} \n{4}".format("!helpme : 명령어 목록 불러오기", \
"!owsearch : 오버워치 전적 검색하기", \
"!lolsearch : 롤 소환사 검색하기", \
"!muteall : 현재 보이스 채널에 있는 유저들 모두 음소거 시키기", \
"!unmuteall : 모든 사용자 음소거 해제하기")
embed = discord.Embed(title="명령어 목록", description=desc_text, color=0x6FA8DC)
await message.channel.send(embed=embed)
if message.content == "!owsearch":
embed = discord.Embed(title="Overwatch 점수 검색", description="'배틀태그#숫자' 형식으로 입력해주세요.", color=0x82CC62)
embed.set_image(url="https://bnetcmsus-a.akamaihd.net/cms/blog_header/q4/Q4K237E1EGPI1467079634956.jpg")
await message.channel.send(embed=embed)
def check(m):
return m.author == message.author and m.channel == message.channel
try:
m = await app.wait_for('message',timeout=25.0, check=check)
except asyncio.TimeoutError:
await message.channel.send("시간초과!")
else:
battletag_bool = bool(re.search('.[#][0-9]', m.content))
if battletag_bool:
battletag = m.content.replace("#", "-")
async with message.channel.typing():
req = Request("https://playoverwatch.com/ko-kr/career/pc/" + parse.quote(battletag))
res = urlopen(req)
bs = BeautifulSoup(res, "html.parser")
roles = bs.findAll("div", attrs={"class": "competitive-rank-tier"})
scores = bs.findAll("div", attrs={"class": "competitive-rank-level"})
public_status = bs.findAll("p", attrs={"class": "masthead-permission-level-text"})
competitive_roles = [i.get("data-ow-tooltip-text") for i in roles[:len(roles)//2]]
competitive_score = [i.text for i in scores[:len(scores)//2]]
if not public_status:
await message.channel.send("프로필이 존재하지 않습니다. 배틀태그와 뒤에 숫자를 다시 확인해 주세요.")
else:
if public_status[0].text == "비공개 프로필":
await message.channel.send("비공개 프로필입니다. 프로필 공개 설정을 공개로 바꾼 뒤에 사용해 주세요.")
else:
comp_data = bs.find("div", attrs={"id": "competitive","data-mode": "competitive"})
heroes = comp_data.findAll("div", attrs={"class": "ProgressBar-title"})
play_time = comp_data.findAll("div", attrs={"class": "ProgressBar-description"})
comp_heroes = []
for h in heroes:
comp_heroes.append([h])
for i in range(len(play_time)):
comp_heroes[i].append(play_time[i])
score_result = ""
top_five_result = ""
top_five = [[d[0].text, d[1].text] for d in comp_heroes] if len(comp_heroes) <= 5 else [[d[0].text, d[1].text] for d in comp_heroes[:5]]
def format_time(s):
t = s.split(":")
if len(t) == 2:
# MM:SS
return "{0} 분".format(str(int(t[0])))
elif len(t) == 3:
# HH:MM:SS
return "{0} 시간".format(str(int(t[0])))
else:
return "0 분"
if len(competitive_roles) == 0 and len(competitive_score) == 0:
score_result = "아직 배치를 덜본것 같군요! 점수가 없습니다."
else:
for i in range(len(competitive_roles)):
score_result = score_result + competitive_roles[i] + " : " + competitive_score[i] + "\n"
score_result = score_result + "입니다."
for i, h in enumerate(top_five):
top_five_result += "{0}. {1}: {2}\n".format(str(i+1), h[0], format_time(h[1]))
embed = discord.Embed(title=battletag.split("-")[0] + " 님의 현재 시즌 경쟁전 점수", description=score_result, color=0x82CC62)
embed2 = discord.Embed(title="경쟁전 상위 영웅", description=top_five_result, color=0x82CC62)
await message.channel.send(embed=embed)
await message.channel.send(embed=embed2)
else:
# Invalid
await message.channel.send("배틀태그가 유효하지 않습니다.")
if message.content == "!lolsearch":
embed = discord.Embed(title="롤 소환사 검색", description="소환사 이름을 입력하세요 \n단, 소환사 이름만 입력하세요!", color=0x82CC62)
await message.channel.send(embed=embed)
def check(m):
return m.author == message.author and m.channel == message.channel
try:
m = await app.wait_for('message',timeout=25.0, check=check)
except asyncio.TimeoutError:
await message.channel.send("시간초과!")
else:
async with message.channel.typing():
summoner = Summoner(m.content)
if summoner.account_id == "":
# Misinput. Aborting
await message.channel.send("소환사 이름을 잘못 입력하셨습니다.")
else:
info = summoner.summoner_info
if info:
info = info[0]
msg = info.get("message")
if not msg:
name = info.get("summonerName", "NULL")
tier = info.get("tier", "")
rank = info.get("rank" "")
win_rate = "{} %".format(int(summoner.recent_winning_rate*100))
result_url = "http://www.op.gg/summoner/userName=" + parse.quote(name)
desc_text = "소환사 이름 : {0}\n \
티어 : {1} {2}\n \
최근 랭크 게임 승률 : {3}\n".format(name, tier, rank, win_rate)
embed = discord.Embed(title="소환사 검색 결과", description=desc_text, url=result_url, color=0x82CC62)
else:
desc_text = "없는 소환사 입니다."
embed = discord.Embed(title="소환사 검색 결과", description=desc_text, color=0x82CC62)
await message.channel.send(embed=embed)
else:
name = summoner.account.get("name")
if name:
# Exists, but no recent comp play
desc_text = "소환사 이름 : {0}\n \
티어 : 플레이 내역이 없습니다.\n \
최근 랭크 게임 승률 : 플레이 내역이 없습니다.\n".format(name)
embed = discord.Embed(title="소환사 검색 결과", description=desc_text, color=0x82CC62)
await message.channel.send(embed=embed)
else:
#???
await message.channel.send("???????")
if message.content == "!muteall":
if message.author.voice is None:
await message.channel.send("이 기능을 사용하려면 보이스 채널에 들어가 있어야 합니다!")
else:
mstatus = mstatus + 1
embed = discord.Embed(title="Among Us 전용 전체 음소거 기능", description="현재 음성채널에 있는 모든 사용자를 음소거하겠습니까? \n원하시면 :o:, 아니면 :x:를 눌러주세요.", color=0xFFD966)
await message.channel.send(embed=embed)
def check(reaction, user):
return user == message.author and (str(reaction.emoji) == "\u2b55" or str(reaction.emoji) == "\u274c")
try:
reaction, user = await app.wait_for('reaction_add', timeout=10.0, check=check)
except asyncio.TimeoutError:
await message.channel.send("시간초과!")
else:
if str(reaction.emoji) == "\u2b55":
# await message.channel.send(administrator_id)
member_list = message.author.voice.channel.members
async with message.channel.typing():
for member in member_list:
await member.edit(mute=True, reason="Among Us Player Mute All")
await message.channel.send("음소거 완료!")
elif str(reaction.emoji) == "\u274c":
await message.channel.send("싫음 소환하지를 마. 귀찮게.")
if message.content == "!unmuteall":
member_list = message.author.voice.channel.members
async with message.channel.typing():
for member in member_list:
await member.edit(mute=False)
await message.channel.send("음소거 해제 완료!")
if "개" in message.content or message.content == "!doge":
await message.channel.send("https://i.kym-cdn.com/entries/icons/original/000/013/564/doge.jpg")
app.run(token)
|
from dart_version_manager.commands.build_command import app as build_app
from dart_version_manager.commands.major_command import app as major_app
from dart_version_manager.commands.minor_command import app as minor_app
from dart_version_manager.commands.patch_command import app as patch_app
from dart_version_manager.commands.pre_release_command import app as pre_release_app
__all__ = [build_app, major_app, minor_app, patch_app, pre_release_app]
|
# -*- coding: utf-8 -*-
from flask_restful import Api
import resources
def create_api(app):
api = Api(app)
api.add_resource(resources.OndeRemar, '/api/onde-remar', '/api/onde-remar/<int:item_id>')
api.add_resource(resources.Produtos, '/api/produtos', '/api/produtos/<int:item_id>')
api.add_resource(resources.Eventos, '/api/eventos', '/api/eventos/<string:item_id>')
api.add_resource(resources.Resultados, '/api/eventos/<string:item_id>/resultados')
api.add_resource(resources.Newsletter, '/api/newsletters')
api.add_resource(resources.Noticia, '/api/noticias', '/api/noticias/<string:item_id>')
api.add_resource(resources.LoginAtleta, '/api/login')
api.add_resource(resources.Atletas, '/api/atletas', '/api/atletas/<int:item_id>', '/api/atletas/<int:item_id>/<string:evento_slug>')
api.add_resource(resources.InscricoesAtletas, '/api/atletas/<int:atleta_id>/inscricoes', '/api/atletas/<int:atleta_id>/inscricoes/<int:inscricao_id>')
api.add_resource(resources.LoginAdmin, '/admin/api/login')
api.add_resource(resources.AtletasAdmin, '/admin/api/atletas', '/admin/api/atletas/<int:item_id>')
api.add_resource(resources.EventosAdmin, '/admin/api/eventos', '/admin/api/eventos/<string:item_id>')
api.add_resource(resources.ProvasAdmin, '/admin/api/provas', '/admin/api/provas/<int:item_id>')
api.add_resource(resources.InscricoesAdmin, '/admin/api/inscricoes', '/admin/api/inscricoes/<int:item_id>')
api.add_resource(resources.NewsletterAdmin, '/admin/api/newsletters', '/admin/api/newsletters/<int:item_id>')
api.add_resource(resources.NoticiaAdmin, '/admin/api/noticias', '/admin/api/noticias/<int:item_id>')
api.add_resource(resources.NoticiaImagemAdmin, '/admin/api/noticias/imagens', '/admin/api/noticias/<int:item_id>/imagens/<path:file_name>')
|
from .map import MapFactory, Map, Graph
__all__ = ['MapFactory', 'Map', 'Graph']
|
from collections import defaultdict
count=0
class Graph:
def __init__(self):
self.graph = defaultdict(list)
def addNode(self, init, connect):
self.graph[init].append(connect)
def DFStrigger(self, index, visited, stack):
visited[index] =True
for ele in self.graph[index]:
if(visited[ele]==False):
self.DFStrigger(ele, visited, stack)
stack.append(index)
def DFS(self, stack):
# first pass of Kosaraju's algorithm: push vertices onto the stack in order of finishing time
visited=[False]*(count)
for i in range(count):
if(visited[i]==False):
self.DFStrigger(i, visited, stack)
def DFSUtil(self, i, visited):
visited[i]=True
print(i, end=' ')
for k in self.graph[i]:
if(visited[k]==False):
self.DFSUtil(k, visited)
def PrintSCC(self, g, stack):
visited=[False]*(count)
for i in reversed(stack):
if(visited[i]==False):
g.DFSUtil(i, visited)
print("")
def triggerSCC(self, stack):
# second pass: build the transposed graph and DFS it in reverse finishing order;
# each DFS tree printed by PrintSCC is one strongly connected component
g = Graph()
for i in self.graph:
for ele in self.graph[i]:
g.addNode(ele, i)
self.PrintSCC(g, stack)
def SCC(self):
stack = []
self.DFS(stack)
#print(stack)
self.triggerSCC(stack)
gr = Graph()
count=4
gr.addNode(0, 1)
gr.addNode(1, 2)
gr.addNode(2, 0)
gr.addNode(3, 2)
gr.SCC()
|
from __future__ import unicode_literals
import frappe
import erpnext
from frappe import auth
import datetime
import json, ast
from frappe.share import add
@frappe.whitelist()
def share_lead(doc, method=None):
'''
users = frappe.db.sql(""" select owner from `tabToDo` where reference_type = 'Lead' and reference_name = '{name}' """.format(name=doc.reference_name), as_dict=1)
read = 1
write = 1
share = 1
everyone = 0
for x in users:
add('Lead', doc.reference_name, x.user, read, write, share, everyone)
'''
|
import unittest
import unittest.mock
import ast
import io
from margate.parser import (Parser, parse_expression, IfNode, ForNode,
ExtendsNode)
from margate.code_generation import (Literal, Sequence, IfBlock,
ForBlock, ExtendsBlock, ReplaceableBlock,
Execution)
class ParserTest(unittest.TestCase):
def test_simple_sequence(self):
parser = Parser()
sequence = parser.parse([Literal("Foo"),
Literal("Bar")])
self.assertEqual(2, len(sequence.elements))
def test_parse_if_block(self):
parser = Parser()
sequence = parser.parse([Literal("Foo"),
Execution("if True"),
Literal("Bar"),
Execution("endif"),
Literal("Baz")])
self.assertEqual(sequence.elements[0],
Literal("Foo"))
self.assertIsInstance(sequence.elements[1],
IfBlock)
# TODO There doesn't seem to be an easy way to verify the
# contents of the AST object.
self.assertEqual(sequence.elements[1].sequence.elements[0],
Literal("Bar"))
self.assertEqual(sequence.elements[2],
Literal("Baz"))
def test_parse_for_loop(self):
parser = Parser()
sequence = parser.parse([Execution("for x in things"),
Literal("bar"),
Execution("endfor")])
expected_sequence = Sequence()
block = ForBlock(ForNode('x', 'things'))
block.sequence.add_element(Literal("bar"))
expected_sequence.add_element(block)
self.assertEqual(sequence.elements,
expected_sequence.elements)
def test_parse_nested(self):
parser = Parser()
sequence = parser.parse([Execution("for x in things"),
Execution("if x % 2"),
Execution("endif"),
Execution("endfor")])
self.assertEqual(1,
len(sequence.elements))
self.assertIsInstance(sequence.elements[0],
ForBlock)
self.assertIsInstance(sequence.elements[0].sequence.elements[0],
IfBlock)
self.assertEqual(1,
len(sequence.elements[0].sequence.elements))
def test_expression_parser(self):
"""Test the expression parser used within the {% %} node"""
node = parse_expression(["if", "True"])
self.assertIsInstance(node, IfNode)
self.assertEqual(node.expression.body.value,
True)
node = parse_expression(["for", "var", "in", "collection"])
self.assertIsInstance(node, ForNode)
self.assertEqual(node, ForNode("var", "collection"))
node = parse_expression(["if", "x", "<", "y"])
self.assertIsInstance(node, IfNode)
self.assertEqual(ast.dump(node.expression),
"Expression(body=Compare("
"left=Name(id='x', ctx=Load()), ops=[Lt()],"
" comparators=[Name(id='y', ctx=Load())]))")
node = parse_expression(["extends", '"other.html"'])
self.assertIsInstance(node, ExtendsNode)
self.assertEqual(node.template_name,
"other.html")
|
import ast
import json
import logging
import os.path
import re
from six.moves import urllib
from check_mk_web_api.activate_mode import ActivateMode
from check_mk_web_api.exception import CheckMkWebApiResponseException, CheckMkWebApiException, \
CheckMkWebApiAuthenticationException
from check_mk_web_api.no_none_value_dict import NoNoneValueDict
class WebApiBase:
"""
Abstraction for Check_Mk Web API
# Arguments
check_mk_url (str): URL to Check_Mk web application, multiple formats are supported
username (str): Name of user to connect as. Make sure this is an automation user.
secret (str): Secret for automation user. This is different from the password!
# Examples
```python
WebApi('http://checkmk.company.com/monitor/check_mk/webapi.py', 'automation', 'secret')
```
```python
WebApi('http://checkmk.company.com/monitor/check_mk', 'automation', 'secret')
```
```python
WebApi('http://checkmk.company.com/monitor', 'automation', 'secret')
```
"""
__DISCOVERY_REGEX = {
'added': re.compile(r'.*Added (\d+),.*'),
'removed': re.compile(r'.*Removed (\d+),.*'),
'kept': re.compile(r'.*Kept (\d+),.*'),
'new_count': re.compile(r'.*New Count (\d+)$')
}
def __init__(self, check_mk_url, username, secret):
check_mk_url = check_mk_url.rstrip('/')
if check_mk_url.endswith('.py'): # ends with /webapi.py
self.web_api_base = check_mk_url
elif check_mk_url.endswith('check_mk'): # ends with /$SITE_NAME/check_mk
self.web_api_base = os.path.join(check_mk_url, 'webapi.py')
else: # ends with /$SITE_NAME
self.web_api_base = os.path.join(check_mk_url, 'check_mk', 'webapi.py')
self.web_view_base = self.web_api_base.replace('webapi', 'view')
self.username = username
self.secret = secret
@staticmethod
def __build_request_data(data, request_format):
if not data:
return None
if request_format == 'json':
request_string = 'request=' + json.dumps(data)
elif request_format == 'python':
request_string = 'request=' + str(data)
else:
raise CheckMkWebApiException('Unsupported request_format: {0}'.format(request_format))
return request_string.encode()
def __build_request_path(self, query_params=None):
path = self.web_api_base + '?'
query_params = self.__check_query_params(query_params)
query_params.update({
'_username': self.username,
'_secret': self.secret
})
query_string = urllib.parse.urlencode(query_params)
path += query_string
return path
def __build_view_request_path(self, query_params):
path = self.web_view_base + '?'
query_params = self.__check_query_params(query_params)
query_params['output_format'] = query_params.get('output_format', 'json')
query_params.update({
'_username': self.username,
'_secret': self.secret,
})
query_string = urllib.parse.urlencode(query_params)
path += query_string
return path
def __check_query_params(self, query_params):
if not query_params:
return {}
return dict(query_params)
def __parse_response_body(self, body, query_params):
if 'output_format' in query_params:
if query_params['output_format'] == 'python':
return ast.literal_eval(body)
if query_params['output_format'] == 'json':
try:
return json.loads(body)
except json.decoder.JSONDecodeError:
# body is not valid JSON; fall through and return it unchanged
return body
return body
def __decode_response(self, response, query_params={'output_format': 'json'}):
if response.code != 200:
raise CheckMkWebApiResponseException(response)
body = response.read().decode()
if body.startswith('Authentication error:'):
raise CheckMkWebApiAuthenticationException(body)
body_dict = self.__parse_response_body(body, query_params)
# Views return json lists and not dicts of information.
# Validate the result is a list, return result
if isinstance(body_dict, list):
return body_dict
result = body_dict['result']
if body_dict['result_code'] == 0:
return result
raise CheckMkWebApiException(result)
def __make_call(self, query, data):
"""
Wrapper for all calls to CheckMK service
# Arguments
query: unstructured query from internal calls
# Raises
CheckMkWebApiResponseException: Raised when the HTTP status code != 200
CheckMkWebApiException: Raised when the action's result_code != 0
"""
query_params = self.__check_query_params(query)
request_format = query_params.get('request_format', 'json')
query_params['output_format'] = query_params.get('output_format', 'json')
built_request = [
self.__build_request_path(query_params),
WebApiBase.__build_request_data(data, request_format)
]
logging.debug('Request built url and arguments: %s', built_request)
response = urllib.request.urlopen(
*built_request
)
return self.__decode_response(response, query_params)
def make_view_request(self, query, data=None):
"""
Make calls to services that require view.py url's
# Arguments
query: block of query params to append to url
data: data to post to form in a dict format
# Raises
CheckMkWebApiResponseException: Raised when the HTTP status code != 200
CheckMkWebApiException: Raised when the action's result_code != 0
"""
query_params = self.__check_query_params(query)
query_params['output_format'] = query_params.get('output_format', 'json')
request_format = query_params.get('request_format', 'json')
built_request = [
self.__build_view_request_path(query_params), # call to correct endpoint
WebApiBase.__build_request_data(data, request_format)
]
logging.debug('Request built url and arguments: %s', built_request)
response = urllib.request.urlopen(
*built_request
)
return self.__decode_response(response)
def make_view_name_request(self, view_name, query=None, data=None):
"""
Make calls to get View
# Arguments
view_name: name of view to get, e.g. downtimes
# Raises
CheckMkWebApiResponseException: Raised when the HTTP status code != 200
CheckMkWebApiException: Raised when the action's result_code != 0
"""
built_request = [
self.__build_view_request_path({'view_name': view_name}), # call to correct endpoint
None
]
logging.debug('Request built url and arguments: %s', built_request)
response = urllib.request.urlopen(
*built_request
)
# TODO: investigate query parameters req for code response
return self.__decode_response(response)
def make_request(self, action, query_params=None, data=None):
"""
Make arbitrary request to Check_Mk Web API
# Arguments
action (str): Action request, e.g. add_host
query_params (dict): dict of path parameters
data (dict): dict that will be sent as request body
# Raises
CheckMkWebApiResponseException: Raised when the HTTP status code != 200
CheckMkWebApiException: Raised when the action's result_code != 0
"""
query_params = self.__check_query_params(query_params)
query_params.update({'action': action})
return self.__make_call(query_params, data)
def activate_changes(self, mode=ActivateMode.DIRTY,
sites=None, allow_foreign_changes=False):
"""
Activates all changes previously done
# Arguments
mode (ActivateMode): see #WebApi.ActivateMode
sites (list): List of sites to activates changes on
allow_foreign_changes (bool): If True changes of other users will be applied as well
"""
data = NoNoneValueDict({
'sites': sites
})
query_params = {
'mode': mode.value,
'allow_foreign_changes': 1 if allow_foreign_changes else 0
}
return self.make_request('activate_changes', query_params=query_params, data=data)
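# Illustrative usage sketch; the URL, user and secret are the placeholders from the
# class docstring above, not working credentials, and a reachable Check_Mk site is assumed.
if __name__ == '__main__':
    api = WebApiBase('http://checkmk.company.com/monitor', 'automation', 'secret')
    # activate any pending configuration changes, as documented in activate_changes()
    print(api.activate_changes(mode=ActivateMode.DIRTY, allow_foreign_changes=True))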
|
import numpy as np
from .shader_program import ShaderProgram
STATIC_VERTEX_SHADER = """
#version 400 core
in vec3 position;
out vec3 color;
uniform mat4 transformationMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
uniform vec3 vColor;
void main(void) {
gl_Position = projectionMatrix * viewMatrix * transformationMatrix * vec4(position, 1.0);
color = vColor;
}
"""
STATIC_FRAGMENT_SHADER = """
#version 400 core
in vec3 color;
out vec4 out_Color;
void main(void) {
out_Color = vec4(color, 1.0f);
}
"""
class StaticShader(ShaderProgram):
def __init__(self) -> None:
self.__location_v_color: int = -1
super().__init__(STATIC_VERTEX_SHADER, STATIC_FRAGMENT_SHADER)
def load_vertex_color(self, color: np.ndarray) -> None:
StaticShader._load_vector(self.__location_v_color, color)
def _bind_attributes(self) -> None:
self._bind_attribute(0, "position")
def _get_uniform_locations(self) -> None:
super()._get_uniform_locations()
self.__location_v_color = self._get_uniform_location("vColor")
|
#!/usr/bin/env python
#
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2016-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
#
import tempfile
import types
from .utils import needs_uge
from .utils import generate_random_string
from .utils import create_config_file
from .utils import load_values
from uge.api.qconf_api import QconfApi
from uge.config.config_manager import ConfigManager
from uge.log.log_manager import LogManager
from uge.exceptions.object_not_found import ObjectNotFound
from uge.exceptions.object_already_exists import ObjectAlreadyExists
create_config_file()
API = QconfApi()
PE_NAME = '%s.q' % generate_random_string(6)
CONFIG_MANAGER = ConfigManager.get_instance()
LOG_MANAGER = LogManager.get_instance()
VALUES_DICT = load_values('test_values.json')
print(VALUES_DICT)
@needs_uge
def test_object_not_found():
try:
pe = API.get_pe('__non_existent_pe__')
assert (False)
except ObjectNotFound as ex:
# ok
pass
def test_generate_pe():
pe = API.generate_pe(PE_NAME)
assert (pe.data['pe_name'] == PE_NAME)
def test_add_pe():
try:
pel = API.list_pes()
except ObjectNotFound as ex:
# no pes defined
pel = []
pe = API.add_pe(name=PE_NAME)
assert (pe.data['pe_name'] == PE_NAME)
pel2 = API.list_pes()
assert (len(pel2) == len(pel) + 1)
assert (pel2.count(PE_NAME) == 1)
def test_list_pes():
pel = API.list_pes()
assert (pel is not None)
def test_object_already_exists():
try:
pe = API.add_pe(name=PE_NAME)
assert (False)
except ObjectAlreadyExists as ex:
# ok
pass
def test_get_pe():
pe = API.get_pe(PE_NAME)
assert (pe.data['pe_name'] == PE_NAME)
def test_generate_pe_from_json():
pe = API.get_pe(PE_NAME)
json = pe.to_json()
pe2 = API.generate_object(json)
assert (pe2.__class__.__name__ == pe.__class__.__name__)
for key in list(pe.data.keys()):
v = pe.data[key]
v2 = pe2.data[key]
if type(v) == list:
assert (len(v) == len(v2))
for s in v:
assert (v2.count(s) == 1)
elif type(v) == dict:
for key in list(v.keys()):
assert (str(v[key]) == str(v2[key]))
else:
assert (str(v) == str(v2))
def test_modify_pe():
pe = API.get_pe(PE_NAME)
slots = pe.data['slots']
pe = API.modify_pe(name=PE_NAME, data={'slots': slots + 1})
slots2 = pe.data['slots']
assert (slots2 == slots + 1)
def test_get_pes():
pel = API.list_pes()
pes = API.get_pes()
for pe in pes:
print("#############################################")
print(pe.to_uge())
assert (pe.data['pe_name'] in pel)
def test_write_pes():
try:
tdir = tempfile.mkdtemp()
print("*************************** " + tdir)
pe_names = VALUES_DICT['pe_names']
pes = API.get_pes()
for pe in pes:
print("Before #############################################")
print(pe.to_uge())
new_pes = []
for name in pe_names:
npe = API.generate_pe(name=name)
new_pes.append(npe)
API.mk_pes_dir(tdir)
API.write_pes(new_pes, tdir)
API.add_pes_from_dir(tdir)
API.modify_pes_from_dir(tdir)
pes = API.get_pes()
for pe in pes:
print("After #############################################")
print(pe.to_uge())
pes = API.list_pes()
for name in pe_names:
assert (name in pes)
print("pe found: " + name)
finally:
API.delete_pes_from_dir(tdir)
API.rm_pes_dir(tdir)
def test_add_pes():
try:
new_pes = []
pe_names = VALUES_DICT['pe_names']
for name in pe_names:
npe = API.generate_pe(name=name)
new_pes.append(npe)
# print all pes currently in the cluster
pes = API.get_pes()
for pe in pes:
print("Before #############################################")
print(pe.to_uge())
# add pes
API.add_pes(new_pes)
API.modify_pes(new_pes)
# print all pes currently in the cluster
pes = API.get_pes()
for pe in pes:
print("After #############################################")
print(pe.to_uge())
# check that the pes have been added
pes = API.list_pes()
for name in pe_names:
assert (name in pes)
print("pe found: " + name)
finally:
API.delete_pes(new_pes)
def test_delete_pe():
pel = API.list_pes()
API.delete_pe(PE_NAME)
try:
pel2 = API.list_pes()
except ObjectNotFound as ex:
# no pes defined
pel2 = []
assert (len(pel2) == len(pel) - 1)
assert (pel2.count(PE_NAME) == 0)
|
def timemat(n, m):
"""Time the closed-form least-squares solve theta = (X^T X)^-1 X^T y on random data."""
import numpy
import time
t = time.time()
X = numpy.random.rand(m, n + 1)
Y = numpy.random.rand(m, 1)
theta = numpy.linalg.inv(X.T @ X) @ X.T @ Y
return time.time() - t
if __name__ == '__main__':
import sys
n = 1000
m = 100
if len(sys.argv) > 2:
n = int(sys.argv[1])
m = int(sys.argv[2])
print(timemat(n,m))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A script for extracting password-protected zip archives whose member names are GBK-encoded.
"""
import os, sys
import zipfile
def toBytes(para):
return bytes(para, 'utf-8')
def getPass(para):
import getpass
return getpass.getpass(para)
def decompress(filename):
print("Archive: ", filename)
file = zipfile.ZipFile(filename, "r")
pwd = getPass("Password: ")
for info in file.infolist():
name = info.filename
utf8name = name
try:
# zipfile decodes non-UTF-8 member names as cp437; re-encode and decode as GBK
# to recover the original (e.g. Chinese) filenames
utf8name = utf8name.encode('cp437')
utf8name = utf8name.decode('gbk')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
print(" extracting: " + utf8name)
pathname = os.path.dirname(utf8name)
if not os.path.exists(pathname) and pathname != "":
print(" creating: ", pathname)
os.makedirs(pathname)
data = file.read(name, pwd.encode('gbk'))
if not os.path.exists(utf8name):
fo = open(utf8name, "wb")
fo.write(data)
fo.close()
file.close()
print("")
if __name__ == "__main__":
if len(sys.argv) < 2:
exit(1)
for i in range(1, len(sys.argv)):
decompress(sys.argv[i])
|
from typing import List, Dict
from klgists.common import flatten
from klgists.common.exceptions import BadConfigException
class BoardLayout:
"""The pins and ports on an Arduino board.
Defines which correspond to input and output stimuli and sensors.
Does not know about sensors or stimuli themselves, only about their pins and names.
"""
def __init__(
self,
digital_ports: Dict[int, List[int]], analog_ports: Dict[int, List[int]], status_led_pin: int,
digital_stimuli: Dict[str, int]=None, analog_stimuli: Dict[str, int]=None,
digital_sensors: Dict[str, int]=None, analog_sensors: Dict[str, int]=None,
startup_pins: List[int]=None,
):
self.digital_ports = digital_ports
self.analog_ports = analog_ports
self.status_led_pin = status_led_pin
self.digital_stimuli = {} if digital_stimuli is None else digital_stimuli # type: Dict[str, int]
self.analog_stimuli = {} if analog_stimuli is None else analog_stimuli # type: Dict[str, int]
self.stimuli = self.digital_stimuli.copy()
self.stimuli.update(self.analog_stimuli)
self.digital_sensors = {} if digital_sensors is None else digital_sensors # type: Dict[str, int]
self.analog_sensors = {} if analog_sensors is None else analog_sensors # type: Dict[str, int]
self.startup_pins = [] if startup_pins is None else startup_pins # type: List[int]
# overlap
output_overlap = set(digital_stimuli).intersection(analog_stimuli)
input_overlap = set(digital_sensors).intersection(analog_sensors)
# TODO wrong error type
if len(output_overlap) != 0:
raise BadConfigException("There is overlap between digital and analog stimulus pins {}".format(output_overlap))
if len(input_overlap) != 0:
raise BadConfigException("There is overlap between digital and analog sensor pins {}".format(input_overlap))
# allowed pins
self.allowed_digital_pins = flatten([[pin for pin in allowed] for port, allowed in digital_ports.items()]) # type: List[int]
self.allowed_analog_pins = flatten([[pin for pin in allowed] for port, allowed in analog_ports.items()]) # type: List[int]
def __repr__(self) -> str:
return "BoardLayout(digital_ports={}, analog_ports={}, digital_out={}, analog_out={}, digital_in={}, digital_out={}, startup={})"\
.format(self.digital_ports, self.analog_ports, self.digital_stimuli, self.analog_stimuli, self.digital_sensors, self.analog_sensors, self.startup_pins)
def __str__(self): return repr(self)
__all__ = ['BoardLayout']
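# Illustrative usage sketch; the port/pin numbers and stimulus/sensor names below are
# assumptions for demonstration only, not taken from any real board configuration.
if __name__ == '__main__':
    example_layout = BoardLayout(
        digital_ports={0: [2, 3, 4, 5, 6, 7], 1: [8, 9, 10, 11, 12, 13]},
        analog_ports={0: [0, 1, 2, 3, 4, 5]},
        status_led_pin=13,
        digital_stimuli={'led': 2},
        analog_sensors={'photoresistor': 0},
        startup_pins=[2],
    )
    print(example_layout)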
|
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import sys
import unittest
import numpy as np
import torch
import torch.nn.functional as F
import lava.lib.dl.slayer as slayer
verbose = True if (('-v' in sys.argv) or ('--verbose' in sys.argv)) else False
seed = np.random.randint(1000)
# seed = 902
np.random.seed(seed)
if verbose:
print(f'{seed=}')
if torch.cuda.is_available():
device = torch.device('cuda')
else:
if verbose:
print(
'CUDA is not available in the system. '
'Testing for CPU version only.'
)
device = torch.device('cpu')
# neuron parameters
threshold = 1
scale = (1 << 12)
decay = np.random.random() * scale
decay = torch.FloatTensor([decay]).to(device)
state = torch.FloatTensor([0]).to(device)
# create input
time = torch.FloatTensor(np.arange(200)).to(device)
# expand to (batch, neuron, time) tensor
spike_input = torch.autograd.Variable(
torch.zeros([5, 4, len(time)]),
requires_grad=True
).to(device)
spike_input.data[..., np.random.randint(spike_input.shape[-1], size=5)] = 1
weight = torch.FloatTensor(
5 * np.random.random(size=spike_input.shape[-1]) - 0.5
).reshape([1, 1, spike_input.shape[-1]]).to(device)
w_input = slayer.utils.quantize(weight) * spike_input
if verbose:
print(f'{decay=}')
# get the dynamics response
output0 = slayer.neuron.dynamics.leaky_integrator.dynamics(
w_input, decay=decay, state=state, w_scale=scale,
)
output = slayer.neuron.dynamics.leaky_integrator.dynamics(
w_input, decay=decay, state=state, w_scale=scale, threshold=threshold,
debug=True
)
spike = (output >= threshold).to(output.dtype)
class TestIF(unittest.TestCase):
def test_input_output_range(self):
if verbose:
print(spike_input.sum(), spike_input.flatten())
if verbose:
print(spike.sum(), spike.flatten())
self.assertTrue(
spike_input.sum().item() > 0,
'There was zero input spike. Check the test setting.'
)
self.assertTrue(
spike.sum().item() > 0,
'There was zero output spike. Check the test setting.'
)
def test_leak(self):
leak_num = output0[..., 1:] - w_input[..., 1:]
leak_den = output0[..., :-1]
valid = torch.abs(leak_den) > 10 / scale
est_decay = torch.mean(1 - leak_num[valid] / leak_den[valid]) * scale
rel_error = np.abs(
(est_decay.item() - decay.item()) / max(decay.item(), 512)
)
if verbose:
print(f'{rel_error=}')
print(f'{est_decay=}')
print(f'{decay=}')
self.assertTrue(
rel_error < 1e-1, # the estimate is crude
f'Expected estimated decay to match. '
f'Found {est_decay=} and {decay=}'
)
def test_reset(self):
spike_inds = (w_input[..., 1:] == output[..., 1:])
spike_template = spike[..., :-1]
spike_template[spike_inds] = 0
error = torch.norm(spike_template).item()
if verbose:
print(f'{error=}')
self.assertTrue(
error < 1e-3,
f'Expect reset points to match. Found {error=}.'
)
def test_integer_states(self):
# there should be no quantization error
# when states are scaled with s_scale
output_error = torch.norm(torch.floor(output * scale) - output * scale)
self.assertTrue(
output_error < 1e-5,
f'Voltage calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {output_error}'
)
def test_backward(self):
spike_target = spike.clone().detach()
output_target = output.clone().detach()
spike_target[
..., np.random.randint(spike_input.shape[-1], size=5)
] = 1
output_target[
..., np.random.randint(spike_input.shape[-1], size=5)
] -= 1
loss = F.mse_loss(spike, spike_target) \
+ F.mse_loss(output, output_target)
loss.backward()
# just looking for errors
# self.assertTrue(True, 'Encountered errors.')
|
from time import sleep
from smbus import SMBus
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
bus = 0
class PiTouch:
def __init__(self):
if GPIO.RPI_REVISION == 1: # Although PiTouch doesn't fit, keep for compatibility
i2c_bus = 0
elif GPIO.RPI_REVISION == 2: # As well as the rev 2
i2c_bus = 1
elif GPIO.RPI_REVISION == 3: # For model B+
i2c_bus = 1
else:
print "Unable to determine Raspberry Pi revision."
exit
self.bus = SMBus(i2c_bus)
self.bus.write_byte_data(0x40, 0x00, 0x10)
self.bus.write_byte_data(0x40, 0xFE, 0x00)
sleep(0.01)
self.bus.write_byte_data(0x40, 0x00, 0x00)
self.bus.write_byte_data(0x40, 0x01, 0x04)
sleep(0.01)
## End of PCA init
GPIO.setup(11,GPIO.IN, pull_up_down=GPIO.PUD_UP)
def brightness(self,input):
if input == 0:
return 4096
else:
return (input - 0) * (4094 - 0) // (100 - 0) + 1
def light(self,chan,light):
light = self.brightness(light)
self.bus.write_byte_data(0x40, chan, light & 0xFF)
self.bus.write_byte_data(0x40, chan+1, light >> 8)
self.bus.write_byte_data(0x40, chan+2, 0 & 0xFF)
self.bus.write_byte_data(0x40, chan+3, 0 >> 8)
def green(self,light,pad=0):
#3, 0, 10, 13
if pad == 0:
self.light(0x06+(4*3),light)
self.light(0x06+(4*0),light)
self.light(0x06+(4*10),light)
self.light(0x06+(4*13),light)
elif pad == 1:
self.light(0x06+(4*3),light)
elif pad == 2:
self.light(0x06+(4*0),light)
elif pad == 3:
self.light(0x06+(4*10),light)
elif pad == 4:
self.light(0x06+(4*13),light)
def blue(self,light,pad=0):
#4, 1, 11, 14
if pad == 0:
self.light(0x06+(4*4),light)
self.light(0x06+(4*1),light)
self.light(0x06+(4*11),light)
self.light(0x06+(4*14),light)
elif pad == 1:
self.light(0x06+(4*4),light)
elif pad == 2:
self.light(0x06+(4*1),light)
elif pad == 3:
self.light(0x06+(4*11),light)
elif pad == 4:
self.light(0x06+(4*14),light)
def red(self,light,pad=0):
#5, 2, 12, 15
if pad == 0:
self.light(0x06+(4*5),light)
self.light(0x06+(4*2),light)
self.light(0x06+(4*12),light)
self.light(0x06+(4*15),light)
elif pad == 1:
self.light(0x06+(4*5),light)
elif pad == 2:
self.light(0x06+(4*2),light)
elif pad == 3:
self.light(0x06+(4*12),light)
elif pad == 4:
self.light(0x06+(4*15),light)
def all(self,light):
light = self.brightness(light)
self.bus.write_byte_data(0x40, 0xFA, light & 0xFF)
self.bus.write_byte_data(0x40, 0xFB, light >> 8)
self.bus.write_byte_data(0x40, 0xFC, 0 & 0xFF)
self.bus.write_byte_data(0x40, 0xFD, 0 >> 8)
def read(self):
readpad = self.bus.read_byte_data(0x1b, 0x03)
if readpad == 8:
return 1
if readpad == 16:
return 2
if readpad == 4:
return 3
if readpad == 2:
return 4
def touch(self):
while True:
if GPIO.input(11) == 0:
read = self.read()
if read is not None and 4 >= read > 0:
return read
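# Illustrative usage sketch; assumes a PiTouch board is attached on the default I2C
# addresses used above (0x40 for the LED driver, 0x1b for the touch controller).
if __name__ == '__main__':
    board = PiTouch()
    board.all(50)           # all LEDs at half brightness
    pad = board.touch()     # block until one of the four pads is pressed
    board.red(100, pad)     # light the pressed pad red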
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import date
from psycopg2 import IntegrityError, ProgrammingError
import odoo
from odoo.exceptions import UserError, ValidationError, AccessError
from odoo.tools import mute_logger
from odoo.tests import common
class TestServerActionsBase(common.TransactionCase):
def setUp(self):
super(TestServerActionsBase, self).setUp()
# Data on which we will run the server action
self.test_country = self.env['res.country'].create({
'name': 'TestingCountry',
'code': 'TY',
'address_format': 'SuperFormat',
})
self.test_partner = self.env['res.partner'].create({
'name': 'TestingPartner',
'city': 'OrigCity',
'country_id': self.test_country.id,
})
self.context = {
'active_model': 'res.partner',
'active_id': self.test_partner.id,
}
# Model data
Model = self.env['ir.model']
Fields = self.env['ir.model.fields']
self.res_partner_model = Model.search([('model', '=', 'res.partner')])
self.res_partner_name_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'name')])
self.res_partner_city_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'city')])
self.res_partner_country_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'country_id')])
self.res_partner_parent_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'parent_id')])
self.res_partner_children_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'child_ids')])
self.res_partner_category_field = Fields.search([('model', '=', 'res.partner'), ('name', '=', 'category_id')])
self.res_country_model = Model.search([('model', '=', 'res.country')])
self.res_country_name_field = Fields.search([('model', '=', 'res.country'), ('name', '=', 'name')])
self.res_country_code_field = Fields.search([('model', '=', 'res.country'), ('name', '=', 'code')])
self.res_partner_category_model = Model.search([('model', '=', 'res.partner.category')])
self.res_partner_category_name_field = Fields.search([('model', '=', 'res.partner.category'), ('name', '=', 'name')])
# create a server action that writes a comment on the active partner record
self.action = self.env['ir.actions.server'].create({
'name': 'TestAction',
'model_id': self.res_partner_model.id,
'model_name': 'res.partner',
'state': 'code',
'code': 'record.write({"comment": "MyComment"})',
})
class TestServerActions(TestServerActionsBase):
def test_00_action(self):
self.action.with_context(self.context).run()
self.assertEqual(self.test_partner.comment, 'MyComment', 'ir_actions_server: invalid condition check')
self.test_partner.write({'comment': False})
# Do: create contextual action
self.action.create_action()
self.assertEqual(self.action.binding_model_id.model, 'res.partner')
# Do: remove contextual action
self.action.unlink_action()
self.assertFalse(self.action.binding_model_id)
def test_10_code(self):
self.action.write({
'state': 'code',
'code': ("partner_name = record.name + '_code'\n"
"record.env['res.partner'].create({'name': partner_name})"),
})
run_res = self.action.with_context(self.context).run()
self.assertFalse(run_res, 'ir_actions_server: code server action correctly finished should return False')
partners = self.test_partner.search([('name', 'ilike', 'TestingPartner_code')])
self.assertEqual(len(partners), 1, 'ir_actions_server: 1 new partner should have been created')
def test_20_crud_create(self):
# Do: create a new record in another model
self.action.write({
'state': 'object_create',
'crud_model_id': self.res_country_model.id,
'link_field_id': False,
'fields_lines': [(5,),
(0, 0, {'col1': self.res_country_name_field.id, 'value': 'record.name', 'evaluation_type': 'equation'}),
(0, 0, {'col1': self.res_country_code_field.id, 'value': 'record.name[0:2]', 'evaluation_type': 'equation'})],
})
run_res = self.action.with_context(self.context).run()
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new country created
country = self.test_country.search([('name', 'ilike', 'TestingPartner')])
self.assertEqual(len(country), 1, 'ir_actions_server: TODO')
self.assertEqual(country.code, 'TE', 'ir_actions_server: TODO')
def test_20_crud_create_link_many2one(self):
_city = 'TestCity'
_name = 'TestNew'
# Do: create a new record in the same model and link it with a many2one
self.action.write({
'state': 'object_create',
'crud_model_id': self.action.model_id.id,
'link_field_id': self.res_partner_parent_field.id,
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field.id, 'value': _name}),
(0, 0, {'col1': self.res_partner_city_field.id, 'value': _city})],
})
run_res = self.action.with_context(self.context).run()
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
partner = self.test_partner.search([('name', 'ilike', _name)])
self.assertEqual(len(partner), 1, 'ir_actions_server: TODO')
self.assertEqual(partner.city, _city, 'ir_actions_server: TODO')
# Test: new partner linked
self.assertEqual(self.test_partner.parent_id, partner, 'ir_actions_server: TODO')
def test_20_crud_create_link_one2many(self):
_name = 'TestNew'
# Do: create a new record in the same model and link it with a one2many
self.action.write({
'state': 'object_create',
'crud_model_id': self.action.model_id.id,
'link_field_id': self.res_partner_children_field.id,
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field.id, 'value': _name})],
})
run_res = self.action.with_context(self.context).run()
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new partner created
partner = self.test_partner.search([('name', 'ilike', _name)])
self.assertEqual(len(partner), 1, 'ir_actions_server: TODO')
self.assertEqual(partner.name, _name, 'ir_actions_server: TODO')
# Test: new partner linked
self.assertIn(partner, self.test_partner.child_ids, 'ir_actions_server: TODO')
def test_20_crud_create_link_many2many(self):
# Do: create a new record in another model
self.action.write({
'state': 'object_create',
'crud_model_id': self.res_partner_category_model.id,
'link_field_id': self.res_partner_category_field.id,
'fields_lines': [(0, 0, {'col1': self.res_partner_category_name_field.id, 'value': 'record.name', 'evaluation_type': 'equation'})],
})
run_res = self.action.with_context(self.context).run()
self.assertFalse(run_res, 'ir_actions_server: create record action correctly finished should return False')
# Test: new category created
category = self.env['res.partner.category'].search([('name', 'ilike', 'TestingPartner')])
self.assertEqual(len(category), 1, 'ir_actions_server: TODO')
self.assertIn(category, self.test_partner.category_id)
def test_30_crud_write(self):
_name = 'TestNew'
# Do: update partner name
self.action.write({
'state': 'object_write',
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field.id, 'value': _name})],
})
run_res = self.action.with_context(self.context).run()
self.assertFalse(run_res, 'ir_actions_server: write record action correctly finished should return False')
# Test: partner updated
partner = self.test_partner.search([('name', 'ilike', _name)])
self.assertEqual(len(partner), 1, 'ir_actions_server: TODO')
self.assertEqual(partner.city, 'OrigCity', 'ir_actions_server: TODO')
@mute_logger('odoo.addons.base.models.ir_model', 'odoo.models')
def test_40_multi(self):
# Data: 2 server actions that will be nested
action1 = self.action.create({
'name': 'Subaction1',
'sequence': 1,
'model_id': self.res_partner_model.id,
'state': 'code',
'code': 'action = {"type": "ir.actions.act_window"}',
})
action2 = self.action.create({
'name': 'Subaction2',
'sequence': 2,
'model_id': self.res_partner_model.id,
'crud_model_id': self.res_partner_model.id,
'state': 'object_create',
'fields_lines': [(0, 0, {'col1': self.res_partner_name_field.id, 'value': 'RaoulettePoiluchette'}),
(0, 0, {'col1': self.res_partner_city_field.id, 'value': 'TestingCity'})],
})
action3 = self.action.create({
'name': 'Subaction3',
'sequence': 3,
'model_id': self.res_partner_model.id,
'state': 'code',
'code': 'action = {"type": "ir.actions.act_url"}',
})
self.action.write({
'state': 'multi',
'child_ids': [(6, 0, [action1.id, action2.id, action3.id])],
})
# Do: run the action
res = self.action.with_context(self.context).run()
# Test: new partner created
# currently res_partner overrides default['name'] whatever its value
partner = self.test_partner.search([('name', 'ilike', 'RaoulettePoiluchette')])
self.assertEqual(len(partner), 1)
# Test: action returned
self.assertEqual(res.get('type'), 'ir.actions.act_url')
# Test loops
with self.assertRaises(ValidationError):
self.action.write({
'child_ids': [(6, 0, [self.action.id])]
})
def test_50_groups(self):
""" check the action is returned only for groups dedicated to user """
Actions = self.env['ir.actions.actions']
group0 = self.env['res.groups'].create({'name': 'country group'})
self.context = {
'active_model': 'res.country',
'active_id': self.test_country.id,
}
# Do: update model and group
self.action.write({
'model_id': self.res_country_model.id,
'binding_model_id': self.res_country_model.id,
'groups_id': [(4, group0.id, 0)],
'code': 'record.write({"vat_label": "VatFromTest"})',
})
# Test: action is not returned
bindings = Actions.get_bindings('res.country')
self.assertFalse(bindings)
with self.assertRaises(AccessError):
self.action.with_context(self.context).run()
self.assertFalse(self.test_country.vat_label)
# add group to the user, and test again
self.env.user.write({'groups_id': [(4, group0.id)]})
bindings = Actions.get_bindings('res.country')
self.assertItemsEqual(bindings.get('action'), self.action.read())
self.action.with_context(self.context).run()
self.assertEqual(self.test_country.vat_label, 'VatFromTest', 'vat label should be changed to VatFromTest')
def test_60_sort(self):
""" check the actions sorted by sequence """
Actions = self.env['ir.actions.actions']
# Do: update model
self.action.write({
'model_id': self.res_country_model.id,
'binding_model_id': self.res_country_model.id,
})
self.action2 = self.action.copy({'name': 'TestAction2', 'sequence': 1})
# Test: action returned by sequence
bindings = Actions.get_bindings('res.country')
self.assertEqual([vals.get('name') for vals in bindings['action']], ['TestAction2', 'TestAction'])
self.assertEqual([vals.get('sequence') for vals in bindings['action']], [1, 5])
def test_70_copy_action(self):
# first check that the base case (reset state) works normally
r = self.env['ir.actions.todo'].create({
'action_id': self.action.id,
'state': 'done',
})
self.assertEqual(r.state, 'done')
self.assertEqual(
r.copy().state, 'open',
"by default state should be reset by copy"
)
# then check that on server action we've changed that
self.assertEqual(
self.action.copy().state, 'code',
"copying a server action should not reset the state"
)
def test_80_permission(self):
self.action.write({
'state': 'code',
'code': """record.write({'date': datetime.date.today()})""",
})
user_demo = self.env.ref("base.user_demo")
self_demo = self.action.with_user(user_demo.id)
# can write on contact partner
self.test_partner.type = "contact"
self.test_partner.with_user(user_demo.id).check_access_rule("write")
self_demo.with_context(self.context).run()
self.assertEqual(self.test_partner.date, date.today())
# but can not write on private address
self.test_partner.type = "private"
with self.assertRaises(AccessError):
self.test_partner.with_user(user_demo.id).check_access_rule("write")
# nor execute a server action on it
with self.assertRaises(AccessError), mute_logger('odoo.addons.base.models.ir_actions'):
self_demo.with_context(self.context).run()
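# Tests for custom ("x_") fields created through ir.model.fields: naming and
# uniqueness constraints, renaming and removal (with or without dependent views
# and fields), and binary, related and selection field behaviour.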
class TestCustomFields(common.TransactionCase):
MODEL = 'res.partner'
COMODEL = 'res.users'
def setUp(self):
# check that the registry is properly reset
registry = odoo.registry()
fnames = set(registry[self.MODEL]._fields)
@self.addCleanup
def check_registry():
assert set(registry[self.MODEL]._fields) == fnames
super(TestCustomFields, self).setUp()
# use a test cursor instead of a real cursor
self.registry.enter_test_mode(self.cr)
self.addCleanup(self.registry.leave_test_mode)
def create_field(self, name, *, field_type='char'):
""" create a custom field and return it """
model = self.env['ir.model'].search([('model', '=', self.MODEL)])
field = self.env['ir.model.fields'].create({
'model_id': model.id,
'name': name,
'field_description': name,
'ttype': field_type,
})
self.assertIn(name, self.env[self.MODEL]._fields)
return field
def create_view(self, name):
""" create a view with the given field name """
return self.env['ir.ui.view'].create({
'name': 'yet another view',
'model': self.MODEL,
'arch': '<tree string="X"><field name="%s"/></tree>' % name,
})
def test_create_custom(self):
""" custom field names must be start with 'x_' """
with self.assertRaises(ValidationError):
self.create_field('foo')
def test_rename_custom(self):
""" custom field names must be start with 'x_' """
field = self.create_field('x_foo')
with self.assertRaises(ValidationError):
field.name = 'foo'
def test_create_valid(self):
""" field names must be valid pg identifiers """
with self.assertRaises(ValidationError):
self.create_field('x_foo bar')
def test_rename_valid(self):
""" field names must be valid pg identifiers """
field = self.create_field('x_foo')
with self.assertRaises(ValidationError):
field.name = 'x_foo bar'
def test_create_unique(self):
""" one cannot create two fields with the same name on a given model """
self.create_field('x_foo')
with self.assertRaises(IntegrityError), mute_logger('odoo.sql_db'):
self.create_field('x_foo')
def test_rename_unique(self):
""" one cannot create two fields with the same name on a given model """
field1 = self.create_field('x_foo')
field2 = self.create_field('x_bar')
with self.assertRaises(IntegrityError), mute_logger('odoo.sql_db'):
field2.name = field1.name
def test_remove_without_view(self):
""" try removing a custom field that does not occur in views """
field = self.create_field('x_foo')
field.unlink()
def test_rename_without_view(self):
""" try renaming a custom field that does not occur in views """
field = self.create_field('x_foo')
field.name = 'x_bar'
@mute_logger('odoo.addons.base.models.ir_ui_view')
def test_remove_with_view(self):
""" try removing a custom field that occurs in a view """
field = self.create_field('x_foo')
self.create_view('x_foo')
# try to delete the field, this should fail but not modify the registry
with self.assertRaises(UserError):
field.unlink()
self.assertIn('x_foo', self.env[self.MODEL]._fields)
@mute_logger('odoo.addons.base.models.ir_ui_view')
def test_rename_with_view(self):
""" try renaming a custom field that occurs in a view """
field = self.create_field('x_foo')
self.create_view('x_foo')
# try to rename the field, this should fail but not modify the registry
with self.assertRaises(UserError):
field.name = 'x_bar'
self.assertIn('x_foo', self.env[self.MODEL]._fields)
def test_unlink_with_inverse(self):
""" create a custom o2m and then delete its m2o inverse """
model = self.env['ir.model']._get(self.MODEL)
comodel = self.env['ir.model']._get(self.COMODEL)
m2o_field = self.env['ir.model.fields'].create({
'model_id': comodel.id,
'name': 'x_my_m2o',
'field_description': 'my_m2o',
'ttype': 'many2one',
'relation': self.MODEL,
})
o2m_field = self.env['ir.model.fields'].create({
'model_id': model.id,
'name': 'x_my_o2m',
'field_description': 'my_o2m',
'ttype': 'one2many',
'relation': self.COMODEL,
'relation_field': m2o_field.name,
})
# normal mode: you cannot break dependencies
with self.assertRaises(UserError):
m2o_field.unlink()
# uninstall mode: unlink dependent fields
m2o_field.with_context(_force_unlink=True).unlink()
self.assertFalse(o2m_field.exists())
def test_unlink_with_dependant(self):
""" create a computed field, then delete its dependency """
# Also applies to compute fields
comodel = self.env['ir.model'].search([('model', '=', self.COMODEL)])
field = self.create_field('x_my_char')
dependant = self.env['ir.model.fields'].create({
'model_id': comodel.id,
'name': 'x_oh_boy',
'field_description': 'x_oh_boy',
'ttype': 'char',
'related': 'partner_id.x_my_char',
})
# normal mode: you cannot break dependencies
with self.assertRaises(UserError):
field.unlink()
# uninstall mode: unlink dependent fields
field.with_context(_force_unlink=True).unlink()
self.assertFalse(dependant.exists())
def test_create_binary(self):
""" binary custom fields should be created as attachment=True to avoid
bloating the DB when creating e.g. image fields via studio
"""
self.create_field('x_image', field_type='binary')
custom_binary = self.env[self.MODEL]._fields['x_image']
self.assertTrue(custom_binary.attachment)
def test_related_field(self):
""" create a custom related field, and check filled values """
#
# Add a custom field equivalent to the following definition:
#
# class Partner(models.Model)
# _inherit = 'res.partner'
# x_oh_boy = fields.Char(related="country_id.code", store=True)
#
# pick N=100 records in comodel
countries = self.env['res.country'].search([('code', '!=', False)], limit=100)
self.assertEqual(len(countries), 100, "Not enough records in comodel 'res.country'")
# create records in model, with N distinct values for the related field
partners = self.env['res.partner'].create([
{'name': country.code, 'country_id': country.id} for country in countries
])
partners.flush()
# determine how many queries it takes to create a non-computed field
query_count = self.cr.sql_log_count
self.env['ir.model.fields'].create({
'model_id': self.env['ir.model']._get_id('res.partner'),
'name': 'x_oh_box',
'field_description': 'x_oh_box',
'ttype': 'char',
})
query_count = self.cr.sql_log_count - query_count
# create the related field, and assert it only takes one extra query
with self.assertQueryCount(query_count + 1):
self.env['ir.model.fields'].create({
'model_id': self.env['ir.model']._get_id('res.partner'),
'name': 'x_oh_boy',
'field_description': 'x_oh_boy',
'ttype': 'char',
'related': 'country_id.code',
'store': True,
})
# check the computed values
for partner in partners:
self.assertEqual(partner.x_oh_boy, partner.country_id.code)
def test_selection(self):
""" custom selection field """
Model = self.env[self.MODEL]
model = self.env['ir.model'].search([('model', '=', self.MODEL)])
field = self.env['ir.model.fields'].create({
'model_id': model.id,
'name': 'x_sel',
'field_description': "Custom Selection",
'ttype': 'selection',
'selection_ids': [
(0, 0, {'value': 'foo', 'name': 'Foo', 'sequence': 0}),
(0, 0, {'value': 'bar', 'name': 'Bar', 'sequence': 1}),
],
})
x_sel = Model._fields['x_sel']
self.assertEqual(x_sel.type, 'selection')
self.assertEqual(x_sel.selection, [('foo', 'Foo'), ('bar', 'Bar')])
# add selection value 'baz'
field.selection_ids.create({
'field_id': field.id, 'value': 'baz', 'name': 'Baz', 'sequence': 2,
})
x_sel = Model._fields['x_sel']
self.assertEqual(x_sel.type, 'selection')
self.assertEqual(x_sel.selection, [('foo', 'Foo'), ('bar', 'Bar'), ('baz', 'Baz')])
# assign values to records
rec1 = Model.create({'name': 'Rec1', 'x_sel': 'foo'})
rec2 = Model.create({'name': 'Rec2', 'x_sel': 'bar'})
rec3 = Model.create({'name': 'Rec3', 'x_sel': 'baz'})
self.assertEqual(rec1.x_sel, 'foo')
self.assertEqual(rec2.x_sel, 'bar')
self.assertEqual(rec3.x_sel, 'baz')
# remove selection value 'foo'
field.selection_ids[0].unlink()
x_sel = Model._fields['x_sel']
self.assertEqual(x_sel.type, 'selection')
self.assertEqual(x_sel.selection, [('bar', 'Bar'), ('baz', 'Baz')])
self.assertEqual(rec1.x_sel, False)
self.assertEqual(rec2.x_sel, 'bar')
self.assertEqual(rec3.x_sel, 'baz')
# update selection value 'bar'
field.selection_ids[0].value = 'quux'
x_sel = Model._fields['x_sel']
self.assertEqual(x_sel.type, 'selection')
self.assertEqual(x_sel.selection, [('quux', 'Bar'), ('baz', 'Baz')])
self.assertEqual(rec1.x_sel, False)
self.assertEqual(rec2.x_sel, 'quux')
self.assertEqual(rec3.x_sel, 'baz')
|
import cv2
import time
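
# Step through a parking-lot video, detect external contours on Canny edges,
# and save sufficiently large, reasonably solid regions as 32x32 crops under
# ./images/ (file names encode solidity, vertical position and frame/contour counters).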
cap = cv2.VideoCapture('C:/Users/Administrator/Documents/GOMCam/parking.mp4')
cap.set(3, 800)   # 3 = CAP_PROP_FRAME_WIDTH
cap.set(4, 448)   # 4 = CAP_PROP_FRAME_HEIGHT

# The original checks called cap.read(), which returns a (ret, frame) tuple
# (always truthy) and consumes a frame each time; cap.isOpened() is the
# intended test for whether the video could be opened.
if not cap.isOpened():
    print("none")

if cap.isOpened():
    count_2 = 0
    while True:
        time.sleep(0.3)
        count_2 += 1
        ret, img = cap.read()
        if img is None:
            print("image is none")
        else:
            img5 = cv2.GaussianBlur(img, (5, 5), 0)
            gray = cv2.cvtColor(img5, cv2.COLOR_BGR2GRAY)
            edges = cv2.Canny(gray, 52, 104, apertureSize=3)
            # Three-value return is the OpenCV 3.x API; OpenCV 4.x returns only (contours, hierarchy).
            image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            count = 0
            for cnt in contours:
                (x, y, w, h) = cv2.boundingRect(cnt)
                le = max(w, h) + 10
                area = cv2.contourArea(cnt)        # contour area
                hull = cv2.convexHull(cnt)         # convex hull outline
                hull_area = cv2.contourArea(hull)  # convex hull area
                if hull_area > 0:
                    solidity = int(100 * area / hull_area)
                    if solidity > 0 and w > 42 and h > 42:
                        # expand the bounding box to a square of side `le`, centred on the contour
                        x_1 = int(x + (w - le) / 2)
                        x_2 = int(x + (w + le) / 2)
                        y_1 = int(y + (h - le) / 2)
                        y_2 = int(y + (h + le) / 2)
                        if x_1 > 300 and 290 > y_2 and 185 > y_1 > 80:
                            cv2.rectangle(img, (x_1, y_1), (x_2, y_2), (255, 0, 0), 4)
                            img_trim = img[y_1:y_2, x_1:x_2]
                            img_trim = cv2.resize(img_trim, (32, 32))
                            name = "./images/" + str(solidity) + "_" + str(y_1) + "_" + str(y_2) + "_" + str(count) + "_" + str(count_2) + "_.png"
                            cv2.imwrite(name, img_trim)
                            count += 1
            # waitKey(0) pauses on every processed frame; press space to jump to the next frame
            if cv2.waitKey(0) & 0xFF == ord(' '):
                continue
            cv2.imshow('img', img)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
    cap.release()
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("fail")
|