import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.model_selection import KFold
from mlens.visualization import corrmat
from utils.io_utils import save_csv, pickle_file, FITTED_MODEL_PATH, REPORT_PATH, REPORT_FORMAT, \
tag_filename_with_datetime, DUMP_FORMAT
def log_rmse_scorer(y_true, y_pred, **kwargs):
return np.sqrt(mean_squared_error(np.log(y_true), np.log(y_pred), **kwargs))
def difference_loss(y_true, y_pred):
return np.subtract(y_true, y_pred)
LOG_RMSE_SCORER = make_scorer(log_rmse_scorer, greater_is_better=False)
def check_models_correlation(models, x_train, y_train):
models_losses = pd.DataFrame()
for m in models:
models_losses[m.steps[-1][0]] = cv_loss(m, x_train, y_train)
corrmat(models_losses.corr(method='pearson'))
def cv_loss(model, x_train, y_train, loss=difference_loss):
cv_gen = KFold()
losses = np.empty(shape=[len(x_train)])
for train_idx, val_idx in cv_gen.split(x_train):
model.fit(x_train.iloc[train_idx], y_train[train_idx])
losses[val_idx] = (loss(y_train[val_idx], model.predict(x_train.iloc[val_idx])))
return losses
def store_cv_results(cv_results, basic_filename, additional_info=''):
PARAM_NAMES = ['params', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'split3_train_score',
'split4_train_score', 'mean_train_score', 'std_train_score']
cv_results = {i: cv_results[i] for i in cv_results if i not in PARAM_NAMES}
results_report = pd.DataFrame.from_dict(cv_results)
filename = tag_filename_with_datetime(basic_filename, REPORT_FORMAT, additional_info)
save_csv(results_report, filename, REPORT_PATH)
def store_fitted_model(fitted_model, additional_info=''):
basic_filename = type(fitted_model).__name__
filename = tag_filename_with_datetime(basic_filename, DUMP_FORMAT, additional_info)
pickle_file(fitted_model, filename, FITTED_MODEL_PATH)
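# Usage sketch (an addition for illustration, not part of the original module): LOG_RMSE_SCORER
# plugs straight into sklearn's model-selection helpers, provided targets and predictions are
# positive, since log_rmse_scorer takes logs of both:
#
#   from sklearn.model_selection import cross_val_score
#   scores = cross_val_score(model, x_train, y_train, scoring=LOG_RMSE_SCORER, cv=5)
#   # scores come back negated because make_scorer(..., greater_is_better=False) flips the sign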
|
class Solution(object):
def XXX(self, root):
self.res = True
def search_depth(node):
if not node:
return 0
left_depth = search_depth(node.left) + 1
right_depth = search_depth(node.right) + 1
ans = abs(left_depth - right_depth)
if ans > 1:
self.res = False
return 0
return max(left_depth, right_depth)
search_depth(root)
return self.res
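# Minimal usage sketch (an addition for illustration; it assumes tree nodes only need .left and
# .right attributes, which is all the traversal above touches):
class _Node:
    def __init__(self, left=None, right=None):
        self.left = left
        self.right = right

if __name__ == "__main__":
    balanced = _Node(_Node(_Node()), _Node())  # subtree depths differ by at most 1 everywhere
    skewed = _Node(_Node(_Node(_Node())))      # one branch is a chain of depth 4
    print(Solution().XXX(balanced))  # True
    print(Solution().XXX(skewed))    # False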
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
import typing
from pydantic import Field
from ..base_object import BaseObject
class SetChatDescription(BaseObject):
"""
Changes information about a chat. Available for basic groups, supergroups, and channels. Requires can_change_info administrator right
:param chat_id: Identifier of the chat
:type chat_id: :class:`int`
:param param_description: New chat description; 0-255 characters, defaults to None
:type param_description: :class:`str`, optional
"""
ID: str = Field("setChatDescription", alias="@type")
chat_id: int
param_description: typing.Optional[str] = Field(None, max_length=255)
@staticmethod
def read(q: dict) -> SetChatDescription:
return SetChatDescription.construct(**q)
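# Hedged usage sketch (illustrative values only; ID is filled from its default defined above):
#   req = SetChatDescription(chat_id=123456789, param_description="Weekly planning chat")
#   print(req.ID, req.chat_id, req.param_description)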
|
# Write a Python program to get the volume of a sphere with radius input from user.
import math
r = int(input("Enter a radius: "))
constant = 4 / 3
ans = constant * math.pi * math.pow(r, 3)
print(ans)
|
__prog__ = 's3pypi'
__version__ = u'0.7.1'
|
from collections import OrderedDict
import json
from autobahn.twisted.websocket import WebSocketClientFactory
from delorean import Delorean
import treq
from twisted.internet import defer, reactor
from twisted.python import log
import gryphon.data_service.consts as consts
import gryphon.data_service.util as util
from gryphon.data_service.websocket_client import EmeraldWebSocketClientProtocol
from gryphon.data_service.pollers.orderbook.websocket.websocket_orderbook_poller import WebsocketOrderbookPoller
from gryphon.lib.logger import get_logger
logger = get_logger(__name__)
class BitstampOrderbookWebsocket(EmeraldWebSocketClientProtocol, WebsocketOrderbookPoller):
def __init__(self):
self.exchange_name = u'BITSTAMP_BTC_USD'
def connect_to_websocket(self):
logger.info('Connecting to websocket')
factory = WebSocketClientFactory(
'ws://ws.pusherapp.com:80/app/de504dc5763aeef9ff52?protocol=6',
debug=False,
)
        # Set the protocol class; the factory creates a new BitstampOrderbookWebsocket instance on connect.
factory.protocol = type(self)
reactor.connectTCP("ws.pusherapp.com", 80, factory)
def subscribe_to_websocket(self):
logger.info('Subscribing to websocket')
data = {
'channel': 'diff_order_book',
}
event = {
'event': 'pusher:subscribe',
'data': data,
}
self.sendMessage(json.dumps(event))
# REQUEST POLLER FUNCTIONS
def get_response(self, response):
"""
Reimplemented this and handle_response_error from the RequestPoller base class
because we need to be able to recover from errors that occur in the setup
process for a websocket poller. Previously an error in the initial REST call
would prevent the poller from initializing properly.
"""
# Parse the Response and check for errors
d = treq.content(response)
# We want to add parse_float=Decimal, but it currently breaks json writing
# in later code paths
d.addCallback(json.loads)
d.addCallback(self.parse_response)
d.addErrback(self.handle_response_error, response)
return d
def handle_response_error(self, error_text, response, *args, **kwargs):
d = treq.text_content(response).addCallback(
self.log_response,
u'Error in Response from URL:%s %s' % (self.url, error_text),
log.err,
)
d.addCallback(self.retry_request)
def retry_request(self, *args):
log.msg('Retrying initial orderbook call - Looking for Orderbook Timestamp (%s) to be greater than First Change Timestamp (%s)' % (self.orderbook_timestamp, self.first_change_timestamp))
reactor.callLater(0.5, self.get_request)
def parse_response(self, resp_obj):
"""
This function will collect the baseline orderbook from the http endpoint.
{"timestamp": "1412095328", "bids": [["382.74", "4.85241530"],
"""
self.orderbook_timestamp = int(resp_obj['timestamp'])
if (self.orderbook_timestamp > self.first_change_timestamp
and self.message_count > self.message_count_buffer):
self.orderbook = {
'bids': self.parse_orders(resp_obj['bids']),
'asks': self.parse_orders(resp_obj['asks']),
}
else:
# Get the orderbook again since it was too old.
self.retry_request()
# WEBSOCKET CLIENT FUNCTIONS
@defer.inlineCallbacks
def onOpen(self):
logger.info('Connected to websocket')
self.url = 'https://priv-api.bitstamp.net/api/order_book/'
self.redis = yield util.setup_redis()
binding_key = '%s.orderbook.tinker' % self.exchange_name.lower()
self.producer = yield util.setup_producer(consts.ORDERBOOK_QUEUE, binding_key)
# Reset everything
self.orderbook = ''
self.orderbook_timestamp = None
self.orderbook_change_backlog = {}
self.first_change_timestamp = None
self.message_count = 0
self.message_count_buffer = 3
self.last_amqp_push = 0
yield self.redis.set(self.orderbook_key, None)
        # Start fetching the base orderbook from self.url; the request poller will call
        # parse_response with the response.
self.get_request()
self.subscribe_to_websocket()
@defer.inlineCallbacks
def onMessage(self, payload, isBinary):
payload = json.loads(payload)
if payload['event'] == u'data':
should_continue = yield self.check_should_continue()
if not should_continue:
yield self.redis.set(self.orderbook_key, None)
return
self.message_count += 1
orderbook_change = json.loads(payload['data'])
orderbook_change['bids'] = [
['{0:.2f}'.format(float(b[0])), b[1]] for b in orderbook_change['bids']
]
orderbook_change['asks'] = [
['{0:.2f}'.format(float(b[0])), b[1]] for b in orderbook_change['asks']
]
current_timestamp = Delorean().epoch
if not self.first_change_timestamp:
self.first_change_timestamp = current_timestamp
if self.orderbook and not self.orderbook_change_backlog:
# NO Backlog of changes, apply the change.
self.apply_change_to_orderbook(orderbook_change)
self.publish_orderbook()
return
elif self.orderbook and self.orderbook_change_backlog:
log.msg('Working down the backlog')
# Adding current update to backlog.
self.orderbook_change_backlog[current_timestamp] = orderbook_change
# Working down the backlog.
orderbook_backlog_timestamps = sorted(
self.orderbook_change_backlog.keys(),
)
for ts in orderbook_backlog_timestamps:
if ts > self.orderbook_timestamp:
log.msg('Applying Change from TS:%s to Orderbook TS:%s' % (
ts,
self.orderbook_timestamp,
))
self.apply_change_to_orderbook(
self.orderbook_change_backlog.pop(ts),
)
else:
# This update is too old. get rid of it.
self.orderbook_change_backlog.pop(ts)
self.publish_orderbook()
return
else:
log.msg('Bitstamp Orderbook Not Ready. Orderbook TS:%s must be > %s' % (
self.orderbook_timestamp,
self.first_change_timestamp,
))
current_timestamp = Delorean().epoch
self.orderbook_change_backlog[current_timestamp] = orderbook_change
return
else:
log.msg('Payload: %s' % payload)
# HELPER FUNCTIONS
def apply_change_to_orderbook(self, change):
bids_changes = self.parse_orders(change['bids'])
asks_changes = self.parse_orders(change['asks'])
# Remove the 0 volumes from the orderbook.
self.orderbook['bids'].update(bids_changes)
        for k, v in self.orderbook['bids'].items():  # items() returns a copy, so pop() below is safe
if v == "0":
self.orderbook['bids'].pop(k)
# Re-sort the bids.
self.orderbook['bids'] = OrderedDict(sorted(
self.orderbook['bids'].iteritems(),
key=lambda (k, v): float(k),
reverse=True,
))
self.orderbook['asks'].update(asks_changes)
        for k, v in self.orderbook['asks'].items():  # items() returns a copy, so pop() below is safe
if v == "0":
self.orderbook['asks'].pop(k)
# Re-sort the asks.
self.orderbook['asks'] = OrderedDict(
sorted(self.orderbook['asks'].iteritems(), key=lambda (k, v): float(k)),
)
def parse_orders(self, orders):
"""Returns a price keyed orders"""
orders_dict = OrderedDict()
for order in orders:
orders_dict[order[0]] = order[1]
return orders_dict
def get_orderbook_to_publish(self):
"""Returns a string keyed orderbook from price keyed"""
price_key_orderbook = self.orderbook
return {
'bids': [[k, v, ''] for k, v in price_key_orderbook['bids'].iteritems()],
'asks': [[k, v, ''] for k, v in price_key_orderbook['asks'].iteritems()],
}
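    # Example of the parse_orders() transformation used above (price strings become dict keys):
    #   parse_orders([["382.74", "4.85241530"], ["382.70", "1.0"]])
    #   -> OrderedDict([("382.74", "4.85241530"), ("382.70", "1.0")])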
|
''' This is the script to make figure 1 of the paper.
This script is a good example of how to implement 1-loop calculations.
See line 24 (or thereabouts) for the call to FAST-PT.
J. E. McEwen
email: jmcewen314@gmail.com
'''
import numpy as np
#from matter_power_spt import one_loop
import FASTPT
from time import time
# load the input power spectrum data
d=np.loadtxt('Pk_test.dat')
k=d[:,0]
P=d[:,1]
# use if you want to interpolate data
#from scipy.interpolate import interp1d
#power=interp1d(k,P)
#k=np.logspace(np.log10(k[0]),np.log10(k[-1]),3000)
#P=power(k)
#print d[:,0]-k
P_window=np.array([.2,.2])
C_window=.65
nu=-2; n_pad=1000
# initialize the FASTPT class
fastpt=FASTPT.FASTPT(k,nu,low_extrap=-5,high_extrap=5,n_pad=n_pad,verbose=True)
t1=time()
P_spt=fastpt.one_loop(P,C_window=C_window)
t2=time()
print('time', t2-t1)
print('To make a one-loop power spectrum for ', k.size, ' grid points, using FAST-PT takes ', t2-t1, 'seconds.')
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
fig=plt.figure(figsize=(16,10))
x1=10**(-2.5)
x2=10
ax1=fig.add_subplot(211)
ax1.set_ylim(1e-2,1e3)
ax1.set_xlim(x1,x2)
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_ylabel(r'$P_{22}(k)+ P_{13}(k)$ [Mpc/$h$]$^3$', size=30)
ax1.tick_params(axis='both', which='major', labelsize=30)
ax1.tick_params(axis='both', width=2, length=10)
ax1.tick_params(axis='both', which='minor', width=1, length=5)
ax1.xaxis.set_major_formatter(FormatStrFormatter('%2.2f'))
ax1.xaxis.labelpad = 20
ax1.set_xticklabels([])
ax1.plot(k,P_spt, lw=2,color='black', label=r'$P_{22}(k) + P_{13}(k)$, FAST-PT ' )
ax1.plot(k,-P_spt, '--',lw=2, color='black', alpha=.5 )
plt.grid()
ax2=fig.add_subplot(212)
ax2.set_xscale('log')
ax2.set_xlabel(r'$k$ [$h$/Mpc]', size=30)
ax2.set_ylim(.99,1.01)
ax2.set_xlim(x1,x2)
ax2.tick_params(axis='both', which='major', labelsize=30)
ax2.tick_params(axis='both', width=2, length=10)
ax2.tick_params(axis='both', which='minor', width=1, length=5)
ax2.xaxis.set_major_formatter(FormatStrFormatter('%2.2f'))
ax2.xaxis.labelpad = 20
ax2.plot(d[:,0],P_spt/(d[:,2]+d[:,3]),lw=2, color='black', alpha=.5, label='ratio to conventional method')
plt.legend(loc=3,fontsize=30)
plt.grid()
plt.tight_layout()
plt.show()
fig.savefig('example_plot.pdf')
|
#!/usr/bin/env python3
#https://codeforces.com/problemset/problem/1312/A
divok = lambda l: l[0]%l[1]==0
[print('YES' if divok(list(map(int,input().split()))) else 'NO') for _ in range(int(input()))]
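# Equivalent long-form version of the one-liner above, spelled out for readability:
# def main():
#     for _ in range(int(input())):
#         n, m = map(int, input().split())
#         print('YES' if n % m == 0 else 'NO')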
|
# -*- coding: utf-8 -*-
"""
Contains the Email class that handles generation of the email message
per configuration defined in the Coordinator class.
"""
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email import encoders
import os
from sys import getsizeof
class PayloadGenerator(object):
"""
This class is responsible for generating random payloads of different
types and sizes.
"""
def __init__(self, coordinator):
"""
Instantiate the PayloadGenerator object and link it to a Coordinator.
"""
self.coordinator = coordinator
self._lorem_ipsum = ''
def _load_lorem(self):
"""Check to see if we've loaded the lorem ipsum text, and if not,
load it."""
if self._lorem_ipsum != '':
return
with open('lorem.txt', 'r') as lorem:
lines = lorem.readlines()
for line in lines:
self._lorem_ipsum += line.strip()
def get_random_text(self, bytecount):
"""Return a chunk of text with a specified byte count."""
out = ""
i = 0
self._load_lorem()
while getsizeof(out) < bytecount:
if i >= len(self._lorem_ipsum):
i = 0
out += self._lorem_ipsum[i]
i += 1
return out
class Email(object):
"""
This class is responsible for constructing a MIMEMultipart message
given details defined in the Coordinator class and the Header class.
It is able to output the final email message as a string.
"""
def __init__(self, coordinator, headers):
"""Instantiate the Email object given the Coordinator and headers."""
self.coordinator = coordinator
self.headers = headers
self.mimemulti = MIMEMultipart()
def add_text(self, text):
"""Attach a chunk of text to the message."""
mimetext = MIMEText(text)
self.mimemulti.attach(mimetext)
def add_header(self, header, value, **options):
"""Add a header to the message header section."""
self.mimemulti.add_header(header, value, **options)
def add_attachment(self, filename):
"""Add a file attachment."""
# I'm absolutely sure I stole this code off stackoverflow somewhere
# about 2 years ago, but I have absolutely no idea where.
# Credit to StackOverflow for this method.
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(filename, 'rb').read())
encoders.encode_base64(part) # modifies in-place. magic.
filepath = os.path.basename(filename)
part.add_header('Content-Disposition',
'attachment; filename="{}"'.format(filepath))
self.mimemulti.attach(part)
def pull_data_from_coordinator(self):
"""Pull in the data from the coordinator."""
self.add_text(self.coordinator.contents['text'])
for attach in self.coordinator.contents['attach'].split(','):
if not attach:
continue
attach = attach.strip()
self.add_attachment(attach)
self.headers.dump_headers_to_email()
# subject is technically a header in MIME...
self.add_header('subject', self.coordinator.contents['subject'])
def getmime(self):
"""Returns the MIMEMultipart object."""
return self.mimemulti
def as_string(self):
"""Returns the stored email message as a string."""
return self.mimemulti.as_string()
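    # Hedged usage sketch (assumes a Coordinator/Headers pair wired up as this class expects):
    #   email = Email(coordinator, headers)
    #   email.add_text("Hello!")
    #   email.add_header("To", "someone@example.com")
    #   raw = email.as_string()  # ready to hand to e.g. smtplib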
|
import sys
import re
from pathlib import Path
p = re.compile('^[ぁ-ゟ]+')
path = Path(sys.argv[1])
name = path.name
print(name)
kpath = path.parent / f'K{name}'
hpath = path.parent / f'H{name}'
with path.open() as f:
with open(str(kpath), 'w') as kf:
with open(str(hpath), 'w') as hf:
for line in f:
if p.match(line):
hf.write(line)
else:
kf.write(line)
|
"""
Collection of PyTorch activation functions, wrapped to fit Ivy syntax and signature.
"""
from typing import Optional
# global
import numpy as np
import torch
# local
import ivy
def relu(x: torch.Tensor,
out: Optional[torch.Tensor] = None)\
-> torch.Tensor:
ret = torch.relu(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def leaky_relu(x: torch.Tensor, alpha: Optional[float] = 0.2)\
-> torch.Tensor:
return torch.nn.functional.leaky_relu(x, alpha)
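# The approximate branch of gelu() below implements the usual tanh approximation:
#   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))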
def gelu(x, approximate: bool = True):
if approximate:
return 0.5 * x * (1 + torch.tanh(((2 / np.pi) ** 0.5) * (x + 0.044715 * x ** 3)))
return torch.nn.functional.gelu(x)
def tanh(x: torch.Tensor)\
-> torch.Tensor:
return torch.tanh(x)
def sigmoid(x):
return torch.sigmoid(x)
def softmax(x, axis: int = -1):
return torch.softmax(x, axis)
def softplus(x: torch.Tensor)\
-> torch.Tensor:
return torch.nn.functional.softplus(x)
|
"""
URL configuration for django-cloudprojects.
"""
from django.conf import settings
from django.urls import include, path
urlpatterns = []
if 'django_saml' in settings.INSTALLED_APPS:
urlpatterns = [
path('saml/', include('django_saml.urls')),
]
urlpatterns += [
path('', include('allauth.urls')),
]
|
import pandas as pd
from constants import PARTS_DIR, DAY
from featnames import LOOKUP, START_TIME, END_TIME, START_PRICE, DEC_PRICE, \
ACC_PRICE, START_DATE
from processing.util import get_lstgs
from utils import topickle, input_partition, load_feats
def create_lookup(lstgs=None):
# load data
listings = load_feats('listings', lstgs=lstgs)
# start time instead of start date
start_time = listings[START_DATE].astype('int64') * DAY
start_time = start_time.rename(START_TIME)
# subset features
lookup = listings[[START_PRICE, DEC_PRICE, ACC_PRICE]]
lookup = pd.concat([lookup, start_time, listings[END_TIME]], axis=1)
return lookup
def main():
part = input_partition()
print('{}/{}'.format(part, LOOKUP))
lookup = create_lookup(lstgs=get_lstgs(part))
topickle(lookup, PARTS_DIR + '{}/{}.pkl'.format(part, LOOKUP))
if __name__ == "__main__":
main()
|
import torch
from torch import nn as nn, Tensor
import os
import pandas as pd
import numpy as np
class PartialSelectiveLoss(nn.Module):
def __init__(self, args):
super(PartialSelectiveLoss, self).__init__()
self.args = args
self.clip = args.clip
self.gamma_pos = args.gamma_pos
self.gamma_neg = args.gamma_neg
self.gamma_unann = args.gamma_unann
self.alpha_pos = args.alpha_pos
self.alpha_neg = args.alpha_neg
self.alpha_unann = args.alpha_unann
self.targets_weights = None
if args.prior_path is not None:
print("Prior file was found in given path.")
df = pd.read_csv(args.prior_path)
self.prior_classes = dict(zip(df.values[:, 0], df.values[:, 1]))
print("Prior file was loaded successfully. ")
def forward(self, logits, targets):
# Positive, Negative and Un-annotated indexes
targets_pos = (targets == 1).float()
targets_neg = (targets == 0).float()
targets_unann = (targets == -1).float()
# Activation
xs_pos = torch.sigmoid(logits)
xs_neg = 1.0 - xs_pos
if self.clip is not None and self.clip > 0:
xs_neg.add_(self.clip).clamp_(max=1)
prior_classes = None
if hasattr(self, "prior_classes"):
prior_classes = torch.tensor(list(self.prior_classes.values())).cuda()
targets_weights = self.targets_weights
targets_weights, xs_neg = edit_targets_parital_labels(self.args, targets, targets_weights, xs_neg,
prior_classes=prior_classes)
# Loss calculation
BCE_pos = self.alpha_pos * targets_pos * torch.log(torch.clamp(xs_pos, min=1e-8))
BCE_neg = self.alpha_neg * targets_neg * torch.log(torch.clamp(xs_neg, min=1e-8))
BCE_unann = self.alpha_unann * targets_unann * torch.log(torch.clamp(xs_neg, min=1e-8))
BCE_loss = BCE_pos + BCE_neg + BCE_unann
# Adding asymmetric gamma weights
with torch.no_grad():
asymmetric_w = torch.pow(1 - xs_pos * targets_pos - xs_neg * (targets_neg + targets_unann),
self.gamma_pos * targets_pos + self.gamma_neg * targets_neg +
self.gamma_unann * targets_unann)
BCE_loss *= asymmetric_w
# partial labels weights
BCE_loss *= targets_weights
return -BCE_loss.sum()
def edit_targets_parital_labels(args, targets, targets_weights, xs_neg, prior_classes=None):
    # targets_weights is an internal state of the AsymmetricLoss class; we don't want to re-allocate it every batch
if args.partial_loss_mode is None:
targets_weights = 1.0
elif args.partial_loss_mode == 'negative':
# set all unsure targets as negative
targets_weights = 1.0
elif args.partial_loss_mode == 'ignore':
# remove all unsure targets (targets_weights=0)
targets_weights = torch.ones(targets.shape, device=torch.device('cuda'))
targets_weights[targets == -1] = 0
elif args.partial_loss_mode == 'ignore_normalize_classes':
        # remove all unsure targets and normalize, following Durand et al. https://arxiv.org/pdf/1902.09720.pdf
alpha_norm, beta_norm = 1, 1
targets_weights = torch.ones(targets.shape, device=torch.device('cuda'))
n_annotated = 1 + torch.sum(targets != -1, axis=1) # Add 1 to avoid dividing by zero
g_norm = alpha_norm * (1 / n_annotated) + beta_norm
n_classes = targets_weights.shape[1]
targets_weights *= g_norm.repeat([n_classes, 1]).T
targets_weights[targets == -1] = 0
elif args.partial_loss_mode == 'selective':
if targets_weights is None or targets_weights.shape != targets.shape:
targets_weights = torch.ones(targets.shape, device=torch.device('cuda'))
else:
targets_weights[:] = 1.0
num_top_k = args.likelihood_topk * targets_weights.shape[0]
xs_neg_prob = xs_neg
if prior_classes is not None:
if args.prior_threshold:
idx_ignore = torch.where(prior_classes > args.prior_threshold)[0]
targets_weights[:, idx_ignore] = 0
targets_weights += (targets != -1).float()
targets_weights = targets_weights.bool()
negative_backprop_fun_jit(targets, xs_neg_prob, targets_weights, num_top_k)
# set all unsure targets as negative
# targets[targets == -1] = 0
return targets_weights, xs_neg
# @torch.jit.script
def negative_backprop_fun_jit(targets: Tensor, xs_neg_prob: Tensor, targets_weights: Tensor, num_top_k: int):
with torch.no_grad():
targets_flatten = targets.flatten()
cond_flatten = torch.where(targets_flatten == -1)[0]
targets_weights_flatten = targets_weights.flatten()
xs_neg_prob_flatten = xs_neg_prob.flatten()
ind_class_sort = torch.argsort(xs_neg_prob_flatten[cond_flatten])
targets_weights_flatten[
cond_flatten[ind_class_sort[:num_top_k]]] = 0
class ComputePrior:
def __init__(self, classes):
self.classes = classes
n_classes = len(self.classes)
self.sum_pred_train = torch.zeros(n_classes).cuda()
self.sum_pred_val = torch.zeros(n_classes).cuda()
self.cnt_samples_train, self.cnt_samples_val = .0, .0
self.avg_pred_train, self.avg_pred_val = None, None
self.path_dest = "./outputs"
self.path_local = "/class_prior/"
def update(self, logits, training=True):
with torch.no_grad():
preds = torch.sigmoid(logits).detach()
if training:
self.sum_pred_train += torch.sum(preds, axis=0)
self.cnt_samples_train += preds.shape[0]
self.avg_pred_train = self.sum_pred_train / self.cnt_samples_train
else:
self.sum_pred_val += torch.sum(preds, axis=0)
self.cnt_samples_val += preds.shape[0]
self.avg_pred_val = self.sum_pred_val / self.cnt_samples_val
def save_prior(self):
print('Prior (train), first 5 classes: {}'.format(self.avg_pred_train[:5]))
# Save data frames as csv files
if not os.path.exists(self.path_dest):
os.makedirs(self.path_dest)
df_train = pd.DataFrame({"Classes": list(self.classes.values()),
"avg_pred": self.avg_pred_train.cpu()})
df_train.to_csv(path_or_buf=os.path.join(self.path_dest, "train_avg_preds.csv"),
sep=',', header=True, index=False, encoding='utf-8')
if self.avg_pred_val is not None:
df_val = pd.DataFrame({"Classes": list(self.classes.values()),
"avg_pred": self.avg_pred_val.cpu()})
df_val.to_csv(path_or_buf=os.path.join(self.path_dest, "val_avg_preds.csv"),
sep=',', header=True, index=False, encoding='utf-8')
def get_top_freq_classes(self):
n_top = 10
top_idx = torch.argsort(-self.avg_pred_train.cpu())[:n_top]
top_classes = np.array(list(self.classes.values()))[top_idx]
print('Prior (train), first {} classes: {}'.format(n_top, top_classes))
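# Hedged usage sketch for PartialSelectiveLoss (the args fields mirror what __init__ and
# edit_targets_parital_labels read; the values below are illustrative, not tuned defaults):
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(clip=0.05, gamma_pos=0, gamma_neg=1, gamma_unann=2,
#                          alpha_pos=1.0, alpha_neg=1.0, alpha_unann=1.0,
#                          prior_path=None, partial_loss_mode='negative',
#                          likelihood_topk=5, prior_threshold=None)
#   criterion = PartialSelectiveLoss(args)
#   logits = torch.randn(4, 10)
#   targets = torch.randint(-1, 2, (4, 10)).float()  # -1 marks un-annotated labels
#   loss = criterion(logits, targets)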
|
#!/usr/bin/env python3
import sys
import cv2
import rospy
import base64
import random
import numpy as np
sys.path.append(r'./database')
from database import *
from rosproject.srv import Img
from obj_detect import object_detection
def object_detect_server():
rospy.init_node('object_detect_server', anonymous = True)
rospy.Service('object_detect', Img, detect)
rospy.spin()
def detect(request):
image_b64 = request.input
# generate file name for image
file = generateImageName()
file += '.jpeg'
print(file)
    # origin, tagged and result paths
origin_image_path = 'database/origins/' + file
taged_image_path = 'database/tagged/' + file
result_image_path = 'database/results/' + file
print(len(result_image_path))
    # decode the original image from base64 to JPEG
imgdata = base64.b64decode(image_b64)
    img_np = np.frombuffer(imgdata, np.uint8)  # frombuffer replaces the deprecated fromstring for binary data
img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
# save jpeg image
cv2.imwrite(origin_image_path, img)
cv2.imwrite(taged_image_path, img)
print('Cloud server has successfully received image')
# object detection
image_b64 = object_detection(origin_image_path)
print('Cloud server has completed object detection')
    # decode the result image from base64 to JPEG
imgdata = base64.b64decode(image_b64)
    img_np = np.frombuffer(imgdata, np.uint8)
img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
cv2.imwrite(result_image_path, img)
    # insert records into the origins, tagged and results database tables
# connect to the database
mydb = database()
mydb.createTable()
mydb.insert("origins", "'" + origin_image_path + "'")
mydb.insert("tagged", "'" + taged_image_path + "', FALSE")
mydb.insert("results", "'" + result_image_path + "'")
print('Cloud server has inserted records into database!')
return image_b64
def generateImageName():
"""
generate a random string of length 32 for image name
"""
name = ''
nameLength = 32
    chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
length = len(chars) - 1
for i in range(nameLength):
name += chars[random.randint(0, length)]
return name
if __name__ == '__main__':
object_detect_server()
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
import find_newest_files
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import os
import sys
class Window(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
# -- inputs --------------------
vbox = QVBoxLayout() # vbox.setContentsMargins(2,2,2,2)
inpframe = QFrame()
inplayout = QGridLayout()
inplayout.setContentsMargins(0, 0, 0, 0)
inpframe.setLayout(inplayout)
folder = QLabel('Folder')
self.folderEdit = QLineEdit()
self.folderEdit.setText(os.getcwd())
iconfile = os.path.join(os.path.dirname(
__file__), '..', '..', 'ico', 'boomy-foldersearch.png')
folderBtn = QPushButton()
folderBtn.setIcon(QIcon(iconfile))
folderBtn.pressed.connect(self.chDir)
inplayout.addWidget(folder, 1, 0)
inplayout.addWidget(self.folderEdit, 1, 1)
inplayout.addWidget(folderBtn, 1, 2)
vbox.addWidget(inpframe)
# ------- console ----------------
self.stdout, sys.stdout = sys.stdout, self
        # self.stderr, sys.stderr = sys.stderr, self  # remove this line if it causes problems
self.buf = ''
self.console = QTextEdit()
font = QFontDatabase.systemFont(QFontDatabase.FixedFont)
self.console.setFont(font)
self.console.setLineWrapMode(QTextEdit.NoWrap)
self.console.setReadOnly(True)
vbox.addWidget(self.console)
# -- GO btn -----------------------
butframe = QFrame()
butlayout = QHBoxLayout()
butlayout.setContentsMargins(0, 0, 0, 0)
butframe.setLayout(butlayout)
butlayout.addStretch(1)
button = QPushButton(self.tr("Go!"))
butlayout.addWidget(button)
button.pressed.connect(self.process)
iconfile = os.path.join(os.path.dirname(
__file__), '..', '..', 'ico', 'boomy-play.png')
button.setIcon(QIcon(iconfile))
vbox.addWidget(butframe)
#
self.setLayout(vbox)
self.resize(600, 600)
def process(self):
self.console.clear()
try:
find_newest_files.newest((self.folderEdit.text(),))
except Exception as e:
print(e)
QMessageBox.information(self, 'Error', str(e))
def chDir(self):
dir = QFileDialog.getExistingDirectory(self, "Choose root folder")
if dir:
# opt.val = dir.replace('/',os.sep)
self.folderEdit.setText(dir)
def write(self, stuff):
#print >> self.stdout, "[write]: %s" % repr(stuff)
if '\n' in stuff:
list(map(self.writeLine, stuff.split("\n")))
else:
self.buf += stuff
qApp.processEvents()
def writeLine(self, stuff):
if len(self.buf):
stuff = self.buf + stuff
self.buf = ''
self.console.append(stuff)
else:
if stuff != '':
self.console.append(stuff)
#print >> self.stdout, "[writeLine]: %s" % repr(stuff)
def main():
app = QApplication(sys.argv)
win = Window()
win.setWindowTitle('find_newest_file')
iconfile = os.path.join(os.path.dirname(__file__),
'..', '..', 'ico', 'boomy-stats.png')
win.setWindowIcon(QIcon(iconfile))
win.show()
app.lastWindowClosed.connect(app.quit)
print("ready.")
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
import discord
from helpers import get_gif
commands = ["drinkcoffee"]
requires_mention = False
accepts_mention = False
description = "KAFFEE!!! :coffee:"
async def execute(message):
gif = get_gif('coffee', lmt=25, pos=0)
embed = discord.Embed()
embed.description = '<:metalcoffee:707941148777512966>'
embed.set_image(url=gif)
await message.channel.send(embed=embed)
|
from PIL import Image, ImageOps
def normalize_image(img):
"""Normalize image between 0 and 255.
Args:
img (PIL image): An image.
Returns:
PIL image: A normalized image.
Examples:
>>> img = Image.open('share/Lenna.png')
>>> vals = img.getextrema() #gets min and max value in the three channels
>>> vals
((54, 255), (3, 248), (8, 225))
>>> img_norm = normalize_image(img)
>>> vals = img_norm.getextrema()
>>> vals
((0, 255), (0, 255), (0, 254))
"""
return ImageOps.autocontrast(img)
def resize_image(img, new_width, new_height):
"""Resize image to a ``new_width`` and ``new_height``.
Args:
img (PIL image): An image.
new_width (int): New width.
new_height (int): New height.
Returns:
PIL image: A resized image.
Examples:
>>> img = Image.open('share/Lenna.png')
>>> img_resized = resize_image(img, 256, 256)
>>> img_resized.size
(256, 256)
"""
img_new = img.resize((new_width, new_height))
return img_new
def equalize_image(img):
"""Equalize the image histogram.
Args:
img (PIL image): An image.
Returns:
        PIL image: An equalized image.
Examples:
>>> img = Image.open('share/Lenna.png')
>>> img_eq = equalize_image(img)
"""
return ImageOps.equalize(img)
def crop_image(img, box):
"""Crop a rectangular region from an image.
Args:
img (PIL image): An image.
box (tuple): Left, upper, right, and lower pixel coordinate. The origin of coordinates is
the upper left square.
Returns:
PIL image: A cropped image.
Examples:
>>> img = Image.open('share/Lenna.png')
>>> box = (0, 100, 250, 400)
>>> img_crop = crop_image(img, box)
>>> img_crop.size
(250, 300)
"""
return img.crop(box)
def convert_to_grayscale(img):
"""Convert a color image to grayscale.
Args:
img (PIL image): An image.
Returns:
PIL image: A grayscale image.
Examples:
>>> img = Image.open('share/Lenna.png')
>>> img.mode
'RGB'
>>> img_gray = convert_to_grayscale(img)
>>> img_gray.mode
'L'
"""
return img.convert("L")
|
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_ALL_ORIGINS = True
|
from alpyro_msgs import RosMessage
from alpyro_msgs.actionlib_msgs.goalid import GoalID
from alpyro_msgs.nav_msgs.getmapgoal import GetMapGoal
from alpyro_msgs.std_msgs.header import Header
class GetMapActionGoal(RosMessage):
__msg_typ__ = "nav_msgs/GetMapActionGoal"
__msg_def__ = "c3RkX21zZ3MvSGVhZGVyIGhlYWRlcgogIHVpbnQzMiBzZXEKICB0aW1lIHN0YW1wCiAgc3RyaW5nIGZyYW1lX2lkCmFjdGlvbmxpYl9tc2dzL0dvYWxJRCBnb2FsX2lkCiAgdGltZSBzdGFtcAogIHN0cmluZyBpZApuYXZfbXNncy9HZXRNYXBHb2FsIGdvYWwKCg=="
__md5_sum__ = "4b30be6cd12b9e72826df56b481f40e0"
header: Header
goal_id: GoalID
goal: GetMapGoal
|
"""frontend.main"""
import json
from flask import Flask, request, render_template
from concurrent.futures import ThreadPoolExecutor
from agent_connector import Agent
tools = [
'exeinfope',
'pestudio'
]
app = Flask('hhfrontend')
executor = ThreadPoolExecutor(2)
agent_controllers = dict()
with open('agents.json', 'r') as f:
agents = json.loads(f.read())
@app.route('/')
def index():
return render_template('index.html', agents=agents)
@app.route('/agent/<agent_name>')
def agent(agent_name):
# TODO: handle bad agent name request
agent_data = agents[agent_name]
agent_controllers[agent_name] = Agent(agent_name, agent_data['ip'], agent_data['port'])
executor.submit(agent_controllers[agent_name].start)
return render_template('agent.html', tools=tools, agent_name=agent_name)
@app.route('/run/<agent_name>/<tool_name>')
def run(agent_name, tool_name):
agent_controllers[agent_name].tool_to_run = tool_name
    print(f'Running {agent_name} {tool_name}')
    return f'Running {tool_name} on {agent_name}'  # a Flask view must return a response
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000)
|
import csv,io
class CiteFile():
global csvfileinmem
csvfileinmem = None
def __init__(self):
self.filename = "C:\git\wikieupmcanalytics\citations-enwiki-20151002.csv"
self.csvfile = open(self.filename, "r", encoding='utf-8', newline='\r\n')
global csvfileinmem
if csvfileinmem == None:
csvfileinmem = self.csvfile.read()
self.csvreader = csv.reader(csvfileinmem.splitlines(), delimiter='\t')
def findPagesIDAppears(self, id ):
"""Given an ID return list of articles which cite it
:param id: string of id to search for
:return: list of articles
"""
if id[:3] == 'PMC':
id = id[3:]
articles = list()
for row in self.csvreader:
if row[5] == id and row[4] == 'pmc':
articles.append(row[1])
return articles
def findRowsWithIDType(self ,type, inlist = None):
if inlist == None: inlist = self.csvreader
outlist = list()
for row in inlist:
if row[4] == type:
outlist.append(row)
return outlist
def findRowsWithID(self, id, inlist = None):
if inlist == None: inlist = self.csvreader
outlist = list()
for row in inlist:
if row[5] == id:
outlist.append(row)
return outlist
def findRowsWithArticle(self, article, inlist = None):
if inlist == None: inlist = self.csvreader
        outlist = list()
        for row in inlist:
            if row[2] == article:
                outlist.append(row)
return outlist
|
import click
from eth_wallet.cli.utils_cli import (
get_api,
)
from eth_wallet.configuration import (
Configuration,
)
@click.command()
def list_tokens():
"""List all added tokens."""
configuration = Configuration().load_configuration()
api = get_api()
tokens = api.list_tokens(configuration)
click.echo('ETH')
for token in tokens:
click.echo('%s' % token)
|
from collections import defaultdict
def generate(c1,c2,bitlen):
y = 2**bitlen
a = c1 & ~y
b = c2 & ~y
    c = c1 // 2  # integer shift; true division would yield floats and break the bitwise ops below
    d = c2 // 2
return (a&~b&~c&~d) | (~a&b&~c&~d) | (~a&~b&c&~d) | (~a&~b&~c&d)
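# In other words: with c1 and c2 as the bit patterns of two adjacent grid columns, bit i of
# generate(c1, c2, bitlen) is set iff exactly one of the four cells c1[i], c1[i+1], c2[i], c2[i+1]
# is set, i.e. the "exactly one in each 2x2 block" rule whose preimages are counted below.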
def buildMap(n, generations):
mapping = defaultdict(set)
generations = set(generations)
for i in range(2**(n+1)):
for j in range(2**(n+1)):
generation = generate(i,j,n)
if generation in generations:
mapping[(generation, i)].add(j)
return mapping
def solution(g):
transpose = list(zip(*g)) # transpose
ncols = len(transpose[0])
# turn map into numbers
generations = []
for row in transpose:
vals = []
for i,col in enumerate(row):
if col:
vals.append(2**i)
else:
vals.append(0)
generations.append(sum(vals))
mapping = buildMap(ncols, generations)
preimage = defaultdict(int)
n = 2 ** (ncols+1)
for i in range(n):
preimage[i] = 1
for row in generations:
next_row = defaultdict(int)
for c1 in preimage:
for c2 in mapping[ (row, c1) ]:
next_row[c2] += preimage[c1]
preimage = next_row
return sum(preimage.values())
print(solution([[True, False, True], [False, True, False], [True, False, True]]))
|
"""
Demo/test program for the AE_Button driver.
See https://github.com/sensemakersamsterdam/astroplant_explorer
"""
#
# (c) Sensemakersams.org and others. See https://github.com/sensemakersamsterdam/astroplant_explorer
# Author: Gijs Mos
#
# Warning: if import of ae_* modules fails, then you need to set up PYTHONPATH.
# To test start python, import sys and type sys.path. The ae module directory
# should be included.
# This program shows some features of the Button library class
# From the standard time library we only require sleep()
from time import sleep
# And we need the definitions of our pins
from ae_drivers import AE_Pin
# And we need the Button driver class
from ae_drivers.button import AE_Button
# Then we define and initialize Button 1
btn1 = AE_Button('btn1', 'Just button 1', AE_Pin.D5)
btn1.setup()
# And Button 2
btn2 = AE_Button('btn2', 'Just button 2', AE_Pin.D6)
btn2.setup()
# And 3
# For this button we reverse the action. Use this if your button reports open
# as closed and vice versa.
btn3 = AE_Button('btn3', 'Just button 3', AE_Pin.D7, inverted=True)
btn3.setup()
print('Button demo.')
# Now demo some of the info features for all three buttons
print('btn1 prints as %s' % (btn1))
print(' and its long description is:', btn1.description)
print('btn2 prints as %s' % (btn2))
print(' and its long description is:', btn2.description)
print('btn3 prints as %s' % (btn3))
print(' and its long description is:', btn3.description)
print('\nType Cntl-C to exit this demo.')
print('Now press (and sometimes hold) buttons 1-3 a few times...')
try:
while True:
print('\nSleeping a while. Use your buttons.')
sleep(8)
        print('Now checking your buttons...')
# Cycle through all three of them.
# Each time through the loop btn acts as one of the buttons from the list.
        # Way more elegant than the straight repetition of code a bit above, where we printed
        # out each button's description.
for btn in [btn1, btn2, btn3]:
            print(' %s value()=%s.' % (btn.name, btn.value()))
            print(' %s pressed_count()=%s.' %
(btn.name, btn.pressed_count()))
            print(' %s last_press_duration()=%s' %
(btn.name, btn.last_press_duration()))
except KeyboardInterrupt:
print('\nBye bye...')
|
# TODO : strict minimal ipython REPL launcher to use aiokraken.
# Nice TUI|GUI can be done in another project.
# !/usr/bin/env python
# Ref : https://ipython.org/ipython-doc/stable/interactive/reference.html#embedding-ipython
"""
__main__ has only the code required to start IPython and provide interactive introspection of aiokraken while running
"""
import click
from aiokraken import RestClient
from aiokraken.assets import Assets
from aiokraken.rest import Server
# TODO : we should probably have a ipython terminal option (ipywidgets without notebook ?)
# Bokeh is also a solution for a server to provide visualization,
# AND an aiohttp server option to provide json feeds (like original server, visualized with some js single page app...)
def ipshell_embed_setup():
from traitlets.config.loader import Config
try:
get_ipython
except NameError:
nested = 0
cfg = Config()
else:
print("Running nested copies of IPython.")
print("The prompts for the nested copy have been modified")
cfg = Config()
nested = 1
# First import the embeddable shell class
from IPython.terminal.embed import InteractiveShellEmbed
# Now create an instance of the embeddable shell. The first argument is a
# string with options exactly as you would type them if you were starting
# IPython at the system command line. Any parameters you want to define for
# configuration can thus be specified here.
ipshell = InteractiveShellEmbed(config=cfg,
banner1='Dropping into IPython',
banner2='To introspect: %whos',
exit_msg='Leaving Interpreter, back to program.')
# Remember the dummy mode to disable all embedded shell calls!
return ipshell
@click.group()
def cli():
pass
@cli.command()
@click.option('--verbose', default=False)
@click.pass_context
def auth(ctx, verbose):
""" simple command to verify auth credentials and optionally store them. """
from aiokraken.config import KRAKEN_API_KEYFILE, load_api_keyfile, save_api_keyfile
# tentative loading of the API key
keystruct = load_api_keyfile()
if keystruct is None:
# no keyfile found
print(f"{KRAKEN_API_KEYFILE} not found !")
# TODO : check for interactive terminal...
apikey = input("APIkey: ")
secret = input("secret: ")
store = input(f"Store it in {KRAKEN_API_KEYFILE} [Y/n] ? ")
if not store:
store = 'Y'
if store in ['Y', 'y']:
keystruct = save_api_keyfile(apikey=apikey, secret=secret)
else:
keystruct = {'key': apikey, 'secret': secret}
# modifying parent context if present (to return)
if ctx.parent:
ctx.parent.params['apikey'] = keystruct.get('key')
ctx.parent.params['secret'] = keystruct.get('secret')
if verbose:
print(f"apikey: {ctx.apikey}")
print(f"secret: {ctx.secret}")
return 0 # exit status code
def one_shot(coro):
import asyncio
loop = asyncio.get_event_loop()
loop.run_until_complete(coro)
loop.close()
async def assets_run():
rest = RestClient(server=Server())
assets = Assets(blacklist=[])
res = await assets(rest_client=rest)
for k, p in res.items():
print(f" - {k}: {p}")
@cli.command()
@click.pass_context
def assets(ctx):
""" retrieve assets """
return one_shot(assets_run())
# For OHLC timeseries display : https://stackoverflow.com/questions/48361554/unknown-error-in-mpl-finance-candlestick-what-to-do
async def markets_run():
rest = RestClient(server=Server())
mkts = Markets()
await mkts()
for k, p in mkts.items():
print(f" - {k}: {p}")
@cli.command()
@click.pass_context
def markets(ctx):
""" retrieve markets"""
return one_shot(markets_run())
async def balance_run(key,secret):
rest = RestClient(server=Server(key=key,
secret=secret))
balance = Balance(blacklist=[])
await balance(rest_client=rest)
for k, p in balance.items():
print(f" - {k}: {p}")
@cli.command()
@click.option('--apikey', default=None)
@click.option('--secret', default=None)
@click.pass_context
def balance(ctx, apikey, secret):
""" retrieve balance for an authentified user"""
if apikey is None or secret is None:
ctx.invoke(auth, verbose=False) # this should fill up arguments
apikey = ctx.params.get('apikey')
secret = ctx.params.get('secret')
# ipshell = ipshell_embed_setup()
# ipshell(f"from {__name__}")
# TODO : some meaningful and useful link between markets and assets ?
return one_shot(balance_run(key=apikey, secret=secret))
# TODO : retrieve private data
# TODO : allow passing orders
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
# we use click to run a simple command
cli()
else:
# or we go full interactive mode (no args)
# First create a config object from the traitlets library
from traitlets.config import Config
c = Config()
# Now we can set options as we would in a config file:
# c.Class.config_value = value
# For example, we can set the exec_lines option of the InteractiveShellApp
# class to run some code when the IPython REPL starts
c.InteractiveShellApp.exec_lines = [
'print("\\nimporting aiokraken...\\n")',
'import aiokraken',
"aiokraken"
]
c.InteractiveShell.colors = 'LightBG'
c.InteractiveShell.confirm_exit = False
c.TerminalIPythonApp.display_banner = False
#TODO : %autoawait to easily run requests
# Now we start ipython with our configuration
import IPython
IPython.start_ipython(config=c, )
|
import itertools as it
import numpy as np
import tensorflow as tf
def get_cifar10(batch_size=16):
print("loading cifar10 data ... ")
from skdata.cifar10.dataset import CIFAR10
cifar10 = CIFAR10()
cifar10.fetch(True)
trn_labels = []
trn_pixels = []
for i in range(1,6):
data = cifar10.unpickle("data_batch_%d" % i)
trn_pixels.append(data['data'])
trn_labels.extend(data['labels'])
trn_pixels = np.vstack(trn_pixels)
trn_pixels = trn_pixels.reshape(-1, 3, 32, 32).astype(np.float32)
tst_data = cifar10.unpickle("test_batch")
tst_labels = tst_data["labels"]
tst_pixels = tst_data["data"]
tst_pixels = tst_pixels.reshape(-1, 3, 32, 32).astype(np.float32)
print("-- trn shape = %s" % list(trn_pixels.shape))
print("-- tst shape = %s" % list(tst_pixels.shape))
# transpose to tensorflow's bhwc order assuming bchw order
trn_pixels = trn_pixels.transpose(0, 2, 3, 1)
tst_pixels = tst_pixels.transpose(0, 2, 3, 1)
trn_set = batch_iterator(it.cycle(zip(trn_pixels, trn_labels)), batch_size, cycle=True, batch_fn=lambda x: zip(*x))
tst_set = (tst_pixels, np.array(tst_labels))
return trn_set, tst_set
def batch_iterator(iterable, size, cycle=False, batch_fn=lambda x: x):
"""
Iterate over a list or iterator in batches
"""
batch = []
    # loop back to the beginning upon reaching the end of the iterable, if the cycle flag is set
if cycle is True:
iterable = it.cycle(iterable)
for item in iterable:
batch.append(item)
if len(batch) >= size:
yield batch_fn(batch)
batch = []
if len(batch) > 0:
yield batch_fn(batch)
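# Quick illustration on a plain list:
#   list(batch_iterator(range(5), 2)) -> [[0, 1], [2, 3], [4]]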
if __name__ == '__main__':
trn, tst = get_cifar10()
|
from suds.client import Client
from suds import WebFault
class SoapHelper:
def __init__(self, app):
self.app = app
def take_array_project(self, username, password):
client = Client(self.app.service)
try:
client.service.mc_projects_get_user_accessible(username, password)
return list(client.service.mc_projects_get_user_accessible(username, password))
except WebFault:
return False
|
#
# (c) 2008-2020 Matthew Shaw
#
import sys
import os
import re
import logging
import nelly
from .scanner import Scanner
from .program import Program
from .types import *
class Parser(object):
def __init__(self, include_dirs=[]):
self.include_dirs = include_dirs + [ os.path.join(nelly.root, 'grammars') ]
self.pwd = []
# setup the scanner based on the regular expressions
self.scanner = Scanner(os.path.join(nelly.root, 'rules.lex'))
# container for the compiled program
self.program = Program()
self.tokens_stack = []
self.groups_stack = []
self.group_stack = []
self.groups = None
self.group = None
def Parse(self, grammarFile):
grammar = grammarFile.read()
self.pwd.append(os.path.dirname(grammarFile.name))
logging.debug('Parsing %s (%d bytes)', grammarFile.name, len(grammar))
self.tokens = self.scanner.Scan(grammar)
# keep a reference to the tokens for when included files are parsed
self.tokens_stack.append(self.tokens)
# iterate over all the tokens
while self.tokens:
(token,value,line,col) = self.tokens.Next()
# handle all the top-level tokens
if 'nonterminal' == token:
if value.startswith('::'):
value = value[2:]
self._nonterminal(Types.NONTERMINAL, value)
elif 'varterminal' == token:
if value.startswith('::'):
value = value[2:]
self._nonterminal(Types.VARTERMINAL, value)
elif 'include' == token:
self._include()
elif 'start_python_code' == token:
if r'<%pre' == value:
self.program.preamble.append(self._python_code('pre'))
elif r'<%post' == value:
self.program.postscript.append(self._python_code('post'))
else:
raise nelly.error('Please specify pre or post in code section')
elif 'start_comment' == token:
self._comment()
else:
raise nelly.error('Unhandled %s %s at %d:%d', token, repr(value), line, col)
self.tokens_stack.pop()
return self.program
def _nonterminal(self, _type, name):
# create a new container and add it to the program
nonterminal = Nonterminal(_type, name)
self.program.nonterminals[name] = nonterminal
(token,value,line,col) = self.tokens.Next()
# parse any optional arguments for the non-terminal
if 'lparen' == token:
while True:
(token,value,line,col) = self.tokens.Next()
if 'rparen' == token:
break
elif 'comma' == token:
continue
elif 'option' == token:
nonterminal.options.append(value)
if value == 'start':
self.program.start.append(name)
elif 'decorator' == token:
nonterminal.decorators.append(value[1:])
else:
raise nelly.error('Unknown option: %s %s', token, value)
(token,value,line,col) = self.tokens.Next()
if 'colon' != token:
raise nelly.error('Parse error, missing colon at line %d, column %d', line, col)
# parse zero or more expressions until a semicolon is found
self._expressions('pipe', 'semicolon', nonterminal)
def _expressions(self, delimiter, sentinel, nonterminal):
(token,value,line,col) = self.tokens.Peek()
expression = Expression((line,col))
while self.tokens:
(token,value,line,col) = self.tokens.Next()
if sentinel == token:
nonterminal.expressions.append(expression)
break
elif delimiter == token:
nonterminal.expressions.append(expression)
expression = Expression((line,col))
elif 'lparen' == token:
anonterminal = Nonterminal(Types.ANONYMOUS)
expression.Statement(Types.ANONYMOUS, anonterminal)
self._expressions('pipe', 'rparen', anonterminal)
elif token in ['start_single_quote', 'start_double_quote', 'start_triple_quote']:
quote = self._quote()
expression.Statement(Types.TERMINAL, quote)
elif token in ['start_single_bytes', 'start_double_bytes', 'start_triple_bytes']:
byte_quote = self._quote()
expression.Statement(Types.TERMINAL, byte_quote)
elif 'nonterminal' == token:
expression.Statement(Types.NONTERMINAL, value)
elif 'varterminal' == token:
expression.Statement(Types.VARTERMINAL, value)
elif 'backref' == token:
expression.Statement(Types.BACKREFERENCE, value)
elif 'function' == token:
functerminal = Nonterminal(Types.ANONYMOUS)
self._expressions('comma', 'rparen', functerminal)
expression.Statement(Types.FUNCTION, value[1:], functerminal)
elif 'reference' == token:
expression.Statement(Types.REFERENCE, value[1:])
elif 'constant' == token:
expression.Statement(Types.TERMINAL, value)
elif 'start_python_code' == token:
expression.code = self._python_code(nonterminal.name)
elif 'lbracket' == token:
try:
expression.Operation(Types.SLICE, self._slice())
except IndexError:
raise nelly.error('Applying slice to nothing at line %d, column %d', line, col)
elif 'lcurley' == token:
try:
expression.Operation(Types.RANGE, self._range())
except IndexError:
raise nelly.error('Applying range to nothing at line %d, column %d', line, col)
elif 'langle' == token:
expression.Weight(self._weight())
elif 'empty' == token:
pass
else:
raise nelly.error('Unhandled token "%s" at line %d, column %d', token, line, col)
def _quote(self):
# this will always be the quoted value
(token,value,line,col) = self.tokens.Next()
# this will always be the terminal quote
self.tokens.Next()
return value
#
# Slice a string
#
def _slice(self):
front = None
back = None
start = False
(token,value,line,col) = self.tokens.Next()
if 'constant' == token:
front = value
start = True
(token,value,line,col) = self.tokens.Next()
if 'rbracket' == token:
if False == start:
raise nelly.error('Empty slice at line %d, column %d', line, col)
return (front,front+1)
elif 'colon' != token:
raise nelly.error('Missing colon at line %d, column %d', line, col)
(token,value,line,col) = self.tokens.Next()
if 'constant' == token:
back = value
(token,value,line,col) = self.tokens.Next()
elif 'rbracket' != token:
raise nelly.error('Missing ] at line %d, column %d', line, col)
return (front,back)
#
# Repeat a range
#
def _range(self):
lower = 0
upper = 0
(token,value,line,col) = self.tokens.Next()
if 'constant' != token:
raise nelly.error('Missing range at line %d, column %d', line, col)
lower = value
upper = value
(token,value,line,col) = self.tokens.Next()
if 'rcurley' == token:
return (lower,upper)
elif 'comma' != token:
raise nelly.error('Missing comma at line %d, column %d', line, col)
(token,value,line,col) = self.tokens.Next()
if 'constant' == token:
upper = value
else:
raise nelly.error('Missing range at line %d, column %d', line, col)
(token,value,line,col) = self.tokens.Next()
if 'rcurley' != token:
raise nelly.error('Missing } at line %d, column %d', line, col)
if lower > upper:
lower,upper = upper,lower
return (lower,upper)
def _weight(self):
(token,value,line,col) = self.tokens.Next()
if 'constant' != token:
raise nelly.error('Missing weight at line %d, column %d', line, col)
(token,ignore,line,col) = self.tokens.Next()
if 'rangle' != token:
raise nelly.error('Missing > at %d, column %d', line, col)
return value
#
# Compile the Python into a code object
#
def _python_code(self, name):
(token,value,line,col) = self.tokens.Next()
values = [s for s in value.split('\n') if s.strip()] or ['']
        # save the whitespace of the first line
ws = re.compile(r'\s*').match(values[0]).group()
# check indentation
if [s for s in values if not s.startswith(ws)]:
raise nelly.error('Bad indentation in code block at line %d, column %d', line, col)
# strip and rejoin the code
codeblock = '\n'.join(s[len(ws):] for s in values)
# eat the end_python_code token
self.tokens.Next()
try:
return compile(codeblock, '<'+name+'>', 'exec')
except SyntaxError as e:
raise nelly.error('%d: %s: %s', e.lineno, e.msg, repr(e.text))
#
# Include other BNF files
#
def _include(self):
(token,value,line,col) = self.tokens.Next()
# file names are quoted
if token not in ['start_single_quote', 'start_double_quote', 'start_triple_quote']:
raise nelly.error('quoted file path expected')
# get the quoted value
path = self._quote()
# try opening the file in each include directory, ignore errors
content = None
for include_dir in self.pwd[-1:] + self.include_dirs:
try:
fullpath = os.path.join(include_dir, path)
content = open(fullpath, 'r')
logging.debug('Including file %s', repr(fullpath))
break
except:
continue
# if no file was found, throw an error
if None == content:
raise nelly.error('Could not load file %s', repr(path))
# ignore empty file
if not content:
return
# compile it inline
self.Parse(content)
self.pwd.pop()
# restore the current tokens
self.tokens = self.tokens_stack[-1]
#
# Multi-line comments
#
def _comment(self):
# consume and disregard the tokens
while True:
(token,value,line,col) = self.tokens.Next()
if 'start_comment' == token:
self._comment()
if 'end_comment' == token:
return
|
import dash
import pandas as pd
import pathlib
import dash_core_components as dcc
import dash_html_components as html
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__)
# get relative data folder
PATH = pathlib.Path(__file__).parent
print(PATH)
DATA_PATH = PATH.joinpath("data").resolve()
app = dash.Dash(
__name__, meta_tags=[
{"name": "viewport", "content": "width=device-width"}],
external_stylesheets=external_stylesheets
)
server = app.server
app.layout = html.Div([html.H3('Prueba'),
html.H1('Hello Dash'),
html.Div([html.P('Dash converts Python classes into HTML'),
html.P(
"This conversion happens behind the scenes by Dash's JavaScript front-end")
]),
dcc.Markdown('''
#### Dash and Markdown
Dash supports [Markdown](http://commonmark.org/help).
Markdown is a simple way to write and format text.
It includes a syntax for things like **bold text** and *italics*,
[links](http://commonmark.org/help), inline `code` snippets, lists,
quotes, and more.
''')
])
# Main
if __name__ == "__main__":
app.run_server(debug=True)
|
from models import MPNN, DoubleLayerMLP
from mao import MAOdataset
import torch
import torch.nn as nn
import torch.nn.functional as F
import os.path
class MultiMNNP_MOA(nn.Module):
"""
    A class for a message-passing graph neural network for MOA classification.
    Based on the generalization of Gilmer et al. (2017) proposed by Battaglia et al. (2018).
    Extensively uses and is based on the MPNN module from ./models.py
"""
def __init__(self):
super(MultiMNNP_MOA, self).__init__()
V_attributes = 6
E_attributes = 6
edge_update_nn = DoubleLayerMLP(V_attributes + V_attributes + E_attributes, 64, E_attributes)
vertice_update_nn = DoubleLayerMLP(V_attributes + E_attributes, 32, V_attributes)
output_update_nn = DoubleLayerMLP(V_attributes, 32, 1)
self.l1 = MPNN(V_attributes, E_attributes, edge_update_nn, vertice_update_nn, output_update_nn)
self.l2 = MPNN(V_attributes, E_attributes, edge_update_nn, vertice_update_nn, output_update_nn)
self.l3 = MPNN(V_attributes, E_attributes, edge_update_nn, vertice_update_nn, output_update_nn)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, data):
_, V_new, _ = self.l1.forward(data)
_, V_new, _ = self.l2.forward((V_new, data[1], data[2]))
E, V_new, u = self.l3.forward((V_new, data[1], data[2]))
return self.sigmoid(u)
if __name__ == '__main__':
# Our model is simple enough to train it on CPU
# And also I overused my colab and now cannot connect to a gpu there
device = torch.device('cpu')
dataset = MAOdataset(os.path.dirname(os.path.abspath(__file__)) + '/MAO/')
train_set, test_set = torch.utils.data.random_split(dataset, [dataset.__len__() - dataset.__len__()//4 , dataset.__len__()//4 ])
#dataloaders = {}
#dataloaders['train'] = torch.utils.data.DataLoader(train_set, shuffle=True)
#dataloaders['test'] = torch.utils.data.DataLoader(test_set, shuffle=True)
model = MultiMNNP_MOA()
model.to(device)
optimizer = torch.optim.Adam(list(model.parameters()), lr=0.01, weight_decay=1e-3)
model.train()
for epoch in range(100):
print ("Epoch: " + str(epoch) )
for x, y in train_set:
optimizer.zero_grad()
y_pred = model(x)
loss = F.binary_cross_entropy(y_pred, y)
loss.backward()
optimizer.step()
model.eval()
correct = 0
for x, y in train_set:
y_pred = model(x) > 0.5
if y_pred == y:
correct += 1
print ("Acc: " + str(correct/len(train_set)))
|
# Seeeduino XIAO (SAMD21 Cortex® M0+)
#
# Hardware: https://www.seeedstudio.com/Seeeduino-XIAO-Arduino-Microcontroller-SAMD21-Cortex-M0+-p-4426.html
# CircuitPython: https://circuitpython.org/board/seeeduino_xiao/
#
# Reset: short the RST pads twice
#
import os
import microcontroller
import board
import time
from digitalio import DigitalInOut, Direction, Pull
# Get details about the filesystem
fs_stat = os.statvfs("/")
# Collect details about the system
details = {
"machine": os.uname().machine,
"disk": fs_stat[0] * fs_stat[2] / 1024 / 1024,
"free": fs_stat[0] * fs_stat[3] / 1024 / 1024,
}
# Print details about the available pins
# See https://learn.adafruit.com/circuitpython-essentials/circuitpython-pins-and-modules#what-are-all-the-available-names-3082670-19
def show_pin_details():
"""Show pin overview."""
print("")
print("Pin details")
print("===========")
board_pins = []
for pin in dir(microcontroller.pin):
if isinstance(getattr(microcontroller.pin, pin), microcontroller.Pin):
pins = []
for alias in dir(board):
if getattr(board, alias) is getattr(microcontroller.pin, pin):
pins.append("board.{}".format(alias))
if len(pins) > 0:
board_pins.append(" ".join(pins))
for pins in sorted(board_pins):
print(pins)
print("")
def show_details_startup(title, content):
"""Print details on startup to serial."""
print("")
print("{}".format(title))
print("=" * len(title))
for title, data in content.items():
print("{}: {}".format(title, data))
# or simpler
# [print(key,':',value) for key, value in content.items()]
print("")
# Set up the LED
led = DigitalInOut(board.BLUE_LED)
led.direction = Direction.OUTPUT
# Print the details
show_details_startup("System details", details)
show_pin_details()
print("Blink LED in a endless loop ...")
while True:
led.value = False
time.sleep(1)
led.value = True
time.sleep(1)
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from barbicanclient import client as barbican_client
from barbicanclient import containers
from barbicanclient import exceptions
from heat.common import exception
from heat.engine.clients import client_plugin
from heat.engine import constraints
CLIENT_NAME = 'barbican'
class BarbicanClientPlugin(client_plugin.ClientPlugin):
service_types = [KEY_MANAGER] = ['key-manager']
def _create(self):
interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
client = barbican_client.Client(
session=self.context.keystone_session,
service_type=self.KEY_MANAGER,
interface=interface,
region_name=self._get_region_name())
return client
def is_not_found(self, ex):
return (isinstance(ex, exceptions.HTTPClientError) and
ex.status_code == 404)
def create_generic_container(self, **props):
return containers.Container(
self.client().containers._api, **props)
def create_certificate(self, **props):
return containers.CertificateContainer(
self.client().containers._api, **props)
def create_rsa(self, **props):
return containers.RSAContainer(
self.client().containers._api, **props)
def get_secret_by_ref(self, secret_ref):
try:
secret = self.client().secrets.get(secret_ref)
# Force lazy loading. TODO(therve): replace with to_dict()
secret.name
return secret
except Exception as ex:
if self.is_not_found(ex):
raise exception.EntityNotFound(
entity="Secret",
name=secret_ref)
raise
def get_container_by_ref(self, container_ref):
try:
# TODO(therve): replace with to_dict()
return self.client().containers.get(container_ref)
except Exception as ex:
if self.is_not_found(ex):
raise exception.EntityNotFound(
entity="Container",
name=container_ref)
raise
class SecretConstraint(constraints.BaseCustomConstraint):
resource_client_name = CLIENT_NAME
resource_getter_name = 'get_secret_by_ref'
expected_exceptions = (exception.EntityNotFound,)
class ContainerConstraint(constraints.BaseCustomConstraint):
resource_client_name = CLIENT_NAME
resource_getter_name = 'get_container_by_ref'
expected_exceptions = (exception.EntityNotFound,)
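# Added note: the two constraint classes above follow Heat's custom-constraint
# pattern -- validation looks up the client plugin named by
# resource_client_name ('barbican') and calls the method named by
# resource_getter_name with the property value; a missing entity surfaces as
# exception.EntityNotFound and the property is rejected. The constraint names
# exposed to templates are typically registered via entry points in Heat's
# setup.cfg (not shown here).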
|
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.onyx import onyx_mlag_vip
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxMlagVipModule(TestOnyxModule):
module = onyx_mlag_vip
def setUp(self):
super(TestOnyxMlagVipModule, self).setUp()
self._mlag_enabled = True
self.mock_show_mlag = patch.object(
onyx_mlag_vip.OnyxMLagVipModule,
"_show_mlag")
self.show_mlag = self.mock_show_mlag.start()
self.mock_show_mlag_vip = patch.object(
onyx_mlag_vip.OnyxMLagVipModule,
"_show_mlag_vip")
self.show_mlag_vip = self.mock_show_mlag_vip.start()
self.mock_load_config = patch(
'ansible.module_utils.network.onyx.onyx.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestOnyxMlagVipModule, self).tearDown()
self.mock_show_mlag.stop()
self.mock_show_mlag_vip.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
if self._mlag_enabled:
config_file = 'onyx_mlag_vip_show.cfg'
self.show_mlag_vip.return_value = load_fixture(config_file)
config_file = 'onyx_mlag_show.cfg'
self.show_mlag.return_value = load_fixture(config_file)
else:
self.show_mlag_vip.return_value = None
self.show_mlag.return_value = None
self.load_config.return_value = None
def test_mlag_no_change(self):
set_module_args(dict(ipaddress='10.209.25.107/24',
group_name='neo-mlag-vip-500',
mac_address='00:00:5E:00:01:4E'))
self.execute_module(changed=False)
def test_mlag_change(self):
self._mlag_enabled = False
set_module_args(dict(ipaddress='10.209.25.107/24',
group_name='neo-mlag-vip-500',
mac_address='00:00:5E:00:01:4E',
delay=0))
commands = ['mlag-vip neo-mlag-vip-500 ip 10.209.25.107 /24 force',
'mlag system-mac 00:00:5e:00:01:4e', 'no mlag shutdown']
self.execute_module(changed=True, commands=commands)
def test_mlag_absent_no_change(self):
self._mlag_enabled = False
set_module_args(dict(state='absent'))
self.execute_module(changed=False)
def test_mlag_absent_change(self):
set_module_args(dict(state='absent', delay=0))
commands = ['no mlag-vip']
self.execute_module(changed=True, commands=commands)
|
import unittest
from mplisp import evaluator
class TestSyntax(unittest.TestCase):
def test_string(self):
"""test string"""
input1 = """
(def a \'str\')
(assert-equal! a 'str')
"""
output1 = evaluator.evaluate(input1, None, True)
output2 = evaluator.evaluate(input1, None, False)
self.assertEqual(list(output1), [None, None])
self.assertEqual(list(output2), [None])
|
# -*- coding: utf-8 -*-
# Copyright Noronha Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from types import GeneratorType
from noronha.api.main import NoronhaAPI
from noronha.api.utils import ProjResolver
from noronha.common.annotations import Interactive
from noronha.common.constants import Flag
from noronha.common.errors import PrettyError
from noronha.common.logging import LOG
from noronha.common.parser import StructCleaner
class CommandHandler(Interactive):
interactive_mode: bool = False
struct_cleaner = StructCleaner(nones=[None])
@classmethod
def run(cls, _api_cls, _method, _skip_proj_resolution: bool = False, _error_callback=None, _response_callback=None,
**method_kwargs):
code, error = 0, None
try:
api = cls.init_api(_api_cls)
method, requires_proj = cls.fetch_method(api, _method)
method_kwargs, ref_to_proj = cls.prepare_method_kwargs(method_kwargs)
if requires_proj:
cls.set_proj(api, ref_to_proj)
response = method(**method_kwargs)
if isinstance(response, GeneratorType):
[cls.show_response(res, _response_callback) for res in response]
else:
cls.show_response(response, _response_callback)
except Exception as e:
error = e
code = 1
cls.show_exception(e)
finally:
if LOG.debug_mode and error is not None:
raise error
else:
sys.exit(code)
@classmethod
def set_proj(cls, api: NoronhaAPI, ref_to_proj: str):
if ref_to_proj is None:
resolvers = [ProjResolver.BY_CWD, ProjResolver.BY_CONF]
else:
resolvers = [ProjResolver.BY_NAME]
api.set_proj(ref_to_proj, resolvers)
@classmethod
def init_api(cls, api_cls):
assert issubclass(api_cls, NoronhaAPI)
return api_cls(
scope=NoronhaAPI.Scope.CLI,
interactive=cls.interactive_mode
)
@classmethod
def fetch_method(cls, api: NoronhaAPI, method_name: str):
method = getattr(api, method_name)
requires_proj = getattr(method, Flag.PROJ, False)
return method, requires_proj
@classmethod
def prepare_method_kwargs(cls, method_kwargs: dict):
method_kwargs = cls.struct_cleaner(method_kwargs)
ref_to_proj = method_kwargs.pop('proj', None)
return method_kwargs, ref_to_proj
@classmethod
def show_response(cls, response, callback=None):
if callable(callback):
response = callback(response)
if response is not None:
LOG.echo(response)
@classmethod
def show_exception(cls, exception, callback=None):
if isinstance(exception, PrettyError):
exc = exception.pretty()
else:
exc = PrettyError.parse_exc(exception)
if callable(callback):
detail = callback(exception)
LOG.info(detail)
LOG.error(exc)
CMD = CommandHandler()
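# Added usage sketch: a CLI command would typically delegate to the
# module-level handler like this (ProjectAPI and 'info' are hypothetical
# placeholders, not part of this module):
#
#     CMD.run(ProjectAPI, 'info', proj='my-project')
#
# run() instantiates the API in CLI scope, resolves the project reference,
# invokes ProjectAPI.info(**kwargs), echoes the response through LOG and
# exits with a non-zero status code if an exception was raised.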
|
# Copyright 2018, Red Hat, Inc.
# Ryan Petrello <rpetrell@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tower_cli import models
from tower_cli.cli import types
class Resource(models.BaseResource):
"""A resource for OAuth2 tokens."""
cli_help = 'Manage OAuth2 tokens.'
endpoint = '/tokens/'
internal = True
user = models.Field(type=types.Related('user'), required=True)
application = models.Field(type=types.Related('application'), required=True)
created = models.Field(required=False)
modified = models.Field(required=False)
token = models.Field(required=False)
refresh_token = models.Field(required=False)
expires = models.Field(required=False)
scope = models.Field(required=False)
|
#from django import forms
from django.forms import ModelForm
from .models import Resource, Topic
class ResourceCreateForm(ModelForm):
class Meta:
model = Resource
exclude = ('created_by', 'slug', 'help_text', 'show')
class ResourceUpdateForm(ModelForm):
class Meta:
model = Resource
exclude = ('created_by', 'slug', 'help_text', 'show')
class TopicCreateForm(ModelForm):
class Meta:
model = Topic
exclude = ('slug',)
class TopicUpdateForm(ModelForm):
class Meta:
model = Topic
exclude = ('slug',)
|
from more_itertools.more import *
from more_itertools.recipes import *
|
from Card import *
import random
class Deck:
def __init__(self):
self.cards = []
def build(self):
# One 0 per color, two of each other number, two of each special card, and four of each wild.
for c in ["Red", "Blue", "Green", "Yellow"]:
self.cards.append(Card(c, 0))
for v in range(1,10):
self.cards.append(Card(c, v))
self.cards.append(Card(c, v))
for v in ["Reverse", "Skip", "Draw 2"]:
self.cards.append(Card(c, v))
self.cards.append(Card(c, v))
for x in range(4):
self.cards.append(Card("Black", "Wild"))
self.cards.append(Card("Black", "Draw 4"))
self.shuffle()
def shuffle(self):
random.shuffle(self.cards)
def draw(self):
return self.cards.pop(0)
#discard pile only method
def add(self, card):
while card.color == "Black":
new_col = input("What color would you like to change it to?: ").capitalize()
if new_col not in ("Red", "Green", "Blue", "Yellow"):  # exact match, not a substring check
print("Invalid color.")
else:
card.color = new_col
self.cards.append(card)
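# Added sketch: a minimal smoke test of the deck (not part of the original
# module); it assumes Card exposes a .color attribute, which is the only
# attribute this module itself relies on.
if __name__ == '__main__':
    deck = Deck()
    deck.build()
    print(len(deck.cards))  # the full deck built above holds 108 cards
    print(deck.draw().color)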
|
from cli import *
from sim_commands import *
def shadow_cmd(obj):
r = [ [ "Base", "Read", "Write" ] ]
read_write = ("PCI", "RAM")
crs = obj.config_register
for i in range(384/2):
subrange = crs[i]
if subrange:
r.append(["%x" % ((640 + i * 2) * 1024), read_write[subrange & 1],
read_write[subrange >> 1]])
print_columns([Just_Left, Just_Left, Just_Left], r)
print "All addresses not listed are forwarded to PCI"
new_command("status", shadow_cmd, [], namespace = "pc-shadow",
type = "inspect commands",
short = "device status",
doc = """
Print the shadow RAM status for each 2kb region between 640kb and 1Mb.""", filename="/mp/simics-3.0/src/devices/pc-shadow/commands.py", linenumber="16")
def get_info(obj):
return []
new_info_command('pc-shadow', get_info)
|
import numpy as np
import sys # for sys.float_info.epsilon
######################################################################
### class QDA
######################################################################
class QDA(object):
def __init__(self):
# Define all instance variables here (not strictly required, but documents the interface)
self.Xmeans = None
self.Xstds = None
self.mu = None
self.sigma = None
self.sigmaInv = None
self.prior = None
self.discriminantConstant = None
def train(self,X,T):
self.classes = np.unique(T)
self.Xmeans = np.mean(X,0)
self.Xstds = np.std(X,0)
self.Xconstant = self.Xstds == 0
self.XstdsFixed = self.Xstds.copy()
self.XstdsFixed[self.Xconstant] = 1
Xs = (X - self.Xmeans) / self.XstdsFixed
self.mu = []
self.sigma = []
self.sigmaInv = []
self.prior = []
nSamples = X.shape[0]
for k in self.classes:
rowsThisClass = (T == k).reshape((-1))
self.mu.append( np.mean(Xs[rowsThisClass,:],0).reshape((-1,1)) )
if sum(rowsThisClass) == 1:
self.sigma.append(np.eye(Xs.shape[1]))
else:
self.sigma.append( np.cov(Xs[rowsThisClass,:],rowvar=0) )
if self.sigma[-1].size == 1:
self.sigma[-1] = self.sigma[-1].reshape((1,1))
self.sigmaInv.append( np.linalg.pinv(self.sigma[-1]) ) # pinv in case Sigma is singular
self.prior.append( np.sum(rowsThisClass) / float(nSamples) )
self._finishTrain()
def _finishTrain(self):
self.discriminantConstant = []
for ki in range(len(self.classes)):
determinant = np.linalg.det(self.sigma[ki])
if determinant == 0:
# raise np.linalg.LinAlgError('trainQDA(): Singular covariance matrix')
determinant = sys.float_info.epsilon
self.discriminantConstant.append( np.log(self.prior[ki]) - 0.5*np.log(determinant) )
def use(self,X):
nSamples = X.shape[0]
Xs = (X - self.Xmeans) / self.XstdsFixed
discriminants = self._discriminantFunction(Xs)
predictedClass = self.classes[np.argmax( discriminants, axis=1 )]
predictedClass = predictedClass.reshape((-1,1))
D = X.shape[1]
probabilities = np.exp( discriminants - 0.5*D*np.log(2*np.pi) - np.log(np.array(self.prior)) )
return predictedClass,probabilities,discriminants
def _discriminantFunction(self,Xs):
nSamples = Xs.shape[0]
discriminants = np.zeros((nSamples, len(self.classes)))
for ki in range(len(self.classes)):
Xc = Xs - self.mu[ki].reshape((-1))
discriminants[:,ki:ki+1] = self.discriminantConstant[ki] - 0.5 * \
np.sum(np.dot(Xc, self.sigmaInv[ki]) * Xc, axis=1).reshape((-1,1))
return discriminants
def __repr__(self):
if self.mu is None:
return 'QDA not trained.'
else:
return 'QDA trained for classes {}'.format(self.classes)
######################################################################
### class LDA
######################################################################
class LDA(QDA):
def _finishTrain(self):
self.sigmaMean = np.sum(np.stack(self.sigma) * np.array(self.prior)[:,np.newaxis,np.newaxis], axis=0)
self.sigmaMeanInv = np.linalg.pinv(self.sigmaMean)
# print(self.sigma)
# print(self.sigmaMean)
self.discriminantConstant = []
self.discriminantCoefficient = []
for ki in range(len(self.classes)):
sigmaMu = np.dot(self.sigmaMeanInv, self.mu[ki])
self.discriminantConstant.append( -0.5 * np.dot(self.mu[ki].T, sigmaMu) )
self.discriminantCoefficient.append( sigmaMu )
def _discriminantFunction(self,Xs):
nSamples = Xs.shape[0]
discriminants = np.zeros((nSamples, len(self.classes)))
for ki in range(len(self.classes)):
discriminants[:,ki:ki+1] = self.discriminantConstant[ki] + \
np.dot(Xs, self.discriminantCoefficient[ki])
return discriminants
def __repr__(self):
if self.mu is None:
return 'LDA not trained.'
else:
return 'LDA trained for classes {}'.format(self.classes)
######################################################################
### Example use
######################################################################
if __name__ == '__main__':
D = 1 # number of components in each sample
N = 10 # number of samples in each class
X = np.vstack((np.random.normal(0.0,1.0,(N,D)),
np.random.normal(4.0,1.5,(N,D))))
T = np.vstack((np.array([1]*N).reshape((N,1)),
np.array([2]*N).reshape((N,1))))
qda = QDA()
qda.train(X,T)
c,prob,d = qda.use(X)
print('QDA', np.sum(c==T)/X.shape[0] * 100, '% correct')
# print(np.hstack((T,c)))
# print(prob)
# print(d)
lda = LDA()
lda.train(X,T)
c,prob,d = lda.use(X)
print('LDA', np.sum(c==T)/X.shape[0] * 100, '% correct')
# print(np.hstack((T,c)))
# print(prob)
# print(d)
|
#!/usr/bin/env python
import argparse
from collections import defaultdict
import glob
import inspect
import json
import logging
import os
import shutil
from astropy.io import fits
from astropy.time import Time
import numpy as np
import pandas as pd
from tqdm import tqdm
logging.basicConfig(format='%(levelname)-4s '
'[%(module)s.%(funcName)s:%(lineno)d]'
' %(message)s',
level=logging.INFO)
LOG = logging.getLogger('cr_rejection')
LOG.setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('-dir1',
type=str,
help='first directory containing files to process')
parser.add_argument('-dir2',
type=str,
help='second directory containing files to process')
def find_files(longdir=None, shortdir=None, suffix=None):
""" Read in all files in longdir and shortdir with filetype suffix
Parameters
----------
longdir
shortdir
suffix
Returns
-------
"""
flistlong = glob.glob(f'{longdir}/{suffix}')
flistshort = glob.glob(f'{shortdir}/{suffix}')
return flistlong, flistshort
def generate_record(
outputdir,
badinpdq=None,
crmask="yes",
crrejtab=None,
crsigmas='6,5,4',
crthresh=0.75,
crradius=1.0,
initgues='med',
scalense=0,
skysub="mode"
):
""" Generate an output file containing a summary of the input parameters
Parameters
----------
outputdir : str
crsigmas : list
crthresh : float
crradius : float
Returns
-------
"""
frame = inspect.currentframe()
arginfo = inspect.getargvalues(frame)
args = arginfo.args
values = [arginfo.locals[arg] for arg in args]
fout_json = f"{outputdir}/_summary.json"
LOG.info(f"Writing the input parameters to the following file:\n{fout_json}")
data_dict = {}
for key, val in zip(args, values):
data_dict[key] = val
json.dump(data_dict, open(fout_json, mode='w'))
# noinspection PyTypeChecker
def setup_output(flist1, flist2, dir_suffix=''):
""" Setup output directories for each list of files
Parameters
----------
flist1 : list
flist2 : list
Returns
-------
"""
# Generate the output path for each input list
dir1_path = os.path.dirname(flist1[0])
dir1_name = dir1_path.split('/')[-1]
outdir1 = dir1_path.replace(
dir1_name,
f"{dir1_name.split('_')[0]}_{dir_suffix}"
)
try:
os.mkdir(outdir1)
except FileExistsError:
pass
dir2_path = os.path.dirname(flist2[0])
dir2_name = dir2_path.split('/')[-1]
outdir2 = dir2_path.replace(
dir2_name,
f"{dir2_name.split('_')[0]}_{dir_suffix}"
)
try:
os.mkdir(outdir2)
except FileExistsError:
pass
LOG.info(
f"Set up two output directories: \n{outdir1}\n{outdir2}\n {'-'*79}"
)
for f1 in tqdm(flist1, desc=f'Copying to {os.path.basename(outdir1)} '):
fits.setval(f1, keyword='CRREJTAB',
value='/Users/nmiles/hst_cosmic_rays/j3m1403io_crr.fits')
shutil.copy(f1, outdir1)
shutil.copy(f1.replace('flt.fits','spt.fits'), outdir1)
for f2 in tqdm(flist2,desc=f'Copying to {os.path.basename(outdir2)} '):
fits.setval(f2, keyword='CRREJTAB',
value='/Users/nmiles/hst_cosmic_rays/j3m1403io_crr.fits')
shutil.copy(f2, outdir2)
shutil.copy(f2.replace('flt.fits','spt.fits'), outdir2)
return outdir1, outdir2
def sort_flist(flist):
date_time = []
for f in flist:
with fits.open(f) as hdu:
hdr = hdu[0].header
try:
dateobs = hdr['DATE-OBS']
except KeyError:
dateobs = hdr['TDATEOBS']
try:
timeobs = hdr['TIME-OBS']
except KeyError:
timeobs = hdr['TTIMEOBS']
date_time.append(Time(f"{dateobs} {timeobs}",
format='iso').to_datetime())
data = list(zip(flist, date_time))
data.sort(key=lambda val: val[1])
flist, date_time = zip(*data)
return flist
def initialize(dir1, dir2, nimages=None, dir_suffix=None):
# Get string to use for recorded the start of processing
if dir_suffix is None:
tday = Time.now().to_datetime()
dir_suffix = tday.strftime('%b%d')
flist1 = sort_flist(glob.glob(dir1+'*flt.fits'))
flist2 = sort_flist(glob.glob(dir2+'*flt.fits'))
if nimages is not None:
flist1 = flist1[:nimages]
flist2 = flist2[:nimages]
outdir1, outdir2 = setup_output(flist1, flist2, dir_suffix=dir_suffix)
return outdir1, outdir2
def parse_inputs():
args = vars(parser.parse_args())
return args
if __name__ == '__main__':
# args = vars(parser.parse_args())
args = {
'dir1':'/Users/nmiles/hst_cosmic_rays/'
'analyzing_cr_rejection/1100.0_clean/',
'dir2':'/Users/nmiles/hst_cosmic_rays/'
'analyzing_cr_rejection/60.0_clean/'
}
main(**args)
|
from socket import *
import random
serverSocket = socket(AF_INET, SOCK_DGRAM)
serverSocket.bind(('', 12000))
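# Added sketch of a matching client (not part of the original server): it
# sends one datagram and waits up to a second for the echoed, upper-cased
# reply, assuming the server below is reachable on localhost:12000. It is
# only defined here, never called, so the server behaviour is unchanged.
def example_client(message=b'ping'):
    clientSocket = socket(AF_INET, SOCK_DGRAM)
    clientSocket.settimeout(1.0)
    clientSocket.sendto(message, ('localhost', 12000))
    try:
        reply, _ = clientSocket.recvfrom(1024)
        return reply
    except OSError:  # timed out or network error: the server dropped the packet
        return None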
while True:
rand = random.randint(0, 10)
message, address = serverSocket.recvfrom(1024)
message = message.upper()
if rand < 4:
continue
serverSocket.sendto(message, address)
|
# Generated by Django 2.2.10 on 2020-04-30 04:36
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('bank', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='walletaccount',
name='user_id',
field=models.CharField(blank=True, default=uuid.uuid4, max_length=100, unique=True),
),
]
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main experiment for data valuation.
Main experiment of a data valuation application
using "Data Valuation using Reinforcement Learning (DVRL)"
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import keras
import lightgbm
import numpy as np
import pandas as pd
import tensorflow as tf
from dvrl import data_loading
from dvrl import dvrl
from dvrl import dvrl_metrics
def main(args):
"""Main function of DVRL for data valuation experiment.
Args:
args: data_name, train_no, valid_no,
normalization, network parameters, number of examples
"""
# Data loading and sample corruption
data_name = args.data_name
# The number of training and validation samples
dict_no = dict()
dict_no['train'] = args.train_no
dict_no['valid'] = args.valid_no
# Network parameters
parameters = dict()
parameters['hidden_dim'] = args.hidden_dim
parameters['comb_dim'] = args.comb_dim
parameters['iterations'] = args.iterations
parameters['activation'] = tf.nn.relu
parameters['inner_iterations'] = args.inner_iterations
parameters['layer_number'] = args.layer_number
parameters['learning_rate'] = args.learning_rate
parameters['batch_size'] = args.batch_size
parameters['batch_size_predictor'] = args.batch_size_predictor
# The number of examples
n_exp = args.n_exp
# Checkpoint file name
checkpoint_file_name = args.checkpoint_file_name
# Data loading
_ = data_loading.load_tabular_data(data_name, dict_no, 0.0)
print('Finished data loading.')
# Data preprocessing
# Normalization methods: 'minmax' or 'standard'
normalization = args.normalization
# Extracts features and labels. Then, normalizes features
x_train, y_train, x_valid, y_valid, x_test, y_test, col_names = \
data_loading.preprocess_data(normalization, 'train.csv',
'valid.csv', 'test.csv')
print('Finished data preprocess.')
# Run DVRL
# Resets the graph
tf.reset_default_graph()
keras.backend.clear_session()
# Here, we assume a classification problem and we assume a predictor model
# in the form of a simple multi-layer perceptron.
problem = 'classification'
# Predictive model define
pred_model = keras.models.Sequential()
pred_model.add(keras.layers.Dense(parameters['hidden_dim'],
activation='relu'))
pred_model.add(keras.layers.Dense(parameters['hidden_dim'],
activation='relu'))
pred_model.add(keras.layers.Dense(2, activation='softmax'))
pred_model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
# Flags for using stochastic gradient descent / pre-trained model
flags = {'sgd': True, 'pretrain': False}
# Initializes DVRL
dvrl_class = dvrl.Dvrl(x_train, y_train, x_valid, y_valid,
problem, pred_model, parameters,
checkpoint_file_name, flags)
# Trains DVRL
dvrl_class.train_dvrl('auc')
print('Finished dvrl training.')
# Outputs
# Data valuation
dve_out = dvrl_class.data_valuator(x_train, y_train)
print('Finished data valuation.')
# Evaluations
# 1. Data valuation
# Data valuation
sorted_idx = np.argsort(-dve_out)
sorted_x_train = x_train[sorted_idx]
# Indices of top n high valued samples
print('Indices of top ' + str(n_exp) + ' high valued samples: '
+ str(sorted_idx[:n_exp]))
print(pd.DataFrame(data=sorted_x_train[:n_exp, :], index=range(n_exp),
columns=col_names).head())
# Indices of top n low valued samples
print('Indices of top ' + str(n_exp) + ' low valued samples: '
+ str(sorted_idx[-n_exp:]))
print(pd.DataFrame(data=sorted_x_train[-n_exp:, :], index=range(n_exp),
columns=col_names).head())
# 2. Performance after removing high/low values
# Here, as the evaluation model, we use LightGBM.
eval_model = lightgbm.LGBMClassifier()
# Performance after removing high/low values
_ = dvrl_metrics.remove_high_low(dve_out, eval_model, x_train, y_train,
x_valid, y_valid, x_test, y_test,
'accuracy', plot=True)
return
if __name__ == '__main__':
# Inputs for the main function
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_name',
help='data name (adult or blog)',
default='adult',
type=str)
parser.add_argument(
'--normalization',
help='data normalization method',
default='minmax',
type=str)
parser.add_argument(
'--train_no',
help='number of training samples',
default=1000,
type=int)
parser.add_argument(
'--valid_no',
help='number of validation samples',
default=400,
type=int)
parser.add_argument(
'--hidden_dim',
help='dimensions of hidden states',
default=100,
type=int)
parser.add_argument(
'--comb_dim',
help='dimensions of hidden states after combining with prediction diff',
default=10,
type=int)
parser.add_argument(
'--layer_number',
help='number of network layers',
default=5,
type=int)
parser.add_argument(
'--iterations',
help='number of iterations',
default=2000,
type=int)
parser.add_argument(
'--batch_size',
help='number of batch size for RL',
default=2000,
type=int)
parser.add_argument(
'--inner_iterations',
help='number of iterations',
default=100,
type=int)
parser.add_argument(
'--batch_size_predictor',
help='number of batch size for predictor',
default=256,
type=int)
parser.add_argument(
'--n_exp',
help='number of examples',
default=5,
type=int)
parser.add_argument(
'--learning_rate',
help='learning rates for RL',
default=0.01,
type=float)
parser.add_argument(
'--checkpoint_file_name',
help='file name for saving and loading the trained model',
default='./tmp/model.ckpt',
type=str)
args_in = parser.parse_args()
# Calls main function
main(args_in)
|
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
from typing import Any, List
from model_compression_toolkit.core.common import Graph, BaseNode
from model_compression_toolkit.core.common.logger import Logger
def set_bit_widths(mixed_precision_enable: bool,
graph_to_set_bit_widths: Graph,
bit_widths_config: List[int] = None) -> Graph:
"""
Set bit widths configuration to nodes in a graph. For each node, use the desired index
in bit_widths_config to finalize the node weights and activation quantization configuration.
Args:
mixed_precision_enable: Is mixed precision enabled.
graph_to_set_bit_widths: A prepared for quantization graph to set its bit widths.
bit_widths_config: MP configuration (a list of indices: one for each node's candidate
quantization configuration).
"""
graph = copy.deepcopy(graph_to_set_bit_widths)
if mixed_precision_enable:
assert all([len(n.candidates_quantization_cfg) > 0 for n in graph.get_configurable_sorted_nodes()]), \
"All configurable nodes in graph should have at least one candidate configuration in mixed precision mode"
Logger.info(f'Set bit widths from configuration: {bit_widths_config}')
# Get a list of nodes' names we need to finalize (that they have at least one weight qc candidate).
sorted_nodes_names = graph.get_configurable_sorted_nodes_names()
for node in graph.nodes: # set a specific node qc for each node final weights qc
# If it's reused, take the configuration that the base node has
node_name = node.name if not node.reuse else '_'.join(node.name.split('_')[:-2])
if node_name in sorted_nodes_names: # only configurable nodes are in this list
node_index_in_graph = sorted_nodes_names.index(node_name)
_set_node_final_qc(bit_widths_config,
node,
node_index_in_graph)
elif node.is_activation_quantization_enabled():
# If we are here, this means that we are in weights-only mixed-precision
# (i.e., activations are quantized with fixed bitwidth or not quantized)
# and that this node doesn't have weights to quantize
assert len(node.candidates_quantization_cfg) > 0, \
"Node need to have at least one quantization configuration in order to quantize its activation"
node.final_activation_quantization_cfg = copy.deepcopy(node.candidates_quantization_cfg[0].activation_quantization_cfg)
elif node.is_weights_quantization_enabled():
# If we are here, this means that we are in activation-only mixed-precision
# (i.e., weights are quantized with fixed bitwidth or not quantized)
# and that this node doesn't have activations to quantize
assert len(node.candidates_quantization_cfg) > 0, \
"Node need to have at least one quantization configuration in order to quantize its activation"
node.final_weights_quantization_cfg = copy.deepcopy(node.candidates_quantization_cfg[0].weights_quantization_cfg)
# When working in non-mixed-precision mode, there's only one bitwidth, and we simply set the
# only candidate of the node as its final weight and activation quantization configuration.
else:
for n in graph.nodes:
assert len(n.candidates_quantization_cfg) == 1
n.final_weights_quantization_cfg = copy.deepcopy(n.candidates_quantization_cfg[0].weights_quantization_cfg)
n.final_activation_quantization_cfg = copy.deepcopy(n.candidates_quantization_cfg[0].activation_quantization_cfg)
return graph
def _get_node_qc_by_bit_widths(node: BaseNode,
bit_width_cfg: List[int],
node_index_in_graph: int) -> Any:
"""
Get the node's quantization configuration that
matches to the bit width index as in the MP configuration bit_width_cfg.
If it was not found, return None.
Args:
node: Node to get its quantization configuration candidate.
bit_width_cfg: Configuration which determines the node's desired bit width.
node_index_in_graph: Index of the node in the bit_width_cfg.
Returns:
Node quantization configuration if it was found, or None otherwise.
"""
if node.is_weights_quantization_enabled() or node.is_activation_quantization_enabled():
bit_index_in_cfg = bit_width_cfg[node_index_in_graph]
qc = node.candidates_quantization_cfg[bit_index_in_cfg]
return qc
return None
def _set_node_final_qc(bit_width_cfg: List[int],
node: BaseNode,
node_index_in_graph: int):
"""
Get the node's quantization configuration that
matches to the bit width index as in the MP configuration bit_width_cfg, and use it to finalize the node's
weights and activation quantization config.
If the node quantization config was not found, raise an exception.
Args:
bit_width_cfg: Configuration which determines the node's desired bit width.
node: Node to set its node quantization configuration.
node_index_in_graph: Index of the node in the bit_width_cfg.
"""
node_qc = _get_node_qc_by_bit_widths(node,
bit_width_cfg,
node_index_in_graph)
if node_qc is None:
Logger.critical(f'Node {node.name} quantization configuration from configuration file'
f' was not found in candidates configurations.')
else:
node.final_weights_quantization_cfg = node_qc.weights_quantization_cfg
node.final_activation_quantization_cfg = node_qc.activation_quantization_cfg
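# Added note: in mixed-precision mode, bit_widths_config is a plain list of
# candidate indices aligned with graph.get_configurable_sorted_nodes_names();
# e.g. a (purely illustrative) config [0, 2, 1] picks candidate 0 for the
# first configurable node, candidate 2 for the second and candidate 1 for the
# third, and _set_node_final_qc copies that candidate's weights/activation
# quantization configs onto the node.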
|
import json
import math
from scipy.stats import poisson
import sys
import logging
import numpy as np
from mpmath import *
from probabilistic import *
from multiprocessing import Pool, TimeoutError, Manager
from calcGenerativeTask import calculationTask
mp.dps=snakemake.params['dps']
logging.basicConfig(filename=snakemake.log[0], level=logging.DEBUG,format="%(asctime)s:%(levelname)s:%(message)s")
#Read counts
sequenceKmerProfiles = json.load(open(snakemake.input['counts'],'r'))
actual_counts = json.load(open(snakemake.input['observed'],'r'))
#Parse k
k = int(snakemake.params['k'])
baseErrorRate = 0.0
with open(snakemake.input['baseError'],'r') as infile:
baseErrorRate = float(infile.read())
#Thread management
pool = Pool(processes=snakemake.threads)
manager = Manager()
print("Calculating Generative Probabilities using {} Threads".format(snakemake.threads))
probabilities = manager.dict()
#probabilities = {}
#HD-Likelihoods LN-Likelihoods, Caches
hdLikelihoods = manager.dict()
lnLikelihoods = manager.dict()
#hdLikelihoods = {}
#lnLikelihoods = {}
pool.map(calculationTask,[[spaTypeID,sequenceKmerProfiles,hdLikelihoods,lnLikelihoods,probabilities,actual_counts,baseErrorRate,k] for spaTypeID,sequenceKmerProfiles in sequenceKmerProfiles.items()])
pool.close() # map() has already blocked until all tasks finished; release the workers
probabilities = dict(probabilities) #convert back to regular dict that can be written to json
with open(snakemake.output['likelihoods'],'w') as outfile:
json.dump(probabilities,outfile)
|
import inspect
import functools
from typing import OrderedDict, Tuple
def _cast(
obj: object,
cast_to: type,
cast_rules: OrderedDict[Tuple[type, type], callable]):
# Apply the first cast rule whose input type matches; if none matches,
# fall back to the plain cast_to constructor.
for (input_type, output_type), func in cast_rules.items():
    if isinstance(obj, input_type):
        return func(obj)
return cast_to(obj)
def _autocast(
cast_rules: OrderedDict[Tuple[type, type], callable] = {},
**cast_to: type) -> callable:
def wrapper(func: callable) -> callable:
argspec = inspect.getfullargspec(func)
arg_names = argspec.args
for key, value in cast_to.items():
if key not in arg_names:
raise ValueError(
f"arg '{key}' not found in {func.__name__}().")
if not isinstance(value, type):
raise TypeError(
f"Cast destination of arg '{key}' in {func.__name__}() "
"must be an instance of type.")
@functools.wraps(func)
def _func_with_typecast(*args, **kwargs):
args_casted = []
for name, arg in zip(arg_names, args):
if name in cast_to:
args_casted.append(_cast(arg, cast_to[name], cast_rules))
else:
args_casted.append(arg)
kwargs_casted = {}
for name, arg in kwargs.items():
if name in cast_to:
kwargs_casted[name] = _cast(arg, cast_to[name], cast_rules)
else:
kwargs_casted[name] = arg
return func(*args_casted, **kwargs_casted)
_func_with_typecast.__signature__ = inspect.signature(func)
return _func_with_typecast
return wrapper
def autocast(**cast_to: type) -> callable:
"""Decorator to automatically cast function arguments.
Parameters
----------
cast_to : type
Specifies casting destination of arguments.
Returns
-------
callable
Function with type casting.
Example
-------
>>> from pyautocast import autocast
>>> @autocast(x=str)
... def func(x):
... assert(isinstance(x, str))
... return "arg 'x' in func() is " + x
...
>>> func(2)
"arg 'x' in func() is 2"
>>> func([1, 2, 3])
"arg 'x' in func() is [1, 2, 3]"
"""
return _autocast({}, **cast_to)
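# Added sketch (not part of the library's public API): the internal _autocast
# hook also accepts explicit cast rules; the (list, tuple) rule below is a
# made-up illustration.
if __name__ == '__main__':
    from collections import OrderedDict as _OrderedDict

    _rules = _OrderedDict({(list, tuple): tuple})

    @_autocast(_rules, x=tuple)
    def as_tuple(x):
        assert isinstance(x, tuple)
        return x

    print(as_tuple([1, 2, 3]))  # lists are routed through the rule -> (1, 2, 3)
    print(as_tuple(range(3)))   # anything else falls back to tuple(x) -> (0, 1, 2)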
|
from scripttest import TestFileEnvironment
import re
# from filecmp import cmp
bindir = "graphprot/"
script = "graphprot_seqmodel"
# test file environment
datadir = "test/"
testdir = "test/testenv_graphprot_seqmodel/"
# directories relative to test file environment
bindir_rel = "../../" + bindir
datadir_rel = "../../" + datadir
env = TestFileEnvironment(testdir)
def test_invocation_no_params():
"Call without parameters should return usage information."
call = bindir_rel + script
run = env.run(
call,
expect_error=True)
assert run.returncode == 2
assert re.match("usage", run.stderr), "stderr should contain usage information: {}".format(run.stderr)
def test_invocation_nonexisting_input():
"Call with nonexisting input file."
outfile = "shouldcrash"
call = bindir_rel + script + " -vvv fit -p {} -n {} --output-dir ./ --model-file {} --n-iter 1".format(
datadir_rel + "does_not_exist",
datadir_rel + "does_not_exist",
outfile,
)
run = env.run(
call,
expect_error=True,
)
assert run.returncode != 0
def test_fit_optimization_fail():
outfile = "test_simple_fit.model"
call = bindir_rel + script + " -vvv fit -p {} -n {} --output-dir ./ --model-file {} --n-iter 2 --n-inner-iter-estimator 2".format(
datadir_rel + "does_not_exist",
datadir_rel + "does_not_exist",
outfile,
)
# graphprot/graphprot_seqmodel -vvv fit -p test/graphprot_seqmodel_test_fit_no_solution.fa -n test/graphprot_seqmodel_test_fit_no_solution.fa --output-dir manualtest --n-iter 2 --n-inner-iter-estimator 2
run = env.run(
call,
expect_error=True
)
# script should give non-zero return code
assert run.returncode != 0
# script should not create any files
assert len(run.files_created.keys()) == 0
def test_simple_fit():
"Train a model on 10 positive and 10 negative sequences using default paramters."
outfile = "test_simple_fit.model"
call = bindir_rel + script + " -vvv fit -p {} -n {} --output-dir ./ --model-file {} --n-iter 1".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.negatives.fa",
outfile
)
# ../../graphprot/graphprot_seqmodel -vvv fit -p ../../test/PARCLIP_MOV10_Sievers_100seqs.train.positives.fa -n ../../test/PARCLIP_MOV10_Sievers_100seqs.train.negatives.fa --output-dir ./ --model-file test_simple_fit.model --n-iter 1
env.run(call)
call = bindir_rel + script + " -vvv estimate -p {} -n {} --output-dir ./ --model-file {} --cross-validation".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.negatives.fa",
outfile
)
# ../../graphprot/graphprot_seqmodel -vvv estimate -p ../../test/PARCLIP_MOV10_Sievers_1kseqs.train.positives.fa -n ../../test/PARCLIP_MOV10_Sievers_1kseqs.train.negatives.fa --output-dir ./ --model-file test_simple_fit.model --cross-validation
run = env.run(
call,
expect_stderr=True,
)
stdout = open(testdir + outfile + ".cv.out", "w")
stdout.write(run.stdout)
def test_predict_simpletask():
"Fit model and do prediction of training data using default parameters."
model = "test_predict_simpletask.model"
call = bindir_rel + script + " -vvv fit -p {} -n {} --output-dir ./ --model-file {} --n-iter 1".format(
datadir_rel + "simple_positives.fa",
datadir_rel + "simple_negatives.fa",
model,
)
env.run(call,)
call = bindir_rel + script + " -vvv predict --input-file {} --model-file {} --output-dir {}".format(
datadir_rel + "simple_positives.fa",
model,
"test_predict_simpletask",
)
run = env.run(call)
assert "test_predict_simpletask/predictions.txt" in run.files_created
for line in run.files_created["test_predict_simpletask/predictions.txt"].bytes.split("\n"):
try:
prediction, margin, id = line.split()
assert float(margin) >= 0.4, "Error: all margins should be at least 0.4, the margin for id {} is '{}' in {}.".format(id, margin, run.files_created["test_predict_simpletask/predictions.txt"].bytes)
except ValueError:
pass
def test_predict():
"Predict class of some sequences."
model = "test_predict.model"
call = bindir_rel + script + " -vvv fit -p {} -n {} --output-dir ./ --model-file {} --n-iter 1".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.negatives.fa",
model
)
# ../../graphprot/graphprot_seqmodel -vvv fit -p ../../test/PARCLIP_MOV10_Sievers_100seqs.train.positives.fa -n ../../test/PARCLIP_MOV10_Sievers_100seqs.train.negatives.fa --output-dir ./ --model-file test_simple_fit.model --n-iter 1
env.run(call)
call = bindir_rel + script + " -vvv predict --input-file {} --model-file {} --output-dir {}".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
model,
"test_predict",
)
run = env.run(call)
assert "test_predict/predictions.txt" in run.files_created
def test_priors_weight_fail_allzero():
"Fit model reweighting by priors, set prior weight extra high to produce very low weights."
# lowest prior is p=0.00031274442646757
# weights w > 1/p are guaranteed to produce zero weights exclusively (-> 3.179)
model = "test_priors_weight_fail_allzero.model"
call = bindir_rel + script + " -vvv fit -p {} -n {} --output-dir ./ --model-file {} --n-iter 1 --kmer-probs {} --kmer-weight 3200".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.negatives.fa",
model,
datadir_rel + "test_graphprot_priors.txt",
)
env.run(call)
def test_priors_weight():
"Fit model reweighting by priors, set prior weight extra high to produce exclusively zero weights."
model = "test_priors.model"
call = bindir_rel + script + " -vvv fit -p {} -n {} --output-dir ./ --model-file {} --n-iter 1 --kmer-probs {}".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.negatives.fa",
model,
datadir_rel + "test_graphprot_priors.txt",
)
run = env.run(call,)
assert model in run.files_created
def test_predictprofile():
"Predict nucleotide-wise margins of some sequences."
model = "test_predict_profile.model"
call = bindir_rel + script + " -vvv fit -p {} -n {} --output-dir {} --model-file {} --n-iter 1".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.negatives.fa",
"test_predict_profile",
model
)
env.run(call)
call = bindir_rel + script + " -vvv predict_profile --input-file {} --model-file {} --output-dir {}".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
"test_predict_profile/" + model,
"test_predict_profile",
)
run = env.run(call)
assert "test_predict_profile/profile.txt" in run.files_created
def test_predictprofile_with_priors():
"Predict nucleotide-wise margins of some sequences."
model = "test_predict_profile_with_priors.model"
call = bindir_rel + script + " -vvv fit -p {} -n {} --output-dir {} --model-file {} --n-iter 1 --kmer-probs {} --kmer-weight 200".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.negatives.fa",
"test_predict_profile_with_priors",
model,
datadir_rel + "test_graphprot_priors.txt",
)
env.run(call)
call = bindir_rel + script + " -vvv predict_profile --input-file {} --model-file {} --output-dir {}".format(
datadir_rel + "PARCLIP_MOV10_Sievers_10seqs.train.positives.fa",
"test_predict_profile_with_priors/" + model,
"test_predict_profile_with_priors",
)
run = env.run(call)
assert "test_predict_profile_with_priors/profile.txt" in run.files_created
|
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import matplotlib.pyplot as plt
import deepdish as dd
from sklearn.linear_model import LinearRegression
from collections import deque
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import pickle
import numpy as np
SYMBOL = "DAL"
PATIENCE = 10
MIN_VAR = 2
file_path = "../data/"+SYMBOL+"/history.pckl"
with open(file_path, "rb") as fb:
jfile = pickle.load(fb)
history = jfile['history']
X = []
Y = []
vY = []
for date in history:
_v = history[date]
_open = float(_v['open'])
_close = float(_v['close'])
_high = float(_v['high'])
_low = float(_v['low'])
_volume = int(_v['volume'])
vX = [_open, _close, _high, _low, _volume]
X.append(vX)
Y.append(_close)
vY.append(_volume)
X = X[::-1]
Y = Y[::-1]
X = np.array(X)
Y = np.array(Y)
vY = np.array(vY)
X = X[1:]
Y = Y[1:]
vY = vY[1:]
#### SCALE DATA SET ####
scaler = MinMaxScaler()
scaler.fit(X)
dd.io.save("../data/"+SYMBOL+"/" + "_scaler.h5", scaler)
X = scaler.transform(X)
slice = int(len(X)/20)
X_train = X[0:-slice]
Y_train = Y[0:-slice]
X_test = X[-slice:]
Y_test = Y[-slice:]
vY_train = vY[0:-slice]
vY_test = vY[-slice:]
X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
X_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
#### TRAIN NEXT PRICE ####
model = Sequential()
model.add(LSTM(48, input_shape=(1, X_train.shape[2])))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
i = 0
trained = False
last_err = 1e10
#PATIENCE = int(len(X)*0.01) + 10
#err_hist = deque(maxlen=PATIENCE)
PATIENCE = 5
notimp = 0
while not trained:
i+=1
plt.clf()
hist = model.fit(X_train, Y_train, epochs=1, batch_size=1, verbose=2)
pred = model.predict(X_test)
plt.title(SYMBOL)
plt.plot(Y_test, "x", label="real")
plt.plot(pred, label="pred")
plt.legend();
plt.pause(0.1)
err = mean_squared_error(Y_test, pred)
if err >= last_err:
notimp += 1
else:
model.save("../data/" + SYMBOL + "/" + "_model.h5")
plt.savefig("../data/" + SYMBOL + "/trainfig.png")
trained = err > last_err and notimp >= PATIENCE
last_err = err
print("EPOCH", i,"MSE", err, "NOT_IMPROVING", notimp, "PATIENCE", PATIENCE)
if trained:
break
model.save("../data/"+SYMBOL+"/" + "_model.h5")
plt.savefig("../data/"+SYMBOL+"/trainfig.png")
plt.title("Loss")
model.save("../data/"+SYMBOL+"/" + "_model.h5")
plt.plot(hist.history['loss'], label="train");
plt.legend()
pred = model.predict(X_test)
plt.plot(Y_test, "x", label="real")
plt.plot(pred, label="pred")
plt.legend();
plt.show()
#### try to predict the trade volume ####
X_train, X_test, y_train, vy_test = train_test_split(X, vY, test_size=0.20, random_state=44)
lr = LinearRegression().fit(X_train, y_train)
c = lr.coef_
print(lr.coef_, lr.intercept_)
print(lr.coef_[-1])
pred = lr.predict(X_test)
plt.plot(vy_test, "x", label="real")
plt.plot(pred, label="pred")
plt.legend();
plt.show()
|
# Microwave (greedy choice of 5-minute, 1-minute and 10-second buttons)
T = int(input())
button = [300, 60, 10]
count = [0, 0, 0]
for i in range(len(button)):
if T >= button[i]:
count[i] += T // button[i]
T %= button[i]
if T != 0:
print(-1)
else:
for j in range(0, len(count)):
print(count[j], end = " ")
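# Worked example (added): T = 100 -> 0 * 300s + 1 * 60s + 4 * 10s, so the
# program prints "0 1 4"; a value such as T = 99 leaves a remainder of 9
# seconds and prints -1 instead.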
|
# Generated by Django 3.0.5 on 2020-04-11 23:14
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='customuser',
name='user_name',
),
]
|
#!/usr/bin/env python
"""Extract individual letter images using Tesseract box files.
Usage:
./extract_box.py path/to/data.box path/to/data.png outputdir
"""
import errno
import os
import re
import sys
from PIL import Image
import box
def path_for_letter(output_dir, image_path, idx, letter):
image_base = re.sub(r'\..*', '', os.path.basename(image_path))
# The OS X file system is case insensitive.
# This makes sure that 'a' and 'A' get mapped to different directories, and
# that the file name is valid (i.e. doesn't have a slash in it).
safe_char = re.sub(r'[^a-zA-Z0-9.,"\'\[\]\(\)]', '', letter)
letter = '%03d-%s' % (ord(letter), safe_char)
return os.path.join(output_dir, letter), '%s.%s.png' % (image_base, idx)
# From http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else: raise
if __name__ == '__main__':
_, box_path, image_path, output_dir = sys.argv
boxes = box.load_box_file(box_path)
im = Image.open(image_path)
w, h = im.size
for idx, box in enumerate(boxes):
x1, x2 = box.left, box.right
y1, y2 = h - box.top, h - box.bottom
assert x2 > x1
assert y2 > y1
char_im = im.crop((x1, y1, x2, y2))
out_dir, out_file = path_for_letter(output_dir, image_path, idx, box.letter)
out_path = os.path.join(out_dir, out_file)
mkdir_p(out_dir)
char_im.save(out_path)
print 'Wrote %s' % out_path
|
from .base import set_plots, heatmap, add_watermark
|
from enum import Enum
class PortModeEnum(str, Enum):
POM_UNTAGGED = "POM_UNTAGGED"
POM_TAGGED_STATIC = "POM_TAGGED_STATIC"
POM_FORBIDDEN = "POM_FORBIDDEN"
|
import bs4, requests, sys, urllib, simplejson
def search(searchText, playlist=False):
results = []
query = requests.get("https://www.youtube.com/results?search_query=" + searchText).text
soup = bs4.BeautifulSoup(query, features="html.parser")
div = [d for d in soup.find_all('div') if d.has_attr('class') and 'yt-lockup-dismissable' in d['class']]
for d in div:
img0 = d.find_all('img')[0]
a0 = d.find_all('a')[0]
imgL = img0['src'] if not img0.has_attr('data-thumb') else img0['data-thumb']
a0 = [x for x in d.find_all('a') if x.has_attr('title')][0]
result = (imgL, 'http://www.youtube.com/'+a0['href'], a0['title'])
if '&list=' in result[1] and playlist == False:
pass
else:
results.append(result)
return results
def getPlaylist(url):
videos = []
listId = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)['list'][0]
url = 'https://www.youtube.com/playlist?list=' + listId
html = requests.get(url).text
soup = bs4.BeautifulSoup(html, features='html.parser')
trs = soup.find_all('tr', attrs={'class': 'pl-video yt-uix-tile '})
for tr in trs:
link = 'https://www.youtube.com/watch?v=' + tr['data-video-id']
video = [tr['data-title'], link]
if video[0] != "[Deleted video]" and video[0] != "[Private video]":
videos.append(video)
#print(trs[0])
return videos
def getInfo(link):
query = requests.get(link).text
soup = bs4.BeautifulSoup(query, features="html.parser")
div = soup.find_all("title")[0].text
div = div.replace(" - YouTube", "")
return str(div)
|
import logging
logger = logging.getLogger(__name__)
def transform_subject(subject, loop_size):
value = 1
for i in range(loop_size):
value *= subject
value = value % 20201227
return value
def reverse_loop_size(subject, result):
loop_size = 2
value = subject
while True:
value *= subject
value = value % 20201227
if value == result:
return loop_size
loop_size += 1
def obtain_encryption_key(card_pk, door_pk):
card_loop_size = reverse_loop_size(subject=7, result=card_pk)
door_loop_size = reverse_loop_size(subject=7, result=door_pk)
encryption_a = transform_subject(subject=door_pk, loop_size=card_loop_size)
encryption_b = transform_subject(subject=card_pk, loop_size=door_loop_size)
assert encryption_a == encryption_b
return encryption_a
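# Added sanity check using the example keys from the puzzle statement
# (card 5764801, door 17807724 -> encryption key 14897079).
if __name__ == '__main__':
    assert obtain_encryption_key(5764801, 17807724) == 14897079
    print("example handshake OK")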
# def get_loop_size(key):
# i = 1
# subject = 1
# value = 1
# initial_value = 1
#
# while True:
# value = initial_value
# while True:
# value *= subject
# value = value % 20201227
#
# if value == key:
# return
#
# i += 1
|
#!/usr/bin/env python3
########################################################################################################################
##### INFORMATION ######################################################################################################
### @PROJECT_NAME: SPLAT: Speech Processing and Linguistic Analysis Tool ###
### @VERSION_NUMBER: ###
### @PROJECT_SITE: github.com/meyersbs/SPLAT ###
### @AUTHOR_NAME: Benjamin S. Meyers ###
### @CONTACT_EMAIL: ben@splat-library.org ###
### @LICENSE_TYPE: MIT ###
########################################################################################################################
########################################################################################################################
import subprocess
try:
import nltk
from nltk import pos_tag
except ImportError:
print("Oops! It looks like NLTK was not installed. Let's fix that.")
print("Installing NLTK...")
status = subprocess.call(["pip3", "install", "nltk"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if status == 0:
print("NLTK was successfully installed!")
else:
print("Hmm... I couldn't install NLTK for you. You probably don't have root privileges. I suggest running this command:\n\tsudo pip3 install nltk")
try:
from nltk.corpus import stopwords
except ImportError:
print("Oops! It looks like some essential NLTK data was not downloaded. Let's fix that.")
print("Downloading 'stopwords' from NLTK ...")
status = subprocess.call(["python3", "-m", "nltk.downloader", "stopwords"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if status == 0:
print("NLTK data 'stopwords' was successfully downloaded!")
else:
print("Hmm... I couldn't download the essential NLTK data for you. I suggest running this command:\n\tpython3"
"-m nltk.downloader stopwords")
try:
from nltk.corpus import names
except ImportError:
print("Oops! It looks like some essential NLTK data was not downloaded. Let's fix that.")
print("Downloading 'names' from NLTK ...")
status = subprocess.call(["python3", "-m", "nltk.downloader", "names"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if status == 0:
print("NLTK data 'names' was successfully downloaded!")
else:
print("Hmm... I couldn't download the essential NLTK data for you. I suggest running this command:\n\tpython3"
"-m nltk.downloader names")
try:
from nltk.corpus import cmudict
except ImportError:
print("Oops! It looks like some essential NLTK data was not downloaded. Let's fix that.")
print("Downloading 'cmudict' from NLTK ...")
status = subprocess.call(["python3", "-m", "nltk.downloader", "cmudict"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if status == 0:
print("NLTK data 'cmudict' was successfully downloaded!")
else:
print("Hmm... I couldn't download the essential NLTK data for you. I suggest running this command:\n\tpython3"
"-m nltk.downloader cmudict")
try:
from nltk.corpus import brown
except ImportError:
print("Oops! It looks like some essential NLTK data was not downloaded. Let's fix that.")
print("Downloading 'brown' from NLTK ...")
status = subprocess.call(["python3", "-m", "nltk.downloader", "brown"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if status == 0:
print("NLTK data 'brown' was successfully downloaded!")
else:
print("Hmm... I couldn't download the essential NLTK data for you. I suggest running this command:\n\tpython3"
"-m nltk.downloader brown")
try:
from nltk.tokenize import punkt
except ImportError:
print("Oops! It looks like some essential NLTK data was not downloaded. Let's fix that.")
print("Downloading 'punkt' from NLTK ...")
status = subprocess.call(["python3", "-m", "nltk.downloader", "punkt"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if status == 0:
print("NLTK data 'punkt' was successfully downloaded!")
else:
print("Hmm... I couldn't download the essential NLTK data for you. I suggest running this command:\n\tpython3"
"-m nltk.downloader punkt")
try:
#from nltk.corpus import averaged_perceptron_tagger
from nltk.tag import PerceptronTagger
except ImportError:
print("Oops! It looks like some essential NLTK data was not downloaded. Let's fix that.")
print("Downloading 'averaged_perceptron_tagger' from NLTK ...")
status = subprocess.call(["python3", "-m", "nltk.downloader", "averaged_perceptron_tagger"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if status == 0:
print("NLTK data 'averaged_perceptron_tagger' was successfully downloaded!")
else:
print("Hmm... I couldn't download the essential NLTK data for you. I suggest running this command:\n\tpython3"
"-m nltk.downloader averaged_perceptron_tagger")
try:
import matplotlib
except ImportError:
print("Oops! It looks like matplotlib was not installed. Let's fix that.")
print("Installing matplotlib...")
status = subprocess.call(["pip3", "install", "matplotlib"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if status == 0:
print("matplotlib was successfully installed!")
else:
print("Hmm... I couldn't install matplotlib for you. You probably don't have root privileges. I suggest running"
"this command:\n\tsudo pip3 install matplotlib")
java_status = subprocess.call(["which", "java"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if java_status != 0:
print("Java is not installed on your system. Java needs to be installed in order for me to do any part-of-speech"
"tagging.\n\nPlease install java and try again.")
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from ovsdbapp import api
class API(api.API, metaclass=abc.ABCMeta):
"""An API based off of the ovn-ic-nbctl CLI interface
This API basically mirrors the ovn-ic-nbctl operations with these changes:
1. Methods that create objects will return a read-only view of the object
2. Methods which list objects will return a list of read-only view objects
"""
@abc.abstractmethod
def ts_add(self, switch, may_exist=False, **columns):
"""Create a transit switch named 'switch'
:param switch: The name of the switch
:type switch: string or uuid.UUID
:param may_exist: If True, don't fail if the switch already exists
:type may_exist: boolean
:param columns: Additional columns to directly set on the switch
:returns: :class:`Command` with RowView result
"""
@abc.abstractmethod
def ts_del(self, switch, if_exists=False):
"""Delete transit switch 'switch' and all its ports
:param switch: The name or uuid of the switch
:type switch: string or uuid.UUID
:param if_exists: If True, don't fail if the switch doesn't exist
:type if_exists: boolean
:returns: :class:`Command` with no result
"""
@abc.abstractmethod
def ts_list(self):
"""Get all transit switches
:returns: :class:`Command` with RowView list result
"""
@abc.abstractmethod
def ts_get(self, switch):
"""Get transit switch for 'switch'
:returns: :class:`Command` with RowView result
"""
|
from meiga import Error
class GivenInputIsNotValidError(Error):
def __init__(self, message):
self.message = f"{self.__class__.__name__:s}: [{message:s}]"
|
import unittest
import json
import jsonasobj
from dict_compare import compare_dicts
from jsonasobj._jsonobj import as_json, as_dict
test_data = {
"@context": {
"name": "http://xmlns.com/foaf/0.1/name",
"knows": "http://xmlns.com/foaf/0.1/knows",
"menu": {
"@id": "name:foo",
"@type": "@id"
}
},
"@id": "http://me.markus-lanthaler.com/",
"name": "Markus Lanthaler",
"knows": [
{
"name": "Dave Longley",
"menu": "something",
"modelDate" : "01/01/2015"
}
]
}
test_json = str(test_data).replace("'", '"')
test_data_slim = {
"knows": [{"name": "Dave Longley"}]
}
test_json_slim = str(test_data_slim).replace("'", '"')
class ExampleTestCase(unittest.TestCase):
def test_example(self):
pyobj = jsonasobj.loads(str(test_json))
self.assertEqual('Markus Lanthaler', pyobj.name)
self.assertEqual(pyobj.name, pyobj['name'])
self.assertEqual('Dave Longley', pyobj.knows[0].name)
self.assertEqual('http://xmlns.com/foaf/0.1/name', pyobj['@context'].name)
self.assertEqual(json.loads(test_json), json.loads(pyobj._as_json))
self.assertEqual(json.loads(pyobj._as_json), json.loads(as_json(pyobj)))
self.assertTrue(compare_dicts(test_data, pyobj._as_dict))
self.assertTrue(compare_dicts(test_data, as_dict(pyobj)))
def test_example_slim(self):
""" Test a slimmed down version of example for inner list """
pyobj = jsonasobj.loads(test_json_slim)
self.assertEqual('Dave Longley', pyobj.knows[0].name)
if __name__ == '__main__':
unittest.main()
|
import os
get_abs_path = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))
MIDI_DIR = get_abs_path("../data/raw/")
SEQUENCE_FILE = get_abs_path("../data/interim/notesequences.tfrecord")
OUTPUT_DIR = get_abs_path("../data/processed")
MODEL_DIR = get_abs_path("../models")
GENERATED_DIR = get_abs_path("../data/generated")
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from readability import Document
class NewsPipeline(object):
def process_item(self, item, spider):
return item
class MongoPipeline(object):
collection_name = 'articles'
def __init__(self, mongo_uri, mongo_db):
self.mongo_uri = mongo_uri
self.mongo_db = mongo_db
@classmethod
def from_crawler(cls, crawler):
return cls(mongo_uri = crawler.settings.get('MONGO_URI'), mongo_db = crawler.settings.get('MONGO_DATABASE', 'news'))
def open_spider(self, spider):
self.client = pymongo.MongoClient(self.mongo_uri)
self.db = self.client[self.mongo_db]
def close_spider(self, spider):
self.client.close()
def process_item(self, item, spider):
self.db[self.collection_name].insert_one(dict(item))
return item
class SanitizerPipeline(object):
"""
    Sanitize documents using Readability. This is not fully implemented and is
    not added to the pipeline because it simply sucks: it keeps failing to
    extract important elements and would still require hand-tuned spider
    selectors for certain fields. We stick to "surgically precise" hand-crafted
    extraction selectors, which produce much cleaner and more usable output.
"""
def process_item(self, item, spider):
# item["body"] = Document(item["body"])
return item
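# Hedged configuration sketch (module path and connection values are
# assumptions, not part of this file): MongoPipeline reads its settings via
# from_crawler(), so enabling it in settings.py typically looks like
#
#     ITEM_PIPELINES = {'<project>.pipelines.MongoPipeline': 300}
#     MONGO_URI = 'mongodb://localhost:27017'
#     MONGO_DATABASE = 'news'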
|
"""Tests for interpreting and handling job configuration submissions."""
# NOTE: importing the entire job_service module allows us to modify its global variables
from datetime import date
from json import dumps, load, loads
from pathlib import Path
from time import time
from typing import Union
from moto import mock_s3, mock_sqs
from boto3 import client
from lambda_services.job_service import job_service
import pytest
@pytest.fixture
def initialize_input_bucket():
"""Create an input bucket to perform test. Returns name of bucket"""
bucket_name = "pytest_input_bucket"
with mock_s3():
s3_client = client("s3")
s3_client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
"LocationConstraint": "us-west-2",
},
)
yield s3_client, bucket_name
@pytest.fixture
def initialize_output_bucket():
"""Create an output bucket to perform test. Returns name of bucket"""
bucket_name = "pytest_output_bucket"
with mock_s3():
s3_client = client("s3")
s3_client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
"LocationConstraint": "us-west-2",
},
)
yield s3_client, bucket_name
@pytest.fixture
def initialize_input_and_output_bucket():
"""
    Create S3 input/output buckets to perform tests.
    Returns the S3 client and both bucket names
"""
input_bucket_name = "pytest_input_bucket"
output_bucket_name = "pytest_output_bucket"
with mock_s3():
s3_client = client("s3")
s3_client.create_bucket(
Bucket=input_bucket_name,
CreateBucketConfiguration={
"LocationConstraint": "us-west-2",
},
)
s3_client.create_bucket(
Bucket=output_bucket_name,
CreateBucketConfiguration={
"LocationConstraint": "us-west-2",
},
)
yield s3_client, input_bucket_name, output_bucket_name
@pytest.fixture
def initialize_job_queue():
"""
    Create an SQS job queue to perform tests.
    Returns the SQS client, queue name, and region name
"""
queue_name = "pytest_sqs_job_queue"
region_name = "us-west-2"
with mock_sqs():
sqs_client = client("sqs", region_name=region_name)
sqs_client.create_queue(QueueName=queue_name)
yield sqs_client, queue_name, region_name
def upload_data(s3_client, bucket_name: str, object_name: str, data):
"""
Use S3 PUT to upload object data.
"""
s3_client.put_object(
Bucket=bucket_name,
Key=object_name,
Body=data,
)
def download_data(
s3_client, bucket_name: str, object_name: str
) -> Union[str, bytes]:
"""
Use S3 GET to download object data.
Returns the data in string or bytes.
"""
s3_resp: dict = s3_client.get_object(Bucket=bucket_name, Key=object_name)
return s3_resp["Body"].read()
def create_version_bucket_and_file(
bucket_name: str, region_name: str, version_key: str
):
s3_client = client("s3")
s3_client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
"LocationConstraint": region_name,
},
)
with open("tests/input_data/versions.json") as fin:
upload_data(s3_client, bucket_name, version_key, fin.read())
@pytest.fixture
def initialize_version_environment():
version_bucket = "pytest_version_bucket"
version_key = "info/versions.json"
region_name = "us-west-2"
with mock_s3():
create_version_bucket_and_file(
version_bucket, region_name, version_key
)
original_VERSION_BUCKET = job_service.VERSION_BUCKET
original_VERSION_KEY = job_service.VERSION_KEY
job_service.VERSION_BUCKET = version_bucket
job_service.VERSION_KEY = version_key
yield
# Reset state of environment variables
job_service.VERSION_BUCKET = original_VERSION_BUCKET
job_service.VERSION_KEY = original_VERSION_KEY
def test_get_job_info(initialize_input_bucket):
# Retrieve initialized AWS client and bucket name
s3_client, bucket_name = initialize_input_bucket
# Read sample input JSON file into dict
input_name = Path.cwd() / Path(
"tests/input_data/sample_web-pdb2pqr-job.json"
)
expected_pdb2pqr_job_info: dict
with open(input_name) as fin:
expected_pdb2pqr_job_info = load(fin)
# Upload json for job config file
object_name = "pytest/sample_web-pdb2pqr-job.json"
upload_data(
s3_client, bucket_name, object_name, dumps(expected_pdb2pqr_job_info)
)
# Download using get_job_info()
job_info: dict = job_service.get_job_info(
"2021-05-21/sampleId", bucket_name, object_name
)
# Verify output is dictionary and contents match input
# TODO: Eo300 - check if '==' comparison is sufficient
assert job_info == expected_pdb2pqr_job_info
def test_build_status_dict_valid_job(initialize_version_environment):
"""Test funciton for initial status creation for valid jobtypes"""
# Valid job
job_id = "sampleId"
job_tag = f"2021-05-21/{job_id}"
job_type = "apbs"
input_files = ["sampleId.in", "1fas.pqr"]
output_files = []
job_status = "pending"
status_dict: dict = job_service.build_status_dict(
job_id,
job_tag,
job_type,
job_status,
input_files,
output_files,
message=None,
)
assert "jobid" in status_dict
# assert "jobtag" in status_dict
assert "jobtype" in status_dict
assert job_type in status_dict
assert "status" in status_dict[job_type]
assert status_dict[job_type]["status"] == "pending"
assert status_dict[job_type]["endTime"] is None
assert isinstance(status_dict[job_type]["startTime"], float)
assert isinstance(status_dict[job_type]["inputFiles"], list)
assert isinstance(status_dict[job_type]["outputFiles"], list)
def test_build_status_dict_invalid_job(initialize_version_environment):
"""Test funciton for initial status creation for invalid jobtypes"""
# Invalid job
job_id = "sampleId"
job_tag = f"2021-05-21/{job_id}"
job_type = "nonsenseJobType"
input_files = None
output_files = None
job_status = "invalid"
invalid_message = "Invalid job type"
status_dict: dict = job_service.build_status_dict(
job_id,
job_tag,
job_type,
job_status,
input_files,
output_files,
message=invalid_message,
)
assert "status" in status_dict[job_type]
assert "message" in status_dict[job_type]
assert status_dict[job_type]["status"] == "invalid"
assert status_dict[job_type]["startTime"] is None
assert status_dict[job_type]["inputFiles"] is None
assert status_dict[job_type]["outputFiles"] is None
# assert status_dict[job_type]["subtasks"] == None
def test_upload_status_file(initialize_output_bucket):
# Retrieve initialized AWS client and bucket name
s3_client, bucket_name = initialize_output_bucket
# Retrieve original global variable names from module
original_OUTPUT_BUCKET = job_service.OUTPUT_BUCKET
# Create sample status dict
job_id = "sampleId"
job_type = "pdb2pqr"
current_date = date.today().isoformat()
sample_status: dict = {
"jobid": job_id,
"jobtype": job_type,
job_type: {
"status": "pending",
"startTime": time(),
"endTime": None,
"subtasks": [],
"inputFiles": [f"{current_date}/{job_id}/1fas.pdb"],
"outputFiles": [],
},
}
# Upload dict to S3 as JSON
status_objectname: str = f"{current_date}/{job_id}/{job_type}-status.json"
job_service.OUTPUT_BUCKET = bucket_name
job_service.upload_status_file(status_objectname, sample_status)
# Download JSON from S3, parse into dict
downloaded_object_data: str = loads(
download_data(s3_client, bucket_name, status_objectname)
)
# Compare downloaded dict with expected (sample dict)
assert downloaded_object_data == sample_status
# Reset module global variables to original state
job_service.OUTPUT_BUCKET = original_OUTPUT_BUCKET
def test_interpret_job_submission_invalid(
initialize_input_and_output_bucket, initialize_job_queue
):
# Retrieve initialized AWS client and bucket name
(
s3_client,
input_bucket_name,
output_bucket_name,
) = initialize_input_and_output_bucket
sqs_client, queue_name, region_name = initialize_job_queue
# Initialize version-related variables
version_bucket = "pytest_version_bucket"
version_key = "info/versions.json"
create_version_bucket_and_file(version_bucket, region_name, version_key)
original_VERSION_BUCKET = job_service.VERSION_BUCKET
original_VERSION_KEY = job_service.VERSION_KEY
# Retrieve original global variable names from module
original_OUTPUT_BUCKET = job_service.OUTPUT_BUCKET
original_SQS_QUEUE_NAME = job_service.SQS_QUEUE_NAME
original_JOB_QUEUE_REGION = job_service.JOB_QUEUE_REGION
# Initialize job variables
job_id = "sampleId"
job_type = "invalidJobType"
job_date = "2021-05-16"
# Upload JSON for invalid jobtype
input_name = Path.cwd() / Path("tests/input_data/invalid-job.json")
invalid_job_info: dict
with open(input_name) as fin:
invalid_job_info = load(fin)
job_object_name = f"{job_date}/{job_id}/{job_type}-sample-job.json"
upload_data(
s3_client, input_bucket_name, job_object_name, dumps(invalid_job_info)
)
# Setup dict with expected S3 trigger content
s3_event: dict
s3_event_filepath = Path.cwd() / Path(
"tests/input_data/invalid_job-s3_trigger.json"
)
with open(s3_event_filepath) as fin:
s3_event = load(fin)
    # Set module globals and interpret invalid job trigger
job_service.SQS_QUEUE_NAME = queue_name
job_service.OUTPUT_BUCKET = output_bucket_name
job_service.JOB_QUEUE_REGION = region_name
job_service.VERSION_BUCKET = version_bucket
job_service.VERSION_KEY = version_key
job_service.interpret_job_submission(s3_event, None)
# Obtain SQS message
queue_url: str = sqs_client.get_queue_url(QueueName=queue_name)["QueueUrl"]
queue_message_response = sqs_client.receive_message(
QueueUrl=queue_url, MaxNumberOfMessages=1
)
"""Job type invalid: there should be no messages in queue"""
assert "Messages" not in queue_message_response
# Get status from output bucket
status_object_name = f"{job_date}/{job_id}/{job_type}-status.json"
status_object_data: dict = loads(
download_data(s3_client, output_bucket_name, status_object_name)
)
"""Check for expected values if invalid jobtype"""
assert status_object_data["jobid"] == "sampleId"
assert status_object_data["jobtype"] == job_type
assert job_type in status_object_data
assert "message" in status_object_data[job_type]
assert status_object_data[job_type]["status"] == "invalid"
assert status_object_data[job_type]["inputFiles"] is None
assert status_object_data[job_type]["outputFiles"] is None
assert status_object_data[job_type]["startTime"] is None
assert status_object_data[job_type]["endTime"] is None
# Reset module global variables to original state
job_service.SQS_QUEUE_NAME = original_SQS_QUEUE_NAME
job_service.OUTPUT_BUCKET = original_OUTPUT_BUCKET
job_service.JOB_QUEUE_REGION = original_JOB_QUEUE_REGION
job_service.VERSION_BUCKET = original_VERSION_BUCKET
job_service.VERSION_KEY = original_VERSION_KEY
def initialize_s3_and_sqs_clients(
input_bucket_name: str,
output_bucket_name: str,
queue_name: str,
region_name: str,
):
sqs_client = client("sqs", region_name=region_name)
sqs_client.create_queue(QueueName=queue_name)
s3_client = client("s3")
s3_client.create_bucket(
Bucket=input_bucket_name,
CreateBucketConfiguration={
"LocationConstraint": region_name,
},
)
s3_client.create_bucket(
Bucket=output_bucket_name,
CreateBucketConfiguration={
"LocationConstraint": region_name,
},
)
return s3_client, sqs_client
INPUT_JOB_LIST: list = []
EXPECTED_OUTPUT_LIST: list = []
with open(
Path.cwd() / Path("tests/input_data/test-job_service-input.json")
) as fin:
INPUT_JOB_LIST = load(fin)
with open(
Path.cwd() / Path("tests/expected_data/test-job_service-output.json")
) as fin:
EXPECTED_OUTPUT_LIST = load(fin)
@mock_s3
@mock_sqs
@pytest.mark.parametrize(
"apbs_test_job,expected_output",
list(zip(INPUT_JOB_LIST, EXPECTED_OUTPUT_LIST)),
)
def test_interpret_job_submission_success(
apbs_test_job: dict, expected_output: dict
):
s3_event: dict = apbs_test_job["trigger"]
job_info: dict = apbs_test_job["job"]
expected_sqs_message: dict = expected_output["sqs_message"]
expected_status: dict = expected_output["initial_status"]
input_bucket_name = "pytest_input_bucket"
output_bucket_name = "pytest_output_bucket"
version_bucket_name = "pytest_version_bucket"
version_object_key = "info/versions.json"
queue_name = "pytest_sqs_job_queue"
region_name = "us-west-2"
job_tag: str = expected_sqs_message["job_tag"]
job_type: str = expected_sqs_message["job_type"]
s3_client, sqs_client = initialize_s3_and_sqs_clients(
input_bucket_name, output_bucket_name, queue_name, region_name
)
create_version_bucket_and_file(
version_bucket_name, region_name, version_object_key
)
# Retrieve original global variable names from module
original_OUTPUT_BUCKET = job_service.OUTPUT_BUCKET
original_SQS_QUEUE_NAME = job_service.SQS_QUEUE_NAME
original_JOB_QUEUE_REGION = job_service.JOB_QUEUE_REGION
original_VERSION_BUCKET = job_service.VERSION_BUCKET
original_VERSION_KEY = job_service.VERSION_KEY
# Upload job JSON to input bucket
job_object_name: str = s3_event["Records"][0]["s3"]["object"]["key"]
upload_data(
s3_client,
input_bucket_name,
job_object_name,
dumps(job_info),
)
# Upload additional input data to input bucket
if "upload" in apbs_test_job:
for file_name in apbs_test_job["upload"]["input"]:
file_contents: str = open(
Path.cwd() / Path(f"tests/input_data/{file_name}")
).read()
upload_data(
s3_client,
input_bucket_name,
f"{job_tag}/{file_name}",
file_contents,
)
for file_name in apbs_test_job["upload"]["output"]:
file_contents: str = open(
Path.cwd() / Path(f"tests/input_data/{file_name}")
).read()
upload_data(
s3_client,
output_bucket_name,
f"{job_tag}/{file_name}",
file_contents,
)
# Set module globals and interpret PDB2PQR job trigger
job_service.SQS_QUEUE_NAME = queue_name
job_service.OUTPUT_BUCKET = output_bucket_name
job_service.JOB_QUEUE_REGION = region_name
job_service.VERSION_BUCKET = version_bucket_name
job_service.VERSION_KEY = version_object_key
job_service.interpret_job_submission(s3_event, None)
# Obtain message from SQS and status from S3
queue_url: str = sqs_client.get_queue_url(QueueName=queue_name)["QueueUrl"]
queue_message_response = sqs_client.receive_message(
QueueUrl=queue_url, MaxNumberOfMessages=1
)
# TODO: adjust assertion to handle invalid cases
assert "Messages" in queue_message_response
queue_message = queue_message_response["Messages"][0]
message_contents: dict = loads(queue_message["Body"])
message_receipt_handle = queue_message["ReceiptHandle"]
"""Compare queue contents with expected"""
assert message_contents == expected_sqs_message
# job_id: str = expected_sqs_message["job_id"]
# job_date: str = expected_sqs_message["job_date"]
status_object_name: str = f"{job_tag}/{job_type}-status.json"
status_object_data: dict = loads(
download_data(s3_client, output_bucket_name, status_object_name)
)
"""Check that status contains expected values"""
assert status_object_data["jobid"] == expected_status["jobid"]
assert status_object_data["jobtype"] == expected_status["jobtype"]
assert status_object_data["metadata"] == expected_status["metadata"]
assert job_type in status_object_data
assert status_object_data[job_type]["status"] == "pending"
assert (
status_object_data[job_type]["inputFiles"]
== expected_status[job_type]["inputFiles"]
)
assert (
status_object_data[job_type]["outputFiles"]
== expected_status[job_type]["outputFiles"]
)
# Checking type here since startTime is determined at runtime
assert isinstance(status_object_data[job_type]["startTime"], float)
assert status_object_data[job_type]["endTime"] is None
# Delete message from SQS queue
sqs_client.delete_message(
QueueUrl=queue_url, ReceiptHandle=message_receipt_handle
)
# Reset module global variables to original state
job_service.SQS_QUEUE_NAME = original_SQS_QUEUE_NAME
job_service.OUTPUT_BUCKET = original_OUTPUT_BUCKET
job_service.JOB_QUEUE_REGION = original_JOB_QUEUE_REGION
job_service.VERSION_BUCKET = original_VERSION_BUCKET
job_service.VERSION_KEY = original_VERSION_KEY
|
class HapiError(ValueError):
"""Any problems get thrown as HapiError exceptions with the relevant info inside"""
def __init__(self, result, request):
super(HapiError,self).__init__(result.reason)
self.result = result
self.request = request
def __str__(self):
return "\n---- request ----\n%s %s%s [timeout=%s]\n\n---- body ----\n%s\n\n---- headers ----\n%s\n\n---- result ----\n%s %s\n\n---- body ----\n%s\n\n---- headers ----\n%s" % (
self.request['method'], self.request['host'], self.request['url'], self.request['timeout'],
self.request['data'],
self.request['headers'],
self.result.status, self.result.reason,
self.result.body,
self.result.msg)
def __unicode__(self):
return self.__str__()
|
from xylophone import resources
from xylophone.stages.base import StageBase, ArgumentParser, register_stage
@register_stage("words-dict")
class WordsDict(StageBase):
_parser = ArgumentParser(
usage="xylophone -s words-dict [--obj-add OBJECTS_ADDITIONAL] [--adj-add ADJECTIVES_ADDITIONAL] "
"[--verb-add VERBS_ADDITIONAL] [--obj_source OBJECTS_SOURCE] [--adj_source ADJECTIVES_SOURCE] "
"[--verb_source VERBS_SOURCE]",
description="Words dictionary source.",
add_help=False)
_parser.add_argument("--obj_add", dest="objects_additional", default="")
_parser.add_argument("--adj_add", dest="adjectives_additional", default="")
_parser.add_argument("--verb_add", dest="verbs_additional", default="")
_parser.add_argument("--obj_source", dest="objects_source", default="default/objects.txt")
_parser.add_argument("--adj_source", dest="adjectives_source", default="default/adjectives.txt")
_parser.add_argument("--verb_source", dest="verbs_source", default="default/verbs.txt")
def __init__(self, *args: [str]):
self._args, self._unknown_args = self._parser.parse_known_args(args)
def process(self) -> [str]:
objects = resources.read(self._args.objects_source) + " " + self._args.objects_additional
adjectives = resources.read(self._args.adjectives_source) + " " + self._args.adjectives_additional
verbs = resources.read(self._args.verbs_source) + " " + self._args.verbs_additional
return [*self._unknown_args, "--obj", objects, "--adj", adjectives, "--verb", verbs]
@staticmethod
def help() -> str:
return WordsDict._parser.format_help()
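# Hedged CLI sketch (the stage wiring itself lives elsewhere in xylophone):
#
#     xylophone -s words-dict --obj_add "kazoo" --adj_add "squeaky"
#
# process() then returns the unknown args followed by
# ["--obj", <objects>, "--adj", <adjectives>, "--verb", <verbs>], where each
# word list is the resource file contents plus the *_add additions.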
|
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Predicting Android build Performance using Machine Learning',
author='Dheeraj Bajaj',
license='MIT',
)
|
DEBUG = True
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "test",
"USER": "postgres",
"PASSWORD": "",
"HOST": "",
"PORT": "",
}
}
INSTALLED_APPS = (
'banner',
'banner.tests',
'django_comments',
'jmbo',
'layers',
'link',
'photologue',
'category',
'likes',
'secretballot',
'preferences',
'sites_groups',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sites',
'sortedm2m',
)
ROOT_URLCONF = 'banner.tests.urls'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'likes.middleware.SecretBallotUserIpUseragentMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
)
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": TEMPLATE_CONTEXT_PROCESSORS,
},
},
]
USE_TZ = True
SITE_ID = 1
STATIC_URL = '/static/'
SECRET_KEY = "SECRET_KEY"
|
from dipsim import multiframe, util
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.patches as patches
import os; import time; start = time.time(); print('Running...')
import matplotlib.gridspec as gridspec
# Main input parameters
col_labels = ['Geometry\n (NA = 0.6, $\\beta=80{}^{\circ}$)', 'Uncertainty Ellipses', r'$\sigma_{\Omega}$ [sr]', 'Median$\{\sigma_{\Omega}\}$ [sr]', 'MAD$\{\sigma_{\Omega}\}$ [sr]', '', '']
fig_labels = ['a)', 'b)', 'c)', 'd)', 'e)', 'f)', 'g)']
n_pts = 5000 #Points on sphere
n_pts_sphere = 50000 # Points on sphere
n_grid_pts = 21
n_line_pts = 50
n_rows, n_cols = 1, len(col_labels)
inch_fig = 5
dpi = 300
# Setup figure and axes
fig = plt.figure(figsize=(2.2*inch_fig, 3*inch_fig))
gs0 = gridspec.GridSpec(3, 1, wspace=0, hspace=0.2, height_ratios=[0.9,1,1])
gs_up = gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=gs0[0], width_ratios=[1, 1, 1, 0.06], wspace=0.1)
gs_middle = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs0[1], width_ratios=[1, 1], wspace=0.4)
gs_down = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs0[2], width_ratios=[1, 1], wspace=0.4)
gs_middle_left = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_middle[0], width_ratios=[1, 0.05], wspace=0.1)
gs_middle_right = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_middle[1], width_ratios=[1, 0.05], wspace=0.1)
gs_down_left = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_down[0], width_ratios=[1, 0.05], wspace=0.1)
gs_down_right = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_down[1], width_ratios=[1, 0.05], wspace=0.1)
ax0 = plt.subplot(gs_up[0])
ax1 = plt.subplot(gs_up[1])
ax2 = plt.subplot(gs_up[2])
cax2 = plt.subplot(gs_up[3])
ax3 = plt.subplot(gs_middle_left[0])
cax3 = plt.subplot(gs_middle_left[1])
ax4 = plt.subplot(gs_middle_right[0])
cax4 = plt.subplot(gs_middle_right[1])
ax5 = plt.subplot(gs_down_left[0])
cax5 = plt.subplot(gs_down_left[1]); cax5.axis('off');
ax6 = plt.subplot(gs_down_right[0])
cax6 = plt.subplot(gs_down_right[1]); cax6.axis('off');
for ax, col_label, fig_label in zip([ax0, ax1, ax2, ax3, ax4, ax5, ax6], col_labels, fig_labels):
ax.annotate(col_label, xy=(0,0), xytext=(0.5, 1.06), textcoords='axes fraction',
va='center', ha='center', fontsize=14, annotation_clip=False)
ax.annotate(fig_label, xy=(0,0), xytext=(0, 1.06), textcoords='axes fraction',
va='center', ha='center', fontsize=14, annotation_clip=False)
for ax in [ax0, ax1, ax2, ax3, ax4, ax5, ax6]:
ax.tick_params(axis='both', labelsize=14)
for cax in [cax2, cax3, cax4]:
cax.tick_params(axis='both', labelsize=14)
# Calculate a list of points to sample in region
n = 1.33
NA_max = n*np.sin(np.pi/4)
NA = np.linspace(0, n, 1000)
lens_bound = np.rad2deg(2*np.arcsin(NA/n))
cover_bound = np.rad2deg(np.pi - 2*np.arcsin(NA/n))
pts = np.mgrid[n/n_grid_pts/2:n:n_grid_pts*1j,0:180:n_grid_pts*1j].reshape((2, n_grid_pts**2)).T.tolist()
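# Feasibility of an (NA, beta) design point: beta must lie between the
# "objectives collide with each other" bound (2*arcsin(NA/n)) and the
# "objectives collide with cover slip" bound (180 - 2*arcsin(NA/n)), with a
# 20 degree margin on each side, and NA is capped just above NA_max.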
def is_feasible(pt):
if pt[1] < np.rad2deg(np.pi - 2*np.arcsin(pt[0]/n)) + 20 and pt[1] > np.rad2deg(2*np.arcsin(pt[0]/n)) - 20 and pt[0] < NA_max + 0.1:
return True
else:
return False
pts_list = [pt for pt in pts if is_feasible(pt)]
pts = np.array(pts_list).T
# Calculate med and mad for each point
def calc_stats(param):
na = param[0]
angle = param[1]
exp = multiframe.MultiFrameMicroscope(ill_thetas=[np.deg2rad(angle/2), -np.deg2rad(angle/2)], det_thetas=[-np.deg2rad(angle/2), np.deg2rad(angle/2)],
ill_nas=2*[na], det_nas=2*[na],
ill_types=2*['wide'], det_types=2*['lens'],
colors=['(1,0,0)', '(0,0,1)'], n_frames=4,
n_pts=n_pts, max_photons=500, n_samp=1.33)
exp.calc_estimation_stats()
data = exp.sa_uncert
med = np.median(data)
return med, np.median(np.abs(data - med))
med = []
mad = []
for i, pt in enumerate(pts.T):
print('Calculating microscope '+str(i+1)+'/'+str(pts.shape[1]))
x = calc_stats(pt)
med.append(x[0])
mad.append(x[1])
# Plot 2D regions
def plot_2d_regions(ax, cax, pts, data, special_pt=(-1,-1),
line_pt0=None, line_pt1=None):
ax.plot(NA, lens_bound, 'k-', zorder=11)
ax.plot(NA, cover_bound, 'k-', zorder=11)
# Set y ticks
from matplotlib.ticker import FuncFormatter, FixedLocator
def degrees(x, pos):
return str(int(x)) + '${}^{\circ}$'
ax.yaxis.set_major_locator(FixedLocator([0, 45, 90, 135, 180]))
ax.yaxis.set_major_formatter(FuncFormatter(degrees))
from matplotlib.ticker import FuncFormatter, FixedLocator
ax.set_xticks([0, 0.25, 0.5, 0.75, 1.0, 1.33])
ax.set_xticklabels(['0', '0.25', '0.5', '0.75', '1.0', '1.33'])
# Annotation
def my_annotate(ax, annotation, xy, fontsize=9, rotation=0):
ax.annotate(annotation, xy=(0,0), xytext=xy, textcoords='axes fraction',
va='center', ha='center', fontsize=fontsize,
annotation_clip=False, rotation=rotation, zorder=13)
my_annotate(ax, 'NA', (0.5, -0.12), fontsize=14)
my_annotate(ax, '$\\beta$, Angle Between Objectives', (-0.25, 0.5), fontsize=14, rotation=90)
my_annotate(ax, 'Objectives collide\nwith cover slip', (0.65, 0.85), fontsize=14)
my_annotate(ax, 'Objectives collide\nwith each other', (0.65, 0.15), fontsize=14)
my_annotate(ax, 'Feasible', (0.3, 0.5), fontsize=14)
# Calculate colors
color_map='coolwarm'
color_norm='log'
color_min=1e-4
color_max=1e1
if color_norm == 'linear':
norm = matplotlib.colors.Normalize(vmin=color_min, vmax=color_max)
elif color_norm == 'log':
norm = matplotlib.colors.LogNorm(vmin=color_min, vmax=color_max)
elif color_norm == 'linlog':
norm = matplotlib.colors.SymLogNorm(linthresh=linthresh, vmin=-color_max, vmax=color_max)
elif color_norm == 'power':
norm = matplotlib.colors.PowerNorm(gamma=gamma, vmin=data.min(), vmax=data.max())
norm_data = norm(data).data
norm_data2 = np.expand_dims(norm_data, 1)
cmap = matplotlib.cm.get_cmap(color_map)
colors = np.apply_along_axis(cmap, 1, norm_data2)
# Plot scatter for colorbar
sc = ax.scatter(pts[0,:], pts[1,:], c=data, s=0, cmap=cmap, norm=norm,
marker='s', lw=0)
ax.plot([line_pt0[0], line_pt1[0]], [line_pt0[1], line_pt1[1]], '-', color='darkmagenta', lw=3, zorder=1)
ax.plot(special_pt[0], special_pt[1], 'kx', markersize=5)
# Plot patches
width = n/(n_grid_pts)
    height = 180/(n_grid_pts - 0.5)  # cell height (also used for the beta = 0 row)
    for i, (pt, c) in enumerate(zip(pts_list, colors)):
        ax.add_patch(patches.Rectangle((pt[0] - width/2, pt[1] - height/2), width, height, facecolor=c, edgecolor=c))
fig.colorbar(sc, cax=cax, orientation='vertical')
# Mask around lines
ax.fill_between(NA, lens_bound, 0, color='white', zorder=2)
ax.fill_between(NA, cover_bound, 180, color='white', zorder=2)
ax.set(xlim=[0, 1.33], ylim=[0, 180])
# Plot 1D region
def plot_1d_regions(ax, pts, data, special_pt=(-1,-1), y_pos=None, y_lim=None, xtitle=None):
# Set y ticks
from matplotlib.ticker import FuncFormatter, FixedLocator
def degrees(x, pos):
return str(int(x)) + '${}^{\circ}$'
ax.xaxis.set_major_locator(FixedLocator([53, 90, 135, 127]))
ax.xaxis.set_major_formatter(FuncFormatter(degrees))
from matplotlib.ticker import FuncFormatter, FixedLocator
ax.set_yticks(y_pos)
ax.set_yticklabels(["{:.1e}".format(x).replace('e-0', 'e-') for x in y_pos])
# Annotation
def my_annotate(ax, annotation, xy, fontsize=9, rotation=0):
ax.annotate(annotation, xy=(0,0), xytext=xy, textcoords='axes fraction',
va='center', ha='center', fontsize=fontsize,
annotation_clip=False, rotation=rotation, zorder=13)
my_annotate(ax, '$\\beta$, Angle Between Objectives', (0.5, -0.12), fontsize=14)
my_annotate(ax, xtitle, (-0.25, 0.5), fontsize=14, rotation=90)
ax.set(xlim=[53, 127], ylim=y_lim)
ax.plot(pts, data, '-', color='darkmagenta', lw=3, zorder=1)
# Plot first two columns
angle = 80
na = 0.6
exp = multiframe.MultiFrameMicroscope(ill_thetas=[np.deg2rad(angle/2), -np.deg2rad(angle/2)], det_thetas=[-np.deg2rad(angle/2), np.deg2rad(angle/2)],
ill_nas=2*[na], det_nas=2*[na],
ill_types=2*['wide'], det_types=2*['lens'],
colors=['(1,0,0)', '(0,0,1)'], n_frames=4,
n_pts=n_pts_sphere, max_photons=500, n_samp=1.33)
exp.calc_estimation_stats()
# Make scene string
scene_string = exp.scene_string()
line_string = "draw(O--expi(theta, 0));\n"
line_string = line_string.replace('theta', str(np.deg2rad(angle/2)))
scene_string += line_string
line_string = "draw(O--expi(theta, 0));\n"
line_string = line_string.replace('theta', str(np.deg2rad(-angle/2)))
scene_string += line_string
arc_string = 'draw(L=Label("$\\beta$", align=N), arc(O, 0.1*expi(-theta, 0), 0.1*expi(theta, 0), normal=Y));\n'
arc_string = arc_string.replace('theta', str(np.deg2rad(angle/2)))
scene_string += arc_string
util.draw_scene(scene_string, my_ax=ax0, dpi=dpi)
util.draw_scene(exp.ellipse_string(n_pts=250), my_ax=ax1, dpi=dpi)
util.plot_sphere(directions=exp.directions, data=exp.sa_uncert,
color_norm='log', linthresh=1e-4,
color_min=1e-4, color_max=1e1,
my_ax=ax2, my_cax=cax2)
# Find profile points
line_na = 0.6
min_beta = np.rad2deg(2*np.arcsin(line_na/n))
max_beta = 180 - np.rad2deg(2*np.arcsin(line_na/n))
# Plots last two columns
plot_2d_regions(ax3, cax3, pts, med, special_pt=(na, angle), line_pt0=(line_na, min_beta), line_pt1=(line_na, max_beta))
plot_2d_regions(ax4, cax4, pts, mad, special_pt=(na, angle), line_pt0=(line_na, min_beta), line_pt1=(line_na, max_beta))
# Calculate and plot profile
line_beta = np.linspace(min_beta, max_beta, n_line_pts)
line_na = 0.6*np.ones(line_beta.shape)
line_pts = np.vstack([line_na, line_beta])
line_med = []
line_mad = []
for i, pt in enumerate(line_pts.T):
print('Calculating microscope '+str(i+1)+'/'+str(line_pts.shape[1]))
x = calc_stats(pt)
line_med.append(x[0])
line_mad.append(x[1])
plot_1d_regions(ax5, line_beta, line_med, special_pt=angle, y_pos=[4.5e-3, 5e-3, 5.5e-3], y_lim=[4.4e-3, 5.6e-3], xtitle='Median$\{\sigma_{\Omega}\}$ [sr]')
plot_1d_regions(ax6, line_beta, line_mad, special_pt=angle, y_pos=[1e-3, 1.5e-3, 2e-3], y_lim=[8e-4, 2e-3], xtitle='MAD$\{\sigma_{\Omega}\}$ [sr]')
# Label axes and save
print('Saving final figure.')
fig.savefig('../paper/symmetric-widefield.pdf', dpi=250)
print('Total time: '+str(np.round(time.time() - start, 2)))
os.system('say "done"')
|
#!/usr/bin/env python
'''
An example of a tagging service using NER suite.
'''
from argparse import ArgumentParser
from os.path import join as path_join
from os.path import dirname
try:
from json import dumps
except ImportError:
# likely old Python; try to fall back on ujson in brat distrib
from sys import path as sys_path
sys_path.append(path_join(dirname(__file__), '../server/lib/ujson'))
from ujson import dumps
from subprocess import PIPE, Popen
from random import choice, randint
from sys import stderr
from urlparse import urlparse
try:
from urlparse import parse_qs
except ImportError:
# old Python again?
from cgi import parse_qs
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import re
# use the brat sentence splitter
from sentencesplit import sentencebreaks_to_newlines
# and use this hack for converting BIO to standoff
from BIOtoStandoff import BIO_lines_to_standoff
### Constants
DOCUMENT_BOUNDARY = 'END-DOCUMENT'
NERSUITE_SCRIPT = path_join(dirname(__file__), './nersuite_tag.sh')
NERSUITE_COMMAND = [NERSUITE_SCRIPT, '-multidoc', DOCUMENT_BOUNDARY]
ARGPARSER = ArgumentParser(description='An example HTTP tagging service using NERsuite')
ARGPARSER.add_argument('-p', '--port', type=int, default=47111,
help='port to run the HTTP service on (default: 47111)')
###
### Globals
tagger_process = None
def run_tagger(cmd):
# runs the tagger identified by the given command.
global tagger_process
try:
tagger_process = Popen(cmd, stdin=PIPE, stdout=PIPE, bufsize=1)
except Exception, e:
print >> stderr, "Error running '%s':" % cmd, e
raise
def _apply_tagger(text):
global tagger_process, tagger_queue
# the tagger expects a sentence per line, so do basic splitting
try:
splittext = sentencebreaks_to_newlines(text)
except:
# if anything goes wrong, just go with the
# original text instead
print >> stderr, "Warning: sentence splitting failed for input:\n'%s'" % text
splittext = text
print >> tagger_process.stdin, splittext
print >> tagger_process.stdin, DOCUMENT_BOUNDARY
tagger_process.stdin.flush()
response_lines = []
while True:
l = tagger_process.stdout.readline()
l = l.rstrip('\n')
if l == DOCUMENT_BOUNDARY:
break
response_lines.append(l)
try:
tagged_entities = BIO_lines_to_standoff(response_lines, text)
except:
# if anything goes wrong, bail out
print >> stderr, "Warning: BIO-to-standoff conversion failed for BIO:\n'%s'" % '\n'.join(response_lines)
return {}
anns = {}
for t in tagged_entities:
anns["T%d" % t.idNum] = {
'type': t.eType,
'offsets': ((t.startOff, t.endOff), ),
'texts': (t.eText, ),
}
return anns
class NERsuiteTaggerHandler(BaseHTTPRequestHandler):
def do_GET(self):
# Get our query
query = parse_qs(urlparse(self.path).query)
try:
json_dic = _apply_tagger(query['text'][0])
except KeyError:
# We weren't given any text to tag, such is life, return nothing
json_dic = {}
# Write the response
self.send_response(200)
self.send_header('Content-type', 'application/json; charset=utf-8')
self.end_headers()
self.wfile.write(dumps(json_dic))
print >> stderr, ('Generated %d annotations' % len(json_dic))
def log_message(self, format, *args):
return # Too much noise from the default implementation
def main(args):
argp = ARGPARSER.parse_args(args[1:])
print >> stderr, 'Starting NERsuite ...'
run_tagger(NERSUITE_COMMAND)
server_class = HTTPServer
httpd = server_class(('localhost', argp.port), NERsuiteTaggerHandler)
print >> stderr, 'NERsuite tagger service started'
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print >> stderr, 'NERsuite tagger service stopped'
if __name__ == '__main__':
from sys import argv
exit(main(argv))
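# Hedged usage sketch: with the service running (default port 47111), a GET
# request such as
#
#     curl 'http://localhost:47111/?text=IL-2%20binds%20the%20IL-2%20receptor'
#
# returns a JSON object mapping "T1", "T2", ... to annotation dicts with
# 'type', 'offsets' and 'texts' keys, as assembled in _apply_tagger() above.
# The example text is illustrative only.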
|
import sys
import numpy as np
import schwimmbad
import multiprocessing
import logging
import time
import fitsio
from mdetsims.metacal import (
MetacalPlusMOF,
MetacalTrueDetect,
MetacalSepDetect)
from mdetsims.run_utils import (
estimate_m_and_c, cut_nones,
measure_shear_metadetect,
measure_shear_metacal_plus_mof)
from metadetect.metadetect import Metadetect
from config import CONFIG
from run_preamble import get_shear_meas_config
(SWAP12, CUT_INTERP, DO_METACAL_MOF, DO_METACAL_SEP,
DO_METACAL_TRUEDETECT,
SHEAR_MEAS_CONFIG, SIM_CLASS) = get_shear_meas_config()
# process CLI arguments
n_sims = int(sys.argv[1])
# logging
if n_sims == 1:
for lib in [__name__, 'ngmix', 'metadetect', 'mdetsims']:
lgr = logging.getLogger(lib)
hdr = logging.StreamHandler(sys.stdout)
hdr.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
lgr.setLevel(logging.DEBUG)
lgr.addHandler(hdr)
LOGGER = logging.getLogger(__name__)
START = time.time()
# deal with MPI
try:
if n_sims > 1:
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
n_ranks = comm.Get_size()
HAVE_MPI = True
else:
raise Exception() # punt to the except clause
except Exception:
n_ranks = 1
rank = 0
comm = None
HAVE_MPI = False
if HAVE_MPI and n_ranks > 1:
n_workers = n_ranks if n_sims > 1 else 1
else:
n_workers = multiprocessing.cpu_count() if n_sims > 1 else 1
USE_MPI = HAVE_MPI and n_ranks > 1
# code to do computation
if DO_METACAL_MOF or DO_METACAL_TRUEDETECT or DO_METACAL_SEP:
def _meas_shear(res):
return measure_shear_metacal_plus_mof(
res, s2n_cut=10, t_ratio_cut=0.5)
else:
def _meas_shear(res):
return measure_shear_metadetect(
res, s2n_cut=10, t_ratio_cut=1.2, cut_interp=CUT_INTERP)
def _add_shears(cfg, plus=True):
g1 = 0.02
g2 = 0.0
if not plus:
g1 *= -1
if SWAP12:
g1, g2 = g2, g1
cfg.update({'g1': g1, 'g2': g2})
def _run_sim(seed):
config = {}
config.update(SHEAR_MEAS_CONFIG)
try:
# pos shear
rng = np.random.RandomState(seed=seed + 1000000)
_add_shears(CONFIG, plus=True)
if SWAP12:
assert CONFIG['g1'] == 0.0
assert CONFIG['g2'] == 0.02
else:
assert CONFIG['g1'] == 0.02
assert CONFIG['g2'] == 0.0
sim = SIM_CLASS(rng=rng, **CONFIG)
if DO_METACAL_MOF:
mbobs = sim.get_mbobs()
md = MetacalPlusMOF(config, mbobs, rng)
md.go()
elif DO_METACAL_SEP:
mbobs = sim.get_mbobs()
md = MetacalSepDetect(config, mbobs, rng)
md.go()
elif DO_METACAL_TRUEDETECT:
mbobs, tcat = sim.get_mbobs(return_truth_cat=True)
md = MetacalTrueDetect(config, mbobs, rng, tcat)
md.go()
else:
mbobs = sim.get_mbobs()
md = Metadetect(config, mbobs, rng)
md.go()
pres = _meas_shear(md.result)
dens = len(md.result['noshear']) / sim.area_sqr_arcmin
LOGGER.info('found %f objects per square arcminute', dens)
# neg shear
rng = np.random.RandomState(seed=seed + 1000000)
_add_shears(CONFIG, plus=False)
if SWAP12:
assert CONFIG['g1'] == 0.0
assert CONFIG['g2'] == -0.02
else:
assert CONFIG['g1'] == -0.02
assert CONFIG['g2'] == 0.0
sim = SIM_CLASS(rng=rng, **CONFIG)
if DO_METACAL_MOF:
mbobs = sim.get_mbobs()
md = MetacalPlusMOF(config, mbobs, rng)
md.go()
elif DO_METACAL_SEP:
mbobs = sim.get_mbobs()
md = MetacalSepDetect(config, mbobs, rng)
md.go()
elif DO_METACAL_TRUEDETECT:
mbobs, tcat = sim.get_mbobs(return_truth_cat=True)
md = MetacalTrueDetect(config, mbobs, rng, tcat)
md.go()
else:
mbobs = sim.get_mbobs()
md = Metadetect(config, mbobs, rng)
md.go()
mres = _meas_shear(md.result)
dens = len(md.result['noshear']) / sim.area_sqr_arcmin
LOGGER.info('found %f objects per square arcminute', dens)
retvals = (pres, mres)
except Exception as e:
print(repr(e))
retvals = (None, None)
if USE_MPI and seed % 1000 == 0:
print(
"[% 10ds] %04d: %d" % (time.time() - START, rank, seed),
flush=True)
return retvals
if rank == 0:
if DO_METACAL_MOF:
print('running metacal+MOF', flush=True)
elif DO_METACAL_SEP:
print('running metacal+SEP', flush=True)
elif DO_METACAL_TRUEDETECT:
print('running metacal+true detection', flush=True)
else:
print('running metadetect', flush=True)
print('config:', CONFIG, flush=True)
print('swap 12:', SWAP12)
print('use mpi:', USE_MPI, flush=True)
print("n_ranks:", n_ranks, flush=True)
print("n_workers:", n_workers, flush=True)
if n_workers == 1:
outputs = [_run_sim(0)]
else:
if not USE_MPI:
pool = schwimmbad.JoblibPool(
n_workers, backend='multiprocessing', verbose=100)
else:
pool = schwimmbad.choose_pool(mpi=USE_MPI, processes=n_workers)
outputs = pool.map(_run_sim, range(n_sims))
pool.close()
pres, mres = zip(*outputs)
pres, mres = cut_nones(pres, mres)
if rank == 0:
dt = [('g1p', 'f8'), ('g1m', 'f8'), ('g1', 'f8'),
('g2p', 'f8'), ('g2m', 'f8'), ('g2', 'f8')]
dplus = np.array(pres, dtype=dt)
dminus = np.array(mres, dtype=dt)
with fitsio.FITS('data.fits', 'rw') as fits:
fits.write(dplus, extname='plus')
fits.write(dminus, extname='minus')
m, msd, c, csd = estimate_m_and_c(pres, mres, 0.02, swap12=SWAP12)
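    # Hedged note (convention assumed, not defined in this script): the fit
    # follows the usual shear-bias model g_meas ~ (1 + m) * g_true + c, so m is
    # the multiplicative bias, c the additive bias, and msd/csd their reported
    # uncertainties.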
print("""\
# of sims: {n_sims}
noise cancel m : {m:f} +/- {msd:f}
noise cancel c : {c:f} +/- {csd:f}""".format(
n_sims=len(pres),
m=m,
msd=msd,
c=c,
csd=csd), flush=True)
|
# Sum of the even numbers from 2 to 100 (= 2 * (1 + 2 + ... + 50) = 2550).
total = 0
for i in range(2,101,2):
    total += i
print(total)
|
# Generated by Django 3.0.3 on 2020-03-20 15:26
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_core', '0111_remove_project_duration_months'),
]
operations = [
migrations.AddField(
model_name='historicalphysicalperson',
name='orcid_id',
field=models.CharField(help_text='Orcid ID', max_length=19, null=True, validators=[django.core.validators.RegexValidator(code='Invalid format', message='Format orcid ID is 0000-0000-0000-0000', regex='^[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}$')]),
),
migrations.AddField(
model_name='physicalperson',
name='orcid_id',
field=models.CharField(help_text='Orcid ID', max_length=19, null=True, validators=[django.core.validators.RegexValidator(code='Invalid format', message='Format orcid ID is 0000-0000-0000-0000', regex='^[0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{4}$')]),
),
]
|
import numpy as np
from scipy import signal, ndimage
from skimage.feature import blob_dog, blob_log
from skimage.exposure import rescale_intensity
from hexrd import convolution
from hexrd.constants import fwhm_to_sigma
# =============================================================================
# BACKGROUND REMOVAL
# =============================================================================
def _scale_image_snip(y, offset, invert=False):
"""
Log-Log scale image for snip
Parameters
----------
    y : array_like
        Image (or spectrum) values to transform.
    offset : float
        Offset subtracted before the forward transform; should be <= the
        minimum of the original image.
    invert : bool, optional
        If True, apply the inverse transform. The default is False.
    Returns
    -------
    ndarray
        The forward- or inverse-transformed values.
Notes
-----
offset should be <= min of the original image
"""
if invert:
return (np.exp(np.exp(y) - 1.) - 1.)**2 + offset
else:
return np.log(np.log(np.sqrt(y - offset) + 1.) + 1.)
def fast_snip1d(y, w=4, numiter=2):
"""
"""
bkg = np.zeros_like(y)
min_val = np.nanmin(y)
zfull = _scale_image_snip(y, min_val, invert=False)
for k, z in enumerate(zfull):
b = z
for i in range(numiter):
for p in range(w, 0, -1):
kernel = np.zeros(p*2 + 1)
kernel[0] = 0.5
kernel[-1] = 0.5
b = np.minimum(b, signal.fftconvolve(z, kernel, mode='same'))
z = b
bkg[k, :] = _scale_image_snip(b, min_val, invert=True)
return bkg
def snip1d(y, w=4, numiter=2, threshold=None):
"""
Return SNIP-estimated baseline-background for given spectrum y.
!!!: threshold values get marked as NaN in convolution
!!!: mask in astropy's convolve is True for masked; set to NaN
"""
# scale input
bkg = np.zeros_like(y)
min_val = np.nanmin(y)
zfull = _scale_image_snip(y, min_val, invert=False)
# handle mask
if threshold is not None:
mask = y <= threshold
else:
mask = np.zeros_like(y, dtype=bool)
# step through rows
for k, z in enumerate(zfull):
if np.all(mask[k]):
bkg[k, :] = np.nan
else:
b = z
for i in range(numiter):
for p in range(w, 0, -1):
kernel = np.zeros(p*2 + 1)
kernel[0] = kernel[-1] = 1./2.
b = np.minimum(
b,
convolution.convolve(
z, kernel, boundary='extend', mask=mask[k],
nan_treatment='interpolate', preserve_nan=True
)
)
z = b
bkg[k, :] = _scale_image_snip(b, min_val, invert=True)
nan_idx = np.isnan(bkg)
bkg[nan_idx] = threshold
return bkg
def snip1d_quad(y, w=4, numiter=2):
"""Return SNIP-estimated baseline-background for given spectrum y.
Adds a quadratic kernel convolution in parallel with the linear kernel."""
min_val = np.nanmin(y)
kernels = []
for p in range(w, 1, -2):
N = p * 2 + 1
# linear kernel
kern1 = np.zeros(N)
kern1[0] = kern1[-1] = 1./2.
# quadratic kernel
kern2 = np.zeros(N)
kern2[0] = kern2[-1] = -1./6.
kern2[int(p/2.)] = kern2[int(3.*p/2.)] = 4./6.
kernels.append([kern1, kern2])
z = b = _scale_image_snip(y, min_val, invert=False)
for i in range(numiter):
for (kern1, kern2) in kernels:
c = np.maximum(ndimage.convolve1d(z, kern1, mode='nearest'),
ndimage.convolve1d(z, kern2, mode='nearest'))
b = np.minimum(b, c)
z = b
return _scale_image_snip(b, min_val, invert=True)
def snip2d(y, w=4, numiter=2, order=1):
"""
Return estimate of 2D-array background by "clipping" peak-like structures.
2D adaptation of the peak-clipping component of the SNIP algorithm.
Parameters
----------
y : 2-D input array
w : integer (default 4)
kernel size (maximum kernel extent actually = 2 * w * order + 1)
numiter : integer (default 2)
number of iterations
order : integer (default 1)
maximum order of filter kernel, either 1 (linear) or 2 (quadratic)
Returns
-------
out : 2-D array with SNIP-estimated background of y
    References
    ----------
[1] C.G. Ryan et al, "SNIP, A statistics-sensitive background treatment
for the quantitative analysis of PIXE spectra in geoscience
applications," Nucl. Instr. and Meth. B 34, 396 (1988).
[2] M. Morhac et al., "Background elimination methods for multidimensional
coincidence gamma-ray spectra," Nucl. Instr. and Meth. A 401, 113
(1997).
"""
maximum, minimum = np.fmax, np.fmin
min_val = np.nanmin(y)
# create list of kernels
kernels = []
for p in range(w, 0, -1): # decrement window starting from w
N = 2 * p * order + 1 # size of filter kernels
p1 = order * p
# linear filter kernel
kern1 = np.zeros((N, N)) # initialize a kernel with all zeros
xx, yy = np.indices(kern1.shape) # x-y indices of kernel points
ij = np.round(
np.hypot(xx - p1, yy - p1)
) == p1 # select circular shape
kern1[ij] = 1 / ij.sum() # normalize so sum of kernel elements is 1
kernels.append([kern1])
if order >= 2: # add quadratic filter kernel
p2 = p1 // 2
kern2 = np.zeros_like(kern1)
radii, norms = (p2, 2 * p2), (4/3, -1/3)
for radius, norm in zip(radii, norms):
ij = np.round(np.hypot(xx - p1, yy - p1)) == radius
kern2[ij] = norm / ij.sum()
kernels[-1].append(kern2)
# convolve kernels with input array (in log space)
z = b = _scale_image_snip(y, min_val, invert=False)
for i in range(numiter):
for kk in kernels:
if order > 1:
c = maximum(ndimage.convolve(z, kk[0], mode='nearest'),
ndimage.convolve(z, kk[1], mode='nearest'))
else:
c = ndimage.convolve(z, kk[0], mode='nearest')
b = minimum(b, c)
z = b
return _scale_image_snip(b, min_val, invert=True)
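# Hedged usage sketch (not part of the original module); the array size and
# peak amplitude are illustrative assumptions. Kept as an uncalled helper so
# importing the module stays side-effect free.
def _snip2d_demo():
    """Estimate and subtract a SNIP background from a synthetic 2-D image."""
    rng = np.random.default_rng(seed=0)
    img = rng.poisson(lam=10., size=(64, 64)).astype(float)
    img[30:34, 30:34] += 100.  # synthetic peak on a noisy background
    bkg = snip2d(img, w=4, numiter=2, order=1)
    return img - bkg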
# =============================================================================
# FEATURE DETECTION
# =============================================================================
def find_peaks_2d(img, method, method_kwargs):
if method == 'label':
# labeling mask
structureNDI_label = ndimage.generate_binary_structure(2, 1)
# First apply filter if specified
filter_fwhm = method_kwargs['filter_radius']
if filter_fwhm:
filt_stdev = fwhm_to_sigma * filter_fwhm
img = -ndimage.filters.gaussian_laplace(
img, filt_stdev
)
labels_t, numSpots_t = ndimage.label(
img > method_kwargs['threshold'],
structureNDI_label
)
coms_t = np.atleast_2d(
ndimage.center_of_mass(
img,
labels=labels_t,
index=np.arange(1, np.amax(labels_t) + 1)
)
)
elif method in ['blob_log', 'blob_dog']:
# must scale map
# TODO: we should so a parameter study here
scl_map = rescale_intensity(img, out_range=(-1, 1))
# TODO: Currently the method kwargs must be explicitly specified
# in the config, and there are no checks
# for 'blob_log': min_sigma=0.5, max_sigma=5,
# num_sigma=10, threshold=0.01, overlap=0.1
# for 'blob_dog': min_sigma=0.5, max_sigma=5,
# sigma_ratio=1.6, threshold=0.01, overlap=0.1
if method == 'blob_log':
blobs = np.atleast_2d(
blob_log(scl_map, **method_kwargs)
)
else: # blob_dog
blobs = np.atleast_2d(
blob_dog(scl_map, **method_kwargs)
)
numSpots_t = len(blobs)
coms_t = blobs[:, :2]
return numSpots_t, coms_t
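# Hedged usage sketch (not part of the original module); image size, blob
# amplitude, and kwargs follow the illustrative values noted in the comments
# above. Kept as an uncalled helper so importing the module has no side effects.
def _find_peaks_2d_demo():
    """Detect a single synthetic blob with the 'blob_log' method."""
    img = np.zeros((128, 128))
    img[60:64, 40:44] = 50.
    return find_peaks_2d(
        img, method='blob_log',
        method_kwargs=dict(min_sigma=0.5, max_sigma=5, num_sigma=10,
                           threshold=0.01, overlap=0.1))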
|
# Generated by Django 3.2.7 on 2021-09-18 13:04
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pokecards', '0003_alter_cards_start_date'),
]
operations = [
migrations.AlterField(
model_name='cards',
name='start_date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 9, 18, 10, 4, 17, 100011)),
),
]
|
from django.contrib.gis.db import models
class SamplingMethod(models.Model):
"""Sampling method model definition"""
sampling_method = models.CharField(
max_length=200,
null=False,
blank=False
)
effort_measure = models.CharField(
max_length=300,
null=True,
blank=True
)
normalisation_factor = models.IntegerField(
null=True,
blank=True
)
factor_description = models.CharField(
max_length=300,
null=True,
blank=True
)
comment = models.TextField(
null=True,
blank=True
)
def __unicode__(self):
return self.sampling_method
|
import discord
import configparser
import asyncio
import os
client = discord.Client()
@client.event
async def on_ready():
a = configparser.ConfigParser()
print(client.user.id)
print("ready")
game = discord.Game("p!help")
await client.change_presence(status=discord.Status.online, activity=game)
@client.event
async def on_message(message):
if message.content.startswith("p!help"):
embed = discord.Embed(title="Funny Discord Bot", description="made by Dohyun Kim", color=0xadfffe)
embed.set_author(name="Bot Commands")
embed.set_thumbnail(
url="https://theme.zdassets.com/theme_assets/678183/af1a442f9a25a27837f17805b1c0cfa4d1725f90.png")
embed.add_field(name="p!help", value="Get help (you're using it lol)", inline=False)
embed.add_field(name="p!info", value="Show bot's information", inline=False)
embed.add_field(name="p!invite", value="Invite me!", inline=False)
embed.add_field(name="p!support", value="Get support", inline=False)
embed.add_field(name="p!donate", value="Donate to bot developer.", inline=False)
embed.add_field(name="p!ban [@Member] [Reason(optional)]", value="Bans user (Fake)", inline=False)
embed.add_field(name="p!kick [@Member] [Reason(optional)]", value="Kick user (Fake)", inline=False)
embed.add_field(name="p!mute [@Member] [Reason(optional)]", value="Mute user (Fake)", inline=False)
embed.add_field(name="p!warn [@Member] [Reason(optional)]", value="Warn user (Fake)", inline=False)
embed.add_field(name="p!hack [@Member]", value="Hack user's Discord account (Fake)", inline=False)
embed.add_field(name="p!kill [@Member]", value="Kill user (Fake)", inline=False)
embed.add_field(name="p!get-nitro", value="Get Discord Nitro! (Maybe real)", inline=False)
embed.add_field(name="p!get-real-nitro", value="Get REAL Discord Nitro!", inline=False)
await message.channel.send(embed=embed)
if message.content.startswith("p!info"):
embed = discord.Embed(title="Funny Bot#3396", color=0x99ffe6)
embed.set_author(name="Information")
embed.add_field(name="Developer", value="Dohyun Kim", inline=True)
embed.add_field(name="Logo Creator", value="Dohyun Kim", inline=True)
embed.add_field(name="Creation Date", value="August 28 2020", inline=True)
embed.add_field(name="Prefix", value="`p!`", inline=True)
embed.add_field(name="Bot Commands", value="`p!help` for bot commands.", inline=True)
embed.add_field(name="Donate", value="`p!donate` to donate", inline=True)
embed.add_field(name="Invite", value="`p!invite` to invite me!", inline=True)
embed.add_field(name="Image Source", value="Dohyun Kim, Google, Pixabay, and Discord", inline=True)
await message.channel.send(embed=embed)
if message.content.startswith("p!invite"):
embed = discord.Embed(title="Invite Me!",
url="https://discord.com/oauth2/authorize?client_id=748731937081786459&scope=bot&permissions=201812992",
color=0xffe66b)
embed.set_author(name="Funny Discord Bot Invite")
embed.set_footer(text="Be careful! I can ban you anytime! LOL")
await message.channel.send(embed=embed)
if message.content.startswith("p!support"):
embed = discord.Embed(title="Join this server: https://discord.gg/xbn8hzM", color=0x8095ff)
embed.set_author(name="Get Bot Support")
embed.set_thumbnail(url="https://blogfiles.pstatic.net/MjAyMDA4MjhfMjM4/MDAxNTk4NTg1MTcwNDU3.tw9dy7KDcWJ02-g50DeSeGN57JCS5zoxmVhuxT2bWSkg.ibbzJQ2xtgOQ3_EuUGI3gP3poeDHbFHzbNIVF_Td6Mog.JPEG.dohyun854/8biticon_512.jpg?type=w1")
embed.set_footer(text="No spam DMing!!")
await message.channel.send(embed=embed)
if message.content.startswith("p!donate"):
embed = discord.Embed(title="Donate to Bot Developer", url="https://www.patreon.com/Codeman_IT", color=0xceff85)
embed.set_author(name="Donate")
embed.set_thumbnail(
url="https://blogfiles.pstatic.net/MjAyMDA4MjhfMjM4/MDAxNTk4NTg1MTcwNDU3.tw9dy7KDcWJ02-g50DeSeGN57JCS5zoxmVhuxT2bWSkg.ibbzJQ2xtgOQ3_EuUGI3gP3poeDHbFHzbNIVF_Td6Mog.JPEG.dohyun854/8biticon_512.jpg?type=w1")
embed.set_footer(text="Join developer's fan server too! Link: https://discord.gg/xbn8hzM")
await message.channel.send(embed=embed)
if message.content.startswith("p!ban"):
if message.content[28:] == "":
author = message.guild.get_member(int(message.content[9:27]))
embed = discord.Embed(title="Your punishment has been updated.", color=0xff0000)
embed.set_author(name="Moderation")
embed.add_field(name="Action", value="Ban", inline=True)
embed.add_field(name="Reason", value="No Reason", inline=True)
embed.add_field(name="Duration", value="Permanent", inline=True)
await author.send(embed=embed)
embed = discord.Embed(title="**Banned**", color=0xff0000)
embed.set_author(name="Moderation Log")
embed.add_field(name="User", value=author, inline=True)
embed.add_field(name="Action by", value="YOU", inline=True)
embed.add_field(name="Reason", value="No Reason", inline=True)
embed.add_field(name="Duration", value="Permanent", inline=True)
await message.channel.send(embed=embed)
else:
author = message.guild.get_member(int(message.content[9:27]))
reason = message.content[28:]
embed = discord.Embed(title="Your punishment has been updated.", color=0xff0000)
embed.set_author(name="Moderation")
embed.add_field(name="Action", value="Ban", inline=True)
embed.add_field(name="Reason", value=reason, inline=True)
embed.add_field(name="Duration", value="Permanent", inline=True)
await author.send(embed=embed)
embed = discord.Embed(title="**Banned**", color=0xff0000)
embed.set_author(name="Moderation Log")
embed.add_field(name="User", value=author, inline=True)
embed.add_field(name="Action by", value="YOU", inline=True)
embed.add_field(name="Reason", value=reason, inline=True)
embed.add_field(name="Duration", value="Permanent", inline=True)
await message.channel.send(embed=embed)
if message.content.startswith("p!kick"):
if message.content[29:] == "":
author = message.guild.get_member(int(message.content[10:28]))
embed = discord.Embed(title="Your punishment has been updated.", color=0xffa500)
embed.set_author(name="Moderation")
embed.add_field(name="Action", value="Kick", inline=True)
embed.add_field(name="Reason", value="No Reason", inline=True)
await author.send(embed=embed)
embed = discord.Embed(title="**Kicked**", color=0xffa500)
embed.set_author(name="Moderation Log")
embed.add_field(name="User", value=author, inline=True)
embed.add_field(name="Action by", value="YOU", inline=True)
embed.add_field(name="Reason", value="No Reason", inline=True)
await message.channel.send(embed=embed)
else:
author = message.guild.get_member(int(message.content[10:28]))
reason = message.content[29:]
embed = discord.Embed(title="Your punishment has been updated.", color=0xffa500)
embed.set_author(name="Moderation")
embed.add_field(name="Action", value="Kick", inline=True)
embed.add_field(name="Reason", value=reason, inline=True)
await author.send(embed=embed)
embed = discord.Embed(title="**Kicked**", color=0xffa500)
embed.set_author(name="Moderation Log")
embed.add_field(name="User", value=author, inline=True)
embed.add_field(name="Action by", value="YOU", inline=True)
embed.add_field(name="Reason", value=reason, inline=True)
await message.channel.send(embed=embed)
if message.content.startswith("p!mute"):
if message.content[29:] == "":
author = message.guild.get_member(int(message.content[10:28]))
embed = discord.Embed(title="Your punishment has been updated.", color=0xff0000)
embed.set_author(name="Moderation")
embed.add_field(name="Action", value="Mute", inline=True)
embed.add_field(name="Reason", value="No Reason", inline=True)
embed.add_field(name="Duration", value="Permanent", inline=True)
await author.send(embed=embed)
embed = discord.Embed(title="**Muted**", color=0xff0000)
embed.set_author(name="Moderation Log")
embed.add_field(name="User", value=author, inline=True)
embed.add_field(name="Action by", value="YOU", inline=True)
embed.add_field(name="Reason", value="No Reason", inline=True)
embed.add_field(name="Muted until", value="Permanent", inline=True)
await message.channel.send(embed=embed)
else:
author = message.guild.get_member(int(message.content[10:28]))
reason = message.content[29:]
embed = discord.Embed(title="Your punishment has been updated.", color=0xff0000)
embed.set_author(name="Moderation")
embed.add_field(name="Action", value="Mute", inline=True)
embed.add_field(name="Reason", value=reason, inline=True)
embed.add_field(name="Duration", value="Permanent", inline=True)
await author.send(embed=embed)
embed = discord.Embed(title="**Muted**", color=0xff0000)
embed.set_author(name="Moderation Log")
embed.add_field(name="User", value=author, inline=True)
embed.add_field(name="Action by", value="YOU", inline=True)
embed.add_field(name="Reason", value=reason, inline=True)
embed.add_field(name="Muted until", value="Permanent", inline=True)
await message.channel.send(embed=embed)
if message.content.startswith("p!warn"):
if message.content[29:] == "":
author = message.guild.get_member(int(message.content[10:28]))
embed = discord.Embed(title="Your punishment has been updated.", color=0xffee00)
embed.set_author(name="Moderation")
embed.add_field(name="Action", value="Warn", inline=True)
embed.add_field(name="Reason", value="No Reason", inline=True)
await author.send(embed=embed)
embed = discord.Embed(title="**Warned**", color=0xffee00)
embed.set_author(name="Moderation Log")
embed.add_field(name="User", value=author, inline=True)
embed.add_field(name="Action by", value="YOU", inline=True)
embed.add_field(name="Reason", value="No Reason", inline=True)
await message.channel.send(embed=embed)
else:
author = message.guild.get_member(int(message.content[10:28]))
reason = message.content[29:]
embed = discord.Embed(title="Your punishment has been updated.", color=0xffee00)
embed.set_author(name="Moderation")
embed.add_field(name="Action", value="Warn", inline=True)
embed.add_field(name="Reason", value=reason, inline=True)
await author.send(embed=embed)
embed = discord.Embed(title="**Warned**", color=0xffee00)
embed.set_author(name="Moderation Log")
embed.add_field(name="User", value=author, inline=True)
embed.add_field(name="Action by", value="YOU", inline=True)
embed.add_field(name="Reason", value=reason, inline=True)
await message.channel.send(embed=embed)
if message.content.startswith("p!hack"):
author = message.guild.get_member(int(message.content[10:28]))
embed = discord.Embed(title="Finding out IP address......", color=0xffee00)
embed.set_author(name="Hacking......")
msg = await message.channel.send(embed=embed)
        await asyncio.sleep(3.0)
embed = discord.Embed(title="Finding out email address......", color=0xffee00)
embed.set_author(name="Hacking......")
await msg.edit(embed=embed)
        await asyncio.sleep(3.0)
embed = discord.Embed(title="Finding out password......", color=0xffee00)
embed.set_author(name="Hacking......")
await msg.edit(embed=embed)
        await asyncio.sleep(3.0)
embed = discord.Embed(title="Logging in......", color=0xffee00)
embed.set_author(name="Hacking......")
await msg.edit(embed=embed)
        await asyncio.sleep(3.0)
embed = discord.Embed(title="Stealing private information......", color=0xffee00)
embed.set_author(name="Hacking......")
await msg.edit(embed=embed)
        await asyncio.sleep(3.0)
embed = discord.Embed(title="Selling information to a government......", color=0xffee00)
embed.set_author(name="Hacking......")
await msg.edit(embed=embed)
        await asyncio.sleep(3.0)
embed = discord.Embed(title="Virus injecting......", color=0xffee00)
embed.set_author(name="Hacking......")
await msg.edit(embed=embed)
        await asyncio.sleep(3.0)
embed = discord.Embed(title="Hacking complete! Bot earned $98.99", color=0xffee00)
embed.set_author(name="Hacking Complete")
await msg.edit(embed=embed)
embed = discord.Embed(title="You lost $99.98", color=0x00a313)
embed.set_author(name="Account Hacked!!")
embed.set_thumbnail(url="https://cdn.pixabay.com/photo/2017/10/24/07/12/hacker-2883632_1280.jpg")
embed.add_field(name="Hacker", value="Funny Bot#3396", inline=True)
embed.add_field(name="Hack Requester", value="[Private]", inline=True)
embed.add_field(name="Damages",
value="- Private information(includes IP address and password) leaked\n- $99.98 stolen\n- Virus injected to your devices.",
inline=True)
embed.set_footer(text="Automatically called for police.")
await author.send(embed=embed)
if message.content.startswith("p!kill"):
author = message.guild.get_member(int(message.content[10:28]))
embed = discord.Embed(title="How dare you murder our member!!", color=0x9694ff)
embed.set_author(name="Killed!!")
embed.set_thumbnail(url="https://c0.wallpaperflare.com/preview/82/63/732/shooting-killing-murder-crime.jpg")
embed.add_field(name="Killer", value="YOU", inline=True)
embed.add_field(name="Killed Member", value=author, inline=True)
embed.add_field(name="Weapon Used", value="Gun", inline=True)
embed.set_footer(text="Called Police Automatically")
await message.channel.send(embed=embed)
embed = discord.Embed(title="🤪 • • • • • 🔫", color=0x9694ff)
embed.set_author(name="You are MURDERED!")
embed.set_thumbnail(url="https://c0.wallpaperflare.com/preview/82/63/732/shooting-killing-murder-crime.jpg")
embed.add_field(name="Body Condition", value="DEAD", inline=True)
embed.add_field(name="Murderer", value="[Private]", inline=True)
embed.add_field(name="Target", value="YOU", inline=True)
embed.add_field(name="Weapon Used", value="Gun", inline=True)
embed.set_footer(text="Bot called for police automatically.")
await author.send(embed=embed)
if message.content.startswith("p!get-nitro"):
await message.channel.send("https://discord.gift/KEKEKEKEKEKEKEKEKEKEKEKEKEKEKEKEKEKEKEK")
embed = discord.Embed(title="KEKEKEKEK", color=0xffb433)
embed.set_author(name="NO Discord Nitro For You!!")
embed.set_thumbnail(url="https://i.redd.it/mvoen8wq3w831.png")
embed.add_field(name="Why are you not giving me Discord Nitro?", value="Because, you're too DUM.", inline=True)
embed.add_field(name="Can I have Discord Nitro for free?", value="No, never. kkkk", inline=True)
embed.add_field(name="I want Discord Nitro, how can I get it?", value="Buy it, you stupid.", inline=True)
embed.set_footer(text="Bruh, get Discord Nitro legitimately!!")
await message.channel.send(embed=embed)
if message.content.startswith("p!get-real-nitro"):
embed = discord.Embed(title="Get Discord Nitro for FREE!!", url="https://discord.gift/fVAfX9NzdtKVZmgQ",
description="Legitimately Gifting", color=0x8d5cff)
embed.set_author(name="Discord Nitro Gift Here!!")
embed.set_thumbnail(url="https://discord.com/assets/b941bc1dfe379db6cc1f2acc5a612f41.png")
embed.set_footer(text="Yo, this time real Nitro")
await message.channel.send(embed=embed)
access_token = os.environ["BOT_TOKEN"]
client.run(access_token)
|
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
# adapted from https://colab.research.google.com/github/bayesgroup/deepbayes-2019/blob/master/seminars/day6/SparseVD-solution.ipynb
class LinearSVDO(nn.Module):
def __init__(self, in_features, out_features, threshold):
super(LinearSVDO, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.threshold = threshold
self.W = Parameter(torch.Tensor(out_features, in_features))
self.log_alpha = Parameter(torch.Tensor(out_features, in_features))
self.bias = Parameter(torch.Tensor(1, out_features))
self.reset_parameters()
def reset_parameters(self):
self.bias.data.zero_()
self.W.data.normal_(0, 0.02)
self.log_alpha.data.fill_(-5)
def forward(self, x):
self.log_sigma = self.log_alpha / 2.0 + torch.log(1e-16 + torch.abs(self.W))
self.log_sigma = torch.clamp(self.log_sigma, -10, 10)
if self.training:
lrt_mean = F.linear(x, self.W) + self.bias
lrt_std = torch.sqrt(F.linear(x * x, torch.exp(self.log_sigma * 2.0)) + 1e-8)
eps = lrt_std.data.new(lrt_std.size()).normal_()
return lrt_mean + lrt_std * eps
return F.linear(x, self.W * (self.log_alpha < self.threshold).float()) + self.bias
def sparsity(self):
return torch.sum(self.log_alpha > self.threshold).item(), torch.numel(self.log_alpha)
def kl_reg(self):
        # Approximate KL(q || log-uniform prior), summed to a scalar.
        # Constants from Molchanov et al. (2017); plain floats keep the term
        # device-agnostic (the original hard-coded .cuda() tensors).
        k1, k2, k3 = 0.63576, 1.8732, 1.48695
kl = k1 * torch.sigmoid(k2 + k3 * self.log_alpha) - 0.5 * torch.log1p(torch.exp(-self.log_alpha))
a = - torch.sum(kl)
return a
class LinearSVDOAdditiveReparam(nn.Module):
def __init__(self, in_features, out_features, threshold):
super(LinearSVDOAdditiveReparam, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.threshold = threshold
self.W = Parameter(torch.Tensor(out_features, in_features))
self.log_sigma = Parameter(torch.Tensor(out_features, in_features))
self.bias = Parameter(torch.Tensor(1, out_features))
self.reset_parameters()
def reset_parameters(self):
self.bias.data.zero_()
self.W.data.normal_(0, 0.02)
self.log_sigma.data.fill_(-5)
def forward(self, x):
self.log_alpha = self.log_sigma * 2.0 - 2.0 * torch.log(1e-16 + torch.abs(self.W))
self.log_alpha = torch.clamp(self.log_alpha, -10, 10)
if self.training:
lrt_mean = F.linear(x, self.W) + self.bias
lrt_std = torch.sqrt(F.linear(x * x, torch.exp(self.log_sigma * 2.0)) + 1e-8)
eps = lrt_std.data.new(lrt_std.size()).normal_()
return lrt_mean + lrt_std * eps
return F.linear(x, self.W * (self.log_alpha < self.threshold).float()) + self.bias
def sparsity(self):
return torch.sum(self.log_alpha > self.threshold).item(), torch.numel(self.log_alpha)
def kl_reg(self):
        # Approximate KL(q || log-uniform prior), summed to a scalar.
        # Constants from Molchanov et al. (2017); plain floats keep the term
        # device-agnostic (the original hard-coded .cuda() tensors).
        k1, k2, k3 = 0.63576, 1.8732, 1.48695
kl = k1 * torch.sigmoid(k2 + k3 * self.log_alpha) - 0.5 * torch.log1p(torch.exp(-self.log_alpha))
a = - torch.sum(kl)
return a
class LinearSVDONoReparam(nn.Module):
def __init__(self, in_features, out_features, threshold):
super(LinearSVDONoReparam, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.threshold = threshold
self.W = Parameter(torch.Tensor(out_features, in_features))
self.log_alpha = Parameter(torch.Tensor(out_features, in_features))
self.bias = Parameter(torch.Tensor(1, out_features))
self.reset_parameters()
def reset_parameters(self):
self.bias.data.zero_()
self.W.data.normal_(0, 0.02)
self.log_alpha.data.fill_(-5)
def forward(self, x):
if self.training:
mask = 1 + torch.randn_like(self.log_alpha) * torch.exp(self.log_alpha / 2)
return F.linear(x, self.W * mask) + self.bias
return F.linear(x, self.W * (self.log_alpha < self.threshold).float()) + self.bias
def sparsity(self):
return torch.sum(self.log_alpha > self.threshold).item(), torch.numel(self.log_alpha)
def kl_reg(self):
        # Approximate KL(q || log-uniform prior), summed to a scalar.
        # Constants from Molchanov et al. (2017); plain floats keep the term
        # device-agnostic (the original hard-coded .cuda() tensors).
        k1, k2, k3 = 0.63576, 1.8732, 1.48695
kl = k1 * torch.sigmoid(k2 + k3 * self.log_alpha) - 0.5 * torch.log1p(torch.exp(-self.log_alpha))
a = - torch.sum(kl)
return a
class VarDropNet(nn.Module):
def __init__(self, threshold, reparam='additive', dims=None, lamda=None, tol=None):
super(VarDropNet, self).__init__()
if reparam == 'additive':
linear_svdo = LinearSVDOAdditiveReparam
elif reparam == 'local':
linear_svdo = LinearSVDO
else:
linear_svdo = LinearSVDONoReparam
self.fc1 = linear_svdo(28*28, 300, threshold)
self.fc2 = linear_svdo(300, 100, threshold)
self.fc3 = linear_svdo(100, 10, threshold)
        self.threshold = threshold
def forward(self, x):
x = x.view(x.shape[0], -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.log_softmax(self.fc3(x), dim=1)
return x
def sparsity(self):
return [fc.sparsity() for fc in [self.fc1, self.fc2, self.fc3]]
# Define New Loss Function -- SGVLB
class SGVLB(nn.Module):
def __init__(self):
super(SGVLB, self).__init__()
# modified to set via callback
def set_params(self, net, train_size):
self.train_size = train_size
self.net = net
def forward(self, input, target, kl_weight=1.0):
assert not target.requires_grad
kl = 0.0
for module in self.net.children():
if hasattr(module, 'kl_reg'):
kl = kl + module.kl_reg()
return F.cross_entropy(input, target) * self.train_size + kl_weight * kl
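# Minimal usage sketch (illustrative, not part of the original module): one
# training step of VarDropNet with the SGVLB objective on random MNIST-shaped
# data. The batch size, learning rate and kl_weight below are arbitrary.
if __name__ == "__main__":
    torch.manual_seed(0)
    net = VarDropNet(threshold=3, reparam='additive')
    criterion = SGVLB()
    criterion.set_params(net, train_size=60000)
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    x = torch.randn(32, 1, 28, 28)   # fake image batch
    y = torch.randint(0, 10, (32,))  # fake labels
    net.train()
    optimizer.zero_grad()
    loss = criterion(net(x), y, kl_weight=0.1)
    loss.backward()
    optimizer.step()
    print('loss:', loss.item(), '| (high-alpha, total) weights per layer:', net.sparsity())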
|
import shutil
from pathlib import Path
from uuid import uuid4
import pytest
from panimg.models import ImageType, PanImgFile
from panimg.post_processors.tiff_to_dzi import tiff_to_dzi
from tests import RESOURCE_PATH
def test_dzi_creation(tmpdir_factory):
filename = "valid_tiff.tif"
temp_file = Path(tmpdir_factory.mktemp("temp") / filename)
shutil.copy(RESOURCE_PATH / filename, temp_file)
image_file = PanImgFile(
image_id=uuid4(), image_type=ImageType.TIFF, file=temp_file
)
result = tiff_to_dzi(image_files={image_file})
assert len(result.new_image_files) == 1
new_file = result.new_image_files.pop()
assert new_file.image_id == image_file.image_id
assert new_file.image_type == ImageType.DZI
assert (
new_file.file == image_file.file.parent / f"{image_file.image_id}.dzi"
)
assert len(result.new_folders) == 1
new_folder = result.new_folders.pop()
assert new_folder.image_id == image_file.image_id
assert (
new_folder.folder
== image_file.file.parent / f"{image_file.image_id}_files"
)
assert len(list((new_folder.folder).rglob("*.jpeg"))) == 9
def test_no_exception_when_failed(tmpdir_factory, caplog):
filename = "no_dzi.tif"
temp_file = Path(tmpdir_factory.mktemp("temp") / filename)
shutil.copy(RESOURCE_PATH / filename, temp_file)
image_file = PanImgFile(
image_id=uuid4(), image_type=ImageType.TIFF, file=temp_file
)
result = tiff_to_dzi(image_files={image_file})
assert len(result.new_image_files) == 0
assert len(result.new_folders) == 0
# The last warning should be from our logger
last_log = caplog.records[-1]
assert last_log.name == "panimg.post_processors.tiff_to_dzi"
assert last_log.levelname == "WARNING"
assert "Could not create DZI for" in last_log.message
@pytest.mark.parametrize("image_type", (ImageType.DZI, ImageType.MHD))
def test_non_tiff_skipped(tmpdir_factory, image_type):
filename = "valid_tiff.tif"
temp_file = Path(tmpdir_factory.mktemp("temp") / filename)
shutil.copy(RESOURCE_PATH / filename, temp_file)
image_file = PanImgFile(
image_id=uuid4(), image_type=image_type, file=temp_file
)
result = tiff_to_dzi(image_files={image_file})
assert len(result.new_image_files) == 0
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import json
import unittest
from dashboard import bisect_report
from dashboard.common import testing_common
from dashboard.models import try_job
_SAMPLE_BISECT_RESULTS_JSON = json.loads("""
{
"issue_url": "https://test-rietveld.appspot.com/200039",
"aborted_reason": null,
"bad_revision": "",
"bisect_bot": "staging_android_nexus5X_perf_bisect",
"bug_id": 12345,
"buildbot_log_url": "http://build.chromium.org/513",
"change": "7.35%",
"command": "src/tools/perf/run_benchmark foo",
"culprit_data": null,
"good_revision": "",
"metric": "Total/Score",
"culprit_data": null,
"revision_data": [],
"secondary_regressions": [],
"status": "completed",
"test_type": "perf",
"try_job_id": 123456,
"warnings": []
}
""")
_SAMPLE_BISECT_REVISION_JSON = json.loads("""
{
"build_id": null,
"commit_hash": "",
"depot_name": "chromium",
"failed": false,
"failure_reason": null,
"n_observations": 0,
"result": "unknown",
"revision_string": ""
}
""")
_SAMPLE_BISECT_CULPRIT_JSON = json.loads("""
{
"author": "author",
"cl": "cl",
"cl_date": "Thu Dec 08 01:25:35 2016",
"commit_info": "commit_info",
"email": "email",
"revisions_links": [],
"subject": "subject"
}
""")
_ABORTED_NO_VALUES = ('Bisect cannot identify a culprit: No values were found '\
'while testing the reference range.')
_ABORTED_NO_OUTPUT = ('Bisect cannot identify a culprit: Testing the \"good\" '\
'revision failed: Test runs failed to produce output.')
class BisectReportTest(testing_common.TestCase):
def setUp(self):
super(BisectReportTest, self).setUp()
def _AddTryJob(self, results_data, **kwargs):
job = try_job.TryJob(results_data=results_data, **kwargs)
job.put()
return job
def _Revisions(self, revisions):
revision_data = []
for r in revisions:
data = copy.deepcopy(_SAMPLE_BISECT_REVISION_JSON)
data['commit_hash'] = r['commit']
data['failed'] = r.get('failed', False)
data['failure_reason'] = r.get('failure_reason', None)
data['n_observations'] = r.get('num', 0)
data['revision_string'] = r['commit']
data['result'] = r.get('result', 'unknown')
if 'mean' in r:
data['mean_value'] = r.get('mean', 0)
data['std_dev'] = r.get('std_dev', 0)
data['depot_name'] = r.get('depot_name', 'chromium')
revision_data.append(data)
return revision_data
def _Culprit(self, **kwargs):
culprit = copy.deepcopy(_SAMPLE_BISECT_CULPRIT_JSON)
culprit.update(kwargs)
return culprit
def _BisectResults(self, **kwargs):
results = copy.deepcopy(_SAMPLE_BISECT_RESULTS_JSON)
results.update(kwargs)
return results
def testGetReport_CompletedWithCulprit(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 102, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
culprit_data=self._Culprit(cl=102),
good_revision=100, bad_revision=103)
job = self._AddTryJob(results_data)
log_with_culprit = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found with culprit</b>
Suspected Commit
Author : author
Commit : 102
Date : Thu Dec 08 01:25:35 2016
Subject: subject
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
102 200 +- 0 10 bad <--
103 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(log_with_culprit, bisect_report.GetReport(job))
def testGetReport_CompletedWithCulprit_Memory(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 102, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
command='src/tools/perf/run_benchmark system_health.memory_foo',
culprit_data=self._Culprit(cl=102),
good_revision=100, bad_revision=103)
job = self._AddTryJob(results_data)
log_with_culprit = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found with culprit</b>
Suspected Commit
Author : author
Commit : 102
Date : Thu Dec 08 01:25:35 2016
Subject: subject
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : system_health.memory_foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
102 200 +- 0 10 bad <--
103 200 +- 0 10 bad
Please refer to the following doc on diagnosing memory regressions:
https://chromium.googlesource.com/chromium/src/+/master/docs/memory-infra/memory_benchmarks.md
To Run This Test
src/tools/perf/run_benchmark system_health.memory_foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(log_with_culprit, bisect_report.GetReport(job))
def testGetReport_CompletedWithCulpritReturnCode(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 0, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 0, 'num': 10, 'result': 'good'},
{'commit': 102, 'mean': 1, 'num': 10, 'result': 'bad'},
{'commit': 103, 'mean': 1, 'num': 10, 'result': 'bad'},
]),
culprit_data=self._Culprit(cl=102),
good_revision=100, bad_revision=103, test_type='return_code')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Test failure found with culprit</b>
Suspected Commit
Author : author
Commit : 102
Date : Thu Dec 08 01:25:35 2016
Subject: subject
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Revision Exit Code N
100 0 +- 0 10 good
101 0 +- 0 10 good
102 1 +- 0 10 bad <--
103 1 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_CompletedWithoutCulprit(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101},
{'commit': 102},
{'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
culprit_data=None,
good_revision=100, bad_revision=103)
job = self._AddTryJob(results_data)
log_without_culprit = r"""
=== BISECT JOB RESULTS ===
<b>NO Perf regression found</b>
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
103 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
def testGetReport_CompletedWithoutCulpritBuildFailuresAfterReference(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 102, 'failed': True, 'failure_reason': 'reason'},
{'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
culprit_data=None,
good_revision=100, bad_revision=103)
job = self._AddTryJob(results_data)
log_without_culprit = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found but unable to narrow commit range</b>
Build failures prevented the bisect from narrowing the range further.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Suspected Commit Range
2 commits in range
https://chromium.googlesource.com/chromium/src/+log/100..103
Revision Result N
100 100 +- 0 10 good
102 --- --- build failure
103 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
def testGetReport_CompletedWithoutCulpritUnknownDepot(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 1, 'num': 10,
'depot_name': 'a', 'result': 'good'},
{'commit': 101, 'mean': 1, 'num': 10,
'depot_name': 'a', 'result': 'good'},
{'commit': 102, 'mean': 2, 'num': 10,
'depot_name': 'a', 'result': 'bad'},
{'commit': 103, 'mean': 2, 'num': 10,
'depot_name': 'a', 'result': 'bad'},
]),
culprit_data=None,
good_revision=100, bad_revision=103)
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found but unable to narrow commit range</b>
Build failures prevented the bisect from narrowing the range further.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 1 -> 2
Suspected Commit Range
1 commits in range
Unknown depot, please contact team to have this added.
Revision Result N
100 1 +- 0 10 good
101 1 +- 0 10 good
102 2 +- 0 10 bad
103 2 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_CompletedWithBuildFailures(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'failed': True, 'failure_reason': 'reason'},
{'commit': 102, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 103, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 104, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 105, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
culprit_data=self._Culprit(cl=104),
good_revision=100, bad_revision=105)
job = self._AddTryJob(results_data)
log_without_culprit = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found with culprit</b>
Suspected Commit
Author : author
Commit : 104
Date : Thu Dec 08 01:25:35 2016
Subject: subject
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
102 100 +- 0 10 good
103 100 +- 0 10 good
104 200 +- 0 10 bad <--
105 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
def testGetReport_Completed_AbortedWithNoValues(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100},
{'commit': 105},
]),
aborted=True, aborted_reason=_ABORTED_NO_VALUES,
good_revision=100, bad_revision=105)
job = self._AddTryJob(results_data)
log_without_culprit = r"""
=== BISECT JOB RESULTS ===
<b>NO Perf regression found, tests failed to produce values</b>
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
def testGetReport_Completed_AbortedWithNoOutput(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100},
{'commit': 105},
]),
aborted=True, aborted_reason=_ABORTED_NO_OUTPUT,
good_revision=100, bad_revision=105)
job = self._AddTryJob(results_data)
log_without_culprit = r"""
=== BISECT JOB RESULTS ===
<b>NO Perf regression found, tests failed to produce values</b>
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
def testGetReport_CompletedCouldntNarrowCulprit(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'failed': True, 'failure_reason': 'reason'},
{'commit': 102, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 103, 'failed': True, 'failure_reason': 'reason'},
{'commit': 104, 'failed': True, 'failure_reason': 'reason'},
{'commit': 105, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 106, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
culprit_data=None,
good_revision=100, bad_revision=106)
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found but unable to narrow commit range</b>
Build failures prevented the bisect from narrowing the range further.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Suspected Commit Range
3 commits in range
https://chromium.googlesource.com/chromium/src/+log/102..105
Revision Result N
100 100 +- 0 10 good
102 100 +- 0 10 good
103 --- --- build failure
104 --- --- build failure
105 200 +- 0 10 bad
106 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_CompletedMoreThan10BuildFailures(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'failed': True, 'failure_reason': 'reason'},
{'commit': 102, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 103, 'failed': True, 'failure_reason': 'reason'},
{'commit': 104, 'failed': True, 'failure_reason': 'reason'},
{'commit': 105, 'failed': True, 'failure_reason': 'reason'},
{'commit': 106, 'failed': True, 'failure_reason': 'reason'},
{'commit': 107, 'failed': True, 'failure_reason': 'reason'},
{'commit': 108, 'failed': True, 'failure_reason': 'reason'},
{'commit': 109, 'failed': True, 'failure_reason': 'reason'},
{'commit': 110, 'failed': True, 'failure_reason': 'reason'},
{'commit': 111, 'failed': True, 'failure_reason': 'reason'},
{'commit': 112, 'failed': True, 'failure_reason': 'reason'},
{'commit': 113, 'failed': True, 'failure_reason': 'reason'},
{'commit': 114, 'failed': True, 'failure_reason': 'reason'},
{'commit': 115, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 116, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
culprit_data=None,
good_revision=100, bad_revision=116)
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found but unable to narrow commit range</b>
Build failures prevented the bisect from narrowing the range further.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Suspected Commit Range
13 commits in range
https://chromium.googlesource.com/chromium/src/+log/102..115
Revision Result N
100 100 +- 0 10 good
102 100 +- 0 10 good
103 --- --- build failure
--- --- --- too many build failures to list
114 --- --- build failure
115 200 +- 0 10 bad
116 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_FailedBisect(self):
results_data = self._BisectResults(
good_revision=100, bad_revision=110, status='failed')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect failed for unknown reasons</b>
Please contact the team (see below) and report the error.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_BisectWithWarnings(self):
results_data = self._BisectResults(
status='failed', good_revision=100, bad_revision=103,
warnings=['A warning.', 'Another warning.'])
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect failed for unknown reasons</b>
Please contact the team (see below) and report the error.
The following warnings were raised by the bisect job:
* A warning.
* Another warning.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_BisectWithAbortedReason(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 102, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
good_revision=100, bad_revision=103,
status='aborted', aborted_reason='Something terrible happened.')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect failed unexpectedly</b>
Bisect was aborted with the following:
Something terrible happened.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
102 200 +- 0 10 bad
103 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_StatusStarted(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 105, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 106, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
good_revision=100, bad_revision=106,
status='started')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect was unable to run to completion</b>
The bisect was able to narrow the range, you can try running with:
good_revision: 101
bad_revision : 105
If failures persist contact the team (see below) and report the error.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
105 200 +- 0 10 bad
106 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_StatusStarted_FailureReason(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 105, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 106, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
good_revision=100, bad_revision=106,
failure_reason='INFRA_FAILURE',
status='started')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect was unable to run to completion</b>
Error: INFRA_FAILURE
The bisect was able to narrow the range, you can try running with:
good_revision: 101
bad_revision : 105
If failures persist contact the team (see below) and report the error.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
105 200 +- 0 10 bad
106 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_StatusInProgress(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 105, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 106, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
good_revision=100, bad_revision=106,
status='in_progress')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect is still in progress, results below are incomplete</b>
The bisect was able to narrow the range, you can try running with:
good_revision: 101
bad_revision : 105
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
105 200 +- 0 10 bad
106 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_StatusStartedDepotMismatch(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 1, 'num': 10,
'depot_name': 'a', 'result': 'good'},
{'commit': 101, 'mean': 1, 'num': 10,
'depot_name': 'a', 'result': 'good'},
{'commit': 102, 'mean': 2, 'num': 10,
'depot_name': 'b', 'result': 'bad'},
{'commit': 103, 'mean': 2, 'num': 10,
'depot_name': 'b', 'result': 'bad'},
]),
good_revision=100, bad_revision=103,
status='started')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect was unable to run to completion</b>
Please try rerunning the bisect.
If failures persist contact the team (see below) and report the error.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 1 -> 2
Revision Result N
100 1 +- 0 10 good
101 1 +- 0 10 good
102 2 +- 0 10 bad
103 2 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_WithBugIdBadBisectFeedback(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 102, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
good_revision=100, bad_revision=103, bug_id=6789)
job = self._AddTryJob(results_data, bug_id=6789)
job_id = job.key.id()
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found but unable to narrow commit range</b>
Build failures prevented the bisect from narrowing the range further.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35%% | 100 -> 200
Suspected Commit Range
1 commits in range
https://chromium.googlesource.com/chromium/src/+log/101..102
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
102 200 +- 0 10 bad
103 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
Is this bisect wrong?
https://chromeperf.appspot.com/bad_bisect?try_job_id=%s
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!""" % job_id
self.assertEqual(expected_output, bisect_report.GetReport(job))
if __name__ == '__main__':
unittest.main()
|
class Script(bytes):
Opcode = {
"NOP": 0x00,
"BURN": 0x01,
"SUCCESS": 0x02,
"FAIL": 0x03,
"NOT": 0x10,
"EQ": 0x11,
"JMP": 0x20,
"JNZ": 0x21,
"JZ": 0x22,
"PUSH": 0x30,
"POP": 0x31,
"PUSHB": 0x32,
"DUP": 0x33,
"SWAP": 0x34,
"COPY": 0x35,
"DROP": 0x36,
"CHKSIG": 0x80,
"CHKMULTISIG": 0x81,
"BLAKE256": 0x90,
"SHA256": 0x91,
"RIPEMD160": 0x92,
"KECCAK256": 0x93,
"BLAKE160": 0x94,
"BLKNUM": 0xA0,
"CHKTIMELOCK": 0xB0,
}
@staticmethod
def empty():
return Script(bytes())
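# Minimal usage sketch (illustrative): Script is a bytes subclass, so a script
# can be built as a flat sequence of opcode bytes. Operand encoding (e.g. for
# PUSH) is not defined in this file, so only operand-free opcodes are shown.
if __name__ == "__main__":
    s = Script(bytes([Script.Opcode["NOP"], Script.Opcode["SUCCESS"]]))
    print(s.hex())           # '0002'
    print(Script.empty())    # b''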
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('party', '0002_remove_portfolio_is_deleted'),
]
operations = [
migrations.AlterField(
model_name='party',
name='portfolios',
field=models.ManyToManyField(related_name='party_portfolios', null=True, to='party.Portfolio', blank=True),
preserve_default=True,
),
]
|
from random import randint
import zero_division
def randomly_stringify_list_items(num_list):
    amount_to_str = randint(0, len(num_list))
    # print(f'amount = {amount_to_str}')
    for i in range(amount_to_str):
        # print(f'i = {i}')
        to_be_stred = randint(0, len(num_list) - 1)
        # print(f'to_be_stred = {to_be_stred}')
        num_list[to_be_stred] = str(num_list[to_be_stred])
    return num_list
if __name__ == "__main__":
numbers = zero_division.create_numbers()
zero_division.display_num_list(numbers)
got_exc = False
    while not got_exc:
try:
zero_division.print_average(randomly_stringify_list_items(numbers))
numbers = zero_division.create_numbers()
except Exception:
got_exc = True
|
"""
uniquePath1
A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).
The robot can only move either down or right at any point in time. The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).
How many possible unique paths are there?
"""
class Solution(object):
def uniquePathsA(self, m, n):
"""
:param m:
:param n:
:return:
"""
dp = [[0]*n for x in range(m)]
dp[0][0] = 1
for x in range(m):
for y in range(n):
                if x + 1 < m:
dp[x+1][y] += dp[x][y]
if y + 1 < n:
dp[x][y+1] += dp[x][y]
return dp[m-1][n-1]
def uniquePathsB(self, m, n):
"""
:param m:
:param n:
:return:
"""
if m < n:
m, n = n, m
        dp = [0] * n
        dp[0] = 1
for x in range(m):
for y in range(n - 1):
dp[y + 1] += dp[y]
return dp[n - 1]
    # An m x n grid needs m-1 moves down and n-1 moves right, so the count is
    # C(m-1+n-1, n-1), which equals C(m-1+n-1, m-1).
    # @return an integer
    def uniquePathsC(self, m, n):
        N = m - 1 + n - 1
        K = min(m, n) - 1
        # calculate C(N, K); each intermediate product divides exactly
        res = 1
        for i in range(K):
            res = res * (N - i) // (i + 1)
        return res
"""
Unique Paths II
Now consider if some obstacles are added to the grids. How many unique paths would there be?
An obstacle and empty space is marked as 1 and 0 respectively in the grid.
"""
"""
Minimum Path Sum
Given a m x n grid filled with non-negative numbers, find a path from top left to bottom right which minimizes the sum of all numbers along its path.
Note: You can only move either down or right at any point in time.
"""
"""
Dungeon Game
The demons had captured the princess (P) and imprisoned her in the bottom-right corner of a dungeon. The dungeon consists of M x N rooms laid out in a 2D grid. Our valiant knight (K) was initially positioned in the top-left room and must fight his way through the dungeon to rescue the princess.
The knight has an initial health point represented by a positive integer. If at any point his health point drops to 0 or below, he dies immediately.
Some of the rooms are guarded by demons, so the knight loses health (negative integers) upon entering these rooms; other rooms are either empty (0's) or contain magic orbs that increase the knight's health (positive integers).
In order to reach the princess as quickly as possible, the knight decides to move only rightward or downward in each step.
Write a function to determine the knight's minimum initial health so that he is able to rescue the princess.
For example, given the dungeon below, the initial health of the knight must be at least 7 if he follows the optimal path RIGHT-> RIGHT -> DOWN -> DOWN.
"""
class Solution(object):
    def depthSum(self, nestedList):
        """
        Nested List Weight Sum (recursive DFS): each integer is weighted by its
        nesting depth, using the NestedInteger interface.
        :type nestedList: List[NestedInteger]
        :rtype: int
        """
        def dfs(nest, depth):
            total = 0
            for n in nest:
                if n.isInteger():
                    total += depth * n.getInteger()
                else:
                    total += dfs(n.getList(), depth + 1)
            return total
        return dfs(nestedList, 1)
class Solution(object):
def depthSum(self, nestedList):
"""
:type nestedList: List[NestedInteger]
:rtype: int
"""
if len(nestedList) == 0: return 0
stack = []
sum = 0
for n in nestedList:
stack.append((n, 1))
while stack:
next, d = stack.pop(0)
if next.isInteger():
sum += d * next.getInteger()
else:
for i in next.getList():
stack.append((i,d+1))
return sum
|
from .cookie import Cookie
from .exceptions import Disconnect
from .request import Request
from .response import Response
from .socket import Socket
from .wrappers import asgi_to_jackie, jackie_to_asgi
__all__ = [
'asgi_to_jackie',
'Cookie',
'Disconnect',
'jackie_to_asgi',
'Request',
'Response',
'Socket',
]
|
# -------------------------------------------------------------------#
# Contact: mrinalhaloi11@gmail.com
# Copyright 2017, Mrinal Haloi
# -------------------------------------------------------------------#
from __future__ import division, print_function, absolute_import
import numpy as np
import pickle
import tensorflow as tf
_EPSILON = 1e-8
class DataNormalization(object):
"""Input Data Normalization.
Computes inputs data normalization params, It also performs samplewise
and global standardization. It can be use to calculate the samplewise
and global mean and std of the dataset.
It can be use to compute ZCA whitening also.
Args:
name: an optional name of the ops
"""
def __init__(self, name="DataNormalization"):
        self.session = None
        # Per-channel flags read by compute_global_mean / compute_global_std;
        # False means dataset-wide (not per-channel) statistics.
        self.global_mean_pc = False
        self.global_std_pc = False
# Data Persistence
with tf.name_scope(name) as scope:
self.scope = scope
self.global_mean = self.PersistentParameter(scope, name="mean")
self.global_std = self.PersistentParameter(scope, name="std")
self.global_pc = self.PersistentParameter(scope, name="pc")
def restore_params(self, session):
"""Restore the normalization params from the given session.
Args:
session: The session use to perform the computation
Returns:
Returns True/False based on restore success
"""
self.global_mean.is_restored(session)
self.global_std.is_restored(session)
self.global_pc.is_restored(session)
def initialize(self, dataset, session, limit=None):
"""Initialize preprocessing methods that pre-requires calculation over
entire dataset.
Args:
dataset: A `ndarray`, its a ndarray representation of the whole dataset.
session: The session use to perform the computation
limit: Number of data sample to use, if None, computes on the whole dataset
"""
# If a value is already provided, it has priority
if self.global_mean.value is not None:
self.global_mean.assign(self.global_mean.value, session)
# Otherwise, if it has not been restored, compute it
if not self.global_mean.is_restored(session):
print("---------------------------------")
print("Preprocessing... Calculating mean over all dataset " "(this may take long)...")
self.compute_global_mean(dataset, session, limit)
print("Mean: " + str(self.global_mean.value) + " (To avoid "
"repetitive computation, add it to argument 'mean' of "
"`add_featurewise_zero_center`)")
# If a value is already provided, it has priority
if self.global_std.value is not None:
self.global_std.assign(self.global_std.value, session)
# Otherwise, if it has not been restored, compute it
if not self.global_std.is_restored(session):
print("---------------------------------")
print("Preprocessing... Calculating std over all dataset " "(this may take long)...")
self.compute_global_std(dataset, session, limit)
print("STD: " + str(self.global_std.value) + " (To avoid "
"repetitive computation, add it to argument 'std' of "
"`add_featurewise_stdnorm`)")
# If a value is already provided, it has priority
if self.global_pc.value is not None:
self.global_pc.assign(self.global_pc.value, session)
# Otherwise, if it has not been restored, compute it
if not self.global_pc.is_restored(session):
print("---------------------------------")
print("Preprocessing... PCA over all dataset " "(this may take long)...")
self.compute_global_pc(dataset, session, limit)
with open('PC.pkl', 'wb') as f:
pickle.dump(self.global_pc.value, f)
print("PC saved to 'PC.pkl' (To avoid repetitive computation, "
"load this pickle file and assign its value to 'pc' "
"argument of `add_zca_whitening`)")
def zca_whitening(self, image):
"""ZCA wgitening.
Args:
image: input image
Returns:
ZCA whitened image
"""
flat = np.reshape(image, image.size)
white = np.dot(flat, self.global_pc.value)
s1, s2, s3 = image.shape[0], image.shape[1], image.shape[2]
image = np.reshape(white, (s1, s2, s3))
return image
def normalize_image(self, batch):
"""Normalize image to [0,1] range.
Args:
batch: a single image or batch of images
"""
return np.array(batch) / 255.
def crop_center(self, batch, shape):
"""Center crop of input images.
Args:
batch: a single image or batch of images
shape: output shape
Returns:
batch/single image with center crop as the value
"""
oshape = np.shape(batch[0])
nh = int((oshape[0] - shape[0]) * 0.5)
nw = int((oshape[1] - shape[1]) * 0.5)
new_batch = []
for i in range(len(batch)):
new_batch.append(batch[i][nh:nh + shape[0], nw:nw + shape[1]])
return new_batch
def samplewise_zero_center(self, image, per_channel=False):
"""Samplewise standardization.
Args:
image: input image
per_channel: whether to compute per image mean, default: False
Returns:
zero centered image
"""
if not per_channel:
im_zero_mean = image - np.mean(image)
else:
im_zero_mean = image - \
np.mean(image, axis=(0, 1, 2), keepdims=True)
return im_zero_mean
def samplewise_stdnorm(self, image, per_channel=False):
"""Samplewise standardization.
Args:
image: input image
per_channel: whether to compute per image std, default: False
Returns:
zero centered image
"""
if not per_channel:
im_std = np.std(image)
im_zero_std = image / (im_std + _EPSILON)
else:
im_std = np.std(image, axis=(0, 1, 2), keepdims=True)
im_zero_std = image / (im_std + _EPSILON)
return im_zero_std
def compute_global_mean(self, dataset, session, limit=None):
"""Compute mean of a dataset. A limit can be specified for faster
computation, considering only 'limit' first elements.
Args:
dataset: A `ndarray`, its a ndarray representation of the whole dataset.
session: The session use to perform the computation
limit: Number of data sample to use, if None, computes on the whole dataset
Returns:
global dataset mean
"""
_dataset = dataset
mean = 0.
if isinstance(limit, int):
_dataset = _dataset[:limit]
if isinstance(_dataset, np.ndarray) and not self.global_mean_pc:
mean = np.mean(_dataset)
else:
# Iterate in case of non numpy data
for i in range(len(dataset)):
if not self.global_mean_pc:
mean += np.mean(dataset[i]) / len(dataset)
else:
mean += (np.mean(dataset[i], axis=(0, 1), keepdims=True) / len(dataset))[0][0]
self.global_mean.assign(mean, session)
return mean
def compute_global_std(self, dataset, session, limit=None):
""" Compute std of a dataset. A limit can be specified for faster
computation, considering only 'limit' first elements.
Args:
dataset: A `ndarray`, its a ndarray representation of the whole dataset.
session: The session use to perform the computation
limit: Number of data sample to use, if None, computes on the whole dataset
Returns:
global dataset std
"""
_dataset = dataset
std = 0.
if isinstance(limit, int):
_dataset = _dataset[:limit]
if isinstance(_dataset, np.ndarray) and not self.global_std_pc:
std = np.std(_dataset)
else:
for i in range(len(dataset)):
if not self.global_std_pc:
std += np.std(dataset[i]) / len(dataset)
else:
std += (np.std(dataset[i], axis=(0, 1), keepdims=True) / len(dataset))[0][0]
self.global_std.assign(std, session)
return std
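    def compute_global_pc(self, dataset, session, limit=None):
        """Compute ZCA whitening principal components of a dataset.
        This method is called by `initialize` but was missing here; the body
        below is a sketch that assumes `dataset` is a ndarray of shape
        (samples, height, width, channels).
        Args:
            dataset: a `ndarray` representation of the whole dataset
            session: the session used to perform the computation
            limit: number of samples to use; if None, computes on the whole dataset
        Returns:
            global ZCA whitening matrix
        """
        _dataset = dataset
        if isinstance(limit, int):
            _dataset = _dataset[:limit]
        d = np.asarray(_dataset)
        flat = np.reshape(d, (d.shape[0], -1))
        sigma = np.dot(flat.T, flat) / flat.shape[0]
        u, s, _ = np.linalg.svd(sigma)
        pc = np.dot(np.dot(u, np.diag(1. / np.sqrt(s + _EPSILON))), u.T)
        self.global_pc.assign(pc, session)
        return pc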
class PersistentParameter:
"""Create a persistent variable that will be stored into the Graph."""
def __init__(self, scope, name):
self.is_required = False
with tf.name_scope(scope):
with tf.device('/cpu:0'):
# One variable contains the value
self.var = tf.Variable(0., trainable=False, name=name, validate_shape=False)
# Another one check if it has been restored or not
self.var_r = tf.Variable(False, trainable=False, name=name + "_r")
# RAM saved vars for faster access
self.restored = False
self.value = None
def is_restored(self, session):
"""Check whether a param is restored from a session.
Args:
session: session to perform ops
Returns:
a bool, the status of the op
"""
if self.var_r.eval(session=session):
self.value = self.var.eval(session=session)
return True
else:
return False
def assign(self, value, session):
"""Assign a value to session variable.
Args:
value: the value to assign
session: session to perform ops
"""
session.run(tf.assign(self.var, value, validate_shape=False))
self.value = value
session.run(self.var_r.assign(True))
self.restored = True
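# Minimal sketch of how this class is meant to be used (assuming a TF1-style
# graph and session; variable names here are illustrative only):
#   param = PersistentParameter("preprocessing", "global_mean")
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       if not param.is_restored(sess):   # nothing persisted in the graph yet
#           param.assign(0.5, sess)       # store the value into the graph
#       print(param.value)                # RAM copy for fast access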
|
from mixers import BaseMixer, AckermannSteeringMixer, DifferentialSteeringMixer
|
"""Calculator script that pretty much solves general AI.
Trains machine learning models for operations every time you enter an expression.
Accuracy may vary but is pretty much guaranteed to be bad for multiplication and division."""
import random
import sys
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
import actual_parser
def random_float():
"""Get a random float, with a very high chance of it being close to 0."""
return random.normalvariate(0, sys.float_info.min * 50) * sys.float_info.max
def get_fit_data(num_steps, combine_func, polynomial_degree=False, poly=None):
"""Produce data to fit a model."""
training_data = [[random_float(), random_float()] for _ in range(num_steps)]
# Compute labels from the raw (x, y) pairs before any feature expansion,
# otherwise the polynomial bias column would be treated as the first operand.
labels = [combine_func(x[0], x[1]) for x in training_data]
if polynomial_degree:
training_data = poly.fit_transform(training_data)
return training_data, labels
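# Example call (sketch; mirrors how the multiplication model below is trained):
#   poly = PolynomialFeatures(degree=2)
#   data, labels = get_fit_data(1000, lambda x, y: x * y, polynomial_degree=2, poly=poly)
#   # 'data' holds the polynomial feature rows, 'labels' the true products.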
class MachineLearningInterpreter(actual_parser.TreeInterpreter):
"""Intepreter using trained models for operations."""
def __init__(self, tree):
super(MachineLearningInterpreter, self).__init__(tree)
NUM_STEPS_LINEAR = 100_000
NUM_STEPS_POLYNOMIAL = 100_000
POLYNOMIAL_DEGREE = 20
print("Training model for sum...")
self.sum_model = linear_model.LinearRegression()
self.sum_model.fit(*get_fit_data(NUM_STEPS_LINEAR, lambda x, y: x + y))
print("Training model for subtraction...")
self.sub_model = linear_model.LinearRegression()
self.sub_model.fit(*get_fit_data(NUM_STEPS_LINEAR, lambda x, y: x - y))
print("Training model for multiplication...")
self.poly = PolynomialFeatures(degree=POLYNOMIAL_DEGREE)
self.prod_model = linear_model.LinearRegression()
self.prod_model.fit(
*get_fit_data(
NUM_STEPS_POLYNOMIAL,
lambda x, y: x * y,
polynomial_degree=POLYNOMIAL_DEGREE,
poly=self.poly,
)
)
print("Training model for division...")
self.div_model = linear_model.LinearRegression()
self.div_model.fit(
*get_fit_data(
NUM_STEPS_POLYNOMIAL,
lambda x, y: x / y,
polynomial_degree=POLYNOMIAL_DEGREE,
poly=self.poly,
)
)
def n_value(self, token):
return float(token)
def sum_value(self, lv, rv):
return self.sum_model.predict([[lv, rv]])[0]
def sub_value(self, lv, rv):
return self.sub_model.predict([[lv, rv]])[0]
def prod_value(self, lv, rv):
return self.prod_model.predict(self.poly.fit_transform([[lv, rv]]))[0]
def div_value(self, lv, rv):
return self.div_model.predict(self.poly.fit_transform([[lv, rv]]))[0]
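# Rough usage sketch (mirrors _one_calc below; the actual_parser API is assumed
# only from how it is used in this file):
#   ast = actual_parser.Parser("2 + 2").parse()
#   print(MachineLearningInterpreter(ast).eval())  # hopefully something near 4.0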
def _one_calc():
# Receive input from the user.
# Close the program when we get Ctrl+C or Ctrl+D.
try:
user_input = input()
except (KeyboardInterrupt, EOFError):
sys.exit(0)
parser = actual_parser.Parser(user_input)
ast = parser.parse()
interpreter = MachineLearningInterpreter(ast)
print(interpreter.eval())
if __name__ == "__main__":
while True:
_one_calc()
|
class Node:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def countnodes(self, A):
if not A:
return 0
# count = 1
# count += self.countnodes(A.left)
# count += self.countnodes(A.right)
# return count
# A More Compact Implementation
return 1 + self.countnodes(A.left) + self.countnodes(A.right)
obj = Solution()
root = Node(4)
root.left = Node(2)
root.right = Node(5)
root.left.left = Node(1)
root.left.right = Node(3)
root.right.left = Node(1)
root.right.right = Node(3)
print("Number of Tree Nodes is {}" .format(obj.countnodes(root)))
|
from random import randint, choice
from operator import add, mul, sub
from uuid import uuid4
from tkinter import *
from tkinter import ttk
from Classes.Mongo import Mongo
class MathsQuiz(object):
def __init__(self):
self.root = Tk()
self.studentName = 0
self.studentInput = IntVar()
self.mathsQuestion = StringVar()
self.operator = {"+": add,
"-": sub,
"*": mul}
self.randomNumber1 = 0
self.randomNumber2 = 0
self.randomSign = 0
self.studentScore = 0
self.uniqueID = 0
self.questionNumber = 0
self.maxQuestions = 0
self.maxAddition = 0
self.maxSubtraction = 0
self.maxMultiplication = 0
self.start_quiz()
def store_to_database(self):
while True:
while True:
haveUID = input("Do you have a UID? It will be in the format: \nxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n(Y/N): ").lower()
if haveUID not in "yes":
if haveUID not in "no":
print("Invalid input, please try again.")
else:
break
else:
break
if haveUID != "" and haveUID in "yes":
self.uniqueID = input("Please input your UID correctly with no spaces. ")
studentRecord = Mongo(self.uniqueID, self.studentName, self.studentScore)
studentRecord.overwrite_score_from_database()
break
elif haveUID != "" and haveUID in "no":
self.uniqueID = str(uuid4())
print(self.uniqueID,"\nThis is your UID. You will need it to log back into your account. So write it down.")
studentRecord = Mongo(self.uniqueID, self.studentName, self.studentScore)
studentRecord.store_to_database()
break
else:
print("Invalid input, please try again.")
def start_quiz(self):
self.studentName = str(input("What is your name? "))
while True:
try:
self.maxQuestions = int(input("Enter the number of questions in integer form. "))
break
except:
print("That is an invalid input. ")
continue
while True:
try:
self.maxAddition = int(input("Enter the maximum score for addition questions. "))
break
except:
print("That is an invalid input. ")
continue
while True:
try:
self.maxSubtraction = int(input("Enter the maximum score for subtraction questions. "))
break
except:
print("That is an invalid input. ")
continue
while True:
try:
self.maxMultiplication = int(input("Enter the maximum score for multiplication questions. "))
break
except:
print("That is an invalid input. ")
continue
self.generate_calculator()
self.question_checker()
def question_checker(self):
if self.questionNumber == self.maxQuestions - 1:
try:
self.root.destroy()
print("{} , your final score was {}".format(self.studentName,self.studentScore))
self.store_to_database()
except:
pass
else:
self.symbol_checker()
self.ask_question()
self.questionNumber = self.questionNumber + 1
print("\nYou are currently on {} points.".format(self.studentScore))
def symbol_checker(self):
if self.maxMultiplication >= self.studentScore >= self.maxSubtraction:
self.symbol = "*"
elif self.maxSubtraction >= self.studentScore >= self.maxAddition:
self.symbol = "-"
else:
if self.studentScore <= self.maxAddition:
self.symbol = "+"
def ask_question(self):
self.random_generator()
self.symbol_checker()
self.question = "Enter the answer to {} {} {}\n".format(self.randomNumber1, self.symbol, self.randomNumber2)
self.mathsQuestion.set(self.question)
def random_generator(self):
self.randomNumber1 = randint(0, 10)
self.randomNumber2 = randint(0, 10)
def check_math(self):
print(self.studentInput.get())
if self.studentInput.get() == self.operator[self.symbol](self.randomNumber1, self.randomNumber2):
print("Correct.")
self.studentScore += 1
else:
print("Incorrect.")
self.studentScore -= 1
self.studentInput.set(0)
self.question_checker()
def generate_calculator(self):
self.root.title("Maths Quiz")
correct = StringVar()
self.ask_question()
mathsQuestionLabel = Label(self.root, textvariable=self.mathsQuestion, height=2, width=20)
mathsQuestionLabel.grid(row=1, column=0, sticky=N)
studentInputLabel = Label(self.root, textvariable=self.studentInput, height=2, width=20)
studentInputLabel.grid(row=2, column=0, sticky=N)
Grid.rowconfigure(self.root, 0, weight=1)
Grid.columnconfigure(self.root, 0, weight=1)
frame=Frame(self.root)
frame.grid(row=0, column=0, sticky=N+S+E+W)
rowIndex = 0
buttonText = 1
while rowIndex != 4:
Grid.rowconfigure(frame, rowIndex, weight=1)
rowIndex = rowIndex + 1
colIndex = 0
while colIndex != 3:
Grid.columnconfigure(frame, colIndex, weight=1)
btn = ttk.Button(frame)
btn.grid(row=rowIndex, column=colIndex, sticky=N+S+E+W)
colIndex = colIndex + 1
btn['text'] =buttonText
btn['command'] =lambda x=buttonText: self.studentInput.set(self.studentInput.get()*10 + x)
buttonText = buttonText + 1
if buttonText ==11:
btn['text'] ="-"
btn['command'] =lambda : self.studentInput.set(-self.studentInput.get())
if buttonText ==12:
btn['text'] ="⌫"
btn['command'] =lambda : self.studentInput.set(0)
if buttonText == 13:
btn['text'] ="↵"
btn['command'] =lambda : self.check_math()
btn = ttk.Button(frame)
btn.grid(row=5, column=1, sticky=N+S+E+W)
btn['text'] ="0"
btn['command'] =lambda : self.studentInput.set(self.studentInput.get()*10)
self.root.mainloop()
|
# ---------------------------------------------------------------------
# Huawei.VRP.get_switchport
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
from itertools import compress
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetswitchport import IGetSwitchport
from noc.core.validators import is_int
from noc.core.mib import mib
from noc.core.snmp.render import render_bin
class Script(BaseScript):
name = "Huawei.VRP.get_switchport"
interface = IGetSwitchport
rx_vlan_comment = re.compile(r"\([^)]+\)", re.MULTILINE | re.DOTALL)
rx_line1 = re.compile(
r"(?P<interface>\S+)\s+(?P<mode>access|trunk|hybrid|trunking)\s+(?P<pvid>\d+)\s+(?P<vlans>(?:(?!40GE)\d|\-|\s|\n)+\s)",
re.MULTILINE,
)
rx_line2 = re.compile(
r"""
(?P<interface>\S+)\scurrent\sstate
.*?
PVID:\s(?P<pvid>\d+)
.*?
Port\slink-type:\s(?P<mode>access|trunk|hybrid|trunking)
.*?
(?:Tagged\s+VLAN\sID|VLAN\spermitted)?:\s(?P<vlans>.*?)\n
""",
re.MULTILINE | re.DOTALL | re.VERBOSE,
)
rx_descr1 = re.compile(r"^(?P<interface>\S+)\s+(?P<description>.+)", re.MULTILINE)
rx_descr2 = re.compile(r"^(?P<interface>\S+)\s+\S+\s+\S+\s+(?P<description>.+)", re.MULTILINE)
rx_descr3 = re.compile(
r"^(?P<interface>(?:Eth|GE|TENGE)\d+/\d+/\d+)\s+"
r"(?P<status>(?:UP|(?:ADM\s)?DOWN))\s+(?P<speed>.+?)\s+"
r"(?P<duplex>.+?)\s+"
r"(?P<mode>access|trunk|hybrid|trunking|A|T|H)\s+"
r"(?P<pvid>\d+)\s*(?P<description>.*)$",
re.MULTILINE,
)
rx_new_descr = re.compile(r"^Interface\s+PHY\s+Protocol\s+Description", re.MULTILINE)
@staticmethod
def convert_vlan(vlans):
"""
:param vlans: Byte string VLAN bitmap, e.g. FF 00 01 80 ...
:return: iterator of 0/1 flags, one per VLAN bit
"""
for line in vlans.splitlines():
for vlan_pack in line.split()[0]:
for is_v in "{0:08b}".format(vlan_pack):
yield int(is_v)
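# Illustration (hypothetical input): the byte string b"\x80\x01" expands to
# sixteen 0/1 flags with bits 0 and 15 set, so compress(range(0, 16), ...)
# would yield the VLAN offsets 0 and 15.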
def execute_snmp(self, **kwargs):
names = {x: y for y, x in self.scripts.get_ifindexes().items()}
r = {}
for port_num, ifindex, port_type, pvid in self.snmp.get_tables(
[
mib["HUAWEI-L2IF-MIB::hwL2IfPortIfIndex"],
mib["HUAWEI-L2IF-MIB::hwL2IfPortType"],
mib["HUAWEI-L2IF-MIB::hwL2IfPVID"],
]
):
# print port_num, ifindex, port_type, pvid
r[port_num] = {
"interface": names[ifindex],
"status": False,
# "ifindex": ifindex,
# "port_type": port_type,
# "untagged": pvid,
"tagged": [],
"members": [],
}
# Avoid zero-value untagged
# Found on ME60-X8 5.160 (V600R008C10SPC300)
if pvid:
r[port_num]["untagged"] = pvid
for oid, vlans_bank in self.snmp.get_tables(
[mib["HUAWEI-L2IF-MIB::hwL2IfTrunkPortTable"]],
display_hints={mib["HUAWEI-L2IF-MIB::hwL2IfTrunkPortTable"]: render_bin},
):
start, end = 0, 2048
oid, port_num = oid.rsplit(".", 1)
if oid.endswith("1.3"):
# HighVLAN
start, end = 2048, 4096
r[port_num]["tagged"] += list(
compress(range(start, end), self.convert_vlan(vlans_bank))
)
r[port_num]["802.1Q Enabled"] = True
# tagged_vlans = list()
# hybrid_vlans = list(self.snmp.get_tables([mib["HUAWEI-L2IF-MIB::hwL2IfHybridPortTable"]]))
# x2 = list(compress(range(1, 4096), self.convert_vlan(r2)))
return list(r.values())
def execute_cli(self, **kwargs):
# Get descriptions
descriptions = {}
try:
v = self.cli("display interface description")
if self.rx_new_descr.search(v):
rx_descr = self.rx_descr2
else:
rx_descr = self.rx_descr1
except self.CLISyntaxError:
rx_descr = self.rx_descr3
try:
v = self.cli("display brief interface")
except self.CLISyntaxError:
v = self.cli("display interface brief")
for match in rx_descr.finditer(v):
interface = self.profile.convert_interface_name(match.group("interface"))
description = match.group("description").strip()
if description.startswith("HUAWEI"):
description = ""
if match.group("interface") != "Interface":
descriptions[interface] = description
# Get interface statuses
interface_status = {}
for s in self.scripts.get_interface_status():
interface_status[s["interface"]] = s["status"]
# Get portchannel
portchannels = self.scripts.get_portchannel()
# Get vlans
known_vlans = {vlan["vlan_id"] for vlan in self.scripts.get_vlans()}
# Get ports in vlans
r = []
version = self.profile.fix_version(self.scripts.get_version())
rx_line = self.rx_line1
if version.startswith("5.3"):
v = self.cli("display port allow-vlan")
elif version.startswith("3.10"):
rx_line = self.rx_line2
v = self.cli("display interface")
else:
try:
v = self.cli("display port vlan")
except self.CLISyntaxError:
v = "%s\n%s" % (self.cli("display port trunk"), self.cli("display port hybrid"))
for match in rx_line.finditer(v):
# port = {}
tagged = []
trunk = match.group("mode") in ("trunk", "hybrid", "trunking")
if trunk:
vlans = match.group("vlans").strip()
if vlans and (vlans not in ["-", "none"]) and is_int(vlans[0]):
vlans = self.rx_vlan_comment.sub("", vlans)
vlans = vlans.replace(" ", ",")
tagged = self.expand_rangelist(vlans)
# For VRP version 5.3
if r and r[-1]["interface"] == match.group("interface"):
r[-1]["tagged"] += [x for x in tagged if x in known_vlans]
continue
members = []
interface = match.group("interface")
if interface.startswith("Eth-Trunk"):
ifname = self.profile.convert_interface_name(interface)
for p in portchannels:
if p["interface"] in (ifname, interface):
members = p["members"]
pvid = int(match.group("pvid"))
# Some devices report PVID 0; treat it as the default VLAN 1
if pvid == 0:
pvid = 1
port = {
"interface": interface,
"status": interface_status.get(interface, False),
"802.1Q Enabled": trunk,
"802.1ad Tunnel": False,
"tagged": [x for x in tagged if x in known_vlans],
"members": members,
}
if match.group("mode") in ("access", "hybrid"):
port["untagged"] = pvid
description = descriptions.get(interface)
if description:
port["description"] = description
r += [port]
return r
|
import numpy as np
import scipy as sp
import pylab as p
xa=.2
xb=1.99
C=np.linspace(xa,xb,100)
print( C)
i=1000
Y = np.ones(len(C))
# for x in range(iter):
# Y = Y*C*(1-Y)#Y**2 - C #get rid of early transients
fig, ax = p.subplots(figsize=(8,6))
for x in range(i):
Y = Y**2 - C
ax.plot(C,Y, '.', color = 'g', markersize = .5)
ax.set_axis_off()
p.show()
#Explain range(iter)
#=================
# L = np.ones(5)
# lst = np.array([1,2,3,4,5])
# for i in range(iter):
# L = L+lst
# print('i',i,'L',L)
|
import rltk.utils as utils
def string_equal(str1, str2):
"""
Args:
str1 (str): String 1.
str2 (str): String 2.
Returns:
int: 0 for unequal and 1 for equal.
"""
utils.check_for_none(str1, str2)
utils.check_for_type(str, str1, str2)
return int(str1 == str2)
def number_equal(num1, num2, epsilon=0):
"""
Args:
num1 (int / float): Number 1.
num2 (int / float): Number 2.
epsilon (float, optional): Approximation margin.
Returns:
int: 0 for unequal and 1 for equal.
"""
utils.check_for_type((int, float), num1, num2)
return int(abs(num1 - num2) <= epsilon)
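# Minimal usage sketch (hypothetical values):
#   string_equal("abc", "abc")             # -> 1
#   number_equal(1.0, 1.05, epsilon=0.1)   # -> 1
#   number_equal(1, 2)                     # -> 0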
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from os import path
import cvmfs
readme_path = path.join(path.dirname(__file__), 'README')
setup(
name=cvmfs.__package_name__,
version=cvmfs.__version__,
url='http://cernvm.cern.ch',
author='Rene Meusel',
author_email='rene.meusel@cern.ch',
license='(c) 2015 CERN - BSD License',
description='Inspect CernVM-FS repositories',
long_description=open(readme_path).read(),
classifiers= [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Filesystems',
'Topic :: System :: Networking :: Monitoring',
'Topic :: System :: Systems Administration'
],
packages=find_packages(),
test_suite='cvmfs.test',
install_requires=[ # don't forget to adapt the matching RPM dependencies!
'python-dateutil >= 1.4.1',
'requests >= 1.1.0',
'M2Crypto >= 0.20.0'
]
)
|
import discord
import colorsys
import random
def error(text):
return "\N{NO ENTRY SIGN} {}".format(text)
def warning(text):
return "\N{WARNING SIGN} {}".format(text)
def info(text):
return "\N{INFORMATION SOURCE} {}".format(text)
def question(text):
return "\N{BLACK QUESTION MARK ORNAMENT} {}".format(text)
def bold(text):
return "**{}**".format(text)
def box(text, lang=""):
ret = "```{}\n{}\n```".format(lang, text)
return ret
def inline(text):
return "`{}`".format(text)
def italics(text):
return "*{}*".format(text)
def strikethrough(text):
return "~~{}~~".format(text)
def underline(text):
return "__{}__".format(text)
def escape(text, *, mass_mentions=False, formatting=False):
if mass_mentions:
text = text.replace("@everyone", "@\u200beveryone")
text = text.replace("@here", "@\u200bhere")
if formatting:
text = (text.replace("`", "\\`")
.replace("*", "\\*")
.replace("_", "\\_")
.replace("~", "\\~"))
return text
def escape_mass_mentions(text):
return escape(text, mass_mentions=True)
def AVATAR_URL_AS(user, format=None, static_format='webp', size=256):
if not isinstance(user, discord.abc.User):
return 'https://cdn.discordapp.com/embed/avatars/0.png'
if user.avatar is None:
# Default is always blurple apparently
#return user.default_avatar_url
return 'https://cdn.discordapp.com/embed/avatars/{}.png'.format(user.default_avatar.value)
format = format or 'png'
return 'https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.{1}?size={2}'.format(user, format, size)
def GUILD_URL_AS(guild, format=None, static_format='webp', size=256):
if not isinstance(guild, discord.Guild):
return 'https://cdn.discordapp.com/embed/avatars/0.png'
if format is None:
format = 'gif' if guild.is_icon_animated() else static_format
return 'https://cdn.discordapp.com/icons/{0.id}/{0.icon}.{1}?size={2}'.format(guild, format, size)
def RANDOM_DISCORD_COLOR():
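# Weighted pick (roughly): 20% pure saturated hues, 40% random saturation at
# full brightness, 40% completely random HSV values.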
choice = random.choice([1]*10 + [2]*20 + [3]*20)
if choice == 1:
values = [int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), 1, 1)]
elif choice == 2:
values = [int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), random.random(), 1)]
else:
values = [int(x * 255) for x in colorsys.hsv_to_rgb(random.random(), random.random(), random.random())]
color = discord.Color.from_rgb(*values)
return color
|
import setup_util
import subprocess
def start(args, logfile, errfile):
setup_util.replace_text("play-java/conf/application.conf", "jdbc:mysql:\/\/.*:3306", "jdbc:mysql://" + args.database_host + ":3306")
subprocess.Popen(["play","start"], stdin=subprocess.PIPE, cwd="play-java", stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
p = subprocess.Popen(["play","stop"], cwd="play-java", stderr=errfile, stdout=logfile)
p.communicate()
return 0
|
from django.shortcuts import render,get_object_or_404,redirect
from .models import GonderiModel
from .forms import GonderiForm
from django.contrib.auth.models import User
from django.utils import timezone
def gonderListe(request):
gonderiler = GonderiModel.objects.all()
return render(request,"blog/gonderilist.html",{"gonderis":gonderiler})
def gonderiDetay(request,pk):
# gonderi = GonderiModel.objects.get(pk=pk)
gonderi = get_object_or_404(GonderiModel,pk=pk)
return render(request,"blog/gonderidetay.html",{"gonderi":gonderi})
def yeniGonderi(request):
if request.method == "POST":
form = GonderiForm(request.POST)
if form.is_valid():
gonderi = form.save(commit=False)
gonderi.yazar = request.user
gonderi.yayim_zaman = timezone.now()
gonderi.save()
return redirect('gonderListe')
else:
form = GonderiForm()
return render(request,"blog/yenigonderi.html",{"form":form})
def gonderiDuzenle(request,pk):
gonderi = get_object_or_404(GonderiModel,pk=pk)
if request.method == "POST":
form = GonderiForm(request.POST,instance=gonderi)
if form.is_valid():
gonderi = form.save(commit=False)
ben = User.objects.get(username="admin")
gonderi.yazar = ben
gonderi.yayim_zaman = timezone.now()
gonderi.save()
return redirect('gonderListe')
else:
form = GonderiForm(instance=gonderi)
return render(request,"blog/yenigonderi.html",{"form":form})
|
import os
import numpy as np
from opencv_sudoku_solver.pyimagesearch.sudoku import \
find_puzzle, extract_digit, multi_img_view
from matplotlib import pyplot as plt
import cv2
import typing
class BoardImgArr:
def __init__(self, img_path: str,
roi_shape: typing.Tuple[int, int],
norm_255: bool = True,
final_ei339: bool = False,
debug_board_detection: bool = False,
debug_digits_extraction: bool = False,
debug_digits_comparison: bool = False):
"""
:param img_path: Path of the input image of the board
:param roi_shape: Shape of the ROIs, dim=2 (e.g. (28,28))
:param norm_255: Whether to normalize by "img/=255."
required True by the TF approach here
required False by the Torch approach here
:param final_ei339: whether the final mode is on, for EI339 final test images only
:param debug_board_detection: Intermediate images during board detection
:param debug_digits_extraction: Intermediate images during digits extraction
:param debug_digits_comparison: Intermediate images of cell & extracted digit
"""
assert os.path.exists(img_path), "[Error] Input Board Image NOT Found"
img = cv2.imread(img_path)
self.img = img
self.roi_shape = roi_shape
self.norm_255 = norm_255
self.final_ei339 = final_ei339
self.debug_board_detection = debug_board_detection
self.debug_digits_extraction = debug_digits_extraction
self.debug_digits_comparison = debug_digits_comparison
self.board_cells_cnt = 9 * 9
self.board_size = 9
# ROI of cells flattened to len=81:
# upper-left to lower-right, in row-major order (all columns of a row before the next row)
# None or guaranteed to be of shape self.roi_shape
self.img_cells = []
# Numbers of cells: -99=Empty or <int>number
self.num_cells = np.full((self.board_size, self.board_size), -99, dtype=int)  # np.int is deprecated; use builtin int
self.num_cells_updated = False
# Cell locations on the input board image: (x,y)
self.cell_loc = []
self.__detect_board__()
def __detect_board__(self) -> None:
"""
Detect board and extract cells. Update & Store:
1. Images of cells: in self.img_cells
2. Locations of cells on the board image: in self.cell_loc
Based on: https://www.pyimagesearch.com/2020/08/10/opencv-sudoku-solver-and-ocr/
"""
# find the puzzle in the image and then
puzzle_image, warped = find_puzzle(self.img)
# a sudoku puzzle is a 9x9 grid (81 individual cells), so we can
# infer the location of each cell by dividing the warped image
# into a 9x9 grid
step_x = warped.shape[1] // 9
step_y = warped.shape[0] // 9
# loop over the grid locations
for y in range(0, 9):
# initialize the current list of cell locations
row = []
for x in range(0, 9):
# compute the starting and ending (x, y)-coordinates of the
# current cell
start_x = x * step_x
start_y = y * step_y
end_x = (x + 1) * step_x
end_y = (y + 1) * step_y
# add the (x, y)-coordinates to our cell locations list
row.append((start_x, start_y, end_x, end_y))
# crop the cell from the warped transform image and then
# extract the digit from the cell
cell_img = warped[start_y:end_y, start_x:end_x]
digit_img = extract_digit(
cell_img, debug=self.debug_digits_extraction, final=self.final_ei339)
if digit_img is None:
self.img_cells.append(None)
else:
if self.debug_digits_comparison:
debug_imgs = [cell_img, digit_img]
debug_subtitles = ["Cell", "Digit"]
_ = multi_img_view.multi_img_view(
images=debug_imgs, subtitles=debug_subtitles, col_cnt=3, row_cnt=2,
title="Extract Digits", fig_size=None, close_all=True)
plt.show()
# resize the cell to 28x28 pixels and then prepare the
# cell for classification
# *** here, you can also use:
# from tensorflow.keras.preprocessing.image import img_to_array
# roi = img_to_array(roi)
roi = cv2.resize(digit_img, (28, 28))
if self.norm_255:
roi = roi.astype("float") / 255.0 # (28, 28)
roi = roi.reshape(self.roi_shape)
self.img_cells.append(roi)
# add the row to our cell locations
self.cell_loc.append(row)
def show_board_cells(self) -> None:
"""
Show the flattened result, as described in __init__() self.img_cells
:return:
"""
cell_titles = [""] * self.board_cells_cnt
cell_imgs = [img.reshape(img.shape[0], img.shape[1])
if img is not None else None
for img in self.img_cells]
fig = multi_img_view.multi_img_view(
images=cell_imgs, subtitles=cell_titles, col_cnt=9, row_cnt=9,
title="Cells ROI", fig_size=None, close_all=True)
# fig.tight_layout()
plt.show()
def output_board_cells(self) -> np.ndarray:
"""
Output the compact flattened result, as described in __init__() self.img_cells
:return:
"""
(height, width) = self.roi_shape
row_cnt = cnt_per_col = self.board_size
col_cnt = cnt_per_row = self.board_size
final_img = np.zeros((height * row_cnt, width * col_cnt, 1),
dtype=np.float32 if self.norm_255 else np.uint8)
for col_idx in range(col_cnt):
for row_idx in range(row_cnt):
img_idx = col_idx + row_idx * cnt_per_col
img = self.img_cells[img_idx]
if img is None:
continue
img = img.reshape(self.roi_shape[0], self.roi_shape[1], 1)
# cv2.imshow("%d, %d, %d" % (img.shape[0], img.shape[1], img.shape[2]), img)
# cv2.waitKey(0)
dim1_start = row_idx * height
dim1_end = dim1_start + height
dim2_start = col_idx * width
dim2_end = dim2_start + width
# print("col=%d, row=%d, img=%d, dim1=%d-%d, dim2=%d-%d" %
# (col_idx, row_idx, img_idx, dim1_start, dim1_end, dim2_start, dim2_end))
final_img[dim1_start:dim1_end, dim2_start:dim2_end, :] = img[:]
# print(final_img.shape)
return final_img
def get_cells_imgs(self) -> typing.List[np.ndarray or None]:
"""
Get the ROI images of the cells, flattened as described in
__init__() self.img_cells
:return:
"""
return self.img_cells
def update_cells_nums(self, numbers: np.ndarray) -> None:
"""
Update the numbers of cells
:param numbers: Numbers of the cells, flattened
as described in __init__() self.img_cells
:return:
"""
assert self.board_cells_cnt == numbers.size, \
"[Error] Count of Given Numbers Mismatch. Expected %d, Got %d" \
% (self.board_cells_cnt, numbers.size)
self.num_cells[:, :] = numbers.reshape((self.board_size, self.board_size))
self.num_cells_updated = True
def get_cells_nums(self) -> np.ndarray:
"""
Get the numbers of cells, shape (board_size, board_size)
:return: Numbers of cells
"""
assert self.num_cells_updated, "[Error] Cells' Numbers NOT Updated yet."
return self.num_cells
if "__main__" == __name__:
test_image = "../imgs/sudoku_puzzle.jpg"
bia_obj = BoardImgArr(img_path=test_image, roi_shape=(28, 28))
bia_obj.show_board_cells()
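# A possible next step (sketch; the digit values here are placeholders, not
# real classifier output): feed one predicted number per cell, -99 for empty,
# then read the 9x9 board back.
#   fake_digits = np.full(81, -99, dtype=int)
#   bia_obj.update_cells_nums(fake_digits)
#   print(bia_obj.get_cells_nums())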
|
import time
from servo_device import ServoDevice
# create the servo Device
SERVO_PIN = 17
servo = ServoDevice(SERVO_PIN)
angles = [0, 45, 90, 135, 180, 135, 90, 45]
print('Looping with Servo angles (Ctrl-C to quit)...')
try:
while True:
# sweep through the full angle sequence defined above
for angle in angles:
servo.set_angle(angle)
time.sleep(3)
except KeyboardInterrupt:
servo.stop()
|