# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
import os
import math
import sys
sys.path.insert( 0, '../../Utilities' )
from numericalFunctions import pointwiseXY_C
import utilities
options = utilities.getOptions( __file__ )
xData = [ 0.5 * i - 0.145 for i in range( 51 ) ]
yData = [ math.sin( x ) for x in xData ]
XYData = [ [ x, yData[i] ] for i, x in enumerate( xData ) ]
listData = []
for i, x in enumerate( xData ) :
listData.append( x )
listData.append( yData[i] )
dataXYs = pointwiseXY_C.pointwiseXY_C( XYData, initialSize = len( XYData ), overflowSize = 10 )
dataXYs2 = pointwiseXY_C.pointwiseXY_C( XYData, initialSize = len( XYData ), overflowSize = 10, dataForm = 'xys' )
dataXsAndYs = pointwiseXY_C.pointwiseXY_C( [ xData, yData ], initialSize = len( XYData ), overflowSize = 10, dataForm = 'XsAndYs' )
dataList = pointwiseXY_C.pointwiseXY_C( listData, initialSize = len( XYData ), overflowSize = 10, dataForm = 'List' )
def cmp( p1, p2 ) :
status = 0
d = p1 - p2
if( d.rangeMin( ) != 0 ) : status = 1
if( d.rangeMax( ) != 0 ) : status = 1
return( status )
status = cmp( dataXYs, dataXYs2 )
status += cmp( dataXYs, dataXsAndYs )
status += cmp( dataXYs, dataList )
if( status ) : raise Exception( '%s: %d sets not the same' % ( __file__, status ) )
|
from pypy.tool.sourcetools import compile2
from pypy.rlib.rarithmetic import r_uint
from pypy.jit.codegen.ppc.ppcgen.form import IDesc, IDupDesc
## "opcode": ( 0, 5),
## "rA": (11, 15, 'unsigned', regname._R),
## "rB": (16, 20, 'unsigned', regname._R),
## "Rc": (31, 31),
## "rD": ( 6, 10, 'unsigned', regname._R),
## "OE": (21, 21),
## "XO2": (22, 30),
## XO = Form("rD", "rA", "rB", "OE", "XO2", "Rc")
## add = XO(31, XO2=266, OE=0, Rc=0)
## def add(rD, rA, rB):
## v = 0
## v |= (31&(2**(5-0+1)-1)) << (32-5-1)
## ...
## return v
def make_func(name, desc):
sig = []
fieldvalues = []
for field in desc.fields:
if field in desc.specializations:
fieldvalues.append((field, desc.specializations[field]))
else:
sig.append(field.name)
fieldvalues.append((field, field.name))
if isinstance(desc, IDupDesc):
for destfield, srcfield in desc.dupfields.iteritems():
fieldvalues.append((destfield, srcfield.name))
body = ['v = r_uint(0)']
assert 'v' not in sig # that wouldn't be funny
#body.append('print %r'%name + ', ' + ', '.join(["'%s:', %s"%(s, s) for s in sig]))
for field, value in fieldvalues:
if field.name == 'spr':
body.append('spr = (%s&31) << 5 | (%s >> 5 & 31)'%(value, value))
value = 'spr'
body.append('v |= (%3s & r_uint(%#05x)) << %d'%(value,
field.mask,
(32 - field.right - 1)))
body.append('self.emit(v)')
src = 'def %s(self, %s):\n %s'%(name, ', '.join(sig), '\n '.join(body))
d = {'r_uint':r_uint}
#print src
exec compile2(src) in d
return d[name]
def make_rassembler(cls):
bases = [make_rassembler(b) for b in cls.__bases__]
ns = {}
for k, v in cls.__dict__.iteritems():
if isinstance(v, IDesc):
v = make_func(k, v)
ns[k] = v
rcls = type('R' + cls.__name__, tuple(bases), ns)
def emit(self, value):
self.insts.append(value)
rcls.emit = emit
return rcls
|
"""
Topic models look for groups of words that occur frequently together. We can often recognize these clusters as specific themes that appear in the collection -- thus the term "topic" model.
Our example corpus today is a collection of Viking sagas. Start python like this:
python -i topicmodel.py sagas_en.txt 20
We will work at the python prompt ">>>".
Today we'll be working with the simplest and most reliable topic model algorithm, Gibbs sampling.
Gibbs sampling is a way to take a very complicated optimization problem and break it into little problems that are individually easy.
First, we need to have a way of describing probability distributions.
A discrete distribution is a vector of numbers that are >= 0.0 and sum to 1.0.
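For example, `np.array([0.7, 0.1, 0.2])` is a discrete distribution over three outcomes: every entry is non-negative and the entries sum to 1.0.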
One useful function on distributions is *entropy*: it takes a distribution and returns a single number.
1. Run `entropy(np.array([0.7, 0.1, 0.2]))`. What is the value?
[Response here]
2. Run `entropy(np.array([7, 1, 2]))`. Does the value change? Why or why not?
[Response here]
3. Try different (non-negative) values of the three numbers. What is the largest value you can get, and what is the smallest?
[Response here]
4. Now try different (non-negative) values of *four* numbers. Can you get a larger or smaller entropy than with three?
[Response here]
5. Describe in your own words what entropy is measuring.
[Response here]
The Gibbs sampling algorithm proceeds in multiple iterations. In each iteration,
we look at all the word tokens in all the documents, one after another.
For each word, we erase its current topic assignment and sample a new topic
assignment given all the other word tokens' topic assignments.
Now look at the lines below the "SAMPLING DISTRIBUTION" comment. These define two vectors:
* The probability of each topic in the current document
* The probability of the current word in each topic
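Written out with the variable names used in the code below, the (unnormalized) sampling weight for each topic is roughly:
    (doc_topic_counts + doc_smoothing) * (word_topic_counts + word_smoothing) / (topic_totals + smoothing_times_vocab_size)
This is the same product you will examine in question 9.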
We'll look at a particular dramatic moment in Njal's saga. Define these variables:
document = documents[1160]
doc_topic_counts = document["topic_counts"]
word = "sword"
word_topic_counts = word_topics[word]
Use this command to suppress scientific notation:
np.set_printoptions(suppress=True)
6. Calculate the entropy of `doc_topic_counts`
7. Calculate the entropy of `(doc_topic_counts + doc_smoothing)`. Should this be larger or smaller than the previous value?
8. Calculate the entropy of `(word_topic_counts + word_smoothing) / (topic_totals + smoothing_times_vocab_size)`
9. Calculate the entropy of `(doc_topic_counts + doc_smoothing) * (word_topic_counts + word_smoothing) / (topic_totals + smoothing_times_vocab_size)`
These values are random initializations. Let's run the algorithm
over the documents a few times and see what happens. Run:
sample(25)
Use `print_all_topics()` to get a view of the current state of the topics.
10. This function prints the number of tokens in each topic for the sample doc. Describe how (if at all) they change.
11. Recalculate the four entropies we calculated above for the sampling distribution. How are they different?
12. What is the value of `word_smoothing`? Previously we added 1.0 in this situation. Why are we using a different value now? Use the concept of entropy in your answer.
[Response here]
13. What are Norse sagas about, from the perspective of the model?
[Response here]
14. I'm removing a list of frequent words, words that are too short, and
words whose first letter is capitalized. Why does removing capitalized words
help? What happens if you remove that check? Is this a good idea?
[Response here]
"""
import re, sys, random, math
import numpy as np
from collections import Counter
from timeit import default_timer as timer
word_pattern = re.compile(r"\w[\w\-\']*\w|\w")
if len(sys.argv) != 3:
print("Usage: topicmodel.py [docs file] [num topics]")
sys.exit()
num_topics = int(sys.argv[2])
doc_smoothing = 0.5
word_smoothing = 0.01
stoplist = set()
with open("stoplist.txt", encoding="utf-8") as stop_reader:
for line in stop_reader:
line = line.rstrip()
stoplist.add(line)
word_counts = Counter()
documents = []
word_topics = {}
topic_totals = np.zeros(num_topics)
for line in open(sys.argv[1], encoding="utf-8"):
#line = line.lower()
tokens = word_pattern.findall(line)
## remove stopwords, short words, and upper-cased words
tokens = [w for w in tokens if not w in stoplist and len(w) >= 3 and not w[0].isupper()]
word_counts.update(tokens)
doc_topic_counts = np.zeros(num_topics)
token_topics = []
for w in tokens:
## Generate a topic randomly
topic = random.randrange(num_topics)
token_topics.append({ "word": w, "topic": topic })
## If we haven't seen this word before, initialize it
if not w in word_topics:
word_topics[w] = np.zeros(num_topics)
## Update counts:
word_topics[w][topic] += 1
topic_totals[topic] += 1
doc_topic_counts[topic] += 1
documents.append({ "original": line, "token_topics": token_topics, "topic_counts": doc_topic_counts })
## Now that we're done reading from disk, we can count the total
## number of words.
vocabulary = list(word_counts.keys())
vocabulary_size = len(vocabulary)
smoothing_times_vocab_size = word_smoothing * vocabulary_size
def sample(num_iterations):
for iteration in range(num_iterations):
start = timer()
for document in documents:
doc_topic_counts = document["topic_counts"]
token_topics = document["token_topics"]
doc_length = len(token_topics)
for token_topic in token_topics:
w = token_topic["word"]
old_topic = token_topic["topic"]
word_topic_counts = word_topics[w]
## erase the effect of this token
word_topic_counts[old_topic] -= 1
topic_totals[old_topic] -= 1
doc_topic_counts[old_topic] -= 1
###
### SAMPLING DISTRIBUTION
###
## Does this topic occur often in the document?
topic_probs = (doc_topic_counts + doc_smoothing) / (doc_length + num_topics * doc_smoothing)
## Does this word occur often in the topic?
topic_probs *= (word_topic_counts + word_smoothing) / (topic_totals + smoothing_times_vocab_size)
## sample from an array that doesn't sum to 1.0
sample = random.uniform(0, np.sum(topic_probs))
new_topic = 0
while sample > topic_probs[new_topic]:
sample -= topic_probs[new_topic]
new_topic += 1
## add back in the effect of this token
word_topic_counts[new_topic] += 1
topic_totals[new_topic] += 1
doc_topic_counts[new_topic] += 1
token_topic["topic"] = new_topic
end = timer()
print(end - start)
def entropy(p):
## make sure the vector is a valid probability distribution
p = p / np.sum(p)
result = 0.0
for x in p:
if x > 0.0:
result += -x * math.log2(x)
return result
def print_topic(topic):
sorted_words = sorted(vocabulary, key=lambda w: word_topics[w][topic], reverse=True)
for i in range(20):
w = sorted_words[i]
print("{}\t{}".format(word_topics[w][topic], w))
def print_all_topics():
for topic in range(num_topics):
sorted_words = sorted(vocabulary, key=lambda w: word_topics[w][topic], reverse=True)
print(" ".join(sorted_words[:20]))
sample(100)
|
# -*- coding: utf-8 -*-
from typing import List
def check_strict_superset(raw_str: str, raw_arr: List[str]) -> bool:
"""
>>> check_strict_superset('1 2 3 4 5 6 7 8 9 10 11 12 23 45 84 78',
... ['1 2 3 4 5\\n', '100 11 12'])
False
"""
superset = set(raw_str.split())
sets = (set(el.split()) for el in map(str.strip, raw_arr))
return all(superset > el for el in sets)
if __name__ == '__main__':
import sys
raw_str, *raw_arr = sys.stdin.readlines()
print(check_strict_superset(raw_str, raw_arr))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.urls import reverse
from django.db import models
import uuid
class Job(models.Model):
"""
Holds the details about the job run
"""
slug = models.SlugField(blank=True, null=True)
created_date = models.DateTimeField(db_index=True, auto_now_add=True)
finished_date = models.DateTimeField(blank=True, null=True)
org_id = models.CharField(max_length=18)
username = models.CharField(max_length=120, blank=True, null=True)
email = models.EmailField(blank=True, null=True)
email_result = models.BooleanField(default=True)
access_token = models.CharField(max_length=255)
instance_url = models.CharField(max_length=255)
STATUS_CHOICES = (
('Not Started', 'Not Started'),
('Processing', 'Processing'),
('Finished', 'Finished'),
('Error', 'Error'),
)
status = models.CharField(max_length=40, choices=STATUS_CHOICES, default='Not Started')
error = models.TextField(blank=True, null=True)
stack_trace = models.TextField(blank=True, null=True)
def classes(self):
return self.apexclass_set.all().order_by('name')
def visualforce(self):
return self.apexpagecomponent_set.all().order_by('name')
def save(self, *args, **kwargs):
if not self.slug:
self.slug = uuid.uuid4()
super(Job, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('job', kwargs={'slug': self.slug})
class ApexClass(models.Model):
"""
Holds all details about an ApexClass
"""
job = models.ForeignKey(Job)
class_id = models.CharField(max_length=18)
class_member_id = models.CharField(max_length=18, blank=True, null=True)
name = models.CharField(max_length=120)
body = models.TextField()
symbol_table_json = models.TextField(blank=True, null=True)
is_referenced_externally = models.BooleanField(default=False)
# Holds a JSON structure of all the external classes that call this class
referenced_by_json = models.TextField(blank=True, null=True)
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class ApexPageComponent(models.Model):
"""
Hold details about an ApexPage
"""
job = models.ForeignKey(Job)
sf_id = models.CharField(max_length=18)
name = models.CharField(max_length=120)
controller = models.CharField(max_length=120, blank=True, null=True)
body = models.TextField()
TYPE_CHOICES = (
('Page', 'Page'),
('Component', 'Component'),
)
    type = models.CharField(max_length=10, choices=TYPE_CHOICES, default='Page')
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
|
import torchvision.models as models
from torch.nn import Parameter
from utility.util import *
import math
import torch
import torch.nn as nn
class GraphConvolution(nn.Module):
def __init__(self, in_features, out_features, bias=False):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.Tensor(1, 1, out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.matmul(input, self.weight)
output = torch.matmul(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
class GCNResnet(nn.Module):
def __init__(self, model, num_classes, in_channel=300, t=0, adj_file=None):
super(GCNResnet, self).__init__()
self.features = nn.Sequential(
model.conv1,
model.bn1,
model.relu,
model.maxpool,
model.layer1,
model.layer2,
model.layer3,
model.layer4,
)
self.num_classes = num_classes
# self.pooling = nn.MaxPool2d(14, 14)
self.pooling = nn.MaxPool2d(7, 7)
self.gc1 = GraphConvolution(in_channel, 1024)
self.gc2 = GraphConvolution(1024, 2048)
self.relu = nn.LeakyReLU(0.2)
_adj = gen_A(num_classes, t, adj_file)
self.A = Parameter(torch.from_numpy(_adj).float())
# # image normalization
self.image_normalization_mean = [0.485, 0.456, 0.406]
self.image_normalization_std = [0.229, 0.224, 0.225]
def forward(self, feature, inp):
feature = self.features(feature)
feature = self.pooling(feature)
feature = feature.view(feature.size(0), -1)
inp = inp[0]
adj = gen_adj(self.A).detach()
x = self.gc1(inp, adj)
x = self.relu(x)
x = self.gc2(x, adj)
x = x.transpose(0, 1)
x = torch.matmul(feature, x)
return x
def get_config_optim(self, lr, lrp):
return [
{'params': self.features.parameters(), 'lr': lr * lrp},
{'params': self.gc1.parameters(), 'lr': lr},
{'params': self.gc2.parameters(), 'lr': lr},
]
def gcn_resnet101(num_classes, t, pretrained=True, adj_file=None, in_channel=300):
model = models.resnet101(pretrained=pretrained)
return GCNResnet(model, num_classes, t=t, adj_file=adj_file, in_channel=in_channel)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Given two int values, return their sum. Unless the two values are the same, then return double their sum.
sum_double(1, 2) → 3
sum_double(3, 2) → 5
sum_double(2, 2) → 8
"""
from utils.args_to_r import validate
def main(a: int, b: int) -> int:
s = (a + b)
if (a == b):
s *= 2
return s
tests = [
((1, 2), 3),
((3, 2), 5),
((2, 2), 8),
]
validate(main, tests)
|
"""
Provide basic functionality for stock agent to interact with exchange.
"""
from typing import Union
from agent_exchange.agent import Agent
from stock_exchange_v1_utils import dict_repr, dict_str
from stock_exchange_v1 import StockExchangeV1FillTicket, StockExchangeV1OrderBook
class StockAgentV1InternalState:
def __init__(self, initial_num_shares: int, initial_capital: float):
self.num_shares = [initial_num_shares]
self.capital = [initial_capital]
# Mappings from price to number of shares
self.open_bids = {}
self.open_asks = {}
def on_timestep_passed(self, fill_ticket: Union[type(None), StockExchangeV1FillTicket]):
if fill_ticket != None:
new_num_shares, new_capital = self.update_with_fill_ticket(fill_ticket)
else:
new_num_shares, new_capital = self.get_num_shares(), self.get_capital()
self.num_shares.append(new_num_shares)
self.capital.append(new_capital)
def update_with_fill_ticket(self, ticket: StockExchangeV1FillTicket):
"""Use the fill ticket to update our state variables.
"""
# The state updates for after the update -- this should be modified in this function when capital or num_shares changes
new_num_shares = self.get_num_shares()
new_capital = self.get_capital()
# Add new bids
for price in ticket.open_bids:
StockAgentV1InternalState.increment_or_create(self.open_bids, price, ticket.open_bids[price])
# Add new asks
for price in ticket.open_asks:
StockAgentV1InternalState.increment_or_create(self.open_asks, price, ticket.open_asks[price])
# Remove old bids that were filled in the past time step
for price in ticket.closed_bids:
shares_bought = ticket.closed_bids[price]
new_num_shares += shares_bought
new_capital -= price * shares_bought
StockAgentV1InternalState.decrement_and_try_delete(self.open_bids, price, shares_bought)
# Remove old asks that were filled in the past time step
for price in ticket.closed_asks:
shares_sold = ticket.closed_asks[price]
new_num_shares -= shares_sold
new_capital += price * shares_sold
StockAgentV1InternalState.decrement_and_try_delete(self.open_asks, price, shares_sold)
return new_num_shares, new_capital
def get_num_shares(self):
return self.num_shares[-1]
def get_capital(self):
return self.capital[-1]
def __repr__(self):
return dict_repr(self)
def __str__(self):
return dict_str(self)
def increment_or_create(D, key, value):
"""If the key-value pair does not exist yet,
then add a new key-value pair with `value`
as the value. Otherwise, increment the
key's value with `value`.
"""
key = round(key, 2)
if key not in D:
D[key] = 0
D[key] += value
if D[key] == 0:
del D[key]
def decrement_and_try_delete(D, key, value):
"""Decrement a value in a dictionary,
and if the new value is 0, then delete
the k-v pair from the dictionary.
"""
key = round(key, 2)
if key not in D:
D[key] = 0
D[key] -= value
if D[key] == 0:
del D[key]
class StockAgentV1(Agent):
"""A base stock trading agent; this agent itself will perform no-ops each iteration.
"""
def __init__(self, initial_num_shares, initial_capital):
super().__init__()
self.internal_state = StockAgentV1InternalState(initial_num_shares, initial_capital)
def action_results_update(
self,
new_order_book: StockExchangeV1OrderBook,
reward,
done: bool,
fill_ticket: Union[type(None), StockExchangeV1FillTicket]):
self.internal_state.on_timestep_passed(fill_ticket)
def get_num_shares(self):
return self.internal_state.get_num_shares()
def get_capital(self):
return self.internal_state.get_capital()
def __repr__(self):
return dict_repr(self)
def __str__(self):
return dict_str(self)
|
from django.conf.urls import patterns, url
account_urls = patterns('',
url(r'^$', 'crmapp.accounts.views.account_detail', name = 'account_detail'),
url(r'^edit/$', 'crmapp.accounts.views.account_cru', name = 'account_update'),
)
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programytest.parser.pattern.matching.base import PatternMatcherBaseClass
class PatternMatcherSetTests(PatternMatcherBaseClass):
def test_basic_set_match_as_text(self):
if self._client_context.brain.sets.contains("SEX") is False:
set_dict = {"MAN": [["MAN"]], "WOMAN": [["WOMAN"]]}
values = {"MAN": "MAN", "WOMAN": "WOMAN"}
self._client_context.brain._sets_collection.add_set("SEX", set_dict, "teststore", False, values)
self.add_pattern_to_graph(pattern="I AM A <set>sex</set>", topic="X", that="Y", template="1")
context = self.match_sentence("I AM A MAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("MAN", context.star(1))
context = self.match_sentence("I AM A WOMAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("WOMAN", context.star(1))
def test_basic_set_match_as_name(self):
if self._client_context.brain.sets.contains("SEX") is False:
set_dict = {"MAN": [["MAN"]], "WOMAN": [["WOMAN"]]}
values = {"MAN": "MAN", "WOMAN": "WOMAN"}
self._client_context.brain._sets_collection.add_set("SEX", set_dict, "teststore", False, values)
self.add_pattern_to_graph(pattern='I AM A <set name="sex" />', topic="X", that="Y", template="1")
context = self.match_sentence("I AM A MAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("MAN", context.star(1))
context = self.match_sentence("I AM A WOMAN", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("WOMAN", context.star(1))
def test_multi_word_set_match(self):
set_dict = {"RED": [["RED"], ["RED", "AMBER"], ["RED", "BURNT", "OAK"], ["RED", "ORANGE"]]}
values = {"RED": "RED", "RED AMBER": "RED AMBER", "RED BURNT OAK": "RED BURNT OAK", "RED ORANGE": "RED ORANGE"}
self._client_context.brain._sets_collection.add_set("COLOR", set_dict, "teststore", False, values)
self.add_pattern_to_graph(pattern="I LIKE <set>color</set> *", topic="*", that="*", template="1")
context = self.match_sentence("I LIKE RED PAINT", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("RED", context.star(1))
self.assertEqual("PAINT", context.star(2))
context = self.match_sentence("I LIKE RED AMBER CARS", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("RED AMBER", context.star(1))
self.assertEqual("CARS", context.star(2))
context = self.match_sentence("I LIKE RED BURNT OAK MOTOR BIKES", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("RED BURNT OAK", context.star(1))
self.assertEqual("MOTOR BIKES", context.star(2))
def test_multi_word_match_value(self):
set_dict = {"RED": [["RED"], ["RED", "AMBER"], ["RED", "BURNT", "OAK"], ["RED", "ORANGE"]]}
values = {"RED": "red", "RED AMBER": "red amber", "RED BURNT OAK": "red burnt oak", "RED ORANGE": "red orange"}
self._client_context.brain._sets_collection.add_set("COLOR", set_dict, "teststore", False, values)
self.add_pattern_to_graph(pattern="i like <set>color</set> *", topic="*", that="*", template="1")
context = self.match_sentence("I LIKE RED PAINT", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("red", context.star(1))
self.assertEqual("PAINT", context.star(2))
context = self.match_sentence("I LIKE RED AMBER CARS", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("red amber", context.star(1))
self.assertEqual("CARS", context.star(2))
context = self.match_sentence("I LIKE RED BURNT OAK MOTOR BIKES", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("red burnt oak", context.star(1))
self.assertEqual("MOTOR BIKES", context.star(2))
def test_basic_set_match_as_text_jp(self):
if self._client_context.brain.sets.contains("SEX") is False:
set_dict = {"男": ["男", "男性"], "女": ["女", "女性"]}
values = {"男": "男", "男性": "男性", "女": "女", "女性": "女性"}
self._client_context.brain._sets_collection.add_set("SEX", set_dict, "teststore", True, values)
self.add_pattern_to_graph(pattern="私は <set>sex</set>", topic="X", that="Y", template="1")
context = self.match_sentence("私は男性", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("男性", context.star(1))
context = self.match_sentence("私は女", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("女", context.star(1))
def test_basic_set_match_as_name_jp(self):
if self._client_context.brain.sets.contains("SEX") is False:
set_dict = {"男": ["男", "男性"], "女": ["女", "女性"]}
values = {"男": "男", "男性": "男性", "女": "女", "女性": "女性"}
self._client_context.brain._sets_collection.add_set("SEX", set_dict, "teststore", True, values)
self.add_pattern_to_graph(pattern='私は <set name="sex" />', topic="X", that="Y", template="1")
context = self.match_sentence("私は男", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("男", context.star(1))
context = self.match_sentence("私は女性", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("女性", context.star(1))
def test_multi_word_set_match_jp(self):
set_dict = {"赤": ["赤", "赤色", "赤黒い", "赤面", "赤に塗った"]}
values = {"赤": "赤", "赤色": "赤色", "赤黒い": "赤黒い", "赤面": "赤面", "赤に塗った": "赤に塗った"}
self._client_context.brain._sets_collection.add_set("COLOR", set_dict, "teststore", True, values)
self.add_pattern_to_graph(pattern="私が好きなのは<set>color</set> *", topic="*", that="*", template="1")
context = self.match_sentence("私が好きなのは赤系統", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("赤", context.star(1))
self.assertEqual("系統", context.star(2))
context = self.match_sentence("私が好きなのは赤黒い車", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("赤黒い", context.star(1))
self.assertEqual("車", context.star(2))
context = self.match_sentence("私が好きなのは赤に塗ったバイク", topic="*", that="*")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("赤に塗った", context.star(1))
self.assertEqual("バイク", context.star(2))
def test_basic_set_number_match(self):
self._client_context.brain.dynamics.add_dynamic_set('number', "programy.dynamic.sets.numeric.IsNumeric", None)
self.add_pattern_to_graph(pattern="I AM <set>number</set> YEARS OLD", topic="X", that="Y", template="1")
context = self.match_sentence("I AM 49 YEARS OLD", topic="X", that="Y")
self.assertIsNotNone(context)
self.assertIsNotNone(context.template_node())
self.assertEqual("1", context.template_node().template.word)
self.assertEqual("49", context.star(1))
|
"""
This library contains utilities to use authentication and authorization features of the DCP.
"""
from typing import List
from .dcp_service_account import DCPServiceAccountManager
from ..errors import SecurityException
security_config_not_set_error = "security configuration not set for {}. Use Config.setup() to set."
OIDC_claims = {
'email': 'https://auth.data.humancellatlas.org/email',
'group': 'https://auth.data.humancellatlas.org/group'
}
allowed_algorithms = ['RS256']  # accepted algorithms for signing and verifying a JWT
class Config:
    _openid_provider = "humancellatlas.auth0.com"
    _auth_provider = "https://auth.data.humancellatlas.org"
    _auth_url = None
    _trusted_google_projects = None
    _oidc_email_claim = 'https://auth.data.humancellatlas.org/email'
@staticmethod
def setup(trusted_google_projects: List[str], *, openid_provider: str = None, auth_url: str = None):
"""
Set the configuration values
:param trusted_google_projects: trust service account project domains to allow access to your service.
:param openid_provider: the openid provider used to authenticate users.
:param auth_url: The url for the DCP authentication and authorization provider.
:return:
"""
Config._openid_provider = openid_provider or Config._openid_provider
Config._auth_url = auth_url or Config._auth_provider
Config._trusted_google_projects = [x for x in trusted_google_projects if x.endswith("iam.gserviceaccount.com")]
@staticmethod
def get_openid_provider() -> str:
if not Config._openid_provider:
raise SecurityException(security_config_not_set_error.format('openid_provider'))
return Config._openid_provider
@staticmethod
def get_auth_url() -> str:
if not Config._auth_url:
raise SecurityException(security_config_not_set_error.format('auth_url'))
return Config._auth_url
@staticmethod
def get_trusted_google_projects():
if Config._trusted_google_projects is None:
raise SecurityException(security_config_not_set_error.format('trusted_google_projects'))
return Config._trusted_google_projects
@staticmethod
def get_oidc_email_claim():
return Config._oidc_email_claim
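# Minimal usage sketch (illustrative only, not part of the original module); the
# service-account project domain below is a hypothetical placeholder:
#
#     Config.setup(trusted_google_projects=['my-project.iam.gserviceaccount.com'],
#                  openid_provider='humancellatlas.auth0.com',
#                  auth_url='https://auth.data.humancellatlas.org')
#     provider = Config.get_openid_provider()
#     projects = Config.get_trusted_google_projects()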
|
#!/usr/bin/env python
import torch
import getopt
import math
import numpy
import os
import PIL
import PIL.Image
import sys
try:
from .correlation import correlation # the custom cost volume layer
except:
sys.path.insert(0, './correlation'); import correlation # you should consider upgrading python
# end
##########################################################
assert(int(str('').join(torch.__version__.split('.')[0:2])) >= 13) # requires at least pytorch version 1.3.0
torch.set_grad_enabled(False) # make sure to not compute gradients for computational performance
torch.backends.cudnn.enabled = True # make sure to use cudnn for computational performance
##########################################################
arguments_strModel = 'default' # 'default', or 'chairs-things'
arguments_strOne = './images/one.png'
arguments_strTwo = './images/two.png'
arguments_strOut = './out.flo'
for strOption, strArgument in getopt.getopt(sys.argv[1:], '', [ strParameter[2:] + '=' for strParameter in sys.argv[1::2] ])[0]:
if strOption == '--model' and strArgument != '': arguments_strModel = strArgument # which model to use
if strOption == '--one' and strArgument != '': arguments_strOne = strArgument # path to the first frame
if strOption == '--two' and strArgument != '': arguments_strTwo = strArgument # path to the second frame
if strOption == '--out' and strArgument != '': arguments_strOut = strArgument # path to where the output should be stored
# end
##########################################################
backwarp_tenGrid = {}
backwarp_tenPartial = {}
def backwarp(tenInput, tenFlow):
if str(tenFlow.shape) not in backwarp_tenGrid:
tenHor = torch.linspace(-1.0 + (1.0 / tenFlow.shape[3]), 1.0 - (1.0 / tenFlow.shape[3]), tenFlow.shape[3]).view(1, 1, 1, -1).expand(-1, -1, tenFlow.shape[2], -1)
tenVer = torch.linspace(-1.0 + (1.0 / tenFlow.shape[2]), 1.0 - (1.0 / tenFlow.shape[2]), tenFlow.shape[2]).view(1, 1, -1, 1).expand(-1, -1, -1, tenFlow.shape[3])
backwarp_tenGrid[str(tenFlow.shape)] = torch.cat([ tenHor, tenVer ], 1).cuda()
# end
if str(tenFlow.shape) not in backwarp_tenPartial:
backwarp_tenPartial[str(tenFlow.shape)] = tenFlow.new_ones([ tenFlow.shape[0], 1, tenFlow.shape[2], tenFlow.shape[3] ])
# end
tenFlow = torch.cat([ tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0), tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0) ], 1)
tenInput = torch.cat([ tenInput, backwarp_tenPartial[str(tenFlow.shape)] ], 1)
tenOutput = torch.nn.functional.grid_sample(input=tenInput, grid=(backwarp_tenGrid[str(tenFlow.shape)] + tenFlow).permute(0, 2, 3, 1), mode='bilinear', padding_mode='zeros', align_corners=False)
tenMask = tenOutput[:, -1:, :, :]; tenMask[tenMask > 0.999] = 1.0; tenMask[tenMask < 1.0] = 0.0
return tenOutput[:, :-1, :, :] * tenMask
# end
##########################################################
class Network(torch.nn.Module):
def __init__(self):
super().__init__()
class Extractor(torch.nn.Module):
def __init__(self):
super().__init__()
self.netOne = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=2, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.netTwo = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=2, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.netThr = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.netFou = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=64, out_channels=96, kernel_size=3, stride=2, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.netFiv = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=96, out_channels=128, kernel_size=3, stride=2, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.netSix = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=128, out_channels=196, kernel_size=3, stride=2, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
# end
def forward(self, tenInput):
tenOne = self.netOne(tenInput)
tenTwo = self.netTwo(tenOne)
tenThr = self.netThr(tenTwo)
tenFou = self.netFou(tenThr)
tenFiv = self.netFiv(tenFou)
tenSix = self.netSix(tenFiv)
return [ tenOne, tenTwo, tenThr, tenFou, tenFiv, tenSix ]
# end
# end
class Decoder(torch.nn.Module):
def __init__(self, intLevel):
super().__init__()
intPrevious = [ None, None, 81 + 32 + 2 + 2, 81 + 64 + 2 + 2, 81 + 96 + 2 + 2, 81 + 128 + 2 + 2, 81, None ][intLevel + 1]
intCurrent = [ None, None, 81 + 32 + 2 + 2, 81 + 64 + 2 + 2, 81 + 96 + 2 + 2, 81 + 128 + 2 + 2, 81, None ][intLevel + 0]
if intLevel < 6: self.netUpflow = torch.nn.ConvTranspose2d(in_channels=2, out_channels=2, kernel_size=4, stride=2, padding=1)
if intLevel < 6: self.netUpfeat = torch.nn.ConvTranspose2d(in_channels=intPrevious + 128 + 128 + 96 + 64 + 32, out_channels=2, kernel_size=4, stride=2, padding=1)
if intLevel < 6: self.fltBackwarp = [ None, None, None, 5.0, 2.5, 1.25, 0.625, None ][intLevel + 1]
self.netOne = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=intCurrent, out_channels=128, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.netTwo = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=intCurrent + 128, out_channels=128, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.netThr = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=intCurrent + 128 + 128, out_channels=96, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.netFou = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=intCurrent + 128 + 128 + 96, out_channels=64, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.netFiv = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=intCurrent + 128 + 128 + 96 + 64, out_channels=32, kernel_size=3, stride=1, padding=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
)
self.netSix = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=intCurrent + 128 + 128 + 96 + 64 + 32, out_channels=2, kernel_size=3, stride=1, padding=1)
)
# end
def forward(self, tenOne, tenTwo, objPrevious):
tenFlow = None
tenFeat = None
if objPrevious is None:
tenFlow = None
tenFeat = None
tenVolume = torch.nn.functional.leaky_relu(input=correlation.FunctionCorrelation(tenOne=tenOne, tenTwo=tenTwo), negative_slope=0.1, inplace=False)
tenFeat = torch.cat([ tenVolume ], 1)
elif objPrevious is not None:
tenFlow = self.netUpflow(objPrevious['tenFlow'])
tenFeat = self.netUpfeat(objPrevious['tenFeat'])
tenVolume = torch.nn.functional.leaky_relu(input=correlation.FunctionCorrelation(tenOne=tenOne, tenTwo=backwarp(tenInput=tenTwo, tenFlow=tenFlow * self.fltBackwarp)), negative_slope=0.1, inplace=False)
tenFeat = torch.cat([ tenVolume, tenOne, tenFlow, tenFeat ], 1)
# end
tenFeat = torch.cat([ self.netOne(tenFeat), tenFeat ], 1)
tenFeat = torch.cat([ self.netTwo(tenFeat), tenFeat ], 1)
tenFeat = torch.cat([ self.netThr(tenFeat), tenFeat ], 1)
tenFeat = torch.cat([ self.netFou(tenFeat), tenFeat ], 1)
tenFeat = torch.cat([ self.netFiv(tenFeat), tenFeat ], 1)
tenFlow = self.netSix(tenFeat)
return {
'tenFlow': tenFlow,
'tenFeat': tenFeat
}
# end
# end
class Refiner(torch.nn.Module):
def __init__(self):
super().__init__()
self.netMain = torch.nn.Sequential(
torch.nn.Conv2d(in_channels=81 + 32 + 2 + 2 + 128 + 128 + 96 + 64 + 32, out_channels=128, kernel_size=3, stride=1, padding=1, dilation=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=2, dilation=2),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=4, dilation=4),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=128, out_channels=96, kernel_size=3, stride=1, padding=8, dilation=8),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=96, out_channels=64, kernel_size=3, stride=1, padding=16, dilation=16),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1, dilation=1),
torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
torch.nn.Conv2d(in_channels=32, out_channels=2, kernel_size=3, stride=1, padding=1, dilation=1)
)
# end
def forward(self, tenInput):
return self.netMain(tenInput)
# end
# end
self.netExtractor = Extractor()
self.netTwo = Decoder(2)
self.netThr = Decoder(3)
self.netFou = Decoder(4)
self.netFiv = Decoder(5)
self.netSix = Decoder(6)
self.netRefiner = Refiner()
self.load_state_dict({ strKey.replace('module', 'net'): tenWeight for strKey, tenWeight in torch.hub.load_state_dict_from_url(url='http://content.sniklaus.com/github/pytorch-pwc/network-' + arguments_strModel + '.pytorch', file_name='pwc-' + arguments_strModel).items() })
# end
def forward(self, tenOne, tenTwo):
tenOne = self.netExtractor(tenOne)
tenTwo = self.netExtractor(tenTwo)
objEstimate = self.netSix(tenOne[-1], tenTwo[-1], None)
objEstimate = self.netFiv(tenOne[-2], tenTwo[-2], objEstimate)
objEstimate = self.netFou(tenOne[-3], tenTwo[-3], objEstimate)
objEstimate = self.netThr(tenOne[-4], tenTwo[-4], objEstimate)
objEstimate = self.netTwo(tenOne[-5], tenTwo[-5], objEstimate)
return (objEstimate['tenFlow'] + self.netRefiner(objEstimate['tenFeat'])) * 20.0
# end
# end
netNetwork = None
##########################################################
def estimate(tenOne, tenTwo):
global netNetwork
if netNetwork is None:
netNetwork = Network().cuda().eval()
# end
assert(tenOne.shape[1] == tenTwo.shape[1])
assert(tenOne.shape[2] == tenTwo.shape[2])
intWidth = tenOne.shape[2]
intHeight = tenOne.shape[1]
assert(intWidth == 1360) # remember that there is no guarantee for correctness, comment this line out if you acknowledge this and want to continue
assert(intHeight == 1000) # remember that there is no guarantee for correctness, comment this line out if you acknowledge this and want to continue
tenPreprocessedOne = tenOne.cuda().view(1, 3, intHeight, intWidth)
tenPreprocessedTwo = tenTwo.cuda().view(1, 3, intHeight, intWidth)
intPreprocessedWidth = int(math.floor(math.ceil(intWidth / 64.0) * 64.0))
intPreprocessedHeight = int(math.floor(math.ceil(intHeight / 64.0) * 64.0))
tenPreprocessedOne = torch.nn.functional.interpolate(input=tenPreprocessedOne, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)
tenPreprocessedTwo = torch.nn.functional.interpolate(input=tenPreprocessedTwo, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)
tenFlow = torch.nn.functional.interpolate(input=netNetwork(tenPreprocessedOne, tenPreprocessedTwo), size=(intHeight, intWidth), mode='bilinear', align_corners=False)
tenFlow[:, 0, :, :] *= float(intWidth) / float(intPreprocessedWidth)
tenFlow[:, 1, :, :] *= float(intHeight) / float(intPreprocessedHeight)
return tenFlow[0, :, :, :].cpu()
# end
##########################################################
if __name__ == '__main__':
tenOne = torch.FloatTensor(numpy.ascontiguousarray(numpy.array(PIL.Image.open(arguments_strOne))[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32) * (1.0 / 255.0)))
tenTwo = torch.FloatTensor(numpy.ascontiguousarray(numpy.array(PIL.Image.open(arguments_strTwo))[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32) * (1.0 / 255.0)))
tenOutput = estimate(tenOne, tenTwo)
objOutput = open(arguments_strOut, 'wb')
numpy.array([ 80, 73, 69, 72 ], numpy.uint8).tofile(objOutput)
numpy.array([ tenOutput.shape[2], tenOutput.shape[1] ], numpy.int32).tofile(objOutput)
numpy.array(tenOutput.numpy().transpose(1, 2, 0), numpy.float32).tofile(objOutput)
objOutput.close()
# end
|
import I2C_LCD_driver
mylcd1 = I2C_LCD_driver.lcd(0x27)
mylcd1.lcd_display_string("Naresh's Status:", 1)
|
from django.http import HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import render, redirect
from rest_framework.generics import CreateAPIView, RetrieveAPIView
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.authentication import SessionAuthentication, BasicAuthentication, BaseAuthentication
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from django.contrib.auth.hashers import check_password
from .paginations import *
from .models import *
from .serializers import *
from .helper_functions import *
from django.utils import timezone
from rest_framework.parsers import JSONParser
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
from django.contrib.auth import get_user_model, authenticate
from django.contrib.auth.models import User
from .forms import *
import os
import json
import uuid
from base64 import b64decode
from uuid import uuid4
def serializePost(post):
postDict = {}
postDict['author'] = serializeAuthor(post.author)
postDict['title'] = post.title
postDict['source'] = post.source
postDict['origin'] = post.origin
postDict['description'] = post.description
postDict['contentType'] = post.contentType
postDict['content'] = post.content
postDict['categories'] = post.categories
# postDict['count'] = 'COUNT'
# postDict['size'] = 'SIZE'
# postDict['next'] = 'NEXT'
comments = Comment.objects.filter(post=post)
postDict['comments'] = []
for comment in comments:
postDict['comments'].append(serializeComment(comment))
postDict['published'] = post.published
postDict['id'] = post.uuid
postDict['visibility'] = post.visibility
postDict['visibleTo'] = []
postDict['unlisted'] = post.unlisted
return postDict
def serializeComment(comment):
commentDict = {}
commentDict['author'] = serializeAuthor(comment.author)
commentDict['comment'] = comment.comment
commentDict['contentType'] = comment.contentType
commentDict['published'] = comment.published
commentDict['id'] = comment.uuid
return commentDict
def serializeAuthor(author):
authorDict = {}
authorDict['id'] = str(author.host.hostname) + \
'author/' + str(author.uuid)
authorDict['host'] = author.host.hostname
authorDict['displayName'] = author.displayName
authorDict['github'] = author.github
authorDict['url'] = str(author.host.hostname) + \
'author/' + str(author.uuid)
authorDict['bio'] = author.bio
authorDict['firstName'] = author.first_name
authorDict['lastName'] = author.last_name
authorDict['email'] = author.email
return authorDict
class NodeBasicAuth(BaseAuthentication):
def authenticate(self, request):
try:
auth = request.META['HTTP_AUTHORIZATION'].split()
if auth[0].lower() == "basic":
server_name, server_password = b64decode(auth[1]).decode().split(':')
node = Node.objects.get(server_name=server_name, server_password=server_password)
if node:
return (Author.objects.get(username='warren'), None)
raise
except:
return None
class CreateAuthorAPIView(CreateAPIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
permission_classes = [AllowAny]
serializer_class = CreateAuthorSerializer
def create(self, request, *args, **kwargs):
print(request.data)
serializer = self.get_serializer(data=request.data)
# print(serializer)
serializer.is_valid(raise_exception=True)
print("VALID")
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
print(request.user)
print(request.auth)
return Response(
{**serializer.data},
status=status.HTTP_201_CREATED,
headers=headers
)
class GetAllAuthorsAPIView(APIView):
def get(self, request):
authors = Author.objects.filter(host=settings.HOSTNAME)
authors = list(map(serializeAuthor, authors))
return Response(authors, status=status.HTTP_200_OK)
class LogoutView(APIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request, format=None):
request.user.auth_token.delete()
return Response(status=status.HTTP_200_OK)
class EditUserView(APIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
permission_classes = [IsAuthenticated]
serializer = AuthorSerializer
def put(self, request, pk, format=None):
if request.user.uuid != pk:
return Response(status=status.HTTP_403_FORBIDDEN)
author = Author.objects.get(pk=pk)
serializer = AuthorSerializer(
instance=author, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
class GetAuthorAPIView(APIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
# permission_classes = [IsAuthenticated]
serializer_class = AuthorSerializer
def get(self, request, pk, format=None):
try:
author = Author.objects.get(uuid=pk)
except:
return Response(status=status.HTTP_404_NOT_FOUND)
authorDict = serializeAuthor(author)
friendList = []
asAuthor = Friend.objects.filter(author=author)
asFriend = Friend.objects.filter(friend=author)
friendList += list(map(lambda x: serializeAuthor(x.friend), asAuthor))
friendList += list(map(lambda x: serializeAuthor(x.author), asFriend))
authorDict['friends'] = friendList
return Response(authorDict, status=status.HTTP_200_OK)
class CreatePostAPIView(CreateAPIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
# permission_classes = [IsAuthenticated]
serializer_class = CreatePostSerializer
def create(self, request, pk):
data = request.data.copy()
data['author'] = pk
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(
{**serializer.data},
status=status.HTTP_201_CREATED,
headers=headers
)
class GetPostAPIView(APIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
# permission_classes = [IsAuthenticated]
serializer_class = GetPostSerializer
def get(self, request, pk, format=None):
post = Post.objects.get(uuid=pk)
postDict = serializePost(post)
if post.visibility == 'PUBLIC':
return Response(postDict, status=status.HTTP_200_OK)
elif str(request.user) == 'AnonymousUser':
return Response(status=status.HTTP_401_UNAUTHORIZED)
elif post.visibility == 'FRIENDS':
friendsUUIDList = []
for friend in Friend.objects.filter(author=post.author.uuid):
friendsUUIDList.append(friend.friend.uuid)
for friend in Friend.objects.filter(friend=post.author.uuid):
friendsUUIDList.append(friend.author.uuid)
if request.user.uuid in friendsUUIDList:
return Response(postDict, status=status.HTTP_200_OK)
elif post.visibility == 'FOAF':
friendsUUIDList = []
for friend in Friend.objects.filter(author=post.author.uuid):
friendsUUIDList.append(friend.friend.uuid)
for friend in Friend.objects.filter(friend=post.author.uuid):
friendsUUIDList.append(friend.author.uuid)
foafUUIDList = []
for friendUUID in friendsUUIDList:
tempAuthor = Author.objects.get(uuid=friendUUID)
for innerFriend in Friend.objects.filter(author=tempAuthor):
if innerFriend.friend.uuid not in foafUUIDList:
foafUUIDList.append(innerFriend.friend.uuid)
for innerFriend in Friend.objects.filter(friend=tempAuthor):
                    if innerFriend.author.uuid not in foafUUIDList:
foafUUIDList.append(innerFriend.author.uuid)
            foafUUIDList.extend(friendsUUIDList)  # include direct friends as well
if request.user.uuid in foafUUIDList:
return Response(postDict, status=status.HTTP_200_OK)
elif post.visibility == 'PRIVATE':
if request.user == post.author:
return Response(postDict, status=status.HTTP_200_OK)
elif post.visibility == 'SERVERONLY':
if request.user.host == post.host:
return Response(postDict, status=status.HTTP_200_OK)
return Response(status=status.HTTP_401_UNAUTHORIZED)
# Returns Post by sending POST request
def post(self, request, pk, format=None):
try:
post = Post.objects.get(uuid=request.data['postid'])
postDict = serializePost(post)
except:
print("EXCEPTION")
return Response(status=status.HTTP_404_NOT_FOUND)
userUUID = request.data['author']['id'].split('/author/')[1]
requestor = Author.objects.get(uuid=userUUID)
if post.visibility == 'PUBLIC':
return Response(postDict, status=status.HTTP_200_OK)
elif post.visibility == 'FRIENDS':
friendsUUIDList = []
for friend in Friend.objects.filter(author=post.author.uuid):
friendsUUIDList.append(friend.friend.uuid)
for friend in Friend.objects.filter(friend=post.author.uuid):
friendsUUIDList.append(friend.author.uuid)
if requestor.uuid in friendsUUIDList:
return Response(postDict, status=status.HTTP_200_OK)
elif post.visibility == 'FOAF':
friendsUUIDList = []
for friend in Friend.objects.filter(author=post.author.uuid):
friendsUUIDList.append(friend.friend.uuid)
for friend in Friend.objects.filter(friend=post.author.uuid):
friendsUUIDList.append(friend.author.uuid)
foafUUIDList = []
for friendUUID in friendsUUIDList:
tempAuthor = Author.objects.get(uuid=friendUUID)
for innerFriend in Friend.objects.filter(author=tempAuthor):
if innerFriend.friend.uuid not in foafUUIDList:
foafUUIDList.append(innerFriend.friend.uuid)
for innerFriend in Friend.objects.filter(friend=tempAuthor):
                    if innerFriend.author.uuid not in foafUUIDList:
foafUUIDList.append(innerFriend.author.uuid)
            foafUUIDList.extend(friendsUUIDList)  # include direct friends as well
if requestor.uuid in foafUUIDList:
return Response(postDict, status=status.HTTP_200_OK)
elif post.visibility == 'PRIVATE':
if requestor == post.author:
return Response(postDict, status=status.HTTP_200_OK)
elif post.visibility == 'SERVERONLY':
if requestor.host == post.host:
return Response(postDict, status=status.HTTP_200_OK)
return Response(status=status.HTTP_401_UNAUTHORIZED)
class GetAllAuthorPostAPIView(APIView):
authentication_classes = [BasicAuthentication, SessionAuthentication]
serializer_class = GetPostSerializer
# Returns All Author's Posts by sending UUID of Author
def get(self, request, pk, format=None):
posts = Post.objects.filter(author=pk)
posts = posts.filter(visibility='PUBLIC')
serializer = GetPostSerializer(posts, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class GetAllAuthorFriendsAPIView(APIView):
authentication_classes = [BasicAuthentication, SessionAuthentication]
serializer_class = FriendSerializer
# Returns All Author's friends
def get(self, request, pk, format=None):
friendList = []
asAuthor = Friend.objects.filter(author=pk)
asFriend = Friend.objects.filter(friend=pk)
friendList += list(map(lambda x: serializeAuthor(x.friend), asAuthor))
friendList += list(map(lambda x: serializeAuthor(x.author), asFriend))
return Response(friendList, status=status.HTTP_200_OK)
def post(self, request, pk, format=None):
requestedAuthor = Author.objects.get(uuid=pk)
data = request.data
authors = data['authors']
friendsList = []
for author in authors:
friends = Friend.objects.filter(author=author).filter(friend=requestedAuthor).union(
Friend.objects.filter(author=requestedAuthor).filter(friend=author))
if len(friends) == 1:
friendsList.append(author)
return Response({
"query": "friends",
"author": requestedAuthor.uuid,
"authors": friendsList
})
class GetAllPublicPostsAPIView(APIView):
authentication_classes = [NodeBasicAuth,SessionAuthentication, BasicAuthentication]
serializer_class = GetPostSerializer
def get(self, request, format=None):
posts = Post.objects.filter(visibility='PUBLIC')
data = paginated_result(
request, posts, GetPostSerializer, 'posts', query='posts')
return Response(data, status=status.HTTP_200_OK)
class GetAllVisiblePostsAPIView(APIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
serializer_class = GetPostSerializer
def get(self, request, format=None):
user = request.user
if str(user) == "AnonymousUser":
print("Anonymous user")
posts = Post.objects.filter(visibility='PUBLIC')
serializer = GetPostSerializer(posts, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
userUUID = user.uuid
print(userUUID)
# -------------
# public posts
# -------------
filteredPosts = Post.objects.filter(visibility='PUBLIC')
# -------------
# foaf posts
# -------------
foafPosts = Post.objects.filter(visibility='FOAF')
friends = Friend.objects.filter(author=userUUID).union(
Friend.objects.filter(friend=userUUID))
foafUUIDs = []
friendUUIDs = []
# for each friend
for friend in friends:
# append friend's uuid to foaf
if friend.friend not in foafUUIDs:
foafUUIDs.append(friend.friend)
friendUUIDs.append(friend.friend)
# innerFriends is friend's friends
innerFriend = Friend.objects.filter(author=friend.friend)
for f2 in innerFriend:
if f2.friend not in foafUUIDs:
foafUUIDs.append(f2.friend)
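# QuerySet.union() returns a new queryset, so the result must be reassigned below.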
for uuid in foafUUIDs:
filteredPosts = filteredPosts.union(foafPosts.filter(author=uuid))
# -------------
# friend posts
# -------------
friendPosts = Post.objects.filter(visibility='FRIENDS')
for uuid in friendUUIDs:
filteredPosts = filteredPosts.union(friendPosts.filter(author=uuid))
# -------------
# private posts
# -------------
privatePosts = Post.objects.filter(visibility='PRIVATE')
filteredPosts = filteredPosts.union(privatePosts.filter(author=userUUID))
# -------------
# server posts
# -------------
author = Author.objects.get(uuid=userUUID)
serverAuthors = Author.objects.filter(host=author.host)
print(serverAuthors)
for author in serverAuthors:
temp = Post.objects.filter(
author=author.uuid, visibility='SERVERONLY')
# print('temp ok')
filteredPosts = filteredPosts.union(temp)
postList = []
for post in filteredPosts:
postList.append(serializePost(post))
return PostPagination().get_paginated_response(postList, author.host)
# serializer = GetPostSerializer(filteredPosts, many=True)
class DeletePostAPIView(APIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
# permission_classes = [IsAuthenticated]
# serializer_class = DeletePostSerializer
def delete(self, request, pk):
try:
post = Post.objects.get(uuid=pk).delete()
return Response(status=status.HTTP_200_OK)
except Exception as e:
print("Post Not Found!", e)
return Response(status=status.HTTP_404_NOT_FOUND)
class CreateCommentAPIView(CreateAPIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
permission_classes = [IsAuthenticated]
serializer_class = CreateCommentSerializer
def create(self, request, pk):
print(pk)
data = request.data.copy()
data['post'] = pk
data['author'] = request.user.uuid
print(data)
serializer = self.get_serializer(data=data)
# print(serializer)
serializer.is_valid(raise_exception=True)
# print("VALID")
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(
{**serializer.data},
status=status.HTTP_201_CREATED,
headers=headers
)
class CommentsAPIView(APIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
# permission_classes = [IsAuthenticated]
serializer_class = CommentSerializer
def get(self, request, pk):
postHost = Post.objects.get(uuid=pk).host.hostname
comments = Comment.objects.filter(post=pk)
commentList = []
for comment in comments:
commentList.append(serializeComment(comment))
return CommentPagination().get_paginated_response(commentList, postHost)
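# Handles an incoming comment on a post: the comment is only saved when the commenting author passes the post's visibility check (PUBLIC, FRIENDS, FOAF, PRIVATE or SERVERONLY); otherwise a 403 response is returned.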
def post(self, request, pk, format=None):
try:
post = Post.objects.get(
uuid=request.data['post'].split('/posts/')[1])
postDict = serializePost(post)
except:
print("EXCEPTION")
return Response(status=status.HTTP_404_NOT_FOUND)
userUUID = request.data['comment']['author']['id'].split('/author/')[1]
requestor = Author.objects.get(uuid=userUUID)
if post.visibility == 'PUBLIC':
newComment = Comment(author=requestor, comment=request.data['comment']['comment'], contentType=request.data['comment']
['contentType'], published=request.data['comment']['published'], post=post)
newComment.save()
return Response({
"query": "addComment",
"success": True,
"message": "Comment Added"},
status=status.HTTP_200_OK)
elif post.visibility == 'FRIENDS':
friendsUUIDList = []
for friend in Friend.objects.filter(author=post.author.uuid):
friendsUUIDList.append(friend.friend.uuid)
for friend in Friend.objects.filter(friend=post.author.uuid):
friendsUUIDList.append(friend.author.uuid)
if requestor.uuid in friendsUUIDList:
newComment = Comment(author=requestor, comment=request.data['comment']['comment'], contentType=request.data['comment']
['contentType'], published=request.data['comment']['published'], post=post)
newComment.save()
return Response({
"query": "addComment",
"success": True,
"message": "Comment Added"},
status=status.HTTP_200_OK)
elif post.visibility == 'FOAF':
friendsUUIDList = []
for friend in Friend.objects.filter(author=post.author.uuid):
friendsUUIDList.append(friend.friend.uuid)
for friend in Friend.objects.filter(friend=post.author.uuid):
friendsUUIDList.append(friend.author.uuid)
foafUUIDList = []
for friendUUID in friendsUUIDList:
tempAuthor = Author.objects.get(uuid=friendUUID)
for innerFriend in Friend.objects.filter(author=tempAuthor):
if innerFriend.friend.uuid not in foafUUIDList:
foafUUIDList.append(innerFriend.friend.uuid)
for innerFriend in Friend.objects.filter(friend=tempAuthor):
if innerFriend.author.uuid not in foafUUIDList:
foafUUIDList.append(innerFriend.author.uuid)
foafUUIDList.extend(friendsUUIDList)
if requestor.uuid in foafUUIDList:
newComment = Comment(author=requestor, comment=request.data['comment']['comment'], contentType=request.data['comment']
['contentType'], published=request.data['comment']['published'], post=post)
newComment.save()
return Response({
"query": "addComment",
"success": True,
"message": "Comment Added"},
status=status.HTTP_200_OK)
elif post.visibility == 'PRIVATE':
if requestor == post.author:
newComment = Comment(author=requestor, comment=request.data['comment']['comment'], contentType=request.data['comment']
['contentType'], published=request.data['comment']['published'], post=post)
newComment.save()
return Response({
"query": "addComment",
"success": True,
"message": "Comment Added"},
status=status.HTTP_200_OK)
elif post.visibility == 'SERVERONLY':
if requestor.host == post.host:
newComment = Comment(author=requestor, comment=request.data['comment']['comment'], contentType=request.data['comment']
['contentType'], published=request.data['comment']['published'], post=post)
newComment.save()
return Response({
"query": "addComment",
"success": True,
"message": "Comment Added"},
status=status.HTTP_200_OK)
return Response({"query": "addComment",
"success": False,
"message": "Comment not allowed"},
status=status.HTTP_403_FORBIDDEN)
class CreateFriendRequestAPIView(CreateAPIView):
authentication_classes = [SessionAuthentication, BasicAuthentication]
serializer_class = FriendRequestSerializer
def create(self, request):
data = request.data
print(data)
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(
{**serializer.data},
status=status.HTTP_201_CREATED,
headers=headers
)
class GetAllAuthorFriendRequest(APIView):
serializer_class = FriendRequestSerializer
def get(self, request, pk):
friend_requests = FriendRequest.objects.filter(to_author=pk)
serializer = FriendRequestSerializer(friend_requests, many=True)
return Response(
serializer.data, status=status.HTTP_200_OK
)
class GetAllFOAFAPIView(APIView):
serializer_class = AuthorSerializer
def get(self, request, pk):
friends = Friend.objects.filter(author=pk)
foaf = []
# for each friend
for friend in friends:
# append friend's uuid to foaf
if friend.friend not in foaf:
foaf.append(friend.friend)
# innerFriends is friend's friends
innerFriend = Friend.objects.filter(author=friend.friend)
for f2 in innerFriend:
if f2.friend not in foaf:
foaf.append(f2.friend)
authors = []
for author in foaf:
if author.uuid != pk:
authors.append(Author.objects.get(uuid=author.uuid))
serializer = AuthorSerializer(authors, many=True)
return Response(
serializer.data, status=status.HTTP_200_OK
)
class CreateFriendAPIView(CreateAPIView):
serializer_class = FriendSerializer
# pk = uuid of friend request
def create(self, request, pk):
friendRequest = FriendRequest.objects.get(uuid=pk)
data = {}
data['friend'] = friendRequest.to_author
data['author'] = friendRequest.from_author
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(
{**serializer.data},
status=status.HTTP_201_CREATED,
headers=headers
)
class CheckFriendAPIView(APIView):
def get(self, request, pk1, pk2):
# checking for friendship
author1 = Author.objects.get(uuid=pk1)
author1dict = serializeAuthor(author1)
author2 = Author.objects.get(uuid=pk2)
author2dict = serializeAuthor(author2)
friends = Friend.objects.filter(author=author1, friend=author2).union(
Friend.objects.filter(author=author2, friend=author1))
return Response(
{"query": "friends",
"authors": [author1dict['id'], author2dict['id']],
"friends": len(list(friends.all())) != 0
}
)
class DeleteFriendAPIView(APIView):
def delete(self, request, pk, format=None):
currentUser = request.user
# determine if user is Friend object's "author" or "friend"
try:
friend = Friend.objects.get(author=pk, friend=currentUser)
except Exception:
try:
friend = Friend.objects.get(author=currentUser, friend=pk)
except Exception:
print("Friendship doesn't exist!")
return Response(
status=status.HTTP_404_NOT_FOUND
)
friend.delete()
return Response(
status=status.HTTP_204_NO_CONTENT
)
|
# narrowtemplates.py - added template keywords for narrow clones
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from mercurial import (
registrar,
revlog,
)
keywords = {}
templatekeyword = registrar.templatekeyword(keywords)
revsetpredicate = registrar.revsetpredicate()
def _isellipsis(repo, rev):
if repo.changelog.flags(rev) & revlog.REVIDX_ELLIPSIS:
return True
return False
@templatekeyword(b'ellipsis', requires={b'repo', b'ctx'})
def ellipsis(context, mapping):
"""String. 'ellipsis' if the change is an ellipsis node, else ''."""
repo = context.resource(mapping, b'repo')
ctx = context.resource(mapping, b'ctx')
if _isellipsis(repo, ctx.rev()):
return b'ellipsis'
return b''
@templatekeyword(b'outsidenarrow', requires={b'repo', b'ctx'})
def outsidenarrow(context, mapping):
"""String. 'outsidenarrow' if the change affects no tracked files,
else ''."""
repo = context.resource(mapping, b'repo')
ctx = context.resource(mapping, b'ctx')
m = repo.narrowmatch()
if ctx.files() and not m.always():
if not any(m(f) for f in ctx.files()):
return b'outsidenarrow'
return b''
@revsetpredicate(b'ellipsis()')
def ellipsisrevset(repo, subset, x):
"""Changesets that are ellipsis nodes."""
return subset.filter(lambda r: _isellipsis(repo, r))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.exceptions import ValidationError
from django.core.validators import EmailValidator as DjangoEmailValidator
from django.utils.encoding import force_text
from dns import resolver
from dns.exception import DNSException
class EmailValidator(DjangoEmailValidator):
"""Extended EmailValidator which checks for MX records on the domain."""
def __call__(self, value, *args, **kwargs):
super(EmailValidator, self).__call__(value, *args, **kwargs)
value = force_text(value)
domain = value.split('@')[1]
try:
resolver.query(domain, 'MX')
except DNSException:
raise ValidationError('Enter a valid email address domain.',
code=self.code)
validate_email = EmailValidator()
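# Usage sketch: calling validate_email('user@example.com') raises ValidationError
# if the address is malformed or if an MX lookup on its domain fails.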
|
# MIT License
#
# Copyright (c) 2018 Airthings AS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://airthings.com
# ===============================
# Module import dependencies
# ===============================
import sys
import time
import tableprint
from bluepy import btle
from Sensors import Sensors
from WavePlus import WavePlus
# ===============================
# Script guards for correct usage
# ===============================
def print_usage():
print ("USAGE: read_waveplus.py SN SAMPLE-PERIOD [pipe > yourfile.txt]")
print (" where SN is the 10-digit serial number found under the magnetic backplate of your Wave Plus.")
print (" where SAMPLE-PERIOD is the time in seconds between reading the current values.")
print (" where [pipe > yourfile.txt] is optional and specifies that you want to pipe your results to yourfile.txt.")
if len(sys.argv) < 3:
print ("ERROR: Missing input argument SN or SAMPLE-PERIOD.")
print_usage()
sys.exit(1)
if sys.argv[1].isdigit() is not True or len(sys.argv[1]) != 10:
print ("ERROR: Invalid SN format.")
print_usage()
sys.exit(1)
if sys.argv[2].isdigit() is not True or int(sys.argv[2]) <= 0:
print ("ERROR: Invalid SAMPLE-PERIOD. Must be a numerical value larger than zero.")
print_usage()
sys.exit(1)
if len(sys.argv) > 3:
Mode = sys.argv[3].lower()
if Mode == 'mqtt':
Broker = sys.argv[4]
else:
Broker = None
else:
Mode = 'terminal' # (default) print to terminal
if Mode!='pipe' and Mode!='terminal' and Mode!='mqtt':
print ("ERROR: Invalid piping method.")
print_usage()
sys.exit(1)
SerialNumber = int(sys.argv[1])
SamplePeriod = int(sys.argv[2])
try:
#---- Initialize ----#
waveplus = WavePlus(SerialNumber)
if (Mode=='terminal'):
print ("\nPress CTRL+C to exit program\n")
header = ['TimeStamp', 'Humidity', 'Radon ST avg', 'Radon LT avg', 'Temperature', 'Pressure', 'CO2 level', 'VOC level', 'Battery']
print ("Device serial number: %s" %(SerialNumber))
if (Mode=='terminal'):
print (tableprint.header(header, width=12))
elif (Mode=='pipe'):
print (header)
while True:
sensors = None
try:
waveplus.connect()
# read values
sensors = waveplus.read()
# extract
timestamp = time.strftime('%m/%d..%H:%M')
humidity = str(sensors.getValue(sensors.SENSOR_IDX_HUMIDITY)) + " " + str(sensors.getUnit(sensors.SENSOR_IDX_HUMIDITY))
radon_st_avg = str(sensors.getValue(sensors.SENSOR_IDX_RADON_SHORT_TERM_AVG)) + " " + str(sensors.getUnit(sensors.SENSOR_IDX_RADON_SHORT_TERM_AVG))
radon_lt_avg = str(sensors.getValue(sensors.SENSOR_IDX_RADON_LONG_TERM_AVG)) + " " + str(sensors.getUnit(sensors.SENSOR_IDX_RADON_LONG_TERM_AVG))
temperature = str(sensors.getValue(sensors.SENSOR_IDX_TEMPERATURE)) + " " + str(sensors.getUnit(sensors.SENSOR_IDX_TEMPERATURE))
pressure = str(sensors.getValue(sensors.SENSOR_IDX_REL_ATM_PRESSURE)) + " " + str(sensors.getUnit(sensors.SENSOR_IDX_REL_ATM_PRESSURE))
CO2_lvl = str(sensors.getValue(sensors.SENSOR_IDX_CO2_LVL)) + " " + str(sensors.getUnit(sensors.SENSOR_IDX_CO2_LVL))
VOC_lvl = str(sensors.getValue(sensors.SENSOR_IDX_VOC_LVL)) + " " + str(sensors.getUnit(sensors.SENSOR_IDX_VOC_LVL))
battery = str(sensors.getValue(sensors.SENSOR_IDX_BATTERY)) + " " + str(sensors.getUnit(sensors.SENSOR_IDX_BATTERY))
# Print data
data = [timestamp, humidity, radon_st_avg, radon_lt_avg, temperature, pressure, CO2_lvl, VOC_lvl, battery]
if (Mode=='terminal'):
print (tableprint.row(data, width=12))
elif (Mode=='pipe'):
print (data)
waveplus.disconnect()
time.sleep(SamplePeriod)
except btle.BTLEException as ex:
timestamp = time.strftime('%m/%d..%H:%M')
errormsg = " ERROR: Failed to connect to the AirThings Wave Plus sensor, will try again on the next cycle..."
if (Mode=='terminal'):
#print (tableprint.bottom(9, width=12))
print (u'\u2502' + " " + timestamp + " " + u'\u2502' + errormsg + " " + u'\u2502')
#print (tableprint.header(header, width=12))
elif (Mode=='pipe'):
print (timestamp + errormsg)
time.sleep(SamplePeriod)
continue
finally:
waveplus.disconnect()
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 23:23:23 2020
@author: afran
"""
import numpy as np
import scipy.io as sio
import os
import sys
from sklearn.svm import LinearSVC
# SVM Classification
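# Each .mat file in the directory is expected to hold a feature vector under the
# 'PersImg' key (presumably a flattened persistence image); filenames encode the
# class label via the substrings "normal" (0) and "increased" (1).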
def SVM_leave_one_classify(directory, SVM_random_state=None, SVM_dual=True, SVM_penalty='l2', SVM_max_iter=1000, SVM_C=1.0):
#Creating ground truth array
truth = []
prediction = []
for file in os.listdir(directory):
if "normal" in file:
truth.append(0)
elif "increased" in file:
truth.append(1)
else:
print("[Error] Invalid Filename")
sys.exit()
#Leave one subject out validation using SVM
for file in os.listdir(directory):
print("predicting file " + file)
X_train = []
y_train = []
#Every file except for left out trial
for train_file in os.listdir(directory):
if train_file != file:
X_data = np.squeeze(np.transpose(sio.loadmat(directory + train_file)['PersImg']))
X_train.append(X_data)
if "normal" in train_file:
y_train.append(0)
elif "increased" in train_file:
y_train.append(1)
else:
print("[Error] Invalid Filename")
sys.exit()
#Using parameters entered earlier
clf = LinearSVC(random_state=SVM_random_state, dual=SVM_dual, penalty=SVM_penalty, max_iter=SVM_max_iter, C=SVM_C)
clf.fit(X_train, y_train)
X_pred_data = np.squeeze(np.transpose(sio.loadmat(directory + file)['PersImg']))
X_pred_data = [X_pred_data]
prediction.append(clf.predict(X_pred_data)[0])
#Checking against ground truth array
print("Checking against ground truth array....")
accuracy_array = []
for i in range(len(truth)):
accuracy_array.append(1 if truth[i] == prediction[i] else 0)
accuracy = sum(accuracy_array)/len(accuracy_array)
return accuracy
|
from unittest import TestCase
from peco.parser import parse, parse_string
class TestParseMember(TestCase):
def test_parse(self):
template = parse_string("$person.name")
# when undefined
self.assertEqual(template.render_string(), "")
# when did set
self.assertEqual(template.render_string(
person={"name": "tikubonn"}), "tikubonn")
|
STOP = 0x0
ADD = 0x1
MUL = 0x2
SUB = 0x3
DIV = 0x4
SDIV = 0x5
MOD = 0x6
SMOD = 0x7
ADDMOD = 0x8
MULMOD = 0x9
EXP = 0xa
SIGNEXTEND = 0xb
LT = 0x10
GT = 0x11
SLT = 0x12
SGT = 0x13
EQ = 0x14
ISZERO = 0x15
AND = 0x16
OR = 0x17
XOR = 0x18
NOT = 0x19
BYTE = 0x1a
SHL = 0x1b
SHR = 0x1c
SAR = 0x1d
SHA3 = 0x20
ADDRESS = 0x30
BALANCE = 0x31
ORIGIN = 0x32
CALLER = 0x33
CALLVALUE = 0x34
CALLDATALOAD = 0x35
CALLDATASIZE = 0x36
CALLDATACOPY = 0x37
CODESIZE = 0x38
CODECOPY = 0x39
GASPRICE = 0x3a
EXTCODESIZE = 0x3b
EXTCODECOPY = 0x3c
RETURNDATASIZE = 0x3d
RETURNDATACOPY = 0x3e
EXTCODEHASH = 0x3f
BLOCKHASH = 0x40
COINBASE = 0x41
TIMESTAMP = 0x42
NUMBER = 0x43
DIFFICULTY = 0x44
GASLIMIT = 0x45
CHAINID = 0x46
SELFBALANCE = 0x47
BASEFEE = 0x48
POP = 0x50
MLOAD = 0x51
MSTORE = 0x52
MSTORE8 = 0x53
SLOAD = 0x54
SSTORE = 0x55
JUMP = 0x56
JUMPI = 0x57
PC = 0x58
MSIZE = 0x59
GAS = 0x5a
JUMPDEST = 0x5b
PUSH1 = 0x60
PUSH2 = 0x61
PUSH3 = 0x62
PUSH4 = 0x63
PUSH5 = 0x64
PUSH6 = 0x65
PUSH7 = 0x66
PUSH8 = 0x67
PUSH9 = 0x68
PUSH10 = 0x69
PUSH11 = 0x6a
PUSH12 = 0x6b
PUSH13 = 0x6c
PUSH14 = 0x6d
PUSH15 = 0x6e
PUSH16 = 0x6f
PUSH17 = 0x70
PUSH18 = 0x71
PUSH19 = 0x72
PUSH20 = 0x73
PUSH21 = 0x74
PUSH22 = 0x75
PUSH23 = 0x76
PUSH24 = 0x77
PUSH25 = 0x78
PUSH26 = 0x79
PUSH27 = 0x7a
PUSH28 = 0x7b
PUSH29 = 0x7c
PUSH30 = 0x7d
PUSH31 = 0x7e
PUSH32 = 0x7f
DUP1 = 0x80
DUP2 = 0x81
DUP3 = 0x82
DUP4 = 0x83
DUP5 = 0x84
DUP6 = 0x85
DUP7 = 0x86
DUP8 = 0x87
DUP9 = 0x88
DUP10 = 0x89
DUP11 = 0x8a
DUP12 = 0x8b
DUP13 = 0x8c
DUP14 = 0x8d
DUP15 = 0x8e
DUP16 = 0x8f
SWAP1 = 0x90
SWAP2 = 0x91
SWAP3 = 0x92
SWAP4 = 0x93
SWAP5 = 0x94
SWAP6 = 0x95
SWAP7 = 0x96
SWAP8 = 0x97
SWAP9 = 0x98
SWAP10 = 0x99
SWAP11 = 0x9a
SWAP12 = 0x9b
SWAP13 = 0x9c
SWAP14 = 0x9d
SWAP15 = 0x9e
SWAP16 = 0x9f
LOG0 = 0xa0
LOG1 = 0xa1
LOG2 = 0xa2
LOG3 = 0xa3
LOG4 = 0xa4
PUSH = 0xb0
DUP = 0xb1
SWAP = 0xb2
CREATE = 0xf0
CALL = 0xf1
CALLCODE = 0xf2
RETURN = 0xf3
DELEGATECALL = 0xf4
STATICCALL = 0xfa
REVERT = 0xfd
INVALID = 0xfe
SELFDESTRUCT = 0xff
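# Helpers for the contiguous PUSH1-PUSH32, DUP1-DUP16 and SWAP1-SWAP16 opcode ranges.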
def is_push(op):
return 0x60 <= op <= 0x7f
def is_dup(op):
return 0x80 <= op <= 0x8f
def is_swap(op):
return 0x90 <= op <= 0x9f
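# Net stack effect of each opcode (items pushed minus items popped).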
STACK_CHANGES = {
STOP: 0,
ADD: -1,
MUL: -1,
SUB: -1,
DIV: -1,
SDIV: -1,
MOD: -1,
SMOD: -1,
ADDMOD: -2,
MULMOD: -2,
EXP: -1,
SIGNEXTEND: -1,
LT: -1,
GT: -1,
SLT: -1,
SGT: -1,
EQ: -1,
ISZERO: 0,
AND: -1,
OR: -1,
XOR: -1,
NOT: 0,
BYTE: -1,
SHL: -1,
SHR: -1,
SAR: -1,
SHA3: -1,
ADDRESS: 1,
BALANCE: 0,
ORIGIN: 1,
CALLER: 1,
CALLVALUE: 1,
CALLDATALOAD: 0,
CALLDATASIZE: 1,
CALLDATACOPY: -3,
CODESIZE: 1,
CODECOPY: -3,
GASPRICE: 1,
EXTCODESIZE: 0,
EXTCODECOPY: -4,
RETURNDATASIZE: 1,
RETURNDATACOPY: -3,
EXTCODEHASH: 0,
BLOCKHASH: 0,
COINBASE: 1,
TIMESTAMP: 1,
NUMBER: 1,
DIFFICULTY: 1,
GASLIMIT: 1,
CHAINID: 1,
SELFBALANCE: 1,
BASEFEE: 1,
POP: -1,
MLOAD: 0,
MSTORE: -2,
MSTORE8: -2,
SLOAD: 0,
SSTORE: -2,
JUMP: -1,
JUMPI: -2,
PC: 1,
MSIZE: 1,
GAS: 1,
JUMPDEST: 0,
PUSH1: 1,
PUSH2: 1,
PUSH3: 1,
PUSH4: 1,
PUSH5: 1,
PUSH6: 1,
PUSH7: 1,
PUSH8: 1,
PUSH9: 1,
PUSH10: 1,
PUSH11: 1,
PUSH12: 1,
PUSH13: 1,
PUSH14: 1,
PUSH15: 1,
PUSH16: 1,
PUSH17: 1,
PUSH18: 1,
PUSH19: 1,
PUSH20: 1,
PUSH21: 1,
PUSH22: 1,
PUSH23: 1,
PUSH24: 1,
PUSH25: 1,
PUSH26: 1,
PUSH27: 1,
PUSH28: 1,
PUSH29: 1,
PUSH30: 1,
PUSH31: 1,
PUSH32: 1,
DUP1: 1,
DUP2: 1,
DUP3: 1,
DUP4: 1,
DUP5: 1,
DUP6: 1,
DUP7: 1,
DUP8: 1,
DUP9: 1,
DUP10: 1,
DUP11: 1,
DUP12: 1,
DUP13: 1,
DUP14: 1,
DUP15: 1,
DUP16: 1,
SWAP1: 0,
SWAP2: 0,
SWAP3: 0,
SWAP4: 0,
SWAP5: 0,
SWAP6: 0,
SWAP7: 0,
SWAP8: 0,
SWAP9: 0,
SWAP10: 0,
SWAP11: 0,
SWAP12: 0,
SWAP13: 0,
SWAP14: 0,
SWAP15: 0,
SWAP16: 0,
LOG0: -2,
LOG1: -3,
LOG2: -4,
LOG3: -5,
LOG4: -6,
CREATE: -2,
CALL: -6,
CALLCODE: -6,
RETURN: -2,
DELEGATECALL: -5,
STATICCALL: -5,
REVERT: -2,
INVALID: 0,
SELFDESTRUCT: -1
}
OP_NAME = {
STOP: 'STOP',
ADD: 'ADD',
MUL: 'MUL',
SUB: 'SUB',
DIV: 'DIV',
SDIV: 'SDIV',
MOD: 'MOD',
SMOD: 'SMOD',
ADDMOD: 'ADDMOD',
MULMOD: 'MULMOD',
EXP: 'EXP',
SIGNEXTEND: 'SIGNEXTEND',
LT: 'LT',
GT: 'GT',
SLT: 'SLT',
SGT: 'SGT',
EQ: 'EQ',
ISZERO: 'ISZERO',
AND: 'AND',
OR: 'OR',
XOR: 'XOR',
NOT: 'NOT',
BYTE: 'BYTE',
SHL: 'SHL',
SHR: 'SHR',
SAR: 'SAR',
SHA3: 'SHA3',
ADDRESS: 'ADDRESS',
BALANCE: 'BALANCE',
ORIGIN: 'ORIGIN',
CALLER: 'CALLER',
CALLVALUE: 'CALLVALUE',
CALLDATALOAD: 'CALLDATALOAD',
CALLDATASIZE: 'CALLDATASIZE',
CALLDATACOPY: 'CALLDATACOPY',
CODESIZE: 'CODESIZE',
CODECOPY: 'CODECOPY',
GASPRICE: 'GASPRICE',
EXTCODESIZE: 'EXTCODESIZE',
EXTCODECOPY: 'EXTCODECOPY',
RETURNDATASIZE: 'RETURNDATASIZE',
RETURNDATACOPY: 'RETURNDATACOPY',
EXTCODEHASH: 'EXTCODEHASH',
BLOCKHASH: 'BLOCKHASH',
COINBASE: 'COINBASE',
TIMESTAMP: 'TIMESTAMP',
NUMBER: 'NUMBER',
DIFFICULTY: 'DIFFICULTY',
GASLIMIT: 'GASLIMIT',
CHAINID: 'CHAINID',
SELFBALANCE: 'SELFBALANCE',
BASEFEE: 'BASEFEE',
POP: 'POP',
MLOAD: 'MLOAD',
MSTORE: 'MSTORE',
MSTORE8: 'MSTORE8',
SLOAD: 'SLOAD',
SSTORE: 'SSTORE',
JUMP: 'JUMP',
JUMPI: 'JUMPI',
PC: 'PC',
MSIZE: 'MSIZE',
GAS: 'GAS',
JUMPDEST: 'JUMPDEST',
PUSH1: 'PUSH1',
PUSH2: 'PUSH2',
PUSH3: 'PUSH3',
PUSH4: 'PUSH4',
PUSH5: 'PUSH5',
PUSH6: 'PUSH6',
PUSH7: 'PUSH7',
PUSH8: 'PUSH8',
PUSH9: 'PUSH9',
PUSH10: 'PUSH10',
PUSH11: 'PUSH11',
PUSH12: 'PUSH12',
PUSH13: 'PUSH13',
PUSH14: 'PUSH14',
PUSH15: 'PUSH15',
PUSH16: 'PUSH16',
PUSH17: 'PUSH17',
PUSH18: 'PUSH18',
PUSH19: 'PUSH19',
PUSH20: 'PUSH20',
PUSH21: 'PUSH21',
PUSH22: 'PUSH22',
PUSH23: 'PUSH23',
PUSH24: 'PUSH24',
PUSH25: 'PUSH25',
PUSH26: 'PUSH26',
PUSH27: 'PUSH27',
PUSH28: 'PUSH28',
PUSH29: 'PUSH29',
PUSH30: 'PUSH30',
PUSH31: 'PUSH31',
PUSH32: 'PUSH32',
DUP1: 'DUP1',
DUP2: 'DUP2',
DUP3: 'DUP3',
DUP4: 'DUP4',
DUP5: 'DUP5',
DUP6: 'DUP6',
DUP7: 'DUP7',
DUP8: 'DUP8',
DUP9: 'DUP9',
DUP10: 'DUP10',
DUP11: 'DUP11',
DUP12: 'DUP12',
DUP13: 'DUP13',
DUP14: 'DUP14',
DUP15: 'DUP15',
DUP16: 'DUP16',
SWAP1: 'SWAP1',
SWAP2: 'SWAP2',
SWAP3: 'SWAP3',
SWAP4: 'SWAP4',
SWAP5: 'SWAP5',
SWAP6: 'SWAP6',
SWAP7: 'SWAP7',
SWAP8: 'SWAP8',
SWAP9: 'SWAP9',
SWAP10: 'SWAP10',
SWAP11: 'SWAP11',
SWAP12: 'SWAP12',
SWAP13: 'SWAP13',
SWAP14: 'SWAP14',
SWAP15: 'SWAP15',
SWAP16: 'SWAP16',
LOG0: 'LOG0',
LOG1: 'LOG1',
LOG2: 'LOG2',
LOG3: 'LOG3',
LOG4: 'LOG4',
CREATE: 'CREATE',
CALL: 'CALL',
CALLCODE: 'CALLCODE',
RETURN: 'RETURN',
DELEGATECALL: 'DELEGATECALL',
STATICCALL: 'STATICCALL',
REVERT: 'REVERT',
INVALID: 'INVALID',
SELFDESTRUCT: 'SELFDESTRUCT',
}
INTERESTING_OPS = [
LT,
GT,
SLT,
SGT,
ISZERO,
AND,
OR,
XOR,
NOT,
SHA3,
ADDRESS,
BALANCE,
ORIGIN,
CALLER,
CALLVALUE,
CALLDATALOAD,
CALLDATASIZE,
CALLDATACOPY,
CODESIZE,
CODECOPY,
GASPRICE,
EXTCODESIZE,
EXTCODECOPY,
RETURNDATASIZE,
RETURNDATACOPY,
BLOCKHASH,
COINBASE,
TIMESTAMP,
NUMBER,
DIFFICULTY,
GASLIMIT,
SLOAD,
SSTORE,
PC,
MSIZE,
GAS,
LOG0,
LOG1,
LOG2,
LOG3,
LOG4,
CREATE,
CALL,
CALLCODE,
RETURN,
DELEGATECALL,
STATICCALL,
REVERT,
INVALID,
SELFDESTRUCT,
]
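# Opcode groups by how they touch EVM memory: writers, readers, and the call
# family, which both reads (call arguments) and writes (return data).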
MEM_WRITES = {
MSTORE,
MSTORE8,
CALLDATACOPY,
CODECOPY,
RETURNDATACOPY,
EXTCODECOPY,
}
MEM_READS = {
SHA3,
LOG0,
LOG1,
LOG2,
LOG3,
LOG4,
MLOAD,
CREATE,
RETURN,
REVERT,
}
MEM_READ_WRITES = {
CALL,
CALLCODE,
DELEGATECALL,
STATICCALL,
}
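# Reduce an opcode-indexed count vector to just the INTERESTING_OPS entries, preserving their order.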
def select_interesting_ops(bow):
res = []
for op in INTERESTING_OPS:
res.append(bow[op])
return res
|
""" Configure and create build file for libnuma """
def build_numa_lib(repository_ctx):
repository_ctx.download_and_extract(
url = "https://github.com/numactl/numactl/archive/3648aa5bf6e29bf618195c615ff2ced4bb995327.zip",
stripPrefix = "numactl-3648aa5bf6e29bf618195c615ff2ced4bb995327",
sha256 = "70b41bac88587ee980f266b3b2a9f32e9efef7c003a258eb50677cd8c66f358e",
)
repository_ctx.execute(["./autogen.sh"])
repository_ctx.execute(["./configure"])
repository_ctx.file(
"BUILD",
"""
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
cc_library(
name = "numa",
srcs = [
"affinity.c",
"config.h",
"distance.c",
"libnuma.c",
"rtnetlink.c",
"syscall.c",
"sysfs.c",
],
hdrs = [
"affinity.h",
"numa.h",
"numaif.h",
"numaint.h",
"rtnetlink.h",
"sysfs.h",
"util.h",
],
)
""",
)
numa_configure = repository_rule(
implementation = build_numa_lib,
)
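# Usage sketch (assumed WORKSPACE wiring, not part of this file):
# load("//third_party:numa.bzl", "numa_configure")   # the load path is hypothetical
# numa_configure(name = "libnuma")
# ...then cc targets can depend on "@libnuma//:numa".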
|
from __future__ import print_function
import numpy as np
from scipy.spatial import ConvexHull
import dicom_tools.pyqtgraph as pg
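# roi2myroi: convert a stack of per-slice boolean ROI masks into saved pyqtgraph
# PolyLineROI states, built from the convex hull of the masked pixels in each
# non-empty slice; slices with no ROI pixels stay None.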
def roi2myroi(ROI, verbose=False):
rois = [None]*len(ROI)
firsttime = True
for layer in xrange(0, len(ROI)):
fetta = ROI[layer]
apoints = []
if fetta.max() > 0 :
for j, riga in enumerate(fetta):
for i, elemento in enumerate(riga):
if elemento:
thispoint = [i,j]
apoints.append(thispoint)
points = np.array(apoints)
if firsttime:
firsttime = False
for point in points:
print(point)
hull = ConvexHull(points)
rois[layer] = pg.PolyLineROI(hull.simplices.tolist(), pen=(6,9), closed=True).saveState()
return rois
# print "hull.simplices", hull.simplices
|
print 50000
s = ""
for i in xrange(100000-2):
s += 'cA'
print s
|
from django.conf import settings
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from suit.templatetags.suit_menu import get_menu
from suit.tests.mixins import ModelsTestCaseMixin, UserTestCaseMixin
# conditional import, force_unicode was renamed in Django 1.5
try:
from django.utils.encoding import force_unicode
except ImportError:
from django.utils.encoding import force_text as force_unicode
class SuitMenuTestCase(ModelsTestCaseMixin, UserTestCaseMixin):
def setUp(self):
self.setUpConfig()
self.login_superuser()
def setUpConfig(self):
settings.SUIT_CONFIG.update({
'MENU_OPEN_FIRST_CHILD': True,
'MENU_ICONS': {
'auth': 'icon-auth-assert',
},
'MENU_EXCLUDE': [],
'MENU': [
'tests',
{'app': 'tests'},
{'app': 'tests', 'label': 'Custom'},
{'app': 'tests', 'icon': 'icon-test-assert'},
{'app': 'tests', 'icon': ''},
{'app': 'tests', 'icon': None},
{'app': 'auth'},
'-',
{'label': 'Custom', 'url': '/custom/'},
{'label': 'Custom2', 'url': '/custom2/', 'permissions': 'x'},
{'label': 'Custom3', 'url': '/custom3/', 'permissions': ('y',)},
{'label': 'Custom4', 'url': '/custom4/', 'blank': True},
{'label': 'C4', 'url': '/c/4', 'models': ('book',)},
{'label': 'C5', 'url': '/c/5', 'models': ('tests.book',)},
{'label': 'C6', 'url': 'admin:index', 'models':
({'label': 'mx', 'url': 'admin:index'},)},
{'label': 'C7', 'url': 'tests.book'},
{'app': 'tests', 'models': []},
{'app': 'tests', 'models': ['book', 'album']},
{'app': 'tests', 'models': ['tests.book', 'tests.album']},
{'app': 'tests', 'models': [
'book', 'tests.book',
{
'model': 'tests.album',
'label': 'Albumzzz',
'url': '/albumzzz/',
}, {
'label': 'CustModel',
'url': '/cust-mod/',
'permissions': 'z'
}]},
]
})
def setUpOldConfig(self):
settings.SUIT_CONFIG.update({
'MENU_OPEN_FIRST_CHILD': False,
'MENU_ICONS': {
'tests': 'icon-fire icon-test-against-keyword',
},
'MENU_ORDER': (
('tests', ('book',)),
(('Custom app name', '/custom-url-test/', 'icon-custom-app'), (
('Custom link', '/admin/custom/', 'tests.add_book'),
('Check out error 404', '/admin/non-existant/',
('mega-perms',)),
'tests.album'
)),
(('Custom app no models', '/custom-app-no-models',
'', 'mega-rights'),),
(('Custom app no models tuple perms', '/custom-app-tuple-perms',
'', ('mega-rights',)),),
),
'MENU_EXCLUDE': []
})
del settings.SUIT_CONFIG['MENU']
def make_menu_from_response(self):
return get_menu(self.response.context[-1], self.response._request)
def test_menu_search_url_formats(self):
# Test named url as defined in setUp config
settings.SUIT_CONFIG['SEARCH_URL'] = 'admin:tests_book_changelist'
admin_root = reverse('admin:index')
self.get_response()
self.assertContains(self.response, 'action="%stests/book/"' % admin_root)
# Test absolute url
absolute_search_url = '/absolute/search/url'
settings.SUIT_CONFIG['SEARCH_URL'] = absolute_search_url
self.get_response()
self.assertContains(self.response, absolute_search_url)
def test_menu(self):
mc = settings.SUIT_CONFIG['MENU']
self.get_response()
menu = self.make_menu_from_response()
self.assertEqual(len(menu), len(mc))
# as string
i = 0
first_model_url = reverse('admin:tests_album_changelist')
self.assertEqual(menu[i]['url'], first_model_url)
self.assertEqual(len(menu[i]['models']), 2)
self.assertEqual(menu[i]['name'], mc[i])
self.assertEqual(menu[i]['label'], 'Tests')
self.assertEqual(menu[i]['icon'], None)
self.assertEqual(menu[i]['models'][0]['url'], first_model_url)
self.assertEqual(force_unicode(menu[0]['models'][0]['label']), 'Albums')
i += 1 # as dict
self.assertEqual(menu[i]['url'], first_model_url)
self.assertEqual(len(menu[i]['models']), 2)
i += 1 # with label
self.assertEqual(menu[i]['label'], mc[i]['label'])
i += 1 # with icon
self.assertEqual(menu[i]['icon'], mc[i]['icon'])
i += 1 # with icon=''
self.assertEqual(menu[i]['icon'], 'icon-')
i += 1 # with icon is None
self.assertEqual(menu[i]['icon'], 'icon-')
i += 1 # icon from SUIT_ICONS
self.assertEqual(menu[i]['icon'], 'icon-auth-assert')
i += 1 # separator
self.assertEqual(menu[i]['separator'], True)
i += 1 # custom app
self.assertEqual(menu[i]['label'], mc[i]['label'])
self.assertEqual(menu[i]['url'], mc[i]['url'])
i += 1 # custom app, with perms as string
self.assertEqual(menu[i]['label'], mc[i]['label'])
i += 1 # custom app, with perms as tuple
self.assertEqual(menu[i]['label'], mc[i]['label'])
i += 1 # custom app, with perms as tuple
self.assertEqual(menu[i]['blank'], True)
i += 1 # custom app with wrong model
self.assertEqual(menu[i]['label'], mc[i]['label'])
self.assertEqual(menu[i]['models'], [])
self.assertEqual(menu[i]['url'], mc[i]['url'])
i += 1 # custom app with correct model
first_model_url = reverse('admin:tests_book_changelist')
self.assertEqual(menu[i]['label'], mc[i]['label'])
self.assertEqual(len(menu[i]['models']), 1)
self.assertEqual(menu[i]['url'], first_model_url)
i += 1 # custom app and model with named urls
expected_url = reverse('admin:index')
self.assertEqual(menu[i]['url'], expected_url)
self.assertEqual(menu[i]['models'][0]['url'], expected_url)
i += 1 # with url by model
books_url = reverse('admin:tests_book_changelist')
self.assertEqual(menu[i]['url'], books_url)
i += 1 # with empty models
self.assertEqual(menu[i]['models'], [])
self.assertEqual(menu[i]['url'],
reverse('admin:app_list', args=[mc[i]['app']]))
i += 1 # with ordered models
first_model_url = reverse('admin:tests_book_changelist')
self.assertEqual(menu[i]['models'][0]['url'], first_model_url)
self.assertEqual(len(menu[i]['models']), 2)
i += 1 # with prefixed models
first_model_url = reverse('admin:tests_book_changelist')
self.assertEqual(menu[i]['models'][0]['url'], first_model_url)
self.assertEqual(len(menu[i]['models']), 2)
i += 1 # with dict models
first_model_url = reverse('admin:tests_book_changelist')
self.assertEqual(menu[i]['models'][0]['url'], first_model_url)
self.assertEqual(len(menu[i]['models']), 4)
self.assertEqual(force_unicode(menu[i]['models'][2]['label']),
mc[i]['models'][2]['label'])
self.assertEqual(force_unicode(menu[i]['models'][2]['url']),
mc[i]['models'][2]['url'])
self.assertEqual(force_unicode(menu[i]['models'][3]['label']),
mc[i]['models'][3]['label'])
self.assertEqual(force_unicode(menu[i]['models'][3]['url']),
mc[i]['models'][3]['url'])
def test_menu_app_exclude(self):
settings.SUIT_CONFIG['MENU'] = ({'app': 'tests', 'models': ['book']},
{'app': 'auth'}, 'auth')
settings.SUIT_CONFIG['MENU_EXCLUDE'] = ('auth', 'tests.book')
self.get_response()
menu = self.make_menu_from_response()
self.assertEqual(len(menu), 1)
self.assertEqual(menu[0]['models'], [])
def test_menu_model_exclude_with_string_app(self):
settings.SUIT_CONFIG['MENU'] = ('tests',)
settings.SUIT_CONFIG['MENU_EXCLUDE'] = ('tests.book',)
self.get_response()
menu = self.make_menu_from_response()
self.assertEqual(len(menu), 1)
self.assertEqual(len(menu[0]['models']), 1)
def test_menu_custom_app(self):
label = 'custom'
icon = 'icon-custom'
settings.SUIT_CONFIG['MENU'] = ({'label': label, 'icon': icon},)
self.get_response()
menu = self.make_menu_from_response()
self.assertEqual(len(menu), 1)
self.assertEqual(menu[0]['label'], label)
self.assertEqual(menu[0]['icon'], icon)
def test_menu_custom_app_permissions(self):
settings.SUIT_CONFIG['MENU'] = ({'label': 'a',
'permissions': 'secure-perms'},
{'label': 'b',
'permissions': ('secure-perms',)},
{'label': 'c', 'models': [
{'label': 'model1',
'permissions': 'x'}]},)
self.client.logout()
self.login_user()
self.get_response()
menu = self.make_menu_from_response()
self.assertEqual(len(menu), 1)
self.assertEqual(len(menu[0]['models']), 0)
# Now do the same with super user
self.client.logout()
self.login_superuser()
self.get_response()
menu = self.make_menu_from_response()
self.assertEqual(len(menu), 3)
self.assertEqual(len(menu[2]['models']), 1)
def test_menu_app_marked_as_active(self):
self.get_response(reverse('admin:app_list', args=['tests']))
self.assertContains(self.response, '<li class="active">')
menu = self.make_menu_from_response()
self.assertTrue(menu[0]['is_active'])
def test_menu_app_marked_as_active_model_link(self):
settings.SUIT_CONFIG['MENU'] = ({'label': 'C7', 'url': 'auth.user'},)
self.get_response(reverse('admin:auth_user_add'))
self.assertContains(self.response, '<li class="active">')
menu = self.make_menu_from_response()
self.assertTrue(menu[0]['is_active'])
def test_menu_model_marked_as_active(self):
self.get_response(reverse('admin:tests_album_changelist'))
menu = self.make_menu_from_response()
self.assertTrue(menu[0]['is_active'])
self.assertTrue(menu[0]['models'][0]['is_active'])
def test_only_native_apps(self):
del settings.SUIT_CONFIG['MENU']
if 'MENU_ORDER' in settings.SUIT_CONFIG:
del settings.SUIT_CONFIG['MENU_ORDER']
icon = 'icon-auth-assert'
settings.SUIT_CONFIG['MENU_ICONS'] = {'auth': icon}
self.get_response()
menu = self.make_menu_from_response()
self.assertEqual(len(menu), 4)
self.assertEqual(menu[0]['icon'], icon)
def test_user_with_add_but_not_change(self):
settings.SUIT_CONFIG['MENU'] = ({'app': 'tests', 'models': ['book']},
{'app': 'auth'}, 'auth')
settings.SUIT_CONFIG['MENU_EXCLUDE'] = ()
self.client.logout()
self.login_user()
self.user.user_permissions.add(
Permission.objects.get(codename='add_book'))
self.user.save()
self.get_response()
menu = self.make_menu_from_response()
add_book_url = reverse('admin:tests_book_add')
self.assertEqual(menu[0]['url'], add_book_url)
self.assertEqual(menu[0]['models'][0]['url'], add_book_url)
#
# Tests for old menu config format
#
def test_old_menu_init(self):
# Template usage
self.client.logout()
self.login_superuser()
self.setUpOldConfig()
self.get_response()
self.assertTemplateUsed(self.response, 'suit/menu.html')
self.assertContains(self.response, 'left-nav')
self.assertContains(self.response, 'icon-test-against-keyword')
app_list = self.response.context_data['app_list']
pass
# print self.response.content
def test_old_menu_custom_app_and_models(self):
# Test custom app name, url and icon
self.setUpOldConfig()
self.get_response()
menu_order = settings.SUIT_CONFIG['MENU_ORDER']
self.assertContains(self.response, menu_order[1][0][0])
self.assertContains(self.response, menu_order[1][0][1])
self.assertContains(self.response, menu_order[1][0][2])
# Test custom app no models name, url and icon
self.assertContains(self.response, menu_order[2][0][0])
self.assertContains(self.response, menu_order[2][0][1])
self.assertContains(self.response, menu_order[2][0][2])
# Test custom app when perms defined but is allowed
self.assertContains(self.response, menu_order[2][0][0])
# Test cross-linked app
self.assertContains(self.response, 'tests/album')
def test_old_menu_when_open_first_child_is_true(self):
# Test custom app name, url and icon
self.setUpOldConfig()
settings.SUIT_CONFIG['MENU_OPEN_FIRST_CHILD'] = True
self.get_response()
menu_order = settings.SUIT_CONFIG['MENU_ORDER']
self.assertNotContains(self.response, menu_order[1][0][1])
def test_old_custom_menu_permissions(self):
self.client.logout()
self.login_user()
self.setUpOldConfig()
self.get_response()
# Test for menu at all for simple user
self.assertTemplateUsed(self.response, 'suit/menu.html')
self.assertContains(self.response, 'left-nav')
menu_order = settings.SUIT_CONFIG['MENU_ORDER']
# Test custom model when perms defined as string
self.assertNotContains(self.response, menu_order[1][1][0][0])
# Test custom model when perms defined as tuple
self.assertNotContains(self.response, menu_order[1][1][1][0])
# Test custom app when perms defined as string
self.assertNotContains(self.response, menu_order[2][0][0])
# Test custom app when perms defined as tuple
self.assertNotContains(self.response, menu_order[3][0][0])
def test_old_menu_marked_as_active(self):
self.setUpOldConfig()
self.get_response(reverse('admin:app_list', args=['tests']))
self.assertContains(self.response, '<li class="active">')
class SuitMenuAdminRootURLTestCase(SuitMenuTestCase):
urls = 'suit.tests.urls.admin_at_root'
class SuitMenuAdminI18NURLTestCase(SuitMenuTestCase):
urls = 'suit.tests.urls.admin_i18n'
class SuitMenuAdminCustomURLTestCase(SuitMenuTestCase):
urls = 'suit.tests.urls.admin_custom'
|
[ ## this file was manually modified by jt
{
'functor' : {
'description' : ["The function computes the remainder of dividing a0 by a1.",
"The return value is a0-n*a1, where n is the value a0/a1,",
"rounded to the nearest integer.",
"If the absolute value of a0-n*a1 is 0.5, n is chosen to be even.",
"\par",
"if one prefer: if a1 is zero returns a0, else return",
"a0-divround(a0,a1)*a1"],
'module' : 'boost',
'arity' : '2',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'typename boost::common_type<T>::type',
},
'simd_types' : ['real_'],
'type_defs' : [],
'types' : ['real_', 'unsigned_int_', 'signed_int_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'modified by jt the 01/12/2010',
'included' : ['#include <boost/simd/include/functions/idivround.hpp>'],
'notes' :
['The remainder() function computes the remainder of dividing x by y.',
'The return value is x-n*y, where n is the value x / y,',
'rounded to the nearest integer. If the absolute value of x-n*y is 0.5,',
'n is chosen to be even. The drem() function does precisely the same thing.'],
'stamp' : 'modified by jt the 13/12/2010',
},
'ranges' : {
'real_' : [['T(-10)', 'T(10)'], ['T(-10)', 'T(10)']],
'signed_int_' : [['-100', '100'], ['1', '100']],
'default' : [['0', '100'], ['1', '100']],
},
'specific_values' : {
'default' : {
},
'real_' : {
'boost::simd::Inf<T>()' : 'boost::simd::Nan<T>()',
'boost::simd::Minf<T>()' : 'boost::simd::Nan<T>()',
'boost::simd::Mone<T>()' : 'boost::simd::Zero<T>()',
'boost::simd::Nan<T>()' : 'boost::simd::Nan<T>()',
'boost::simd::One<T>()' : 'boost::simd::Zero<T>()',
'boost::simd::One<T>(),boost::simd::Zero<T>()' : 'boost::simd::Nan<T>()',
'boost::simd::Zero<T>(),boost::simd::Zero<T>()' : 'boost::simd::Nan<T>()',
},
'signed_int_' : {
'boost::simd::Mone<T>()' : 'boost::simd::Zero<T>()',
'boost::simd::One<T>()' : 'boost::simd::Zero<T>()',
'boost::simd::Zero<T>()' : 'boost::simd::Zero<T>()',
},
'unsigned_int_' : {
'boost::simd::One<T>()' : 'boost::simd::Zero<T>()',
'boost::simd::Zero<T>()' : 'boost::simd::Zero<T>()',
},
},
'verif_test' : {
'property_call' : {
'default' : ['boost::simd::remainder(a0,a1)'],
},
'property_value' : {
'default' : ['a0-boost::simd::idivround(a0, a1)*a1'],
},
'ulp_thresh' : {
'default' : ['0'],
},
},
},
'version' : '0.1',
},
]
|
import os
import shutil
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import Client, RequestFactory
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from accounts.forms import UserEditForm, ProfileEditForm, PhoneVerificationForm, TrustedDeviceForm
from accounts.views import verify_phone
from accounts.views import RegisterView, ProfileEditView, LoginView
from accounts.tokens import account_activation_token
from .utils import AccountsTestCase
User = get_user_model()
class UserTest(AccountsTestCase):
def setUp(self):
self.client = Client(enforce_csrf_checks=False)
self.rf = RequestFactory()
self.user = self.initialize_user()
def tearDown(self):
try:
shutil.rmtree((os.path.join(settings.MEDIA_ROOT, 'test')))
except FileNotFoundError:
pass
def test_login_view(self):
response = self.client.post(reverse('auth:login'),
data={'username': 'test_user', 'password': 'rrrr'}, follow=True)
self.assertEquals(response.context['user'].username, 'test_user')
self.assertRedirects(response, reverse('auth:new_device'))
self.client.logout()
response = self.client.post(reverse('auth:login'),
data={'username': 'test_user', 'password': 'rrrr'}, follow=True)
self.assertRedirects(response, reverse('auth:profile'))
self.assertTemplateUsed('accounts/profile.html')
def test_login_invalid(self):
response = self.client.post(reverse('auth:login'),
data={'username': 'test_user', 'password': 'rrr'}, follow=True)
self.assertTrue(response.context['user'].is_anonymous)
def test_request_email_verification(self):
response = self.client.get(reverse('auth:verification_request', args=[self.user.id]), follow=True)
self.assertRedirects(response, reverse('auth:login'))
def test_user_list_view(self):
self.client.force_login(self.user)
response = self.client.get(reverse('auth:user_detail', args=[self.user.username]))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/user/detail.html')
def test_validation_update_on_change_verified_attribute(self):
user = self.user
self.client.force_login(user)
self.assertTrue(user.profile.email_verified)
user.email = 'new@test.com'
user.phone = '+18709932214'
user.save()
self.assertFalse(user.profile.email_verified)
self.assertFalse(user.profile.phone_verified)
def test_avatar_uploads(self):
u = User.objects.get(username='test_user')
avatar = self.generate_photo_file()
self.client.force_login(u)
user_form = UserEditForm(instance=u, data=u.__dict__)
profile_form = ProfileEditForm({'photo': avatar})
profile_form.instance = u.profile
res = self.client.post(reverse('auth:edit'), data={**profile_form.data, **user_form.data},
format='multipart', follow=True)
u.refresh_from_db()
upload_url = os.path.join(os.path.join(os.path.abspath(settings.MEDIA_ROOT),
u.profile.photo.field.upload_to)) + 'test.png'
self.assertEquals(res.status_code, 200)
self.assertEquals(u.profile.photo.path, upload_url)
u.profile.photo.delete()
def test_cache_setup(self):
from django.core.cache import cache
cache.set('key', 'test_val')
val = cache.get('key')
self.assertEquals(val, 'test_val')
class RegistrationTest(AccountsTestCase):
def setUp(self):
self.client = Client(enforce_csrf_checks=False)
self.rf = RequestFactory()
def test_register_post_valid(self):
request = self.rf.post(reverse('auth:register'),
data={'username': 'test', 'email': 'test@email.test',
'phone': '+5571981265131', 'password': 'secret',
'password2': 'secret'})
self.process_requests(request)
response = RegisterView.as_view()(request)
self.assertEquals(response.status_code, 201)
def test_register_post_invalid(self):
request = self.rf.post(reverse('auth:register'),
data={'username': 'test', 'email': 'test@email.test',
'phone': '+5571981265131', 'password': 'secret',
'password2': 'secretttt'})
self.process_requests(request)
response = RegisterView.as_view()(request)
self.assertEquals(response.status_code, 200)
def test_reset_password_process(self):
from django.core import mail
import re
user = self.initialize_user()
response = self.client.post(reverse('auth:password_reset'), data={'email': user.email}, follow=True)
self.assertContains(response, 'We\'ve emailed you instructions for setting your password')
self.assertRedirects(response, reverse('auth:password_reset_done'))
url = re.findall('https?://[/-/_A-Za-z0-9/{4}].+', mail.outbox[0].body)
reset_response = self.client.get(url[1][:-2], follow=True)
self.assertContains(reset_response, 'New password')
reset_done_response = self.client.post(reset_response.context['request'].path,
data={'new_password1': 'secret_123444',
'new_password2': 'secret_123444'}, follow=True)
self.assertContains(reset_done_response, 'password has been set')
self.assertRedirects(response, reverse('auth:password_reset_done'))
class UserActivationTest(AccountsTestCase):
def setUp(self):
self.client = Client(enforce_csrf_checks=False)
self.rf = RequestFactory()
def test_activate_user_email_valid(self):
user = self.initialize_user(is_active=False, email_verified=False)
uid = urlsafe_base64_encode(force_bytes(user.pk))
token = account_activation_token.make_token(user)
self.client.get(reverse('auth:activate', args=[uid, token]))
user.refresh_from_db()
self.assertTrue(user.profile.email_verified)
self.assertTrue(user.is_active)
def test_activate_user_email_invalid(self):
user = self.initialize_user(is_active=False, email_verified=False)
uid = urlsafe_base64_encode(force_bytes('grabage'))
token = account_activation_token.make_token(user)
self.client.get(reverse('auth:activate', args=[uid, token]))
user.refresh_from_db()
self.assertFalse(user.profile.email_verified)
self.assertFalse(user.is_active)
class ProfileTest(AccountsTestCase):
def setUp(self):
self.client = Client(enforce_csrf_checks=False)
self.rf = RequestFactory()
# @override_settings(ROOT_URLCONF='core.public_urls')
def test_profile_get_superuser(self):
user = self.initialize_user(is_superuser=True)
self.client.force_login(user)
response = self.client.get(reverse('auth:profile'))
self.assertEquals(response.status_code, 200)
def test_profile_view(self):
user = self.initialize_user()
self.client.force_login(user)
self.response = self.client.get(reverse('auth:profile'))
self.assertTemplateUsed('accounts/profile.html')
self.assertEqual(self.response.status_code, 200)
self.assertContains(self.response, '<strong> Welcome</strong>, {}'.format(user.username),
html=True, status_code=200)
self.client.logout()
self.response = self.client.get(reverse('auth:profile'))
self.assertContains(self.response, '', html=False, status_code=302)
self.assertTemplateUsed('accounts/login.html')
def test_profile_edit_get(self):
user = self.initialize_user()
self.client.force_login(user)
response = self.client.get(reverse('auth:edit'), follow=True)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('accounts/edit.html')
self.assertIsInstance(response.context['user_form'], UserEditForm)
self.assertIsInstance(response.context['profile_form'], ProfileEditForm)
def test_profile_edit_post_invalid(self):
user = self.initialize_user()
self.client.force_login(user)
user_form = UserEditForm(instance=user, data={})
profile_form = ProfileEditForm()
profile_form.instance = user.profile
response = self.client.post(reverse('auth:edit'), data={**profile_form.data, **user_form.data})
self.assertEquals(response.status_code, 200)
self.assertContains(response, 'Error updating your profile')
class DeviceTest(AccountsTestCase):
def setUp(self):
self.client = Client(enforce_csrf_checks=False)
self.rf = RequestFactory()
def test_alert_user_device_changed(self):
self.initialize_user()
res = self.client.post(reverse('auth:login'),
data={'username': 'test_user', 'password': 'rrrr'}, follow=True)
device_form = res.context['form']
self.assertTrue(device_form.is_valid())
response = self.client.post(reverse('auth:new_device'), data=device_form.data, follow=True)
self.assertContains(response, 'Device has been added to your safe list')
def test_alert_user_device_changed_invalid(self):
user = self.initialize_user()
self.client.force_login(user)
device_form = TrustedDeviceForm()
response = self.client.post(reverse('auth:new_device'),
device_form.data, follow=True)
self.assertRedirects(response, reverse('auth:profile'))
|
import numpy
import ardustat_library_simple as ard
import time
import atexit
ardustat_id = 16
ardustat_socket = 7777
debug = False
#Below here no touchy
#connect to to ardustat and setup resistance table
a = ard.ardustat()
a.connect(ardustat_socket)
a.debug = debug
a.load_resistance_table(ardustat_id)
a.ocv()
time.sleep(.1)
a.groundvalue = 2.5
a.moveground()
time.sleep(.2)
a.ocv()
#create arrays + a function for logging data
times = []
potential = []
current = []
time_start = time.time()
cycle = 0
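# appender logs one reading as a CSV line (time, potential, current, cycle).
# Note: file_name is not defined in this script; it must be set before the
# appender call below is re-enabled (that call is currently commented out).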
def appender(reading):
print reading['work_v_ref'],reading['current']
tdiff = str(time.time()-time_start)
out = tdiff+","+str(reading['work_v_ref'])+","+str(reading['current'])+","+str(cycle)+"\n"
open(file_name,"a").write(out)
#Allows cell to settle and picks starting potential based on OCV
while True:
time.sleep(1)
read = a.parsedread()
#appender(read)
print read
|
from artemis.general.mymath import softmax
from artemis.ml.predictors.i_predictor import IPredictor
import numpy as np
class LogisticRegressor(IPredictor):
def __init__(self, w, learning_rate=0.1):
self.w = w
self.learning_rate = learning_rate
def predict(self, x):
"""
:param x: An (n_samples, n_inputs) input
:return: An (n_samples, n_classes) class probability
"""
return softmax(x.dot(self.w), axis=1)
def train(self, x, y):
"""
:param x: An (n_samples, n_inputs) input
:param y: An (n_samples, ) array of integer class labels
:return:
"""
probs = self.predict(x)
d_l_d_u = probs - onehot(y, n_classes=self.w.shape[1])
self.w -= self.learning_rate * x.T.dot(d_l_d_u)
@staticmethod
def from_init(n_in, n_out, **kwargs):
return LogisticRegressor(np.zeros((n_in, n_out)), **kwargs)
def onehot(labels, n_classes):
"""
Turn a vector of labels into a onehot-encoding.
:param labels: An (n_samples, ) array of integer class labels
:param n_classes: Total number of classes
:return: An (n_samples, n_classes) array with a 1 in each row at the label's column
"""
values = np.zeros((len(labels), n_classes))
values[np.arange(len(values)), labels] = 1
return values
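# Minimal usage sketch (illustrative only; the shapes and data below are
# assumptions for the example, not part of the original module).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.randn(100, 5)                      # 100 samples, 5 input features
    y = rng.randint(0, 3, size=100)            # integer labels for 3 classes
    model = LogisticRegressor.from_init(n_in=5, n_out=3)
    for _ in range(20):                        # a few gradient steps
        model.train(x, y)
    print(model.predict(x[:3]))                # (3, 3) array of class probabilities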
|
from scout.server.blueprints.variants.controllers import variant_verification, variants_export_header, variant_export_lines
def url_for(param, institute_id, case_name, variant_id):
pass
def test_sanger_mail_sent(mock_mail, real_variant_database, institute_obj, case_obj, user_obj,
mock_sender, mock_comment):
adapter = real_variant_database
## GIVEN we have a variant that we want to order Sanger for
variant_obj = adapter.variant_collection.find_one()
variant_obj['hgnc_symbols'] = ''
variant_obj['panels'] = ''
## WHEN calling variant_verification method with order==True
variant_verification(adapter, mock_mail, institute_obj, case_obj, user_obj, variant_obj, mock_sender, 'complete_variant_url', True, mock_comment, url_builder=url_for)
## THEN the supplied mail objects send method should have been called
assert mock_mail._send_was_called
## THEN the supplied mail objects send method should have received a message object
assert mock_mail._message
def test_cancel_sanger_mail_sent(mock_mail, real_variant_database, institute_obj, case_obj, user_obj,
mock_sender, mock_comment):
adapter = real_variant_database
## GIVEN we have a variant that we want to order Sanger for
variant_obj = adapter.variant_collection.find_one()
variant_obj['hgnc_symbols'] = ''
variant_obj['panels'] = ''
## WHEN calling variant_verification method with order==False
variant_verification(adapter, mock_mail, institute_obj, case_obj, user_obj, variant_obj, mock_sender, 'complete_variant_url', False, mock_comment, url_builder=url_for)
## THEN the supplied mail objects send method should have been called
assert mock_mail._send_was_called
## THEN the supplied mail objects send method should have received a message object
assert mock_mail._message
def test_variant_csv_export(real_variant_database, case_obj):
adapter = real_variant_database
case_id = case_obj['_id']
# Given a database with variants from a case
snv_variants = adapter.variant_collection.find({'case_id' : case_id, "category" : "snv"})
# Given 5 variants to be exported
variants_to_export = []
for variant in snv_variants.limit(5):
assert type(variant) is dict
variants_to_export.append(variant)
n_vars = len(variants_to_export)
assert n_vars == 5
# Collect export header from variants controller
export_header = variants_export_header(case_obj)
# Assert that exported document has n fields:
# n = (EXPORT_HEADER items in variants_export.py) + (3 * number of individuals analysed for the case)
assert len(export_header) == 8 + 3 * len(case_obj['individuals'])
# Given the lines of the document to be exported
export_lines = variant_export_lines(adapter, case_obj, variants_to_export)
# Assert that all five variants are going to be exported to CSV
assert len(export_lines) == 5
# Assert that all of 5 variants contain the fields specified by the document header
for export_line in export_lines:
export_cols = export_line.split(',')
assert len(export_cols) == len(export_header)
|
#!/usr/bin/env python
from neo4j.v1 import GraphDatabase
from datetime import datetime
import argparse
import time
parser = argparse.ArgumentParser()
parser.add_argument("--server", help="Server to connect", default="localhost")
parser.add_argument("--protocol", help="Protocol to use", default="bolt")
parser.add_argument("--num_recs", help="Number of records to write", type=int, default=1000)
parser.add_argument("--commit_every", help="Number of records after which to commit", type=int, default=100)
args = parser.parse_args()
driver = GraphDatabase.driver(args.protocol+"://"+args.server+":7687")
def create_person(driver, name):
with driver.session() as session:
tx = session.begin_transaction()
node_id = create_person_node(tx)
set_person_name(tx, node_id, name)
tx.commit()
def create_person_node(tx):
return tx.run("CREATE (a:Person)"
"RETURN id(a)").single().value()
def set_person_name(tx, node_id, name):
tx.run("MATCH (a:Person) WHERE id(a) = $id "
"SET a.name = $name", id=node_id, name=name)
# Write test
startWrite = time.time()
with driver.session() as session:
tx = session.begin_transaction()
uncommited = True
for id in range(1, args.num_recs+1):
name = "User" + str(id)
node_id = create_person_node(tx)
# set_person_name(tx, node_id, name)
uncommited = True
if id%args.commit_every == 0:
print("Committing [{recs}]".format(recs=id))
tx.commit()
uncommited = False
if id < args.num_recs:
tx = session.begin_transaction()
uncommited = True
if uncommited:
print('Committing [' + str(id) + ']')
tx.commit()
elapsedWrite = time.time() - startWrite
# Read test
num_read = 0
def read_all_nodes(tx):
global num_read
for record in tx.run("MATCH (n) RETURN n"):
# print record['n'].id
num_read = num_read + 1
startRead = time.time()
with driver.session() as session:
session.read_transaction(read_all_nodes)
elapsedRead = time.time() - startRead
# Results
print
print("ARGUMENTS:")
print(" server = {server}".format(server=args.server))
print(" protocol = {protocol}".format(protocol=args.protocol))
print(" num_recs = {num_recs}".format(num_recs=args.num_recs))
print(" commit_every = {commit_every}".format(commit_every=args.commit_every))
print
print("WRITE [{num_recs} rows]:\nElapsed: {elapsed}, RPS: {rps}"
.format(num_recs=args.num_recs,commit_every=args.commit_every,elapsed=elapsedWrite,
rps=round(args.num_recs/elapsedWrite,1)))
print
print("READ [{num_read} rows]:\nElapsed: {elapsed}, RPS: {rps}"
.format(num_read=num_read,elapsed=elapsedRead,
rps=round(num_read/float(elapsedRead),1)))
print
|
print("address")
|
from django.urls import path
from p2coffee.views.kindly import KindlyOutgoingView
from p2coffee.views.sensors import CreateSensorEventView
from p2coffee.views.slack import SlackCommandView, SlackInteractionsView, SlackEventsView
from strawberry.django.views import AsyncGraphQLView
from p2coffee.schema import schema
urlpatterns = [
path("event/log/", CreateSensorEventView.as_view(), name="create-log-event"),
path("kindly/", KindlyOutgoingView.as_view(), name="kindly-outgoing"),
path("slack/commands/", SlackCommandView.as_view(), name="slack-commands"),
path("slack/interactions/", SlackInteractionsView.as_view(), name="slack-interactions"),
path("slack/events/", SlackEventsView.as_view(), name="slack-events"),
path("graphql", AsyncGraphQLView.as_view(schema=schema)),
]
|
import copy
import datetime
import json
import logging
import re
import time
from collections import defaultdict
import pyjq
from crypto.keychain_service.keychain import ttypes as keychain
from fbnet.command_runner.CommandRunner import ttypes as fcr_ttypes
from libfb.py.thrift_clients.fbnet_command_runner_thrift_client import (
FBNetCommandRunnerThriftClient as Legacy_FcrClient,
)
from libfb.py.thrift_clients.keychain_thrift_client import KeychainClient
from scripts.neteng.optical.bow.ssh_interface import (
ssh_read,
ssh_write,
)
# define the class of line devices
class Line_Device(object):
def __init__(
self,
amplifiers,
wss,
wss_onoff,
proprietary_controller_info,
transponder_box,
transponder_fre,
fast_slow_version,
):
self.amplifiers = amplifiers
self.wss = wss
self.wss_onoff = wss_onoff
self.proprietary_controller_info = proprietary_controller_info
self.transponder_box = transponder_box
self.transponder_fre = transponder_fre
self.fast_slow_version = fast_slow_version
    # system password for authentication
def get_secret(self, name, group):
req = keychain.GetSecretRequest(
name=name,
group=group,
)
try:
return KeychainClient().getSecret(req)
except keychain.KeychainServiceException as ex:
print("Error retrieving secret:" + ex)
return False
# activate user on device
def ssh_act_user(self, ssh):
print("Activating ssh user\n")
user = self.get_secret("TACACS_USERNAME", "NETENG_AUTOMATION").secret
pw = self.get_secret("TACACS_PASSWORD", "NETENG_AUTOMATION").secret
user_add_cmd = "XXXXXXXXXXX" + user + pw ## anonymized southbound command
ssh_write(ssh, user_add_cmd)
results = ssh_read(ssh, identify_sign="")
print("ssh_act_user results", results, "\n")
return user
def ssh_cancel_user(self, user, ssh):
print("Canceling ssh user\n")
user_cancel_cmd = "XXXXXXXXXXX" + user ## anonymized southbound command
ssh_write(ssh, user_cancel_cmd)
results = ssh_read(ssh, identify_sign="")
print("ssh_cancel_user resutls", results, "\n")
def tl1_ssh_runner(self, ssh, cmd, identify_sign):
ssh_write(ssh, cmd)
results = ssh_read(ssh, identify_sign=identify_sign)
micro_timestamp = datetime.datetime.now()
return results, micro_timestamp
def tl1_device_bulkrunner(self, device_commands, read_or_write):
try:
user = self.get_secret("TACACS_USERNAME", "NETENG_AUTOMATION").secret
pw = self.get_secret("TACACS_PASSWORD", "NETENG_AUTOMATION").secret
if read_or_write == 1:
persistent_sign = "auto"
else:
persistent_sign = "auto"
fcr_commands = {
fcr_ttypes.Device(
hostname=device,
username=user,
password=pw,
session_type=fcr_ttypes.SessionType.TL1,
session_data=fcr_ttypes.SessionData(
extra_options={
"format": "json",
"use_persistent_connection": persistent_sign,
}
),
): device_commands[device]
for device in device_commands
}
with Legacy_FcrClient() as client:
res = client.bulk_run(fcr_commands)
timestamp = time.localtime()
return res, timestamp
except Exception as ex:
print("User exception: {}".format(ex))
timestamp = time.localtime()
return 1, timestamp
# get the spectrum peaks of the channels on the testbed, vacant wavelength is 0
def read_spectrum(self, wss, ssh_list, ssh_flag):
spectrum_spectrum = []
noise_spectrum = []
central_freq = []
corrected_central_freq = []
fine_grain_spectrum = []
fine_grain_frequency = []
bins = []
roadm_id = wss["roadm_id"]
chassis_id = wss["chassis_id"]
card_id = wss["card_id"]
port_id = wss["port_id"]
max_channel_read_num = wss["channel_num"]
grid_space = wss["grid"]
        start_freq = wss["start_freq"]
if ssh_flag == 1: # use ssh one-time authentication
print("\033[1;31m\n** Use SSH direct connection for read_spectrum\033[0;0m")
command = "XXXXXXXXXXX" ## anonymized southbound command
results, micro_timestamp = self.tl1_ssh_runner(
ssh_list[roadm_id], command, identify_sign="XXXXXXXXXXX"
)
print("tl1_ssh_runner", results, "\n")
channel_id = []
frequency = []
power = []
for s in results:
if s.startswith('"spectrum'):
channel_id.append(str(s[1:-1].split(",")[0]))
frequency.append(int(s[1:-1].split(",")[0].split("-")[-1]))
power.append(float(s[1:-1].split(",")[2]))
else: # use FCR
print("\033[1;31m\n** Use FCR client connection for read_spectrum\033[0;0m")
command = "XXXXXXXXXXX" ## anonymized southbound command
commands = defaultdict(list)
commands[roadm_id].append(command)
rc = 1
count = 0
while rc == 1 and count < 10: # rc = 1 means exception
rc, timestamp = self.tl1_device_bulkrunner(commands, read_or_write=1)
count += 1
if rc == 1:
print("the FCR fails to fetch data\n")
channel_id = []
frequency = []
power = []
else:
j_data = json.loads(rc[roadm_id][0].output)
vars = {}
jqscript = ".[][].fields | {keys: .[0][0], value: . [1][1]}"
results = pyjq.all(jqscript, j_data, vars=vars)
channel_id = list(map(lambda x: str(x["keys"]), results))
frequency = list(map(lambda x: int(x["keys"].split("-")[-1]), results))
power = list(map(lambda x: float(x["value"]), results))
spectrum_reading = {channel_id[i]: power[i] for i in range(len(frequency))}
# print(spectrum_reading)
loc_match = "spectrum-" + chassis_id + "-" + card_id + "-" + port_id
# print(loc_match)
step = 0
channel_num = 0
current_fre_bin = []
current_power_bin = []
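        # Bin the per-sample readings into channels of width `grid_space`: for each
        # complete bin, take the peak power as the channel power, estimate the noise
        # from the two samples at each edge of the bin, and skip bins whose peak is
        # below -15 (treated as a vacant wavelength).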
for item in range(len(spectrum_reading)):
if channel_id[item].startswith(loc_match):
fine_grain_spectrum.append(power[item])
fine_grain_frequency.append(channel_id[item])
if frequency[item] >= start_freq + grid_space * channel_num:
# print(frequency[item])
current_fre_bin.append(frequency[item])
current_power_bin.append(power[item])
step = step + 1
if step == grid_space:
peak_power = max(current_power_bin)
loc = current_power_bin.index(peak_power)
noise = float(
(
current_power_bin[0]
+ current_power_bin[1]
+ current_power_bin[-1]
+ current_power_bin[-2]
)
/ 4
)
if peak_power < -15:
pass
else:
spectrum_spectrum.append(peak_power)
central_freq.append(current_fre_bin[loc])
noise_spectrum.append(noise)
current_fre_bin_copy = copy.deepcopy(current_fre_bin)
bins.append(current_fre_bin_copy)
current_fre_bin.clear()
current_power_bin.clear()
channel_num = channel_num + 1
step = 0
if channel_num == max_channel_read_num:
break
        ## snap each detected central frequency to the closest transponder frequency
for c in central_freq:
for x in self.transponder_fre:
if (
abs(c - self.transponder_fre[x]) < 75000
): # frequency slot size is 75 GHz
corrected_central_freq.append(self.transponder_fre[x])
break
return (
spectrum_spectrum,
noise_spectrum,
corrected_central_freq,
fine_grain_spectrum,
fine_grain_frequency,
bins,
)
def write_amp(self, parameters, ssh_list, ssh_flag):
write_amp_command = "XXXXXXXXXXX" ## anonymized southbound command
if ssh_flag:
print("\033[1;31m\n** Use SSH direct connection for write_amp\033[0;0m")
for amp in ssh_list:
print("write to", amp, "via ssh", ssh_list[amp])
gain_command = (
write_amp_command.format(amplifier_name=self.amplifiers[amp])
+ str(parameters[amp])
+ ";"
)
rc, micro_timestamp = self.tl1_ssh_runner(
ssh_list[amp], gain_command, identify_sign=""
)
print("tl1_ssh_runner", rc, "\n")
else:
print("\033[1;31m\n** Use FCR for write_amp\033[0;0m")
commands = defaultdict(list)
for amp in parameters:
gain_command = (
write_amp_command.format(amplifier_name=self.amplifiers[amp])
+ str(parameters[amp])
+ ";"
)
commands[amp].append(gain_command)
rc = 1
count = 0
while rc == 1 and count < 10: # rc = 1 means exception
rc, timestamp = self.tl1_device_bulkrunner(commands, read_or_write=0)
count += 1
def read_amp(self, amplifiers):
query_amplifier_cmd = "XXXXXXXXXXX" ## anonymized southbound command
commands = defaultdict(list)
for amp in amplifiers:
commands[amp].append(query_amplifier_cmd.format(amp_name=amplifiers[amp]))
print(commands)
results = 1
count = 0
while results == 1 and count < 10: # rc = 1 means exception
results, timestamp = self.tl1_device_bulkrunner(commands, read_or_write=1)
count += 1
gain_results = {}
if results == 1:
print("the FCR fails to fetch data")
else:
for device, command_results in results.items():
for command_result in command_results:
if command_result.status != "success":
logging.error(f"{command_result.command} failed on {device}")
continue
gain = self.process_AMP(command_result)
gain_results[device] = gain
return gain_results
def process_AMP(self, rc):
j_data = json.loads(rc.output)
vars = {}
jqscript = ".[][].fields | {keys: .[0], value: .[2]}"
results = pyjq.all(jqscript, j_data, vars=vars)
gain = 0
for i in range(len(results)):
if results[i]["value"]["AMPMODE"] == "GAINCLAMP":
gain = results[i]["value"]["GAIN"]
return gain
# query transponder
def bulkrun_cli_command(device_commands, username, password):
fcr_commands = {
fcr_ttypes.Device(
hostname=device,
username=username,
password=password,
session_type=fcr_ttypes.SessionType.TL1,
session_data=fcr_ttypes.SessionData(
extra_options={
"format": "json",
"use_persistent_connection": "auto",
}
),
): device_commands[device]
for device in device_commands
}
with Legacy_FcrClient() as client:
res = client.bulk_run(fcr_commands)
return res
# bulk query
def bulk_query_cli(device_commands):
try:
username = self.get_secret("TACACS_USERNAME", "NETENG_AUTOMATION").secret
        password = self.get_secret("TACACS_PASSWORD", "NETENG_AUTOMATION").secret
response = bulkrun_cli_command(
device_commands=device_commands,
username=username,
password=password,
)
timestamp = time.localtime()
return response, timestamp
except Exception as ex:
print("exception: {}".format(ex))
# clean the transponder collection bin
def clean_transponder(transponder_box):
ts_command_set = [
"XXXXXXXXXXX" ## anonymized southbound command
]
commands = defaultdict(list)
## construct commands for bulk run
for ts_box in transponder_box:
for transponder in transponder_box[ts_box]:
clear_command = ts_command_set[0] + transponder
commands[ts_box].append(clear_command)
print("commands", commands)
rc, timestamp = bulk_query_cli(commands)
# query transponders adjacency
def get_transponder_adj(transponder_box, metric_bit):
ts_command_set = [
"XXXXXXXXXXX" ## anonymized southbound command
]
commands = defaultdict(list)
## construct commands for bulk run
for ts_box in transponder_box:
for transponder in transponder_box[ts_box]:
Modulation = "N/A"
Frequency = 0
query_command = ts_command_set[0] + transponder ## query performance metric
commands[ts_box].append(query_command)
print("get_transponder_adj commands", commands)
rc, timestamp = bulk_query_cli(commands)
transponder_mod = {}
transponder_fre = {}
for tx_box in transponder_box:
for tx in range(len(commands[tx_box])):
if metric_bit[0]:
regex_mod = r"Modulation Scheme" ## anonymized regex
matches_mod = re.finditer(
regex_mod, rc[tx_box][tx].output, re.MULTILINE
)
for _, match in enumerate(matches_mod, start=1):
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
Modulation = str(match.group(groupNum))
if metric_bit[1]:
regex_fre = r"Frequency \(GHz\)" ## anonymized regex
matches_fre = re.finditer(
regex_fre, rc[tx_box][tx].output, re.MULTILINE
)
for _, match in enumerate(matches_fre, start=1):
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
if match.group(groupNum) == "N/A":
Frequency = 999
else:
Frequency = int(float(match.group(groupNum)) * 1000)
transponder_mod[tx_box + "-" + transponder_box[tx_box][tx]] = Modulation
transponder_fre[tx_box + "-" + transponder_box[tx_box][tx]] = Frequency
return transponder_mod, transponder_fre
## query transponders performance metric
def get_transponder_pm(line, ssh_list, metric_bit, ssh_flag):
clean_transponder(line.transponder_box)
time.sleep(5)
ts_command_set = [
"XXXXXXXXXXX" ## anonymized southbound command
]
if ssh_flag == 1: # use ssh one-time authentication
print(
"\033[1;31m\n** Use SSH direct connection for get_transponder_pm\033[0;0m"
)
for ts_box in line.transponder_box:
for transponder in line.transponder_box[ts_box]:
BER = None
BERMax = None
Qfactor = None
QfactorMin = None
ESNR = None
ESNRmin = None
query_command = (
ts_command_set[0]
+ transponder
+ " bin-type untimed" # untimed bin
) ## query performance metric
rc, micro_timestamp = line.tl1_ssh_runner(
ts_box, query_command, identify_sign=""
)
print("tl1_ssh_runner", rc, "\n")
else: # use FCR
print("\033[1;31m\n** Use FCR for get_transponder_pm\033[0;0m")
commands = defaultdict(list)
## construct commands for bulk run
for ts_box in line.transponder_box:
for transponder in line.transponder_box[ts_box]:
BER = None
BERMax = None
Qfactor = None
QfactorMin = None
ESNR = None
ESNRmin = None
query_command = (
ts_command_set[0]
+ transponder
+ " bin-type untimed" # untimed bin
) ## query performance metric
commands[ts_box].append(query_command)
print("commands", commands)
rc, timestamp = bulk_query_cli(commands)
transponder_Q_set = {}
transponder_Qmin_set = {}
transponder_ber_set = {}
transponder_bermax_set = {}
transponder_esnr_set = {}
transponder_esnrmin_set = {}
for tx_box in line.transponder_box:
for tx in range(len(commands[tx_box])):
BER = None
BERMax = None
Qfactor = None
QfactorMin = None
ESNR = None
ESNRmin = None
if metric_bit[0]:
regex_q = r"Q-factor" ## anonymized regex
matches_q = re.finditer(regex_q, rc[tx_box][tx].output, re.MULTILINE)
for _, match in enumerate(matches_q, start=1):
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
if match.group(groupNum) == "N/A":
Qfactor = None
else:
Qfactor = float(match.group(groupNum))
if metric_bit[1]:
regex_qmin = r"Q-factor Min" ## anonymized regex
matches_qmin = re.finditer(
regex_qmin, rc[tx_box][tx].output, re.MULTILINE
)
for _, match in enumerate(matches_qmin, start=1):
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
if match.group(groupNum) == "N/A":
QfactorMin = None
else:
QfactorMin = float(match.group(groupNum))
if metric_bit[2]:
regex_ber = r"Pre-FEC BER" ## anonymized regex
matches_ber = re.finditer(
regex_ber, rc[tx_box][tx].output, re.MULTILINE
)
for _, match in enumerate(matches_ber, start=1):
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
if match.group(groupNum) == "N/A":
BER = None
else:
BER = float(match.group(groupNum))
if metric_bit[3]:
regex_bermax = r"Pre-FEC BER Max" ## anonymized regex
matches_bermax = re.finditer(
regex_bermax, rc[tx_box][tx].output, re.MULTILINE
)
for _, match in enumerate(matches_bermax, start=1):
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
if match.group(groupNum) == "N/A":
BERMax = None
else:
BERMax = float(match.group(groupNum))
if metric_bit[4]:
regex_ESNR = r"ESNR Avg" ## anonymized regex
matches_bermax = re.finditer(
regex_ESNR, rc[tx_box][tx].output, re.MULTILINE
)
for _, match in enumerate(matches_bermax, start=1):
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
if match.group(groupNum) == "N/A":
ESNR = None
else:
ESNR = float(match.group(groupNum))
if metric_bit[5]:
regex_ESNRmin = r"ESNR Min" ## anonymized regex
matches_bermax = re.finditer(
regex_ESNRmin, rc[tx_box][tx].output, re.MULTILINE
)
for _, match in enumerate(matches_bermax, start=1):
for groupNum in range(0, len(match.groups())):
groupNum = groupNum + 1
if match.group(groupNum) == "N/A":
ESNRmin = None
else:
ESNRmin = float(match.group(groupNum))
frequency = line.transponder_fre[
tx_box + "-" + line.transponder_box[tx_box][tx]
]
transponder_Q_set[frequency] = Qfactor
transponder_Qmin_set[frequency] = QfactorMin
transponder_ber_set[frequency] = BER
transponder_bermax_set[frequency] = BERMax
transponder_esnr_set[frequency] = ESNR
transponder_esnrmin_set[frequency] = ESNRmin
return (
transponder_Q_set,
transponder_Qmin_set,
transponder_ber_set,
transponder_bermax_set,
transponder_esnr_set,
transponder_esnrmin_set,
)
|
# -*- coding: utf-8 -*-
# Module: MockWindowException
# Author: asciidisco
# Created on: 24.07.2017
# License: MIT https://goo.gl/xF5sC4
"""Mock for Kodi Window instance"""
try:
import cPickle as pickle
except ImportError:
import pickle
class MockWindowException(object):
"""Mock for Kodi Window instance"""
def __init__(self):
"""ADD ME"""
self.storage = {}
def getProperty(self, name):
"""ADD ME"""
return ''
def setProperty(self, name, value):
"""ADD ME"""
self.storage[name] = value
|
import pandas as pd
from configparser import ConfigParser
def extract_training_data(data_path: str) -> pd.DataFrame:
"""
Extract only those matches that occurred during
the 2020 season or earlier for the training phase
"""
config = ConfigParser()
config.read("config.ini")
data = pd.read_csv(data_path)
return data[data[config.get("features", "season")] < 2021]
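# Hypothetical usage (the CSV path is illustrative; config.ini is assumed to have a
# [features] section whose "season" key names the season column in the CSV):
#
#   train_df = extract_training_data("data/matches.csv")
#   print(len(train_df), "matches from the 2020 season or earlier")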
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=W0613
import logging
import os
from typing import Dict, List
from pcluster.api.controllers.common import (
check_cluster_version,
configure_aws_region,
configure_aws_region_from_config,
convert_errors,
get_validator_suppressors,
http_success_status_code,
validate_cluster,
)
from pcluster.api.converters import (
cloud_formation_status_to_cluster_status,
validation_results_to_config_validation_errors,
)
from pcluster.api.errors import (
BadRequestException,
CreateClusterBadRequestException,
DryrunOperationException,
NotFoundException,
UpdateClusterBadRequestException,
)
from pcluster.api.models import (
Change,
CloudFormationStackStatus,
ClusterConfigurationStructure,
ClusterInfoSummary,
CreateClusterBadRequestExceptionResponseContent,
CreateClusterRequestContent,
CreateClusterResponseContent,
DeleteClusterResponseContent,
DescribeClusterResponseContent,
EC2Instance,
InstanceState,
ListClustersResponseContent,
Tag,
UpdateClusterBadRequestExceptionResponseContent,
UpdateClusterRequestContent,
UpdateClusterResponseContent,
UpdateError,
ValidationLevel,
)
from pcluster.api.util import assert_node_executable
from pcluster.aws.aws_api import AWSApi
from pcluster.aws.common import StackNotFoundError
from pcluster.config.update_policy import UpdatePolicy
from pcluster.models.cluster import (
Cluster,
ClusterActionError,
ClusterUpdateError,
ConfigValidationError,
NotFoundClusterActionError,
)
from pcluster.models.cluster_resources import ClusterStack
from pcluster.utils import get_installed_version, to_utc_datetime
from pcluster.validators.common import FailureLevel
LOGGER = logging.getLogger(__name__)
@convert_errors()
@http_success_status_code(202)
def create_cluster(
create_cluster_request_content: Dict,
region: str = None,
suppress_validators: List[str] = None,
validation_failure_level: str = None,
dryrun: bool = None,
rollback_on_failure: bool = None,
) -> CreateClusterResponseContent:
"""
Create a managed cluster in a given region.
:param create_cluster_request_content:
:type create_cluster_request_content: dict | bytes
:param region: AWS Region that the operation corresponds to.
:type region: str
:param suppress_validators: Identifies one or more config validators to suppress.
Format: (ALL|type:[A-Za-z0-9]+)
:param validation_failure_level: Min validation level that will cause the cluster creation to fail.
(Defaults to 'ERROR'.)
:param dryrun: Only perform request validation without creating any resource. May be used to validate the cluster
configuration. (Defaults to 'false'.)
:type dryrun: bool
:param rollback_on_failure: When set it automatically initiates a cluster stack rollback on failures.
(Defaults to 'true'.)
:type rollback_on_failure: bool
"""
assert_node_executable()
# Set defaults
configure_aws_region_from_config(region, create_cluster_request_content["clusterConfiguration"])
rollback_on_failure = rollback_on_failure in {True, None}
validation_failure_level = validation_failure_level or ValidationLevel.ERROR
dryrun = dryrun is True
create_cluster_request_content = CreateClusterRequestContent.from_dict(create_cluster_request_content)
cluster_config = create_cluster_request_content.cluster_configuration
if not cluster_config:
LOGGER.error("Failed: configuration is required and cannot be empty")
raise BadRequestException("configuration is required and cannot be empty")
try:
cluster = Cluster(create_cluster_request_content.cluster_name, cluster_config)
if dryrun:
ignored_validation_failures = cluster.validate_create_request(
get_validator_suppressors(suppress_validators), FailureLevel[validation_failure_level]
)
validation_messages = validation_results_to_config_validation_errors(ignored_validation_failures)
raise DryrunOperationException(validation_messages=validation_messages or None)
stack_id, ignored_validation_failures = cluster.create(
disable_rollback=not rollback_on_failure,
validator_suppressors=get_validator_suppressors(suppress_validators),
validation_failure_level=FailureLevel[validation_failure_level],
)
return CreateClusterResponseContent(
ClusterInfoSummary(
cluster_name=create_cluster_request_content.cluster_name,
cloudformation_stack_status=CloudFormationStackStatus.CREATE_IN_PROGRESS,
cloudformation_stack_arn=stack_id,
region=os.environ.get("AWS_DEFAULT_REGION"),
version=get_installed_version(),
cluster_status=cloud_formation_status_to_cluster_status(CloudFormationStackStatus.CREATE_IN_PROGRESS),
),
validation_messages=validation_results_to_config_validation_errors(ignored_validation_failures) or None,
)
except ConfigValidationError as e:
config_validation_messages = validation_results_to_config_validation_errors(e.validation_failures) or None
raise CreateClusterBadRequestException(
CreateClusterBadRequestExceptionResponseContent(
configuration_validation_errors=config_validation_messages, message=str(e)
)
)
@configure_aws_region()
@convert_errors()
@http_success_status_code(202)
def delete_cluster(cluster_name, region=None):
"""
Initiate the deletion of a cluster.
:param cluster_name: Name of the cluster
:type cluster_name: str
:param region: AWS Region that the operation corresponds to.
:type region: str
:rtype: DeleteClusterResponseContent
"""
try:
cluster = Cluster(cluster_name)
if not check_cluster_version(cluster):
raise BadRequestException(
f"Cluster '{cluster_name}' belongs to an incompatible ParallelCluster major version."
)
if not cluster.status == CloudFormationStackStatus.DELETE_IN_PROGRESS:
# TODO: remove keep_logs logic from delete
cluster.delete(keep_logs=False)
return DeleteClusterResponseContent(
cluster=ClusterInfoSummary(
cluster_name=cluster_name,
cloudformation_stack_status=CloudFormationStackStatus.DELETE_IN_PROGRESS,
cloudformation_stack_arn=cluster.stack.id,
region=os.environ.get("AWS_DEFAULT_REGION"),
version=cluster.stack.version,
cluster_status=cloud_formation_status_to_cluster_status(CloudFormationStackStatus.DELETE_IN_PROGRESS),
)
)
except StackNotFoundError:
raise NotFoundException(
f"Cluster '{cluster_name}' does not exist or belongs to an incompatible ParallelCluster major version. "
"In case you have running instances belonging to a deleted cluster please use the DeleteClusterInstances "
"API."
)
@configure_aws_region()
@convert_errors()
def describe_cluster(cluster_name, region=None):
"""
Get detailed information about an existing cluster.
:param cluster_name: Name of the cluster
:type cluster_name: str
:param region: AWS Region that the operation corresponds to.
:type region: str
:rtype: DescribeClusterResponseContent
"""
cluster = Cluster(cluster_name)
validate_cluster(cluster)
cfn_stack = cluster.stack
fleet_status = cluster.compute_fleet_status
config_url = "NOT_AVAILABLE"
try:
config_url = cluster.config_presigned_url
except ClusterActionError as e:
# Do not fail request when S3 bucket is not available
LOGGER.error(e)
response = DescribeClusterResponseContent(
creation_time=to_utc_datetime(cfn_stack.creation_time),
version=cfn_stack.version,
cluster_configuration=ClusterConfigurationStructure(url=config_url),
tags=[Tag(value=tag.get("Value"), key=tag.get("Key")) for tag in cfn_stack.tags],
cloud_formation_stack_status=cfn_stack.status,
cluster_name=cluster_name,
compute_fleet_status=fleet_status.value,
cloudformation_stack_arn=cfn_stack.id,
last_updated_time=to_utc_datetime(cfn_stack.last_updated_time),
region=os.environ.get("AWS_DEFAULT_REGION"),
cluster_status=cloud_formation_status_to_cluster_status(cfn_stack.status),
)
try:
head_node = cluster.head_node_instance
response.head_node = EC2Instance(
instance_id=head_node.id,
launch_time=to_utc_datetime(head_node.launch_time),
public_ip_address=head_node.public_ip,
instance_type=head_node.instance_type,
state=InstanceState.from_dict(head_node.state),
private_ip_address=head_node.private_ip,
)
except ClusterActionError as e:
# This should not be treated as a failure cause head node might not be running in some cases
LOGGER.info(e)
return response
@configure_aws_region()
@convert_errors()
def list_clusters(region=None, next_token=None, cluster_status=None):
"""
Retrieve the list of existing clusters managed by the API. Deleted clusters are not listed by default.
:param region: List clusters deployed to a given AWS Region.
:type region: str
:param next_token: Token to use for paginated requests.
:type next_token: str
:param cluster_status: Filter by cluster status. (Defaults to all clusters.)
:type cluster_status: list | bytes
:rtype: ListClustersResponseContent
"""
stacks, next_token = AWSApi.instance().cfn.list_pcluster_stacks(next_token=next_token)
stacks = [ClusterStack(stack) for stack in stacks]
clusters = []
for stack in stacks:
current_cluster_status = cloud_formation_status_to_cluster_status(stack.status)
if not cluster_status or current_cluster_status in cluster_status:
cluster_info = ClusterInfoSummary(
cluster_name=stack.cluster_name,
cloudformation_stack_status=stack.status,
cloudformation_stack_arn=stack.id,
region=os.environ.get("AWS_DEFAULT_REGION"),
version=stack.version,
cluster_status=current_cluster_status,
)
clusters.append(cluster_info)
return ListClustersResponseContent(clusters=clusters, next_token=next_token)
@convert_errors()
@http_success_status_code(202)
def update_cluster(
update_cluster_request_content: Dict,
cluster_name,
suppress_validators=None,
validation_failure_level=None,
region=None,
dryrun=None,
force_update=None,
):
"""
Update a cluster managed in a given region.
:param update_cluster_request_content:
:param cluster_name: Name of the cluster
:type cluster_name: str
:param suppress_validators: Identifies one or more config validators to suppress.
Format: (ALL|type:[A-Za-z0-9]+)
:type suppress_validators: List[str]
:param validation_failure_level: Min validation level that will cause the update to fail.
(Defaults to 'error'.)
:type validation_failure_level: dict | bytes
:param region: AWS Region that the operation corresponds to.
:type region: str
:param dryrun: Only perform request validation without creating any resource.
May be used to validate the cluster configuration and update requirements. Response code: 200
:type dryrun: bool
:param force_update: Force update by ignoring the update validation errors.
(Defaults to 'false'.)
:type force_update: bool
:rtype: UpdateClusterResponseContent
"""
assert_node_executable()
# Set defaults
configure_aws_region_from_config(region, update_cluster_request_content["clusterConfiguration"])
validation_failure_level = validation_failure_level or ValidationLevel.ERROR
dryrun = dryrun is True
force_update = force_update is True
update_cluster_request_content = UpdateClusterRequestContent.from_dict(update_cluster_request_content)
cluster_config = update_cluster_request_content.cluster_configuration
if not cluster_config:
LOGGER.error("Failed: configuration is required and cannot be empty")
raise BadRequestException("configuration is required and cannot be empty")
try:
cluster = Cluster(cluster_name)
if not check_cluster_version(cluster, exact_match=True):
raise BadRequestException(
f"the update can be performed only with the same ParallelCluster version ({cluster.stack.version}) "
"used to create the cluster."
)
if dryrun:
_, changes, ignored_validation_failures = cluster.validate_update_request(
target_source_config=cluster_config,
force=force_update,
validator_suppressors=get_validator_suppressors(suppress_validators),
validation_failure_level=FailureLevel[validation_failure_level],
)
change_set, _ = _analyze_changes(changes)
validation_messages = validation_results_to_config_validation_errors(ignored_validation_failures)
raise DryrunOperationException(change_set=change_set, validation_messages=validation_messages or None)
changes, ignored_validation_failures = cluster.update(
target_source_config=cluster_config,
validator_suppressors=get_validator_suppressors(suppress_validators),
validation_failure_level=FailureLevel[validation_failure_level],
force=force_update,
)
change_set, _ = _analyze_changes(changes)
return UpdateClusterResponseContent(
cluster=ClusterInfoSummary(
cluster_name=cluster_name,
cloudformation_stack_status=CloudFormationStackStatus.UPDATE_IN_PROGRESS,
cloudformation_stack_arn=cluster.stack.id,
region=os.environ.get("AWS_DEFAULT_REGION"),
version=cluster.stack.version,
cluster_status=cloud_formation_status_to_cluster_status(CloudFormationStackStatus.UPDATE_IN_PROGRESS),
),
validation_messages=validation_results_to_config_validation_errors(ignored_validation_failures) or None,
change_set=change_set,
)
except ConfigValidationError as e:
config_validation_messages = validation_results_to_config_validation_errors(e.validation_failures) or None
raise UpdateClusterBadRequestException(
UpdateClusterBadRequestExceptionResponseContent(
configuration_validation_errors=config_validation_messages, message=str(e)
)
)
except ClusterUpdateError as e:
raise _handle_cluster_update_error(e)
except (NotFoundClusterActionError, StackNotFoundError):
raise NotFoundException(
f"Cluster '{cluster_name}' does not exist or belongs to an incompatible ParallelCluster major version."
)
def _handle_cluster_update_error(e):
"""Create an UpdateClusterBadRequestExceptionResponseContent in case of failure during patch validation.
Note that patch validation is carried out once we have successfully validated the configuration. For this reason, we
want to avoid adding the suppressed configuration validation errors (which we attach to the response in case of a
successful update) as we do not want to confuse the customer by showing them errors they suppressed, which did not
cause the BadRequest exception.
"""
change_set, errors = _analyze_changes(e.update_changes)
return UpdateClusterBadRequestException(
UpdateClusterBadRequestExceptionResponseContent(
message=str(e), change_set=change_set, update_validation_errors=errors or None
)
)
def _cluster_update_change_succeded(check_result):
"""Describe if check_result represents successful individual change within a larger cluster update."""
return check_result == UpdatePolicy.CheckResult.SUCCEEDED
def _analyze_changes(changes):
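    # Expected shape of `changes` (illustrative values, not taken from a real update):
    # a header row followed by one row per modified parameter, e.g.
    #   [
    #     ["param_path", "parameter", "old value", "new value", "check", "reason", "action_needed"],
    #     [["Scheduling"], "MaxCount", 5, 10, UpdatePolicy.CheckResult.SUCCEEDED, None, None],
    #   ]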
if changes is None or len(changes) <= 1:
return [], []
change_set = []
errors = []
key_indexes = {key: index for index, key in enumerate(changes[0])}
for row in changes[1:]:
parameter = _get_yaml_path(row[key_indexes["param_path"]], row[key_indexes["parameter"]])
new_value = row[key_indexes["new value"]]
old_value = row[key_indexes["old value"]]
check_result = row[key_indexes["check"]]
message = _create_message(row[key_indexes["reason"]], row[key_indexes["action_needed"]])
if not _cluster_update_change_succeded(check_result):
errors.append(
UpdateError(parameter=parameter, requested_value=new_value, message=message, current_value=old_value)
)
change_set.append(Change(parameter=parameter, requested_value=new_value, current_value=old_value))
return change_set, errors
def _create_message(failure_reason, action_needed):
message = None
if failure_reason:
message = failure_reason
if action_needed:
message = f"{message}. {action_needed}" if message else action_needed
return message or "Error during update"
def _get_yaml_path(path, parameter):
"""Compose the parameter path following the YAML Path standard.
Standard: https://github.com/wwkimball/yamlpath/wiki/Segments-of-a-YAML-Path#yaml-path-standard
"""
yaml_path = []
if path:
yaml_path.extend(path)
if parameter:
yaml_path.append(parameter)
return ".".join(yaml_path)
|
import os
import hydra
import pytest
from nuplan.common.utils.testing.nuplan_test import NUPLAN_TEST_PLUGIN, nuplan_test
from nuplan.planning.metrics.utils.testing_utils import setup_history
CONFIG_PATH = os.path.join('..', '..', '..', '..', 'script/config/common/simulation_metric/common/')
CONFIG_NAME = 'time_to_collision_statistics'
@nuplan_test(path='json/time_to_collision/time_to_collision.json')
def test_time_to_collision(scene) -> None: # type: ignore
"""
Test predicted time to collision
:param scene: the json scene
"""
history = setup_history(scene)
# Metric
hydra.core.global_hydra.GlobalHydra.instance().clear()
hydra.initialize(config_path=CONFIG_PATH)
cfg = hydra.compose(config_name=CONFIG_NAME)
time_to_collision = hydra.utils.instantiate(cfg)['time_to_collision_statistics']
result = time_to_collision.compute(history)[0]
expected_times_to_collision = [float(t) for t in scene['expected']["times_to_collision"]]
for i, (actual_ttc, expected_ttc) in enumerate(zip(result.time_series.values, expected_times_to_collision)):
assert round(actual_ttc, 2) == expected_ttc, f"Wrong TTC for timestep {i}"
if __name__ == "__main__":
raise SystemExit(pytest.main([__file__], plugins=[NUPLAN_TEST_PLUGIN]))
|
import matplotlib.pyplot as plt
import numpy as np
plot_name = "botbowl-1"
exp_id_nopf = '7f556c38-7853-11eb-b93b-faffc23fefdb.dat'
exp_id_pf = '129055f2-7846-11eb-a07b-faffc23fefdb.dat'
window_size = 2
plot_name = "botbowl-3"
exp_id_nopf = '48ed35ea-79b7-11eb-ba58-19faed4b7487.dat'
exp_id_pf = '4cd8c832-794d-11eb-b2ed-19faed4b7487.dat'
window_size = 20
#plot_name = "botbowl-5"
#exp_id_nopf = 'f1e2354e-79d8-11eb-bb13-19faed4b7487.dat'
#exp_id_pf = '70310ce8-7a80-11eb-8061-19faed4b7487.dat'
#window_size = 100
logfile_nopf = f"logs/logs-pf/{plot_name}/nopf/{exp_id_nopf}"
logfile_pf = f"logs/logs-pf/{plot_name}/pf/{exp_id_pf}"
fig, axs = plt.subplots(1, 3, figsize=(4 * 3, 5))
axs[0].ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
axs[0].set_title('Reward')
axs[1].ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
axs[1].set_title('TD/Episode')
#axs[1].set_ylim(bottom=0.0)
#axs[1].set_xlim(left=0)
axs[2].ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
axs[2].set_title('Win rate')
#axs[2].set_yticks(np.arange(0, 1.001, step=0.1))
#axs[2].set_xlim(left=0)
def plot(log_steps, log_mean_reward, log_td_rate, log_win_rate, label):
axs[0].plot(log_steps, log_mean_reward, label=label)
axs[1].plot(log_steps, log_td_rate, label=label)
axs[2].plot(log_steps, log_win_rate, label=label)
def load_data(logfile, window_size):
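    # Averages each non-overlapping block of `window_size` rows, column by column;
    # trailing rows that do not fill a complete window are dropped.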
data = []
with open(logfile, 'r') as file:
for line in file:
data.append([float(v) for v in line.strip().split(", ")])
cols = []
for i in range(len(data[0])):
col = []
vals = []
for v in np.array(data)[:, i]:
vals.append(v)
if len(vals) == window_size:
col.append(np.average(vals))
vals.clear()
cols.append(col)
return np.array(cols).transpose()
data = load_data(logfile_nopf, window_size=window_size)
plot(log_steps=data[:, 2], log_mean_reward=data[:, 6], log_td_rate=data[:, 4], log_win_rate=data[:, 3], label="No Pathfinding")
data = load_data(logfile_pf, window_size=window_size)
plot(log_steps=data[:, 2], log_mean_reward=data[:, 6], log_td_rate=data[:, 4], log_win_rate=data[:, 3], label="Pathfinding")
# axs[0].legend(['Baseline', 'Pathfinding'])
# axs[1].legend(['Baseline', 'Pathfinding'])
axs[2].legend(['Baseline', 'Pathfinding'])
fig.tight_layout()
plotfile = f"plots/{plot_name}.png"
fig.savefig(plotfile)
plt.close('all')
|
from json import loads, dumps
from pastemyst import *
# you would want to set your key here
# note: saving won't work if you don't set the key
client = Client(key=None)
paste = None
def store_data(name, data):
return client.create_paste(
Paste(
title=name,
pasties=[
Pasty(
title=name,
code=dumps(data),
language=Language.JSON
)
],
# you would probably wanna change this to never
expires_in=ExpiresIn.ONE_HOUR
)
)
def get_data(id):
return loads(
client.get_paste(id).pasties[0].code
)
def save_data(data):
paste.pasties[0].code = dumps(data)
return client.edit_paste(paste)
if __name__ == '__main__':
xp_data = {
'leaderboard': {
'John': 10,
'Joe': 20,
'Daniel': 69
}
}
paste = store_data("xp.json", xp_data)
id = paste._id
# ...
data = get_data(id)
leaderboard = data['leaderboard']
print(leaderboard)
# this only works if you have set the api key
leaderboard['Joe'] += 1
data['leaderboard'] = leaderboard
    save_data(data)
|
"""
users.py
A Flask Blueprint module for the user manager page.
"""
from flask import Blueprint, render_template, request, jsonify, g, abort
from werkzeug.exceptions import HTTPException
from meerkat_auth.user import User, InvalidCredentialException
from meerkat_auth.role import InvalidRoleException
from meerkat_auth.authorise import auth
from meerkat_auth import app
import datetime
import logging
users_blueprint = Blueprint('users', __name__, url_prefix="/<language>")
@users_blueprint.before_request
def requires_auth():
"""
Checks that the user has authenticated before returning any page from
this Blueprint.
"""
auth.check_auth(['admin'], [''])
# Only allow admin's to edit accounts in their own countries.
# i.e. if admin has non-admin access to another country.
countries = list(g.payload['acc'].keys())
for country in countries:
if 'admin' not in g.payload['acc'][country]:
del g.payload['acc'][country]
logging.warning(g.payload['acc'])
def compare_access(roles, countries):
"""
Utility function to check that the current user has access to the
specified access levels. Aborts the request if current user doesn't have
access.
Args:
roles ([`str`]): A list of access role names, to compare the current \
users access against.
countries ([`str`]): The corresponding countries for each role in roles.
"""
user_access = g.payload['acc']
# Look at each access level the account has.
for i in range(len(roles)):
acc_role = roles[i]
acc_country = countries[i]
# If the account has an access level the current user doesn't have:
# (If the current user has access in that country...)
if acc_role not in user_access.get(acc_country, [acc_role]):
abort(403, "You are not authorised to view or edit this user.")
@users_blueprint.route('/get_users')
def get_users():
"""
Get a list of users for the bootstrap table listing users. We do not allow
people to see accounts that have access they themselves do not have.
Returns:
A json response containing one property "rows" which is a list of
objects where each object represents a row of the user accounts table.
"""
# Set attributes "to get"
# And restrict accounts shown, to those from the user's countries.
acc = g.payload['acc']
countries = list(acc.keys())
attributes = [
"email", "roles", "username", "countries", "creation", "data"
]
rows = User.get_all(countries, attributes)
# Remove any data rows (accounts) that are outside the users access.
# Step backwards through the list so we can smoothly delete as we go.
for j in range(len(rows)-1, -1, -1):
access = (rows[j]['roles'], rows[j]['countries'], acc)
if not auth.check_access(*access, 'AND'):
del rows[j]
return jsonify({'rows': rows})
@users_blueprint.route('/get_user/')
@users_blueprint.route('/get_user/<username>')
def get_user(username=""):
"""
    Get the specified user as a json response.
Args:
username (`str`): The username of the user to get.
Returns:
A json response containing all the details of the specified user
as specified by the User object "to_dict()" method. If no username
is given, this method returns an empty user. This is useful for
js design in the frontend (means we always have value to auto-fill
the user editor form with).
"""
if username == "":
return jsonify({
"countries": [],
"creation": "",
"data": {},
"email": "",
"password": "",
"roles": [],
"state": "",
"updated": "",
"username": ""
})
else:
user = User.from_db(username)
# Check that the current user has access to view the requested user.
auth.check_auth(user.roles, user.countries, 'AND')
return jsonify(user.to_dict())
@users_blueprint.route('/check_username/<username>')
def check_username(username):
"""
Checks whether or not the specified username is a valid new username.
Args:
username (`str`): the username to check
Returns:
A json response with a single property 'valid', set to true if valid
and false if not.
"""
return jsonify({'valid': not User.check_username(username)})
@users_blueprint.route('/update_user/<username>', methods=['POST'])
@users_blueprint.route('/update_user/', methods=['POST'])
def update_user(username='new'):
"""
Update/create a user. If username is set to "new" it will create and
validate as a new user. Pass the new user details as json in the post
data. Post data should contain the following properties: username, email,
password, countries (list of str), roles (list of str), state, creation
(timestamp), data (json object). Look at the db to see the structure of
data.
Args:
username (`str`): The username of the user to be updated.
Returns:
A string stating success or error.
"""
# Load the form's data and check the current user has access to edit.
data = request.get_json()
auth.check_auth(data["roles"], data["countries"], 'AND')
if username != 'new':
user = User.from_db(username)
auth.check_auth(user.roles, user.countries, 'AND')
# Form's password field default is empty, only update if something entered.
# Original password hash is stored in hidden input so we don't need to
# reload user here.
if data["password"]:
data["password"] = User.hash_password(data["password"])
else:
data["password"] = data["original_password"]
# Create a user object represented by the form input.
user = User(
data["username"],
data["email"],
data["password"],
data["countries"],
data["roles"],
state=data["state"],
updated=datetime.datetime.now().isoformat(),
creation=data["creation"],
data=data["data"]
)
logging.warning(
"Original username: " + username + " New username: " + data['username']
)
# If username changed, then create a new record for validation.
# ...because otherwise validation will say "username already exists".
if username != data["username"]:
user.state = "new"
# Factor out the multiple lines of error handling for writing to database.
def write(user):
try:
logging.warning(user.password)
user.to_db()
except (InvalidRoleException, InvalidCredentialException) as e:
return str(e)
except Exception as e:
return str(e)
# Write the user to the database. Includes server-side validation.
write(user)
# Reset state once validation and writing complete
# Changing username shouldn't wipe the state.
if user.state != data["state"]:
user = User.from_db(user.username)
logging.warning(repr(user))
user.state = data["state"]
write(user)
# When username changes we create a new db record, so delete old one.
if username != data["username"]:
User.delete(username)
return "Successfully Updated"
@users_blueprint.route('/delete_users', methods=['POST'])
def delete_users():
"""
Delete the users specified in the post arguments.
The post arguments takes a list of usernames to be deleted.
Returns:
        A string either stating success or the existence of an error.
"""
# Load the list of users to be deleted.
users = request.get_json()
logging.warning('Users: ' + str(users))
# Try to delete users
message = ""
error = False
for username in users:
try:
# Check current user has access to delete the specified user.
user_to_delete = User.from_db(username)
try:
auth.check_auth(['admin'], ['meerkat'])
except HTTPException:
countries_list = user_to_delete.countries
admin_role_list = ['admin'] * len(countries_list)
auth.check_auth(admin_role_list, countries_list, logic='AND')
logging.warning(
g.payload['usr'] + ' is deleting account ' + str(username)
)
# Delete the user
User.delete(username)
            message += 'Successfully deleted "{}"\n'.format(username)
except Exception as e:
error = True
message += 'Cannot delete "{}": {}\n'.format(username, e)
if error:
return ("ERROR: There was an error deleting some users.\n" + message)
else:
return "Users successfully deleted."
@users_blueprint.route('/')
def index():
"""
Renders the user editor/creator/viewer page.
"""
return render_template(
'users/index.html',
user=g.payload,
root=app.config['ROOT_URL']
)
|
default_app_config = "outpost.django.salt.apps.DefaultConfig"
|
##################Read a netcdf file#########################
###########Import necessary modules##########################
import netCDF4 as nc
############ File to be read ####################
############ IMD Gridded Rainfall data ##########
file_name = "/mnt/e/Python_Scripts/Sample_Data/RFone_imd_rf_1x1_2019.nc"
################# open file ######################
f = nc.Dataset(file_name)
print(f) # gives us information about the variables
#contained in the file and their dimensions
for dim in f.dimensions.values():
print(dim) # Metadata for all dimensions
for var in f.variables.values():
print(var) # Metadata for all variables
print(f['rf']) # Metadata of single variable
################# read variables ################
rf = f.variables['rf'][:]
lats = f.variables['lat'][:]
lons = f.variables['lon'][:]
time = f.variables['time'][:]
print(rf.min()," ,",rf.max())
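################# example statistic (illustrative) ###############
# Assuming the first dimension of 'rf' is time, the mean rainfall
# field over the year (a lat-lon climatology) can be computed as:
rf_mean = rf.mean(axis=0)
print("mean rainfall field shape:", rf_mean.shape)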
exit()
|
# -*- coding: utf8 -*-
# Copyright 2019 JSALT2019 Distant Supervision Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.distributions.laplace import Laplace
from torch.distributions.normal import Normal
import torch.nn
import torch.nn.functional as F
import torchvision.models as tvmodels
from distsup.utils import safe_squeeze
# Quantizations with losses
class BaseDataQuantizer(torch.nn.Module):
def __init__(self, **kwargs):
super(BaseDataQuantizer, self).__init__(**kwargs)
self.num_levels = 1
def quantize(self, x):
return self(x)
def forward(self, x):
"""Encode the values, e.g. by quantizeing.
Args:
x: the data to quantize
Returns:
tuple of:
- tensor with encoded values
                - tensor with targets to be consumed by the corresponding loss
"""
return x, x
def dequantize(self, x):
return x
def mean_field(self, logits):
raise NotImplementedError
def sample(self, logits):
raise NotImplementedError
def loss(self, x, targets):
raise NotImplementedError
class SoftmaxUniformQuantizer(BaseDataQuantizer):
def __init__(self, num_levels, min=0.0, max=1.0, **kwargs):
assert min == 0.0, "Not implemented"
assert max == 1.0, "Not implemented"
super(SoftmaxUniformQuantizer, self).__init__(**kwargs)
self.num_levels = num_levels
def forward(self, x):
assert x.min() >= 0.0
assert x.max() <= 1.0
targets = (x * self.num_levels).clamp(0, self.num_levels - 1).long()
assert targets.min() >= 0
assert targets.max() < self.num_levels
inputs = self.dequantize(targets)
return inputs, targets
def dequantize(self, q):
return q.float() / (self.num_levels - 1)
def mean_field(self, logits):
dim = -1
probs = F.softmax(logits, dim)
ndim = [1] * probs.dim()
ndim[dim] = self.num_levels
probs *= (
torch
.arange(self.num_levels, dtype=torch.float32, device=probs.device)
.view(*ndim))
return probs.sum(dim)
def sample(self, logits):
*lead_dims, softmax_dim = logits.shape
probs = torch.softmax(logits, -1).view(-1, softmax_dim)
samples = torch.multinomial(probs, 1)
samples = samples.view(*lead_dims)
return self.dequantize(samples)
def loss(self, logits, targets):
assert logits.size()[:4] == targets.size()
logits = logits.permute(0, 4, 1, 2, 3)
loss = F.cross_entropy(logits, targets.long(), reduction='none')
return loss
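# Rough usage sketch for SoftmaxUniformQuantizer (illustrative values; inputs are
# assumed to already lie in [0, 1]):
#
#   q = SoftmaxUniformQuantizer(num_levels=4)
#   x = torch.tensor([[0.0, 0.3, 0.9]])
#   inputs, targets = q(x)           # targets -> [[0, 1, 3]] (level indices)
#   inputs == q.dequantize(targets)  # -> [[0.0000, 0.3333, 1.0000]], i.e. targets / (num_levels - 1)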
class SoftmaxQuantizer(BaseDataQuantizer):
def __init__(self, levels=[0.0, 0.25, 0.5, 0.75], **kwargs):
super(SoftmaxQuantizer, self).__init__(**kwargs)
assert levels == sorted(levels), "Levels should be sorted"
self.register_buffer('levels', torch.tensor(levels))
self.num_levels = len(levels)
def forward(self, x):
_, targets = torch.min((x.unsqueeze(-1) - self.levels)**2, -1)
return self.dequantize(targets), targets
def dequantize(self, q):
return self.levels[q]
def mean_field(self, logits):
dim = -1
probs = F.softmax(logits, dim)
ndim = [1] * probs.dim()
ndim[dim] = self.num_levels
probs *= self.levels.view(*ndim)
return probs.sum(dim)
def sample(self, logits):
*lead_dims, softmax_dim = logits.shape
probs = torch.softmax(logits, -1).view(-1, softmax_dim)
samples = torch.multinomial(probs, 1)
samples = samples.view(*lead_dims)
return self.dequantize(samples)
def loss(self, logits, targets):
assert logits.size()[:4] == targets.size()
logits = logits.permute(0, 4, 1, 2, 3)
loss = F.cross_entropy(logits, targets.long(), reduction='none')
return loss
class BinaryXEntropy(BaseDataQuantizer):
def __init__(self, **kwargs):
super(BinaryXEntropy, self).__init__(**kwargs)
def forward(self, x):
assert x.min() >= 0.0
assert x.max() <= 1.0
return x, x
def dequantize(self, x):
return x
def mean_field(self, logits):
logits = safe_squeeze(logits, -1)
return torch.sigmoid(logits)
def sample(self, logits):
logits = safe_squeeze(logits, -1)
probs = torch.sigmoid(logits)
return (torch.rand_like(probs) < probs
).float()
def loss(self, logits, targets):
logits = safe_squeeze(logits, -1)
assert logits.size() == targets.size()
return F.binary_cross_entropy_with_logits(
logits, targets, reduction='none')
class L1Loss(BaseDataQuantizer):
def __init__(self, **kwargs):
super(L1Loss, self).__init__(**kwargs)
def forward(self, x):
return x, x
def dequantize(self, x):
return x
def mean_field(self, logits):
logits = safe_squeeze(logits, -1)
return logits
def sample(self, logits):
logits = safe_squeeze(logits, -1)
return Laplace(logits, 1.0).sample()
def loss(self, logits, targets):
logits = safe_squeeze(logits, -1)
assert logits.size() == targets.size(), f"{logits.size()} != {targets.size()}"
return F.l1_loss(logits, targets, reduction='none')
class L2Loss(BaseDataQuantizer):
def __init__(self, **kwargs):
super(L2Loss, self).__init__(**kwargs)
def forward(self, x):
return x, x
def dequantize(self, x):
return x
def mean_field(self, logits):
logits = safe_squeeze(logits, -1)
return logits
def sample(self, logits):
logits = safe_squeeze(logits, -1)
return Normal(logits, 1.0).sample()
def loss(self, logits, targets):
logits = safe_squeeze(logits, -1)
assert logits.size() == targets.size()
return F.mse_loss(logits, targets, reduction='none')
class NormalMeanScaleLoss(BaseDataQuantizer):
def __init__(self, **kwargs):
super(NormalMeanScaleLoss, self).__init__(**kwargs)
self.num_levels = 2
def forward(self, x):
return x, x
def dequantize(self, x):
return x
def mean_field(self, logits):
return logits[:, :, :, :, 0]
def _get_normal(self, logits):
loc, scale = logits.chunk(2, dim=-1)
loc = safe_squeeze(loc, -1)
scale = torch.exp(safe_squeeze(scale, -1))
return Normal(loc, scale)
def sample(self, logits):
return self._get_normal(logits).sample()
def loss(self, logits, targets):
assert logits.size()[:-1] == targets.size()
return -self._get_normal(logits).log_prob(targets)
class PerceptualLoss(BaseDataQuantizer):
def __init__(self, layers=6):
super(PerceptualLoss, self).__init__()
self.vgg = tvmodels.vgg16(pretrained=True).features[:layers]
self.vgg.eval()
for p in self.vgg.parameters():
p.requires_grad_(False)
def forward(self, x):
return x, x
def dequantize(self, x):
return x
def mean_field(self, logits):
logits = safe_squeeze(logits, -1)
return logits
def sample(self, logits):
return safe_squeeze(logits, -1)
def loss(self, logits, targets):
logits = safe_squeeze(logits, -1)
logits = logits.permute(0, 3, 2, 1)
B, C, H, W = logits.shape
logits = logits.expand(B, 3, H, W)
targets = targets.permute(0, 3, 2, 1)
targets = targets.expand(B, 3, H, W)
return F.l1_loss(self.vgg(logits * 2 - 1), self.vgg(targets * 2 - 1),
reduction='none')
|
"""Module containing the namespace table definitions."""
from typing import Optional
from sqlalchemy.sql.schema import Column, Index
from sqlalchemy.ext.declarative import declared_attr
from ..db import DB, MODEL
from .model_helpers import (
FULLTEXT_INDEX_PARAMS,
ChangesMixin,
IdMixin,
UniqueNameDescriptionMixin,
)
class Namespace(IdMixin, UniqueNameDescriptionMixin, ChangesMixin, MODEL):
"""Namespace Table model."""
__tablename__ = "Namespace"
@declared_attr
def __table_args__(cls):
return (
Index(
f"ix_search{cls.__tablename__}",
"name",
"description",
**FULLTEXT_INDEX_PARAMS,
),
)
def __init__(self, name: str, description: Optional[str] = None, **kwargs) -> None:
self.update(name, description, **kwargs)
def update(self, name: str, description: Optional[str] = None, **kwargs):
if kwargs:
raise ValueError("Got unknown keyword arguments!")
self.name = name
self.description = description
|
# -*- coding: utf-8 -*-
"""Application configuration."""
import os
from pathlib import Path
class LocalConfig:
db_type = os.getenv('DB_TYPE', 'mysql')
user = os.getenv('DB_USER', 'root')
passwd = os.getenv('DB_PASSWD', '123456')
host = os.getenv('DB_HOST', '127.0.0.1')
port = os.getenv('DB_PORT', 3306)
db_name = os.getenv('DB_NAME', 'flaskshop')
if db_type == 'postgresql':
db_uri = 'postgresql://{user}:{passwd}@{host}:{port}/{db_name}'.format(
user=user, passwd=passwd, host=host, port=port, db_name=db_name)
elif db_type == u'mysql':
db_uri = "mysql+pymysql://{user}:{passwd}@{host}:{port}/{db_name}?charset=utf8mb4".format(
user=user,passwd=passwd, host=host, port=port, db_name=db_name)
redis_uri = "redis://localhost:6379"
esearch_uri = "localhost"
class Config:
SECRET_KEY = os.getenv("SECRET_KEY", "thisisashop")
# Redis
# if redis is enabled, it can be used for:
# - cache
# - save product description
# - save page content
USE_REDIS = False
REDIS_URL = os.getenv("REDIS_URI", LocalConfig.redis_uri)
# Elasticsearch
# if elasticsearch is enabled, the home page will have a search bar
    # and when a product is added, the search index will get updated
USE_ES = False
ES_HOSTS = [
os.getenv("ESEARCH_URI", LocalConfig.esearch_uri),
]
# SQLALCHEMY
SQLALCHEMY_DATABASE_URI = os.getenv("DB_URI", LocalConfig.db_uri)
SQLALCHEMY_TRACK_MODIFICATIONS = False
DATABASE_QUERY_TIMEOUT = 0.1 # log the slow database query, and unit is second
SQLALCHEMY_RECORD_QUERIES = True
# Dir
APP_DIR = Path(__file__).parent # This directory
PROJECT_ROOT = APP_DIR.parent
STATIC_DIR = APP_DIR / "static"
UPLOAD_FOLDER = "upload"
UPLOAD_DIR = STATIC_DIR / UPLOAD_FOLDER
DASHBOARD_TEMPLATE_FOLDER = APP_DIR / "templates" / "dashboard"
UPLOAD_FOLDER = os.getenv('UPLOAD_FOLDER', 'static/placeholders')
PURCHASE_URI = os.getenv('PURCHASE_URI', '')
BCRYPT_LOG_ROUNDS = 13
DEBUG_TB_ENABLED = os.getenv("FLASK_DEBUG", False) # Disable Debug toolbar
DEBUG_TB_INTERCEPT_REDIRECTS = False
DEBUG_TB_PROFILER_ENABLED = True
MESSAGE_QUOTA = 10
LANGUAGES = {
'en': 'English',
'bg': 'Български'
}
BABEL_DEFAULT_LOCALE = os.getenv('BABEL_DEFAULT_LOCALE', 'en_US')
BABEL_DEFAULT_TIMEZONE = os.getenv('BABEL_DEFAULT_TIMEZONE', 'UTC')
BABEL_TRANSLATION_DIRECTORIES = os.getenv('BABEL_TRANSLATION_DIRECTORIES', '../translations')
BABEL_CURRENCY = os.getenv('BABEL_CURRENCY', 'USD')
MAIL_SERVER = os.getenv("MAIL_SERVER", 'localhost')
MAIL_PORT = os.getenv("MAIL_PORT", 25)
MAIL_TLS = os.getenv("MAIL_TLS", True)
if MAIL_TLS:
MAIL_USE_TLS = True
MAIL_USE_SSL = False
else:
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_DEBUG = DEBUG_TB_ENABLED
MAIL_USERNAME = os.getenv("MAIL_USERNAME", '')
MAIL_PASSWORD = os.getenv("MAIL_PASSWORD", '')
MAIL_DEFAULT_SENDER = os.getenv("MAIL_DEFAULT_SENDER", '')
GA_MEASUREMENT_ID = os.getenv("GA_MEASUREMENT_ID", '')
|
from __future__ import absolute_import
from django.contrib import admin
from .models import Timeline, Blip
admin.site.register(Blip,
list_display=["timeline", "pub_date", "is_private"],
list_filter=["timeline", "is_private"],
raw_id_fields=['user'],
list_per_page=500,
)
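# The keyword form above is shorthand for registering a ModelAdmin subclass,
# roughly equivalent to:
#
#   class BlipAdmin(admin.ModelAdmin):
#       list_display = ["timeline", "pub_date", "is_private"]
#       list_filter = ["timeline", "is_private"]
#       raw_id_fields = ["user"]
#       list_per_page = 500
#
#   admin.site.register(Blip, BlipAdmin)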
admin.site.register(Timeline,
prepopulated_fields={'slug': ('name',)},
)
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
from sacred import Ingredient
from gnnbench.data.preprocess import row_normalize, renormalize_adj
from gnnbench.models.base_model import GNNModel
from gnnbench.util import dropout_supporting_sparse_tensors, to_sparse_tensor
# implementation verified against Kipf data
def graph_convolution(inputs, sparse_renormalized_laplacian, weights, input_is_sparse=False):
    """Implements the graph convolution operation A_hat * inputs * weights, where
    A_hat is the renormalized Laplacian A_hat = D~^-0.5 * A~ * D~^-0.5 with
    A~ = A + I_N (adjacency matrix with added self-loops) and
    D~ = diagonal matrix of node degrees deduced from A~.
    """
if input_is_sparse:
output = tf.sparse_tensor_dense_matmul(inputs, weights)
else:
output = tf.matmul(inputs, weights)
return tf.sparse_tensor_dense_matmul(sparse_renormalized_laplacian, output)
def graph_convolution_layer(output_dim,
inputs, sparse_renormalized_laplacian,
activation_fn,
dropout_prob,
weight_decay,
name,
input_is_sparse=False):
with tf.name_scope(name):
input_dim = int(inputs.get_shape()[1])
weights = tf.get_variable("%s-weights" % name, [input_dim, output_dim], dtype=tf.float32,
initializer=tf.glorot_uniform_initializer(),
regularizer=slim.l2_regularizer(weight_decay))
bias = tf.get_variable("%s-bias" % name, [output_dim], dtype=tf.float32,
initializer=tf.zeros_initializer())
# Apply dropout to inputs if required
inputs = tf.cond(
tf.cast(dropout_prob, tf.bool),
true_fn=(lambda: dropout_supporting_sparse_tensors(inputs, 1 - dropout_prob)),
false_fn=(lambda: inputs),
)
convolved = graph_convolution(inputs, sparse_renormalized_laplacian, weights,
input_is_sparse)
output = convolved + bias
if activation_fn is not None:
output = activation_fn(output)
return output
class GCN(GNNModel):
def __init__(self, features, graph_adj, targets, nodes_to_consider,
num_layers, hidden_size, dropout_prob, weight_decay, normalize_features):
self.normalize_features = normalize_features
with tf.name_scope('extract_relevant_nodes'):
targets = tf.gather(targets, nodes_to_consider)
super().__init__(features, graph_adj, targets)
self.nodes_to_consider = nodes_to_consider
self.num_layers = num_layers
self.hidden_size = hidden_size
self.dropout_prob = dropout_prob
self.weight_decay = weight_decay
self._build_model_graphs()
def _inference(self):
with tf.name_scope('inference'):
x = self.features
for i in range(0, self.num_layers - 1):
x = graph_convolution_layer(output_dim=self.hidden_size,
inputs=x,
sparse_renormalized_laplacian=self.graph_adj,
activation_fn=tf.nn.relu,
dropout_prob=self.dropout_prob,
# original implementation uses L2 regularization only on first layer
weight_decay=self.weight_decay if i == 0 else 0.0,
name="gc%d" % i,
input_is_sparse=i == 0)
output = graph_convolution_layer(output_dim=self.targets.shape[1],
inputs=x,
sparse_renormalized_laplacian=self.graph_adj,
activation_fn=None,
dropout_prob=self.dropout_prob,
weight_decay=0.0,
name="gc%d" % (self.num_layers - 1),
input_is_sparse=False)
with tf.name_scope('extract_relevant_nodes'):
return tf.gather(output, self.nodes_to_consider)
def _preprocess_features(self, features):
if self.normalize_features:
features = row_normalize(features)
return to_sparse_tensor(features)
def _preprocess_adj(self, graph_adj):
return to_sparse_tensor(renormalize_adj(graph_adj))
MODEL_INGREDIENT = Ingredient('model')
@MODEL_INGREDIENT.capture
def build_model(graph_adj, node_features, labels, dataset_indices_placeholder,
train_feed, trainval_feed, val_feed, test_feed,
weight_decay, normalize_features,
num_layers, hidden_size, dropout_prob):
dropout = tf.placeholder(dtype=tf.float32, shape=[])
train_feed[dropout] = dropout_prob
trainval_feed[dropout] = False
val_feed[dropout] = False
test_feed[dropout] = False
return GCN(node_features, graph_adj, labels, dataset_indices_placeholder,
num_layers=num_layers, hidden_size=hidden_size,
dropout_prob=dropout,
weight_decay=weight_decay,
normalize_features=normalize_features)
|
import shutil
import os
def BackupConfigs(configPoolDir, colors):
    bakPath = os.path.join(os.getcwd(), "configs.bak")
shutil.make_archive(bakPath, 'zip', configPoolDir)
print("\x1b[%sm Backup complete... \x1b[0m" % colors['blue'])
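
# Minimal usage sketch (hedged; the directory name and the ANSI colour map are assumptions):
if __name__ == "__main__":
    BackupConfigs(os.path.join(os.getcwd(), "configs"), {'blue': '34'})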
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
# DCT hyper-parameters
T = 8
K = 4
channel = 3
# DCT weight
def DCT_w(x, y, u, v):
cu = 1.
cv = 1.
if u == 0:
cu /= np.sqrt(2)
if v == 0:
cv /= np.sqrt(2)
theta = np.pi / (2 * T)
return (( 2 * cu * cv / T) * np.cos((2*x+1)*u*theta) * np.cos((2*y+1)*v*theta))
# DCT
def dct(img):
H, W, _ = img.shape
F = np.zeros((H, W, channel), dtype=np.float32)
for c in range(channel):
for yi in range(0, H, T):
for xi in range(0, W, T):
for v in range(T):
for u in range(T):
for y in range(T):
for x in range(T):
F[v+yi, u+xi, c] += img[y+yi, x+xi, c] * DCT_w(x,y,u,v)
return F
# IDCT
def idct(F):
H, W, _ = F.shape
out = np.zeros((H, W, channel), dtype=np.float32)
for c in range(channel):
for yi in range(0, H, T):
for xi in range(0, W, T):
for y in range(T):
for x in range(T):
for v in range(K):
for u in range(K):
out[y+yi, x+xi, c] += F[v+yi, u+xi, c] * DCT_w(x,y,u,v)
out = np.clip(out, 0, 255)
out = np.round(out).astype(np.uint8)
return out
# Quantization
def quantization(F):
H, W, _ = F.shape
Q = np.array(((16, 11, 10, 16, 24, 40, 51, 61),
(12, 12, 14, 19, 26, 58, 60, 55),
(14, 13, 16, 24, 40, 57, 69, 56),
(14, 17, 22, 29, 51, 87, 80, 62),
(18, 22, 37, 56, 68, 109, 103, 77),
(24, 35, 55, 64, 81, 104, 113, 92),
(49, 64, 78, 87, 103, 121, 120, 101),
(72, 92, 95, 98, 112, 100, 103, 99)), dtype=np.float32)
for ys in range(0, H, T):
for xs in range(0, W, T):
for c in range(channel):
F[ys: ys + T, xs: xs + T, c] = np.round(F[ys: ys + T, xs: xs + T, c] / Q) * Q
return F
# MSE
def MSE(img1, img2):
H, W, _ = img1.shape
mse = np.sum((img1 - img2) ** 2) / (H * W * channel)
return mse
# PSNR
def PSNR(mse, vmax=255):
return 10 * np.log10(vmax * vmax / mse)
# bitrate
def BITRATE():
return 1. * T * K * K / T / T
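# Worked example (hedged, for intuition only): for two images that differ by exactly 1 at
# every pixel, MSE = 1 and PSNR = 10 * log10(255^2 / 1) ~= 48.13 dB.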
# Read image
img = cv2.imread("imori.jpg").astype(np.float32)
# DCT
F = dct(img)
# quantization
F = quantization(F)
# IDCT
out = idct(F)
# MSE
mse = MSE(img, out)
# PSNR
psnr = PSNR(mse)
# bitrate
bitrate = BITRATE()
print("MSE:", mse)
print("PSNR:", psnr)
print("bitrate:", bitrate)
# Save result
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.imwrite("out.jpg", out)
|
from PySide2.QtCore import QObject
from models.constants import PropType
class PropertyPopupCtrl(QObject):
"""controller for property popup view"""
def __init__(self, popup_model):
super(PropertyPopupCtrl, self).__init__()
self._model = popup_model
    def set_popup_value(self, action):
        """Set the model's value to the choice matching the selected action."""
selected_value_name = str(action.data())
self._model.value = list(filter(lambda choice: str(choice) == selected_value_name, self._model.choices))[0]
|
import typing
class BitCount():
def __getitem__(self, n: int) -> int: return self.__a[n]
def __call__(self, n: int) -> int: return self.__a[n]
    def __init__(self, n: int) -> None:
        a = [0] * n
        for i in range(1, n):
            a[i] = a[i >> 1] + (i & 1)
self.__a = a
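
# Minimal usage sketch (hedged example, not part of the original module):
if __name__ == '__main__':
    bc = BitCount(16)
    assert bc[0] == 0 and bc(3) == 2 and bc[7] == 3  # popcounts of 0, 3 and 7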
|
#!/usr/bin/env python
""" Tests covering the command-line code.
"""
import nf_core.__main__
from click.testing import CliRunner
import mock
import unittest
@mock.patch("nf_core.__main__.nf_core_cli")
def test_header(mock_cli):
"""Just try to execute the header function"""
nf_core.__main__.run_nf_core()
def test_cli_help():
"""Test the main launch function with --help"""
runner = CliRunner()
result = runner.invoke(nf_core.__main__.nf_core_cli, ["--help"])
assert result.exit_code == 0
assert "Show the version and exit." in result.output
def test_cli_bad_subcommand():
"""Test the main launch function with verbose flag and an unrecognised argument"""
runner = CliRunner()
result = runner.invoke(nf_core.__main__.nf_core_cli, ["-v", "foo"])
assert result.exit_code == 2
# Checks that -v was considered valid
assert "No such command" in result.output
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import ctypes
import json
import os
import re
import struct
import sys
import numpy as np
import pytest
import tvm
import tvm.relay
import tvm.testing
from tvm.contrib import utils
INPUT_SHAPE = (1, 3, 16, 16)
KERNEL_SHAPE = (3, 3, 3, 3)
# The data types that are linkable.
LINKABLE_DTYPES = (
[f"uint{b}" for b in (8, 16, 32, 64)]
+ [f"int{b}" for b in (8, 16, 32, 64)]
+ ["float32", "float64"]
)
def dtype_info(dtype):
"""Lookup numpy type info for the given string dtype (of LINKABLE_DTYPES above)."""
if "int" in dtype:
return np.iinfo(getattr(np, dtype))
else:
return np.finfo(getattr(np, dtype))
# Note: for debugging, set this to an integer (e.g. 1). Then all "random" tensors will become
# predictable
RANDOM_TENSOR_START = None
def _make_random_tensor(dtype, shape):
"""Create a random test tensor with given shape and dtype."""
    global RANDOM_TENSOR_START
if RANDOM_TENSOR_START is not None:
to_return = np.arange(
RANDOM_TENSOR_START, RANDOM_TENSOR_START + np.prod(shape), dtype=dtype
).reshape(shape)
        RANDOM_TENSOR_START += np.prod(shape)
return to_return
dinfo = dtype_info(dtype)
if "int" in dtype:
return np.random.randint(dinfo.min, dinfo.max, shape, dtype=dtype)
else:
to_return = np.random.uniform(0, dinfo.max, shape).astype(dtype)
np.reshape(to_return, np.prod(shape))[::2] *= -1
return to_return
def _lookup_sid(graph, name):
"""Lookup the storage id of a named parameter.
Arguments
---------
graph : dict
Parsed JSON graph.
name : str
Name of the tensor parameter to lookup.
Returns
-------
int :
The storage_id of the parameter.
"""
num_outputs_seen = 0
for i, n in enumerate(graph["nodes"]):
if n["name"] == name:
print("sid", name, graph["attrs"]["storage_id"][1], num_outputs_seen)
return graph["attrs"]["storage_id"][1][num_outputs_seen]
else:
if "attrs" in n and "num_outputs" in n["attrs"]:
num_outputs_seen += int(n["attrs"]["num_outputs"])
else:
num_outputs_seen += 1
raise KeyError(f"no such param: {name}")
def _get_ctypes_dtype(dt):
"""Return a ctypes c_* datatype given a string data type."""
if "int" in dt:
return getattr(ctypes, f"c_{dt}")
elif dt == "float32":
return ctypes.c_float
elif dt == "float64":
return ctypes.c_double
else:
assert False, f"unknown dtype: {dt}"
def _verify_linked_param(dtype, lib, mod, graph, name):
"""Directly read memory from the linked library to verify the linked parameter is correct."""
sid = _lookup_sid(graph, name)
# NOTE: query_imports=True because when loading a module from disk (i.e. for C backend),
# a GraphRuntimeFactory module is created instead of the module itself.
param_ptr = mod.get_function("_lookup_linked_param", True)(sid)
gen_param = lib.params[name]
arr_data = (_get_ctypes_dtype(dtype) * np.prod(gen_param.shape)).from_address(param_ptr.value)
arr = np.ndarray(shape=gen_param.shape, dtype=gen_param.dtype, buffer=arr_data, order="C")
if "int" in gen_param.dtype:
np.testing.assert_equal(gen_param.asnumpy(), arr)
else:
np.testing.assert_allclose(gen_param.asnumpy(), arr)
return dtype == gen_param.dtype
def _make_mod_and_params(dtype):
"""Create a Relay module and parameters to test the given datatype."""
param_decls = collections.OrderedDict()
param_init = {}
def _add_decl(name, dtype):
param_decls[name] = f"%{name} : Tensor[{KERNEL_SHAPE}, {dtype}]"
param_init[name] = _make_random_tensor(dtype, KERNEL_SHAPE)
    # Add several parameters so that the number of parameters is greater than one.
_add_decl(f"{dtype}_a", dtype)
_add_decl(f"{dtype}_b", dtype)
mod_lines = [
'#[version = "0.0.5"]',
f"def @main(%rand_input : Tensor[{INPUT_SHAPE}, {dtype}], { ', '.join(param_decls.values()) } ) {{",
# This program ensures that GraphPlanMemory alternates between the same two storage IDs for a
# while. In doing this, it ensures that param %{dtype}_b will be placed into the graph at an
# index unequal to its storage_id. This ensures that GraphRuntimeCodegen encodes the storage_id
# and not the parameter index into the graph.
(
f' %0 = nn.conv2d(%rand_input, %{dtype}_a, data_layout="NCHW", kernel_layout="OIHW", '
f'kernel_size=[3, 3], out_dtype="{dtype}");'
),
(
f' %1 = nn.conv2d(%0, %{dtype}_a, data_layout="NCHW", kernel_layout="OIHW", '
f'kernel_size=[3, 3], out_dtype="{dtype}");'
),
(
f' %2 = nn.conv2d(%1, %{dtype}_a, data_layout="NCHW", kernel_layout="OIHW", '
f'kernel_size=[3, 3], out_dtype="{dtype}");'
),
(
f' %3 = nn.conv2d(%2, %{dtype}_b, data_layout="NCHW", kernel_layout="OIHW", '
f'kernel_size=[3, 3], out_dtype="{dtype}");'
),
" %3",
"}",
]
mod = tvm.parser.fromtext("\n".join(mod_lines))
return mod, param_init
@tvm.testing.requires_llvm
def test_llvm_link_params():
for dtype in LINKABLE_DTYPES:
mod, param_init = _make_mod_and_params(dtype)
rand_input = _make_random_tensor(dtype, INPUT_SHAPE)
main_func = mod["main"]
target = "llvm --runtime=c --system-lib --link-params"
with tvm.transform.PassContext(opt_level=3):
lib = tvm.relay.build(mod, target, params=param_init)
assert set(lib.params.keys()) == {"p0", "p1"} # NOTE: op folded
print("graph", lib.graph_json)
graph = json.loads(lib.graph_json)
for p in lib.params:
            _verify_linked_param(dtype, lib, lib.lib, graph, p)
# Wrap in function to explicitly deallocate the runtime.
def _run_linked(lib):
graph_json, mod, _ = lib
graph_rt = tvm.contrib.graph_runtime.create(graph_json, mod, tvm.cpu(0))
graph_rt.set_input("rand_input", rand_input) # NOTE: params not required.
graph_rt.run()
return graph_rt.get_output(0)
linked_output = _run_linked(lib)
with tvm.transform.PassContext(opt_level=3):
lib = tvm.relay.build(mod, "llvm --system-lib", params=param_init)
def _run_unlinked(lib):
graph_json, mod, lowered_params = lib
graph_rt = tvm.contrib.graph_runtime.create(graph_json, mod, tvm.cpu(0))
graph_rt.set_input("rand_input", rand_input, **lowered_params)
graph_rt.run()
return graph_rt.get_output(0)
unlinked_output = _run_unlinked(lib)
if "int" in dtype:
np.testing.assert_equal(unlinked_output.asnumpy(), linked_output.asnumpy())
else:
np.testing.assert_allclose(unlinked_output.asnumpy(), linked_output.asnumpy())
def _get_c_datatype(dtype):
"""Translate LINKABLE_DTYPES element to c datatype."""
if "int" in dtype:
return f"{dtype}_t"
elif dtype == "float32":
return "float"
elif dtype == "float64":
return "double"
else:
assert False, f"unknown dtype {dtype}"
def _format_c_value(dtype, width, x):
if "int" in dtype:
hex_formatstr = f'{{:{"+" if dtype.startswith("int") else ""}#0{width}x}}'
return hex_formatstr.format(x)
elif "float" in dtype:
to_ret = float(x).hex()
if "inf" in to_ret:
return ("-" if x < 0 else "") + "INFINITY"
elif "nan" in to_ret:
return "NAN"
before, after = to_ret.split("p")
return f'{before.rstrip("0")}p{after}'
else:
assert False, f"don't know dtype {dtype}"
HEX_NUM_RE = re.compile(r"[+\-]?(?:(?:0x[0-9A-Fa-f.p+-]+)|(?:INFINITY)|(?:NAN))")
def test_c_link_params():
temp_dir = utils.tempdir()
for dtype in LINKABLE_DTYPES:
mod, param_init = _make_mod_and_params(dtype)
rand_input = _make_random_tensor(dtype, INPUT_SHAPE)
main_func = mod["main"]
target = "c --link-params"
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
lib = tvm.relay.build(mod, target, params=param_init)
assert set(lib.params.keys()) == {"p0", "p1"} # NOTE: op folded
src = lib.lib.get_source()
lib.lib.save("test.c", "cc")
c_dtype = _get_c_datatype(dtype)
src_lines = src.split("\n")
param = lib.params["p0"].asnumpy().reshape(np.prod(KERNEL_SHAPE))
param_def = f"static const {c_dtype} __tvm_param__p0[{np.prod(param.shape)}] = {{"
for i, line in enumerate(src_lines):
if line == param_def:
i += 1
break
else:
assert False, f'did not find parameter definition "{param_def}":\n{src}'
cursor = 0
width = dtype_info(dtype).bits // 4 + 2
if dtype.startswith("int"):
width += 1 # Account for sign
while "};" not in src_lines[i]:
for match in HEX_NUM_RE.finditer(src_lines[i]):
assert match.group() == _format_c_value(dtype, width, param[cursor]), (
f'p0 byte {cursor}: want "{_format_c_value(dtype, width, param[cursor])}" got '
f'"{match.group(0)}"; full p0 follows:\n{src}'
)
cursor += 1
i += 1
assert cursor == np.prod(param.shape)
temp = utils.tempdir()
# Need a unique name per library to avoid dlopen caching the lib load.
lib_path = temp_dir.relpath(f"test-{dtype}-linked.so")
lib["remove_params"]().export_library(lib_path)
lib_mod = tvm.runtime.load_module(lib_path)
# lib_mod = lib_factory['default']()
graph = json.loads(lib.graph_json)
for p in lib.params:
_verify_linked_param(dtype, lib, lib_mod, graph, p)
# Wrap in function to explicitly deallocate the runtime.
def _run_linked(lib_mod):
graph_rt = tvm.contrib.graph_runtime.GraphModule(lib_mod["default"](tvm.cpu(0)))
graph_rt.set_input("rand_input", rand_input) # NOTE: params not required.
graph_rt.run()
return graph_rt.get_output(0)
linked_output = _run_linked(lib_mod)
linked_params = lib.params
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
lib = tvm.relay.build(mod, "c", params=param_init)
_, _, params = lib
# Need a unique name per library to avoid dlopen caching the lib load.
lib_path = temp_dir.relpath(f"test-{dtype}-unlinked.so")
lib.export_library(lib_path)
lib_mod = tvm.runtime.load_module(lib_path)
def _run_unlinked(lib_mod):
graph_rt = tvm.contrib.graph_runtime.GraphModule(lib_mod["default"](tvm.cpu(0)))
graph_rt.set_input("rand_input", rand_input, **params)
graph_rt.run()
return graph_rt.get_output(0)
unlinked_output = _run_unlinked(lib_mod)
if "int" in dtype:
np.testing.assert_equal(unlinked_output.asnumpy(), linked_output.asnumpy())
else:
np.testing.assert_allclose(unlinked_output.asnumpy(), linked_output.asnumpy())
@tvm.testing.requires_micro
def test_crt_link_params():
import tvm.micro
for dtype in LINKABLE_DTYPES:
mod, param_init = _make_mod_and_params(dtype)
rand_input = _make_random_tensor(dtype, INPUT_SHAPE)
main_func = mod["main"]
target = "c -mcpu=native --system-lib --runtime=c --link-params"
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
graph_json, lib, params = tvm.relay.build(mod, target, params=param_init)
assert set(params.keys()) == {"p0", "p1"} # NOTE: op folded
workspace = tvm.micro.Workspace()
compiler = tvm.micro.DefaultCompiler(target=target)
opts = tvm.micro.default_options(os.path.join(tvm.micro.CRT_ROOT_DIR, "host"))
opts["bin_opts"]["ldflags"].append("-DTVM_HOST_USE_GRAPH_RUNTIME_MODULE")
micro_binary = tvm.micro.build_static_runtime(
# the x86 compiler *expects* you to give the exact same dictionary for both
# lib_opts and bin_opts. so the library compiler is mutating lib_opts and
# the binary compiler is expecting those mutations to be in bin_opts.
# TODO(weberlo) fix this very bizarre behavior
workspace,
compiler,
lib,
lib_opts=opts["bin_opts"],
bin_opts=opts["bin_opts"],
extra_libs=[
os.path.join(tvm.micro.CRT_ROOT_DIR, m)
for m in ("memory", "graph_runtime_module", "graph_runtime")
],
)
flasher_kw = {
"debug": False,
}
flasher = compiler.flasher(**flasher_kw)
with tvm.micro.Session(binary=micro_binary, flasher=flasher) as sess:
graph_rt = tvm.micro.session.create_local_graph_runtime(
graph_json, sess.get_system_lib(), sess.context
)
# NOTE: not setting params here.
graph_rt.set_input("rand_input", rand_input)
graph_rt.run()
linked_output = graph_rt.get_output(0).asnumpy()
with tvm.transform.PassContext(opt_level=3):
lib = tvm.relay.build(mod, "llvm --system-lib", params=param_init)
def _run_unlinked(lib):
graph_json, mod, lowered_params = lib
graph_rt = tvm.contrib.graph_runtime.create(graph_json, mod, tvm.cpu(0))
graph_rt.set_input("rand_input", rand_input, **lowered_params)
graph_rt.run()
return graph_rt.get_output(0).asnumpy()
unlinked_output = _run_unlinked(lib)
if "int" in dtype:
np.testing.assert_equal(unlinked_output, linked_output)
else:
np.testing.assert_allclose(unlinked_output, linked_output)
if __name__ == "__main__":
sys.exit(pytest.main([__file__] + sys.argv[1:]))
|
from setuptools import setup
setup(
name='cookiecutter-serverless-aws-lambda',
version='0.0.1',
url='https://github.com/ninjabit/cookiecutter-serverless-aws-lambda',
packages=[],
setup_requires=['pytest-runner'],
tests_require=['pytest']
)
|
from metac.metac import Instance, load
from metac import stream
vm = load('metac/vm.capnp')
instance = Instance('10.234.0.1')
serv = instance.node_admin.getServiceAdmin('vm').service.cast_as(vm.VMServiceAdmin)
launcher = serv.getLauncher().launcher
config = vm.LaunchConfiguration.new_message(
memory=512,
vcpu=1,
machineInfo=vm.MachineInfo.new_message(type=vm.MachineInfo.Type.host),
serialPorts=[
vm.SerialPort.new_message(
driver=vm.SerialPort.Driver.virtio,
stream=stream.debug_print_stream(instance, '[serial] ')
)
]
)
config.boot.disk = 0
vm = launcher.launch(config).wait()
|
from typing import Dict, Any
import json
from pytest import raises
from e2e.Libs.Ristretto.Ristretto import BASEPOINT, RistrettoScalar, RistrettoPoint, hashToCurve
from e2e.Tests.Errors import TestError
def RistrettoTest() -> None:
vectors: Dict[str, Any]
with open("e2e/Vectors/Libs/Ristretto.json", "r") as file:
vectors = json.loads(file.read())
basepoint: RistrettoPoint = RistrettoPoint(BASEPOINT)
for b in range(len(vectors["multiples"])):
#Encoding.
if vectors["multiples"][b] != (basepoint * RistrettoScalar(b)).serialize().hex():
raise TestError("Basepoint multiple was incorrect.")
#Decoding.
if vectors["multiples"][b] != RistrettoPoint(bytes.fromhex(vectors["multiples"][b])).serialize().hex():
raise TestError("Couldn't encode and decode.")
#Test the equality operator.
if RistrettoPoint(bytes.fromhex(vectors["multiples"][0])) != RistrettoPoint(bytes.fromhex(vectors["multiples"][0])):
    raise Exception("Equal points were considered unequal.")
if RistrettoPoint(bytes.fromhex(vectors["multiples"][0])) == RistrettoPoint(bytes.fromhex(vectors["multiples"][1])):
    raise Exception("Unequal points were considered equal.")
#Test decoding invalid points.
for point in vectors["badEncodings"]:
with raises(Exception):
RistrettoPoint(bytes.fromhex(point))
#Test hash to curve. It's not used anywhere in Meros, yet it ensures accuracy of our Ristretto implementation.
for hTP in vectors["hashToPoints"]:
if hTP[1] != hashToCurve(hTP[0].encode("utf-8")).serialize().hex():
raise TestError("Hash to point was incorrect.")
|
from numpy.linalg import norm as l21_norm
def U_converged(old, new, tol=5e-1):
    """Return (delta, converged): the Frobenius norm of (old - new) and whether it is below tol."""
    delta = l21_norm(old - new)
    return delta, delta < tol
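
# Minimal usage sketch (hedged): compare two iterates of a matrix-valued variable.
if __name__ == '__main__':
    import numpy as np
    old, new = np.zeros((3, 3)), np.full((3, 3), 0.1)
    delta, converged = U_converged(old, new)
    print(delta, converged)  # Frobenius norm 0.3, below the 0.5 tolerance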
|
#!/usr/bin/env python
from unittest import TestCase
from fundamentals.lists.linked_list import LL
class TestLL(TestCase):
def test_rev_ll(self):
ll = LL()
ll.add(3).add(4).add(5).add(6)
self.assertEquals("3, 4, 5, 6", ll.head.pretty_walk())
self.assertEquals("6, 5, 4, 3", ll.recursive_reverse_ll().pretty_walk())
self.assertEquals("6, 5, 4, 3", ll.iterative_reverse_ll().pretty_walk())
def test_del(self):
ll = LL()
ll.add(3).add(4).add(5).add(6)
self.assertEquals("3, 4, 5, 6", ll.head.pretty_walk())
ll.delkey(3)
self.assertEquals("4, 5, 6", ll.head.pretty_walk())
ll.delkey(10)
self.assertEquals("4, 5, 6", ll.head.pretty_walk())
ll.delkey(5)
self.assertEquals("4, 6", ll.head.pretty_walk())
ll.delkey(6)
self.assertEquals("4", ll.head.pretty_walk())
|
import pkg_resources
import yaml
def print_hello():
print("hello")
pass
def get_versions():
from ._version import get_versions as _get_versions
return _get_versions()
def get_data():
file_path=pkg_resources.resource_filename('project_name','data/data.yml')
with open(file_path) as file:
data_tests = yaml.load(file, Loader=yaml.FullLoader)
return data_tests
def list_files_in_data():
return pkg_resources.resource_listdir('project_name', 'data')
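
# Minimal usage sketch (hedged; assumes this module is installed as the package
# 'project_name' with a bundled data/data.yml resource):
#
#     print(list_files_in_data())
#     print(get_data())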
|
# xmlimportfile
#########################################################################################################
# Imports
import xml.etree.ElementTree as __xml_etree
from ..error import SfcparseError
# Exception for Module
class XmlImportFile(SfcparseError): __module__ = SfcparseError.set_module_name()
#########################################################################################################
# Import xml file
def xmlimportfile(filename: str) -> __xml_etree.Element:
    """
    Imports XML data from a file.
    Returns an xml.etree Element object for the document root; assign the output to a variable.
    Pass the XML file location as a str.
    [Example Use]
    xmlimportfile('path/to/filename.xml')
    This uses the native xml.etree parser shipped with the Python standard library.
    For more information on the xml.etree api, visit: https://docs.python.org/3/library/xml.etree.elementtree.html#module-xml.etree.ElementTree
    """
__err_msg_str = f"Only str is allowed for filename"
if not isinstance(filename, str): raise XmlImportFile(__err_msg_str, f'\nFILE: "{filename}"')
try: return __xml_etree.parse(filename).getroot()
except FileNotFoundError as __err_msg: raise XmlImportFile(__err_msg, f'\nFILE: "{filename}"')
except __xml_etree.ParseError as __err_msg: raise XmlImportFile(__err_msg, f'\nFILE: "{filename}"')
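
# Minimal usage sketch (hedged; 'example.xml' is a placeholder path):
#
#     root = xmlimportfile('example.xml')
#     print(root.tag, [child.tag for child in root])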
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Arista Networks, Inc. All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.
import sys
__version_info__ = (2, 2, 22)
__version__ = '2.2.22'
if sys.version_info < (2, 7):
raise RuntimeError('You need Python 2.7+ for arcomm.')
__title__ = 'arcomm'
__description__ = 'Library for connecting to Arista switches'
__author__ = 'Jesse R. Mather'
__email__ = 'jmather@arista.com'
__uri__ = 'https://github.com/aristanetworks/arcomm'
__license__ = 'MIT License'
__copyright__ = '2016 Arista Networks, Inc.'
from arcomm import util
from arcomm.api import (background, batch, configure, connect, creds, execute,
tap)
#
# old v1 funcs
#
from arcomm.api import (authorize, authorized, clone, create_pool, execute_once,
execute_pool, execute_bg, execute_until, close,
get_credentials)
from arcomm.async import Pool
from arcomm.command import Command, commands_from_list, command_from_dict, mkcmd
from arcomm.credentials import Creds, BasicCreds
from arcomm.session import session, Session
from arcomm.protocols import BaseProtocol
from arcomm.response import (ResponseStore, Response, get_subscribers,
subscribe, unsubscribe)
from arcomm.exceptions import (ConnectFailed, AuthenticationFailed,
AuthorizationFailed, ExecuteFailed)
|
from distutils.core import setup
from Cython.Build import cythonize
setup(
ext_modules = cythonize("kepo.py")
)
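
# Minimal usage sketch (hedged): build the extension in place with
#
#     python setup.py build_ext --inplace
#
# which compiles kepo.py into an importable C extension module.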
|
import hashlib

# Read from the book text file
with open('being_a_boy.txt', 'r') as book:
    reader = book.read()
lst = reader.replace("\n", " ").split(" ")
lst[:] = (value for value in lst if value != '')
lst[:] = (x.lower() for x in lst)  # Normalise every word to lowercase
s = 'b754015b1f05de447a0915eea5238c1f3310a4826b3190d5b7fe4739bc3bc992'
for i in lst:
    word = "zenseCTF{" + i + "}"  # Wrap each word in the flag format
    hashval = hashlib.sha256(word.encode()).hexdigest()
    if hashval == s:  # Compare against the target hash
        print(word)
|
"""
Tests for django.utils.
"""
from __future__ import absolute_import
from .archive import TestBzip2Tar, TestGzipTar, TestTar, TestZip
from .baseconv import TestBaseConv
from .checksums import TestUtilsChecksums
from .crypto import TestUtilsCryptoMisc, TestUtilsCryptoPBKDF2
from .datastructures import (DictWrapperTests, ImmutableListTests,
MergeDictTests, MultiValueDictTests, SortedDictTests)
from .dateformat import DateFormatTests
from .dateparse import DateParseTests
from .datetime_safe import DatetimeTests
from .decorators import DecoratorFromMiddlewareTests
from .encoding import TestEncodingUtils
from .feedgenerator import FeedgeneratorTest
from .functional import FunctionalTestCase
from .html import TestUtilsHtml
from .http import TestUtilsHttp, ETagProcessingTests, HttpDateProcessingTests
from .ipv6 import TestUtilsIPv6
from .jslex import JsToCForGettextTest, JsTokensTest
from .module_loading import (CustomLoader, DefaultLoader, EggLoader,
ModuleImportTestCase)
from .numberformat import TestNumberFormat
from .os_utils import SafeJoinTests
from .regex_helper import NormalizeTests
from .simplelazyobject import TestUtilsSimpleLazyObject
from .termcolors import TermColorTests
from .text import TestUtilsText
from .timesince import TimesinceTests
from .timezone import TimezoneTests
from .tzinfo import TzinfoTests
|
params = raw_input()
m,n,k = map(int, params.split(' '))
result = []
for j in range(k):
l = raw_input()
l = map(int, l.split(' '))
    cur_max = 0  # running maximum (renamed from "max" to avoid shadowing the builtin)
sub = []
ok = True
for index, i in enumerate(l):
if i - index > m:
ok = False
break
        if i > cur_max:
            cur_max = i
if sub:
if sorted(sub, reverse=True) != sub:
ok = False
break
sub = []
continue
else:
sub.append(i)
else:
if sub:
if sorted(sub, reverse=True) != sub:
ok = False
if ok:
result.append("YES")
else:
result.append("NO")
for res in result:
print(res)
|
# -*- python -*-
from zope.interface import implementer
from nevow import loaders
from nevow import rend
from nevow import tags
from nevow import inevow
from nevow import url
from formless import annotate
from formless import webform
from twisted.internet import defer
#oldChoicesWay = annotate.Choice(choicesAttribute='theChoices') # Doing this gives you a DeprecationWarning now
# If you still want to use an attribute or method of some other object, you should use a function as shown below,
# but look up IResource(ctx) or IConfigurable(ctx), whichever is more appropriate.
newChoicesWay = annotate.Choice(lambda c, d: list(range(30)))
deferChoicesWay = annotate.Choice(lambda c, d: defer.succeed(['abcd', 'efgh', 'ijkl']))
radioChoices = annotate.Radio(["Old", "Tyme", "Radio"])
## An example of using custom valueToKey and keyToValue functions to serialize/deserialize items
values = {0: dict(name="Zero", stuff=1234), 1: dict(name="One", stuff=1234), 2: dict(name="Two", stuff=2345435)}
customValueToKey = annotate.Choice(
[0, 1, 2], # Perhaps these are primary keys in a database
stringify=lambda x: values[x]['name'], # Do a database lookup to render a nice label in the ui
valueToKey=str, # Convert the primary key to a value suitable for sending across the web
keyToValue=lambda x: values[int(x)]) # Do a database lookup to get the actual value to pass to the binding
class IMyForm(annotate.TypedInterface):
foo = annotate.Integer()
def bar(baz=annotate.Integer(),
slam=newChoicesWay, ham=deferChoicesWay, radio=radioChoices, custom=customValueToKey):
pass
bar = annotate.autocallable(bar)
@implementer(IMyForm)
class Implementation(object):
foo = 5
def bar(self, baz, slam, ham, radio, custom):
return "You called bar! %s %s %s %s %r" % (baz, slam, ham, radio, custom)
theChoices = [1, 2, 3]
class FormPage(rend.Page):
addSlash = True
child_webform_css = webform.defaultCSS
def render_hand(self, ctx, data):
hand = inevow.IHand(ctx, None)
if hand is not None:
return ctx.tag[hand]
return ''
docFactory = loaders.stan(
tags.html[
tags.head[
tags.link(rel='stylesheet', type='text/css', href=url.here.child('webform_css')),
],
tags.body[
tags.h3(render=render_hand, style="color: red; font-size: xx-large"),
"Hello! Here is a form:",
# We want to render forms defined by the Implementation instance.
# When we pass the Implementation instance to FormPage below,
# rend.Page sets it as the .original attribute. To tell webform to render
# forms described by this object, we use the configurable name "original".
webform.renderForms('original'),
],
],
)
|
import sys
import traceback
import json
import logging
import boto3
import argparse
import aws
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def logger_(exception_type, exception_value, exception_traceback, excp): #pylint: disable=unused-argument
"""Simple logger"""
traceback_string = traceback.format_exception(
exception_type,
exception_value,
exception_traceback
)
err_msg = json.dumps({
"errorType": exception_type.__name__,
"errorMessage": str(exception_value),
"stackTrace": traceback_string
})
logger.error(err_msg)
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(description='List cloud resources')
parser.add_argument('--aws-profile', type=str, dest='aws_profile', help='Name of an AWS profile to use')
args = parser.parse_args()
session = aws.get_aws_session(profile_name=args.aws_profile)
regions = aws.get_regions(session)
instances = aws.get_ec2_instances(session)
try:
print('Program starting')
print(regions)
print(instances)
except Exception as excp: # pylint: disable=broad-except
exception_type, exception_value, exception_traceback = sys.exc_info()
logger_(exception_type, exception_value, exception_traceback, excp)
return
if __name__ == "__main__":
main()
|
import math
import random
import numpy
class MinHash():
def __init__(self):
# choose four random 8 bit tables
self.t1 = [random.randint(0, 2**32 - 1) for _ in range(2**8)]
self.t2 = [random.randint(0, 2**32 - 1) for _ in range(2**8)]
self.t3 = [random.randint(0, 2**32 - 1) for _ in range(2**8)]
self.t4 = [random.randint(0, 2**32 - 1) for _ in range(2**8)]
def intern_hash(self, x):
return self.t1[(x >> 24) & 0xff] ^ self.t2[(x >> 16) & 0xff] ^\
self.t3[(x >> 8) & 0xff] ^ self.t4[x & 0xff]
def hash(self, L):
return min([self.intern_hash(x) for x in L])
def get_element(self, L):
h = self.hash(L)
for x in L:
if self.intern_hash(x) == h:
return x
class OneBitMinHash():
def __init__(self):
self.h = MinHash()
def hash(self, L):
return self.h.hash(L) % 2
class BucketHashing():
def __init__(self, universe_size, buckets, eps):
self.universe_size = universe_size
self.buckets = buckets
self.eps = eps
self.h = MinHash()
self.bucket_id = {}
self.threshold = (self.buckets - 1) / \
(math.exp(self.eps) + self.buckets - 1)
for x in range(universe_size):
self.bucket_id[x] = random.choice(range(self.buckets))
def hash(self, L):
x = self.h.get_element(L)
bucket = self.bucket_id[x]
if random.random() <= self.threshold:
return random.choice([i for i in range(self.buckets) if i != bucket])
return bucket
class NoisyMinHash():
def __init__(self, scale):
self.h = OneBitMinHash()
self.scale = scale
def hash(self, L):
return self.h.hash(L) + numpy.random.laplace(scale=self.scale)
class NoisyMinHashBuckets():
def __init__(self, universe_size, buckets, scale):
self.universe_size = universe_size
self.buckets = buckets
self.scale = scale
self.h = MinHash()
self.bucket_id = {}
for x in range(universe_size):
self.bucket_id[x] = random.choice(range(self.buckets))
def hash(self, L):
x = self.h.get_element(L)
bucket = self.bucket_id[x]
return bucket + numpy.random.laplace(scale=self.scale)
class RROneBitMinHash():
def __init__(self, eps):
self.h = OneBitMinHash()
self.eps = eps
self.threshold = 1 / (math.exp(eps) + 1)
def hash(self, L):
hv = self.h.hash(L)
if random.random() <= self.threshold:
return 1 - hv
else:
return hv
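
# Minimal usage sketch (hedged example, not part of the original module): the probability
# that two sets share a MinHash value equals their Jaccard similarity, so averaging the
# agreement of many independent MinHash instances estimates it.
if __name__ == '__main__':
    A = list(range(0, 60))
    B = list(range(30, 90))  # true Jaccard similarity = 30 / 90 = 1/3
    hashes = [MinHash() for _ in range(200)]
    agree = sum(h.hash(A) == h.hash(B) for h in hashes)
    print("estimated Jaccard similarity:", agree / len(hashes))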
|
# -*- coding: utf-8 -*-
"""Scripts for aggregating recently added logs by action type; pushes results
to the specified project.
"""
import bson
import datetime
from cStringIO import StringIO
import pymongo
from dateutil.relativedelta import relativedelta
from framework.mongo import database
from website import models
from website.app import app, init_app
from scripts.analytics import utils
RESULTS_COLLECTION = 'logmetrics'
TIME_OFFSET = relativedelta(days=1)
NUM_ROWS = 20
FILE_NAME = 'log-counts.csv'
CONTENT_TYPE = 'text/csv'
NODE_ID = '95nv8'
USER_ID = 'icpnw'
mapper = bson.Code('''function() {
emit(this.action, 1);
}''')
reducer = bson.Code('''function(key, values) {
var count = 0;
for (var i=0; i<values.length; i++) {
count += values[i];
}
return count;
}''')
def run_map_reduce(**kwargs):
return database['nodelog'].map_reduce(
mapper,
reducer,
RESULTS_COLLECTION,
**kwargs
)
def main():
node = models.Node.load(NODE_ID)
user = models.User.load(USER_ID)
cutoff = datetime.datetime.utcnow() - TIME_OFFSET
result = run_map_reduce(query={'date': {'$gt': cutoff}})
sio = StringIO()
utils.make_csv(
sio,
(
(row['_id'], row['value'])
for row in result.find().sort([('value', pymongo.DESCENDING)])
),
['name', 'count'],
)
utils.send_file(app, FILE_NAME, CONTENT_TYPE, sio, node, user)
if __name__ == '__main__':
init_app()
main()
|
from django.db import models
# Create your models here.
class museos (models.Model):
museo = models.TextField()
id_entidad = models.TextField()
descripcion = models.TextField()
horario = models.TextField()
transporte = models.TextField()
accesibilidad = models.TextField()
content_url = models.TextField()
nombre_via = models.TextField()
clase_vial = models.TextField()
tipo_num = models.TextField()
num = models.TextField()
localidad = models.TextField()
provincia = models.TextField()
codigo_postal = models.TextField()
barrio = models.TextField()
distrito = models.TextField()
coordenada_x = models.TextField()
coordenada_y = models.TextField()
latitud = models.TextField()
longitud = models.TextField()
telefono = models.TextField()
fax = models.TextField()
email = models.TextField()
descripcion_entidad = models.TextField()
    def __str__(self):
        return self.museo
class comentario (models.Model):
museo = models.ForeignKey(museos)
usuario = models.TextField()
fecha = models.TextField()
texto = models.TextField()
|
from time import sleep
from random import randint
print("\033[33m-=-" * 54)
jo = ("\033[34m Jogo de batalha")
print(jo.center(170))
print("\033[33m-=-" * 54)
#----------------------------------------------------------------------------------------------------------------------
inicio = input("Pressione enter para começar...")
print("Iniciando...")
sleep(1)
#---------------------------------------------------------------------Introduction-------------------------------------------------------------------------------
print("\033[36m\nBem vindo ao sistema de batalha em Pycharm, o jogo a seguir consiste em batalha entre personagens, onda de inimigos, armas para o personagem e Loja de armas.")
print("OBS: Jogo feito em Python por programador iniciante!")
inicio = input("\nPressione enter para começar...")
#--------------------------------------------------------------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------Character Creation-------------------------------------------------------------------
print("\n\033[32m[Escolha um nome para seu personagem]\033[m")
NomeDePersonagem = str(input("\n\033[36mDigite o nome do personagem:\033[m "))
while NomeDePersonagem == "":
print("\n\033[31m[Nome invalido, tente novamente]\033[m")
NomeDePersonagem = str(input("\n\033[36mDigite o nome do personagem: "))
EscolhaDeArma = int(input(f"\n\033[36mOlá {NomeDePersonagem},escolha uma arma para começar:\n[1]Arco - ATA = (Min 3 a Max 5)\n[2]Adaga - ATA = (Min 2 a Max 4)\n[3]Espada - ATA = (Min 1 a Max 7)\nEscolha = "))
inicio = input("\nPressione enter para começar...")
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------Weapons-----------------------------------------------------------------------------------
# STARTING WEAPONS
Arco = randint(3, 5)
Adaga = randint(2, 4)
Espada = randint(1, 7)
# WEAPON SHOP
Katana_Simples = randint(2, 9)
Katana_SimplesPreço = 100
Katana_Afiada = randint(3,10)
Katana_AfiadaPreço = 170
Katana_Espiritual = randint(5,11)
Katana_EspiritualPreço = 300
Besta_Simples = randint(4,6)
Besta_SimplesPreço = 120
Besta_Imperial = randint(5,8)
Besta_ImperialPreço = 170
Besta_Selvagem = randint(6,10)
Besta_SelvagemPreço = 250
Sabre_de_Luz = randint(20,45)
Sabre_de_LuzPreço = 600
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------Enemies--------------------------------------------------------------------------------
Zumbi = 12
Nzumbi = 12
ResVidaIni = randint(1,5)
AtaqIni = randint(1,3)
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------Game variables-----------------------------------------------------------------------
Vida = 15
ResVida = randint(1,3)
Nvida = 15
Moeda = 0
MoedaMais = randint(5,20)
Nivel = 0
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------Bow case-------------------------------------------------------------------------------
while True:
if EscolhaDeArma == 1:
while Nivel < 10:
print("\033[32m-=-" * 54)
primeira = ("\033[34mBATALHA")
print(primeira.center(170),"\n")
DesqNivel = (f"\033[32:7mNIVEL = {Nivel}\033[m \033[33:7mMOEDAS = {Moeda}\033[m")
print(DesqNivel.center(187))
Loja = int(input("\033[33mPressione \n\033[33:7m[1]\033[m \033[33mpara acessar a loja\033[n\n\033[32:7m[2]\033[m \033[33mPara ir para o jogo\033[m\n\033[33mEscolha =\033[m "))
if Loja == 1:
print("\033[33:7m-=-\033[m" * 54)
print(f"\033[36mBem vindo a loja de armas, aqui encontrara armas para melhorar a batalha, quando quiser comprar uma arma veja o numero dela e digite para comprar\nIMPORTANTE: se voce comprar alguma, ela ira substituir a anterior e não podera ser capaz de usar a anterior novamente ao menos que a compre novamente\n\033[33:7mMOEDAS = {Moeda}\033[m")
DentroLoja = int(input("\n\033[36m[1] Katana Simples (Min 2 a Max 9) Preço = R$100\n[2] Katana Afiada (Min 3 a Max 10) Preço = R$170\n[3] Katana Espiritual (Min 5 a Max 11) Preço = R$300\n[4] Besta Simples (Min 4 a Max 6) Preço = R$120\n[5] Besta Imperial (Min 5 a Max 8) Preço = R$170\n[6] Besta Selvagem (Min 6 a Max 10) Preço = R$250\n[7] Sabre de Luz (Min 20 a 45) Preço = R$600\n[0]Para Sair da loja pressione 0(zero)\nEscolha = "))
print("\033[33:7m-=-\033[m" * 54)
if DentroLoja == 1:
if Moeda >= Katana_SimplesPreço:
Moeda -= Katana_SimplesPreço
Arco = Katana_Simples
print("\n\033[32:7mAgora sua arma é uma Katana Simples\033[m")
else:
print("Voce não tem dinheiro suficiente para comprar esta arma.")
elif DentroLoja == 2:
if Moeda >= Katana_AfiadaPreço:
Moeda -= Katana_AfiadaPreço
Arco = Katana_Afiada
print("\n\033[32:7mAgora sua arma é uma Katana Afiada\033[m")
else:
print("Voce não tem dinheiro suficiente para comprar esta arma.")
elif DentroLoja == 3:
if Moeda >= Katana_EspiritualPreço:
Moeda -= Katana_EspiritualPreço
Arco = Katana_Espiritual
print("\n\033[32:7mAgora sua arma é uma Katana Espiritual\033[m")
else:
print("Voce não tem dinheiro suficiente para comprar esta arma.")
elif DentroLoja == 4:
if Moeda >= Besta_SimplesPreço:
Moeda -= Besta_SimplesPreço
Arco = Besta_Simples
print("\n\033[32:7mAgora sua arma é uma Besta Simples\033[m")
else:
print("Voce não tem dinheiro suficiente para comprar esta arma.")
elif DentroLoja == 5:
if Moeda >= Besta_ImperialPreço:
Moeda -= Besta_ImperialPreço
Arco = Besta_Imperial
print("\n\033[32:7mAgora sua arma é uma Besta Imperial\033[m")
else:
print("Voce não tem dinheiro suficiente para comprar esta arma.")
elif DentroLoja == 6:
if Moeda >= Besta_SelvagemPreço:
Moeda -= Besta_SelvagemPreço
Arco = Besta_Selvagem
print("\n\033[32:7mAgora sua arma é uma Besta Selvagem\033[m")
else:
print("Voce não tem dinheiro suficiente para comprar esta arma.")
elif DentroLoja == 7:
if Moeda >= Sabre_de_LuzPreço:
Moeda -= Sabre_de_LuzPreço
Arco = Sabre_de_Luz
print("\n\033[37:7mAgora sua arma é um Sabre de luz! QUE A FORÇA ESTEJA COM VOCÊ!\033[m")
else:
print("Voce não tem dinheiro suficiente para comprar esta arma.")
elif DentroLoja == 0:
print("Saindo da loja...")
sleep(1)
inicio = input("\n\033[33mPressione enter para começar o jogo...\033[m")
elif Loja == 2:
print("Iniciando jogo...")
sleep(1)
print(f"\n\033[33mVamos começar a batalha! {NomeDePersonagem} a frente existe um inimigo com {Zumbi} de vida, role o dado para saber quem atacara primeiro...(1 Voce ataca primeiro | 2 O inimigo ataca primeiro)\033[m\n\033[34mTente chegar ao Nivel 100, quanto maior o nivel mais dificil é! A cada 10 niveis voce ganha mais vida e dano! e os inimigos tambem\033[m")
Vida = Nvida
Zumbi = Nzumbi
if Nivel >= 10 and Nivel <= 15:
Vida = Nvida + ResVida
Zumbi = Nzumbi + ResVidaIni
Arco += 1
print(f"\033[33mVida de inimigo =\033[m \033[31m{Zumbi}\033[m \033[33m|| || || Vida {NomeDePersonagem} =\033[m \033[31m{Vida}\033[m")
while True:
RolarDados = input("\n\033[33mPressione enter para rolar o dado...\033[m")
print("\033[31mRolando dados...\033[m")
sleep(1)
Dado = randint(1,2)
print(f"\033[33mO dado foi de =\033[m \033[32m{Dado}\033[m ")
sleep(1)
if Dado == 1:
print("\033[32mVoce ira atacar primeiro!\033[m")
PreAtaq = str(input("\033[33mEscreva Atacar para atacar:\033[m "))
if PreAtaq == "Atacar" or PreAtaq == "atacar":
AtaqPersonagem = Arco
Zumbi -= AtaqPersonagem
print(f"\033[33mInimigo =\033[m \033[31m{Zumbi}\033[m\n\033[33m{NomeDePersonagem}=\033[m \033[31m{Vida}\033[m")
if Zumbi == 0:
print(f"\033[33mParabens {NomeDePersonagem} voce derrotou o inimigo !\033[m")
Nivel += 1
Moeda += MoedaMais
print(f"\033[33mPersonagem subiu de nivel + 1 || Nivel =\033[m \033[32:7m{Nivel}\033[m")
break
if Dado == 2:
print("\033[31mO inimigo ira atacar primeiro!\033[m")
Vida -= AtaqIni
print(f"\033[33m{NomeDePersonagem} =\033[m \033[31m{Vida}\033[m\n\033[33mInimigo =\033[m \033[31m{Zumbi}\033[m")
if Vida <= 0:
print("\033[31mQue pena :( voce perdeu, mas não se preocupe tente novamente\033[m")
break
while Nivel >= 10:
print("\033[32m-=-" * 54)
primeira = ("\033[34mBATALHA")
print(primeira.center(170), "\n")
DesqNivel = (f"\033[32:7mNIVEL = {Nivel}\033[m \033[33:7mMOEDAS = {Moeda}\033[m")
print(DesqNivel.center(187))
print(
f"\033[33mVamos começar a batalha! {NomeDePersonagem} a frente existe um inimigo com {Zumbi} de vida, role o dado para saber quem atacara primeiro...(1 Voce ataca primeiro | 2 O inimigo ataca primeiro)\033[m\n\033[34mTente chegar ao Nivel 100, quanto maior o nivel mais dificil é! A cada 10 niveis voce ganha mais vida e dano! e os inimigos tambem\033[m")
Vida = Nvida
Zumbi = Nzumbi
if Nivel >= 10 and Nivel <= 15:
Vida = Nvida + ResVida
Zumbi = Nzumbi + ResVidaIni
Arco += 1
print(f"\033[33mVida de inimigo =\033[m \033[31m{Zumbi}\033[m \033[33m|| || || Vida {NomeDePersonagem} =\033[m \033[31m{Vida}\033[m")
while True:
RolarDados = input("\n\033[33mPressione enter para rolar o dado...\033[m")
                print("\033[31mRolando dados...\033[m")
sleep(1)
Dado = randint(1, 2)
print(f"\033[33mO dado foi de =\033[m \033[32m{Dado}\033[m ")
sleep(1)
if Dado == 1:
print("\033[32mVoce ira atacar primeiro!\033[m")
PreAtaq = str(input("\033[33mEscreva Atacar para atacar:\033[m "))
if PreAtaq == "Atacar" or PreAtaq == "atacar":
AtaqPersonagem = Arco
Zumbi -= AtaqPersonagem
print(
f"\033[33mInimigo =\033[m \033[31m{Zumbi}\033[m\n\033[33m{NomeDePersonagem}=\033[m \033[31m{Vida}\033[m")
if Zumbi <= 0:
print(f"\033[33mParabens {NomeDePersonagem} voce derrotou o primeiro inimigo !\033[m")
Nivel += 1
print(f"Personagem subiu de nivel + 1 || Nivel = {Nivel}")
break
if Dado == 2:
print("\033[31mO inimigo ira atacar primeiro!\033[m")
Vida -= AtaqIni
print(
f"\033[33m{NomeDePersonagem} =\033[m \033[31m{Vida}\033[m\n\033[33mInimigo =\033[m \033[31m{Zumbi}\033[m")
if Vida <= 0:
print("\033[31mQue pena :( voce perdeu, mas não se preocupe tente novamente\033[m")
RolarDados = input("\n\033[33mPressione enter para tentar novamente...\033[m")
break
# -------------------------------------------------------------------------------------------------------------------------------------------------------------
    # ---------------------------------------------------------------------Dagger case-------------------------------------------------------------------------------
elif EscolhaDeArma == 2:
print("\033[32m-=-" * 54)
primeira = ("\033[34m PRIMEIRA BATALHA")
print(primeira.center(170), "\n")
DesqNivel = (f"\033[32m NIVEL = {Nivel}\033[m")
print(DesqNivel.center(170))
print(f"\033[33mVamos começar a batalha! {NomeDePersonagem} a frente existe um inimigo com 100 de vida, role o dado para saber quem atacara primeiro...(1 Voce ataca primeiro | 2 O inimigo ataca primeiro)\033[m")
while True:
RolarDados = input("\n\033[33mPressione enter para rolar o dado...\033[m")
print("\033[31mRolando dados...\033[m")
sleep(1)
Dado = randint(1, 2)
print(f"\033[33mO dado foi de =\033[m \033[32m{Dado}\033[m ")
sleep(1)
if Dado == 1:
print("\033[32mVoce ira atacar primeiro!\033[m")
PreAtaq = str(input("\033[33mEscreva Atacar para atacar:\033[m "))
if PreAtaq == "Atacar" or PreAtaq == "atacar":
AtaqPersonagem = Adaga
Zumbi -= AtaqPersonagem
print(
f"\033[33mInimigo =\033[m \033[31m{Zumbi}\033[m\n\033[33m{NomeDePersonagem}=\033[m \033[31m{Vida}\033[m")
if Zumbi <= 0:
print(f"\033[33mParabens {NomeDePersonagem} voce derrotou o primeiro inimigo !\033[m")
Nivel += 1
print(f"Personagem subiu de nivel + 1 || Nivel = {Nivel}")
break
if Dado == 2:
print("\033[31mO inimigo ira atacar primeiro!\033[m")
Vida -= AtaqIni
print(
f"\033[33m{NomeDePersonagem} =\033[m \033[31m{Vida}\033[m\n\033[33mInimigo =\033[m \033[31m{Zumbi}\033[m")
if Vida <= 0:
print("\033[31mQue pena :( voce perdeu, mas não se preocupe tente novamente\033[m")
RolarDados = input("\n\033[33mPressione enter para tentar novamente...\033[m")
break
break
# -------------------------------------------------------------------------------------------------------------------------------------------------------------
    # ---------------------------------------------------------------------Sword case-------------------------------------------------------------------------------
elif EscolhaDeArma == 3:
print("\033[32m-=-" * 54)
primeira = ("\033[34m PRIMEIRA BATALHA")
print(primeira.center(170), "\n")
DesqNivel = (f"\033[32m NIVEL = {Nivel}\033[m")
print(DesqNivel.center(170))
print(f"\033[33mVamos começar a batalha! {NomeDePersonagem} a frente existe um inimigo com 100 de vida, role o dado para saber quem atacara primeiro...(1 Voce ataca primeiro | 2 O inimigo ataca primeiro)\033[m")
while True:
RolarDados = input("\n\033[33mPressione enter para rolar o dado...\033[m")
print("\033[31mRolando dados...\033[m")
sleep(1)
Dado = randint(1, 2)
print(f"\033[33mO dado foi de =\033[m \033[32m{Dado}\033[m ")
sleep(1)
if Dado == 1:
print("\033[32mVoce ira atacar primeiro!\033[m")
PreAtaq = str(input("\033[33mEscreva Atacar para atacar:\033[m "))
if PreAtaq == "Atacar" or PreAtaq == "atacar":
AtaqPersonagem = Espada
Zumbi -= AtaqPersonagem
print(
f"\033[33mInimigo =\033[m \033[31m{Zumbi}\033[m\n\033[33m{NomeDePersonagem}=\033[m \033[31m{Vida}\033[m")
if Zumbi <= 0:
print(f"\033[33mParabens {NomeDePersonagem} voce derrotou o primeiro inimigo !\033[m")
Nivel += 1
print(f"Personagem subiu de nivel + 1 || Nivel = {Nivel}")
break
if Dado == 2:
print("\033[31mO inimigo ira atacar primeiro!\033[m")
Vida -= AtaqIni
print(
f"\033[33m{NomeDePersonagem} =\033[m \033[31m{Vida}\033[m\n\033[33mInimigo =\033[m \033[31m{Zumbi}\033[m")
if Vida <= 0:
print("\033[31mQue pena :( voce perdeu, mas não se preocupe tente novamente\033[m")
RolarDados = input("\n\033[33mPressione enter para tentar novamente...\033[m")
break
break
# -------------------------------------------------------------------------------------------------------------------------------------------------------------
nova = input("VAMOS JOGAR ")
print("\033[32m-=-" * 54)
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
|
# -*- coding: utf-8 -*-
"""Top-level package for tf."""
__author__ = """tf"""
__email__ = 'miffyrcee@outlook.com'
__version__ = '0.1.0'
|
#!/usr/bin/python
###############################################################################
# Calculates the global mean using 'fldmean' operation in the CDO
# linux command (using annual mean data)
# The resulting netcdf file contains a single time series
# (but lon, lat coordinate dimensions will still exist in the output file)
###############################################################################
import os
#import sys
#sys.path.append("./modules")
from clens import *
def global_mean(scen,run,v):
"""Calculates the global mean (time series) using CDO.
Input variables:
scen,run,v: strings indicating the scenario,
ensemble member run, and the variable name.
These variables are used to form the netcdf file names
that are processed with cdo.
"""
app="fldmean" # app is used in the output file name
cesmscen=TRANSLATE[scen]['scen']
cesmtime=TRANSLATE[scen]['time']
infile=MODEL+"_"+cesmscen+"_"+v+"_"+cesmtime+"_"+run+"_ann_ano.nc"
# OUTPATH: Input path and output path are the same.
outfile=MODEL+"_"+cesmscen+"_"+v+"_"+cesmtime+"_"+run+\
"_ann_"+app+".nc"
cdo="cdo -v fldmean "+\
OUTPATH+infile+" "+OUTPATH+outfile
print(cdo)
os.system(cdo)
print ("Infile: "+infile)
print ("Outfile:"+outfile)
print ("Folder: "+OUTPATH)
return
# Loop over scenarios
iscen=0
for scen in SCENARIOLIST:
nmodel=0
for run in ENSEMBLELIST:
i=0
for v in VARLIST:
global_mean(scen,run,v)
i+=1
nmodel+=1
print ("----------------------------------------------------------")
print ("stats for simulations "+scen+" : variable "+v)
print ("models: "+str(nmodel)+" variables: "+str(i))
iscen+=1
|
import itertools
import copy
import numpy as np
import scipy
import scipy.linalg
from functools import reduce
__all__ = ['ImplicitSurface',
'ImplicitCollection',
'ImplicitPlane',
'ImplicitSphere',
'ImplicitXAlignedCylinder',
'ImplicitEllipse',
'ImplicitIntersection',
'ImplicitUnion',
'ImplicitDifference',
'ImplicitComplement',
'GridMapBase',
'GridMap',
'GridSlip'
]
class ImplicitSurface(object):
def __init__(self):
pass
def __call__(self):
raise NotImplementedError('Must be implemented by subclass.')
def interior(self, grid, asarray=False):
val = self.__call__(grid)
if asarray:
retval = np.zeros_like(val)
retval[np.where(val < 0.0)] = 1.0
return retval
else:
return np.where(val < 0.0)
def exterior(self, grid, asarray=False):
val = self.__call__(grid)
if asarray:
retval = np.zeros_like(val)
retval[np.where(val >= 0.0)] = 1.0
return retval
else:
return np.where(val >= 0.0)
class ImplicitCollection(ImplicitSurface):
def __init__(self, *items):
if np.iterable(items[0]):
self.items = list(items[0])
else:
self.items = list(items)
def __call__(self):
raise NotImplementedError('Must be implemented by subclass.')
class ImplicitPlane(ImplicitSurface):
def __init__(self, p, n):
self.p = np.array(p)
self.n = np.array(n)
self.n = self.n/np.linalg.norm(self.n)
self.d = -np.dot(self.p,self.n)
def __call__(self, grid):
return self.d + reduce(lambda x,y:x+y, list(map(lambda x,y:x*y,self.n,grid)))
class ImplicitSphere(ImplicitSurface):
def __init__(self, c=None, r=1.0):
if c is None:
self.c = None
else:
self.c = np.array(c)
self.r = r
def __call__(self, grid):
if self.c is None:
c = np.zeros_like(grid[0].shape)
else:
c = self.c
return reduce(lambda x,y:x+y, list(map(lambda x,y:(y-x)**2,c,grid))) - self.r**2
class ImplicitXAlignedCylinder(ImplicitSurface):
def __init__(self, c=None, length=1.0, r=1.0):
if c is None:
self.c = None
else:
self.c = np.array(c)
self.len = length
self.r = r
def __call__(self, grid):
if self.c is None:
c = np.zeros_like(grid[0].shape)
else:
c = self.c
g = grid[1:]
cc = c[1:]
# longways = (grid[1]-c[1])**2 - self.r**2
longways = reduce(lambda x,y:x+y, list(map(lambda x,y:(y-x)**2,cc,g))) - self.r**2
cutoff = np.abs(grid[0] - c[0]) - self.len/2
return np.maximum(longways, cutoff)
class ImplicitEllipse(ImplicitSurface):
def __init__(self, c=None, a=None, r=1.0):
if c is None:
self.c = None
else:
self.c = np.array(c)
if a is None:
self.a = None
else:
self.a = np.array(a)
self.r = r
def __call__(self, grid):
if self.c is None:
c = np.zeros(len(grid))
else:
c = self.c
if self.a is None:
a = np.ones(len(grid))
else:
a = self.a
return reduce(lambda x,y:x+y, list(map(lambda x,y,z:(((y-x)**2)/z), c,grid,a)) ) - self.r**2
class ImplicitIntersection(ImplicitCollection):
def __init__(self, *items):
ImplicitCollection.__init__(self, *items)
def __call__(self, grid):
return reduce(lambda x,y: np.maximum(x,y), [x(grid) for x in self.items])
class ImplicitUnion(ImplicitCollection):
def __init__(self, *items):
ImplicitCollection.__init__(self, *items)
def __call__(self, grid):
return reduce(lambda x,y: np.minimum(x,y), [x(grid) for x in self.items])
class ImplicitDifference(ImplicitCollection):
    # Maybe someday this should just take *items and pop the first one as the
    # base; that way a single list could be passed. For now, keep the explicit
    # base argument.
def __init__(self, base, *items):
ImplicitCollection.__init__(self, *items)
self.base = base
def __call__(self, grid):
items = [self.base] + self.items
return reduce(lambda x,y: np.maximum(x,-y), [x(grid) for x in items])
class ImplicitComplement(ImplicitSurface):
def __init__(self, base):
self.base = base
def __call__(self, grid):
return -1.0*self.base(grid)
# These must be defined so that the equality is switched if the complement
# is the calling surface
def interior(self, grid, asarray=False):
val = self.__call__(grid)
if asarray:
retval = np.zeros_like(val)
retval[np.where(val <= 0.0)] = 1.0
return retval
else:
return np.where(val <= 0.0)
def exterior(self, grid, asarray=False):
val = self.__call__(grid)
if asarray:
retval = np.zeros_like(val)
retval[np.where(val > 0.0)] = 1.0
return retval
else:
return np.where(val > 0.0)
class GridMapBase(object):
def __init__(self):
pass
def __call__(self):
raise NotImplementedError('Must be implemented by subclass.')
class GridMap(GridMapBase):
def __init__(self, funcs):
self.funcs = funcs
def __call__(self, grid):
new_grid = []
for f,g in zip(self.funcs,grid):
if f is not None:
new_grid.append(f(g))
else:
new_grid.append(g)
return tuple(new_grid)
class GridSlip(GridMapBase):
    # Creates a slip or a fault along the specified plane
def __init__(self, p, n):
self.p = np.array(p)
self.n = np.array(n)
self.n = self.n/np.linalg.norm(self.n)
self.d = -np.dot(self.p,self.n)
self.basis = None
def __call__(self, grid, direction, amount):
# amount is a scalar
# direction is a vector that will be normalized
# the slip occurs along that vector's projection onto the plane, i.e., it is orthogonal to the normal
# the exterior of the slip plane (the part the normal points to) is the part that is modified.
d = np.array(direction)
d = d/np.linalg.norm(d)
dim = len(d)
if self.basis is None:
basis = [self.n]
            # Gram-Schmidt: complete an orthonormal basis starting from the plane normal
            while len(basis) != dim:
                c = np.random.rand(dim)  # random candidate vector in the grid's dimension
for b in basis:
c -= np.dot(b,c)*b
cn = np.linalg.norm(c)
if cn > 1e-4:
basis.append(c/cn)
self.basis = basis[1:]
# Project the slip direction onto the basis
proj=0
for b in self.basis:
proj += np.dot(d,b)*b
# Scale the projection
proj = amount*proj
# Evaluate the plane to figure out what components of the old grid are shifted.
val = self.d + reduce(lambda x,y:x+y, list(map(lambda x,y:x*y,self.n,grid)))
loc = np.where(val >= 0.0)
# Perform the shift.
new_grid = [copy.deepcopy(g) for g in grid]
for ng,p in zip(new_grid,proj):
            ng[loc] -= p
return tuple(new_grid)
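# A minimal usage sketch (the grid size, shape centres and slip direction below
# are illustrative assumptions, not part of the original module): carve a sphere
# out of a half-space, then shift the grid on one side of a slip plane.
if __name__ == "__main__":
    _x, _z = np.meshgrid(np.linspace(0.0, 20.0, 200),
                         np.linspace(0.0, 10.0, 100), indexing='ij')
    _grid = (_x, _z)
    _body = ImplicitDifference(ImplicitPlane((0.0, 5.0), (0.0, -1.0)),
                               ImplicitSphere((10.0, 5.0), 2.0))
    _mask = _body.interior(_grid, asarray=True)  # 1.0 inside, 0.0 outside
    _slip = GridSlip((10.0, 5.0), (1.0, 0.0))
    _shifted = _slip(_grid, direction=(0.0, 1.0), amount=1.0)
    print(_mask.sum(), _body.interior(_shifted, asarray=True).sum())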
class Weird(ImplicitSurface):
def __init__(self, p,n, c=None, r=1.0):
if c is None:
self.c = None
else:
self.c = np.array(c)
self.r = r
self.p = np.array(p)
self.n = np.array(n)
self.n = self.n/np.linalg.norm(self.n)
self.d = -np.dot(self.p,self.n)
def __call__(self, grid):
if self.c is None:
c = np.zeros_like(grid[0].shape)
else:
c = self.c
# Creates flat eye thingies
# val1 = reduce(lambda x,y:x+y, map(lambda x,y:(y-x)**2,c,grid)) - self.r**2
#
# val2 = self.d + reduce(lambda x,y:x+y, map(lambda x,y:x*y,self.n,grid))
#
# return 4*val1/np.max(val1)+val2
val1 = reduce(lambda x,y:x+y, list(map(lambda x,y:(y-x)**2,c,grid))) - self.r**2
val2 = self.d + reduce(lambda x,y:x+y, list(map(lambda x,y:x*y,self.n,grid)))
return 4*val1/np.max(val1)+val2
class Hyperbola(ImplicitSurface):
def __init__(self, c=None, r=1.0):
if c is None:
self.c = None
else:
self.c = np.array(c)
self.r = r
def __call__(self, grid):
if self.c is None:
c = np.zeros_like(grid[0].shape)
else:
c = self.c
return reduce(lambda x,y:-x+y, list(map(lambda x,y:(y-x)**2,c,grid))) - self.r**2
class Weird2(ImplicitSurface):
def __init__(self, c=None, r=1.0, s=None):
if c is None:
self.c = None
else:
self.c = np.array(c)
if s is None:
self.s = None
else:
self.s = np.array(s)
self.r = r
def __call__(self, grid):
c = np.zeros_like(grid[0].shape) if self.c is None else self.c
s = np.ones_like(grid[0].shape) if self.s is None else self.s
return ((grid[0]-c[0])**2)/s[0] + ((grid[1]-c[1])**1)/s[1] - self.r**2
#reduce(lambda x,y:-x+y, map(lambda x,y:(y-x)**2,c,grid)) - self.r**2
# FIXME
#if __name__ == "__main__":
#
# import numpy as np
# import matplotlib.pyplot as plt
#
# from pysit import *
#
# x_lbc = PML(0.0, 100.0)
# x_rbc = PML(0.0, 100.0)
# z_lbc = PML(0.0, 100.0)
# z_rbc = PML(0.0, 100.0)
#
# xmin, xmax = 0.0, 20
# zmin, zmax = 0.0, 10
# nx, nz = 500,250
#
# x_config = (xmin, xmax, nx, x_lbc, x_rbc)
# z_config = (zmin, zmax, nz, z_lbc, z_rbc)
#
# d = Domain((x_config, z_config))
#
# grid = d.generate_grid()
# grid = grid[0].reshape(d.shape).T, grid[1].reshape(d.shape).T
#
# p1 = ImplicitPlane((0.0,4.0),(0.0,-1.0))
# p2 = ImplicitPlane((0.0,6.0),(0.0,1.0))
#
# w1 = Weird((0.0,5.0),(0.0,-1.0), (10,5))
# w2 = Weird((0.0,6.0),(0.0,1.0), (10,5))
#
# w3 = Weird2((10,4),1.5,(-100.0,1.))
# w4 = Weird2((10,4),0.5,( 100.0,1.))
#
# band = ImplicitIntersection(w1,w2)
# band = ImplicitIntersection(p1,p2)
# band = ImplicitDifference(w3,w4)
#
# g = GridSlip((10.0,5.0), (1,0.5))
# d = (0.0,-1.0)
# a = 0.5
# new_grid = g(grid,d,a)
#
#
# plt.figure()
# plt.imshow(band(grid))
# plt.colorbar()
#
# plt.figure()
# plt.imshow(band(new_grid))
# plt.colorbar()
#
# plt.figure()
# plt.imshow(band.interior(grid))
# plt.colorbar()
#
# plt.figure()
# plt.imshow(band.interior(new_grid))
# plt.colorbar()
#
# plt.show()
|
import sys
from pol.server import Server
from settings import DATABASES, SNAPSHOT_DIR, DOWNLOADER_USER_AGENT, DEBUG
port = int(sys.argv[1]) if len(sys.argv) >= 2 else 1234  # coerce the CLI argument to an int, matching the default
Server(port, DATABASES['default'], SNAPSHOT_DIR, DOWNLOADER_USER_AGENT).run()
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['AppSecReputationProfileArgs', 'AppSecReputationProfile']
@pulumi.input_type
class AppSecReputationProfileArgs:
def __init__(__self__, *,
config_id: pulumi.Input[int],
reputation_profile: pulumi.Input[str]):
"""
The set of arguments for constructing a AppSecReputationProfile resource.
:param pulumi.Input[int] config_id: . Unique identifier of the security configuration associated with the reputation profile being modified.
:param pulumi.Input[str] reputation_profile: . Path to a JSON file containing a definition of the reputation profile. You can view a sample JSON file in the [Create a reputation profile](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postreputationprofiles) section of the Application Security API documentation.
"""
pulumi.set(__self__, "config_id", config_id)
pulumi.set(__self__, "reputation_profile", reputation_profile)
@property
@pulumi.getter(name="configId")
def config_id(self) -> pulumi.Input[int]:
"""
. Unique identifier of the security configuration associated with the reputation profile being modified.
"""
return pulumi.get(self, "config_id")
@config_id.setter
def config_id(self, value: pulumi.Input[int]):
pulumi.set(self, "config_id", value)
@property
@pulumi.getter(name="reputationProfile")
def reputation_profile(self) -> pulumi.Input[str]:
"""
. Path to a JSON file containing a definition of the reputation profile. You can view a sample JSON file in the [Create a reputation profile](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postreputationprofiles) section of the Application Security API documentation.
"""
return pulumi.get(self, "reputation_profile")
@reputation_profile.setter
def reputation_profile(self, value: pulumi.Input[str]):
pulumi.set(self, "reputation_profile", value)
@pulumi.input_type
class _AppSecReputationProfileState:
def __init__(__self__, *,
config_id: Optional[pulumi.Input[int]] = None,
reputation_profile: Optional[pulumi.Input[str]] = None,
reputation_profile_id: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering AppSecReputationProfile resources.
:param pulumi.Input[int] config_id: . Unique identifier of the security configuration associated with the reputation profile being modified.
:param pulumi.Input[str] reputation_profile: . Path to a JSON file containing a definition of the reputation profile. You can view a sample JSON file in the [Create a reputation profile](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postreputationprofiles) section of the Application Security API documentation.
"""
if config_id is not None:
pulumi.set(__self__, "config_id", config_id)
if reputation_profile is not None:
pulumi.set(__self__, "reputation_profile", reputation_profile)
if reputation_profile_id is not None:
pulumi.set(__self__, "reputation_profile_id", reputation_profile_id)
@property
@pulumi.getter(name="configId")
def config_id(self) -> Optional[pulumi.Input[int]]:
"""
. Unique identifier of the security configuration associated with the reputation profile being modified.
"""
return pulumi.get(self, "config_id")
@config_id.setter
def config_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "config_id", value)
@property
@pulumi.getter(name="reputationProfile")
def reputation_profile(self) -> Optional[pulumi.Input[str]]:
"""
. Path to a JSON file containing a definition of the reputation profile. You can view a sample JSON file in the [Create a reputation profile](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postreputationprofiles) section of the Application Security API documentation.
"""
return pulumi.get(self, "reputation_profile")
@reputation_profile.setter
def reputation_profile(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reputation_profile", value)
@property
@pulumi.getter(name="reputationProfileId")
def reputation_profile_id(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "reputation_profile_id")
@reputation_profile_id.setter
def reputation_profile_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "reputation_profile_id", value)
class AppSecReputationProfile(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config_id: Optional[pulumi.Input[int]] = None,
reputation_profile: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
**Scopes**: Security policy
Creates or modifies a reputation profile.
Reputation profiles grade the security risk of an IP address based on previous activities associated with that address.
Depending on the reputation score and how your configuration has been set up, requests from a specific IP address can trigger an alert or even be blocked.
**Related API Endpoint**: [/appsec/v1/configs/{configId}/versions/{versionNumber}/reputation-profiles](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postreputationprofiles)
## Example Usage
Basic usage:
```python
import pulumi
import pulumi_akamai as akamai
configuration = akamai.get_app_sec_configuration(name="Documentation")
reputation_profile = akamai.AppSecReputationProfile("reputationProfile",
config_id=configuration.config_id,
reputation_profile=(lambda path: open(path).read())(f"{path['module']}/reputation_profile.json"))
pulumi.export("reputationProfileId", reputation_profile.reputation_profile_id)
```
## Output Options
The following options can be used to determine the information returned, and how that returned information is formatted:
- `reputation_profile_id`. ID of the newly-created or newly-modified reputation profile.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] config_id: . Unique identifier of the security configuration associated with the reputation profile being modified.
:param pulumi.Input[str] reputation_profile: . Path to a JSON file containing a definition of the reputation profile. You can view a sample JSON file in the [Create a reputation profile](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postreputationprofiles) section of the Application Security API documentation.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AppSecReputationProfileArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
**Scopes**: Security policy
Creates or modifies a reputation profile.
Reputation profiles grade the security risk of an IP address based on previous activities associated with that address.
Depending on the reputation score and how your configuration has been set up, requests from a specific IP address can trigger an alert or even be blocked.
**Related API Endpoint**: [/appsec/v1/configs/{configId}/versions/{versionNumber}/reputation-profiles](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postreputationprofiles)
## Example Usage
Basic usage:
```python
import pulumi
import pulumi_akamai as akamai
configuration = akamai.get_app_sec_configuration(name="Documentation")
reputation_profile = akamai.AppSecReputationProfile("reputationProfile",
config_id=configuration.config_id,
reputation_profile=(lambda path: open(path).read())(f"{path['module']}/reputation_profile.json"))
pulumi.export("reputationProfileId", reputation_profile.reputation_profile_id)
```
## Output Options
The following options can be used to determine the information returned, and how that returned information is formatted:
- `reputation_profile_id`. ID of the newly-created or newly-modified reputation profile.
:param str resource_name: The name of the resource.
:param AppSecReputationProfileArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AppSecReputationProfileArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config_id: Optional[pulumi.Input[int]] = None,
reputation_profile: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AppSecReputationProfileArgs.__new__(AppSecReputationProfileArgs)
if config_id is None and not opts.urn:
raise TypeError("Missing required property 'config_id'")
__props__.__dict__["config_id"] = config_id
if reputation_profile is None and not opts.urn:
raise TypeError("Missing required property 'reputation_profile'")
__props__.__dict__["reputation_profile"] = reputation_profile
__props__.__dict__["reputation_profile_id"] = None
super(AppSecReputationProfile, __self__).__init__(
'akamai:index/appSecReputationProfile:AppSecReputationProfile',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
config_id: Optional[pulumi.Input[int]] = None,
reputation_profile: Optional[pulumi.Input[str]] = None,
reputation_profile_id: Optional[pulumi.Input[int]] = None) -> 'AppSecReputationProfile':
"""
Get an existing AppSecReputationProfile resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] config_id: . Unique identifier of the security configuration associated with the reputation profile being modified.
:param pulumi.Input[str] reputation_profile: . Path to a JSON file containing a definition of the reputation profile. You can view a sample JSON file in the [Create a reputation profile](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postreputationprofiles) section of the Application Security API documentation.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AppSecReputationProfileState.__new__(_AppSecReputationProfileState)
__props__.__dict__["config_id"] = config_id
__props__.__dict__["reputation_profile"] = reputation_profile
__props__.__dict__["reputation_profile_id"] = reputation_profile_id
return AppSecReputationProfile(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="configId")
def config_id(self) -> pulumi.Output[int]:
"""
. Unique identifier of the security configuration associated with the reputation profile being modified.
"""
return pulumi.get(self, "config_id")
@property
@pulumi.getter(name="reputationProfile")
def reputation_profile(self) -> pulumi.Output[str]:
"""
. Path to a JSON file containing a definition of the reputation profile. You can view a sample JSON file in the [Create a reputation profile](https://developer.akamai.com/api/cloud_security/application_security/v1.html#postreputationprofiles) section of the Application Security API documentation.
"""
return pulumi.get(self, "reputation_profile")
@property
@pulumi.getter(name="reputationProfileId")
def reputation_profile_id(self) -> pulumi.Output[int]:
return pulumi.get(self, "reputation_profile_id")
|
import random
def bubble(items):
    # In-place bubble sort, ascending.
for t in range(len(items)-1):
for i in range(len(items)-1):
if items[i] > items[i+1]:
items[i], items[i+1] = items[i+1], items[i]
def reverseBubble(items):
    # In-place bubble sort, descending.
for t in range(len(items)-1):
for i in range(len(items)-1):
if items[i] < items[i+1]:
items[i], items[i+1] = items[i+1], items[i]
def mergeSort(items):
    # In-place recursive merge sort.
if len(items) < 2:
return None
mid = int(len(items)/2)
left = []
right = []
for i in range(mid):
left.append(items[i])
for i in range(mid, len(items)):
right.append(items[i])
mergeSort(left)
mergeSort(right)
    # zipper merge: interleave the sorted halves back into items
L = 0
R = 0
M = 0
while L < len(left) and R < len(right):
if left[L] < right[R]:
items[M] = left[L]
L += 1
M += 1
else:
items[M] = right[R]
R += 1
M += 1
while L < len(left):
items[M] = left[L]
L += 1
M += 1
while R < len(right):
items[M] = right[R]
R += 1
M += 1
def cupidShuffle(items):
    # In-place Fisher-Yates shuffle.
for i in range(len(items)):
index = random.randint(i, len(items)-1)
items[i], items[index] = items[index], items[i]
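if __name__ == "__main__":
    # Quick sanity-check sketch; the sample list is arbitrary illustration data.
    data = [5, 3, 8, 1, 9, 2]
    cupidShuffle(data)
    mergeSort(data)
    print(data)  # expected: [1, 2, 3, 5, 8, 9]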
|
"""
Pytorch dataset class and collate function for building the dataloader
"""
import torch
import bcolz
import re, collections
import numpy as np
import utils
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pack_padded_sequence
# The two names below are referenced by sequence_collate; in the PyTorch code
# this collate function was adapted from, they come from
# torch.utils.data.dataloader internals. They are defined here with safe
# defaults so this module stands on its own.
_use_shared_memory = False
numpy_type_map = {
    'float64': torch.DoubleTensor,
    'float32': torch.FloatTensor,
    'int64': torch.LongTensor,
    'int32': torch.IntTensor,
    'uint8': torch.ByteTensor,
}
def pad_sequence(sequences, batch_first=False):
r"""
Note:
This function returns a Tensor of size ``T x B x *`` or ``B x T x *`` where `T` is the
length of longest sequence.
Function assumes trailing dimensions and type of all the Tensors
in sequences are same.
Arguments:
sequences (list[Tensor]): list of variable length sequences.
        batch_first (bool, optional): output will be in ``B x T x *`` if True, or in
            ``T x B x *`` otherwise. Padded positions are filled with zeros.
Returns:
Tensor of size ``T x B x *`` if batch_first is False
Tensor of size ``B x T x *`` otherwise
"""
# sort the sequences from largest to smallest length
lengths = np.array([sequence.size(0) for sequence in sequences])
order = np.argsort(lengths)[::-1]
# sorted_seqs = sorted(sequences, key=lambda x: len(x), reverse=True)
# use size of largest sequence
max_size = sequences[order[0]].size()
max_len, trailing_dims = max_size[0], max_size[1:]
if batch_first:
out_dims = (len(sequences), max_len) + trailing_dims
else:
out_dims = (max_len, len(sequences)) + trailing_dims
# create an empty tensor of the desired output size
    # padded positions are filled with zeros (an alternative fill value is left
    # commented out below)
# if trailing_dims:
out_tensor = torch.zeros(out_dims)
# else:
# out_tensor = 20*torch.ones(out_dims)
for i, tensor in enumerate(sequences):
length = tensor.size(0)
if batch_first:
out_tensor[i, :length, ...] = tensor
else:
out_tensor[:length, i, ...] = tensor
return out_tensor
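# A minimal usage sketch for pad_sequence; the three toy tensors below are
# illustrative assumptions, not data from this project.
if __name__ == "__main__":
    _demo = [torch.ones(5), torch.ones(3), torch.ones(2)]
    print(pad_sequence(_demo).shape)                    # torch.Size([5, 3])
    print(pad_sequence(_demo, batch_first=True).shape)  # torch.Size([3, 5])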
def sequence_collate(batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if isinstance(batch[0], torch.Tensor):
out = None
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 1, out=out).float()
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
sequences = [torch.from_numpy(b).float() for b in batch]
padded = pad_sequence(sequences)
return padded
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], str):
return batch
elif isinstance(batch[0], collections.Mapping):
return {key: sequence_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [sequence_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
class ProteinDataset(Dataset):
def __init__(self, bcolz_path, encoding, indices=None):
"""encoding has 3 states, None, onehot, and tokens"""
self.encoding = encoding
# TODO: in the future, when using multiple different protein chain lengths
# try using pytorch ConcatDataset class
if indices is None:
self.data_array = bcolz.carray(rootdir=bcolz_path)
else:
self.data_array = bcolz.carray(rootdir=bcolz_path)[indices]
def __len__(self):
return len(self.data_array)
def __getitem__(self, idx):
name, sequence, coords = self.data_array[idx]
length = len(sequence[0])
sequence_vec = sequence
if self.encoding == 'onehot':
sequence_vec = utils.encode_sequence(sequence, onehot=True)
elif self.encoding == 'tokens':
sequence_vec = utils.encode_sequence(sequence, onehot=False)
sample = {'name': name,
'sequence': sequence_vec,
'coords': coords,
'length': length}
return sample
class ProteinNet(Dataset):
def __init__(self, bcolz_path):
self.data_array = bcolz.carray(rootdir=bcolz_path)
def __len__(self):
return len(self.data_array)
def __getitem__(self, idx):
name, sequence, pssm, coords, mask = self.data_array[idx]
length = len(sequence)
sequence_vec = utils.encode_sequence(sequence, onehot=True)
seq_pssm = np.concatenate([sequence_vec, pssm], axis=1)
sample = {'name': name,
'sequence': seq_pssm,
'coords': coords,
'length': length,
'mask': mask
}
return sample
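# A minimal wiring sketch for the classes above; the bcolz path and batch size
# are hypothetical, not part of this module:
#   loader = DataLoader(ProteinDataset('/path/to/proteins.bcolz', encoding='onehot'),
#                       batch_size=32, collate_fn=sequence_collate)
#   for batch in loader:
#       sequences, coords = batch['sequence'], batch['coords']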
|
"""
Euler Problem #24
A permutation is an ordered arrangement of objects.
For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4.
If all of the permutations are listed numerically or alphabetically,
we call it lexicographic order. The lexicographic permutations of
0, 1 and 2 are:
012 021 102 120 201 210
What is the millionth lexicographic permutation of the digits
0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?
"""
from permutation import permute
for i, perm in enumerate(permute("0123456789"), 1):
if i == 1000000:
print(perm)
break
|
import sys
import json
import attr
import os
import logging
from galaxy_importer import collection
from galaxy_importer import config
def main():
filepath = sys.argv[1]
savepath = sys.argv[2]
logging.basicConfig(
stream=sys.stdout,
format='%(levelname)s: %(message)s',
level=logging.INFO)
config_data = config.ConfigFile.load()
cfg = config.Config(config_data=config_data)
    # Modified the importer to load a collection directory instead of a tarball
data = collection.CollectionLoader(
filepath, filepath, cfg=cfg, logger=logging).load()
json_data = json.dumps(attr.asdict(data), indent=2)
with open(os.path.join(savepath), 'w+') as output_file:
output_file.write(json_data)
if __name__ == '__main__':
    main()
|
import threading, socket, sys, logging
fmt = '%(asctime)-16s %(levelname)-8s %(name)-30s %(message)s'
dtfmt = '%Y-%m-%d %H:%M'
logging.basicConfig(format=fmt, datefmt=dtfmt)
def getcustomlogger(name):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
return logger
## required to allow the sockets to work on 2 and 3
_enc = lambda x: str(x)
_dec = lambda x: x
if sys.version_info[0] == 3:
_enc = lambda x: bytes(x, 'utf-8')
_dec = lambda x: str(x, 'utf-8')
## pasted, because it's easier than writing this and importing it.
## TODO: add async
def socket_listen(port, action, shutdown=None, consume_size=16, shutdown_command=None, run_async=False):
    ''' listen on a TCP port
    port: the numerical port to listen on
    action: a function with prototype `action(conn, data)`
    run_async: when True, run the listener on a daemon thread instead of blocking
               (renamed from `async`, which became a reserved word in Python 3.7)
    '''
    socketslogger = getcustomlogger("socket server handler")
if not shutdown_command:
shutdown_command = _enc("!X")
def start():
socketslogger.warn("starting server on port %s", port)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Fixes 'TIME_WAIT' issue
socketslogger.debug("socket acquired %s", sock)
sock.bind(('', int(port)))
socketslogger.debug("socket bound to port %s", port)
backlog = 0
sock.listen(backlog)
socketslogger.debug("socket listening, allowing %s backlogged connections", backlog)
while True:
conn, addr = sock.accept()
socketslogger.info("socket connection accepted %s", addr)
try:
while True:
data = conn.recv(consume_size).strip()
socketslogger.debug("socket is listening on port %s", port)
if data:
if (data == shutdown_command):
conn.send(_enc('shutting down\n'))
return
action(conn, _dec(data))
else:
break
conn.send(_enc('command success\n'))
except BaseException as x:
                    conn.send(_enc('command failure: %s\n' % type(x).__name__))
raise x
finally:
conn.close()
socketslogger.debug("closed connection %s", addr)
finally:
sock.close()
if shutdown: shutdown()
socketslogger.warn("shutting down server")
    if not run_async:
        socketslogger.warn("starting server in synchronous mode")
        start()
    else:
        socketslogger.warn("starting server in asynchronous mode")
        threading.Thread(target=start, args=(), name="server_thread", daemon=True).start()
def socket_send(port, content):
socketslogger = getcustomlogger("socket client handler")
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', port))
socketslogger.debug("Connected to port %s", port)
sock.sendall(content.encode())
socketslogger.debug("sent '%s'", content.encode())
sock.shutdown(socket.SHUT_WR)
while True:
data = sock.recv(4096)
if not data:
break
socketslogger.info("recieved: '%s'", _dec(data.strip()))
finally:
sock.close()
socketslogger.debug("connection closed")
|
#!/usr/bin/env python3
import sys
output_dict = {}
def handle_data(arg):
a = arg.split(':')
output_dict[a[0]] = a[1]
return output_dict
def print_data(key, value):
    print("ID:{} Name:{}".format(key, value))
if __name__=='__main__':
for arg in sys.argv[1:]:
handle_data(arg)
for key in output_dict:
print_data(key,output_dict[key])
|
from django.contrib import admin
from arescaler.models import *
class ItemAdmin(admin.ModelAdmin):
list_display = ["name", "size"]
admin.site.register(Item, ItemAdmin)
|
import setpath
import functions
import json
import ast
registered=True
class formattreetotableoutput(functions.vtable.vtbase.VT):
def VTiter(self, *parsedArgs,**envars):
largs, dictargs = self.full_parse(parsedArgs)
if 'query' not in dictargs:
raise functions.OperatorError(__name__.rsplit('.')[-1],"No query argument ")
query = dictargs['query']
cur = envars['db'].cursor()
c=cur.execute(query)
schema = cur.getdescriptionsafe()
resulttable = []
for myrow in c: # assume that it is ordered by nodeno
level = int(myrow[0]) #currentnode
nodestoinsert = ast.literal_eval(myrow[1]) #colname,colval,nextnode,leafval
set1=[]
set2=[]
for node in nodestoinsert:
if node[3]=="":
set1.append(node)
set1 = sorted(set1, key=lambda (colname,colval,nextnode,leafval):(colval,leafval))
else :
set2.append(node)
set2 = sorted(set2, key=lambda (colname,colval,nextnode,leafval):(colval,nextnode))
# print "AA",nodestoinsert
# print "Set1",set1
# print "Set2",set2
nodestoinsert= set2 + set1
# print "BB",nodestoinsert
# print nodestoinsert
indexi = 0
levelsi = 0
for i in xrange(len(resulttable)):
if str(resulttable[i][4])== str(level):
indexi = i
levelsi= int(resulttable[i][1])+ 1
for i in xrange(len(nodestoinsert)):
resulttable.insert(i + indexi+1, [level,levelsi,nodestoinsert[i][0],nodestoinsert[i][1],nodestoinsert[i][2],nodestoinsert[i][3]])
yield [('no',),('result',),]
# print resulttable
for i in xrange(len(resulttable)):
result =""
for j in xrange(resulttable[i][1]):
result+="| "
result=result + resulttable[i][2] + "=" + resulttable[i][3]
if resulttable[i][5]!="":
result+=":"+resulttable[i][5]
yield i,result
# formattreetotableoutput select currentnode as nodeno ,jgroup(jpack(colname||"="||colval,nextnode,leafval)) as nodeinfo from mytree group by currentnode ;
def Source():
return functions.vtable.vtbase.VTGenerator(formattreetotableoutput)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
    doctest.testmod()
|
#!/usr/bin/env python
# coding=utf-8
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import re
import datetime
import unittest
import pytz
import uuid
from datetime import timedelta
from lxml import etree
from spyne.util import total_seconds
from spyne.const import xml_ns as ns
from spyne.model import Null, AnyDict, Uuid
from spyne.model.complex import Array
from spyne.model.complex import ComplexModel
from spyne.model.primitive import Date
from spyne.model.primitive import Time
from spyne.model.primitive import Boolean
from spyne.model.primitive import DateTime
from spyne.model.primitive import Duration
from spyne.model.primitive import Float
from spyne.model.primitive import Integer
from spyne.model.primitive import UnsignedInteger
from spyne.model.primitive import Unicode
from spyne.model.primitive import String
from spyne.model.primitive import Decimal
from spyne.protocol import ProtocolBase
from spyne.protocol.xml import XmlDocument
ns_test = 'test_namespace'
from spyne.model import ModelBase
class TestPrimitive(unittest.TestCase):
def test_nillable_quirks(self):
assert ModelBase.Attributes.nillable == True
class Attributes(ModelBase.Attributes):
nillable = False
nullable = False
assert Attributes.nillable == False
assert Attributes.nullable == False
class Attributes(ModelBase.Attributes):
nillable = True
assert Attributes.nillable == True
assert Attributes.nullable == True
class Attributes(ModelBase.Attributes):
nillable = False
assert Attributes.nillable == False
assert Attributes.nullable == False
class Attributes(ModelBase.Attributes):
nullable = True
assert Attributes.nillable == True
assert Attributes.nullable == True
class Attributes(ModelBase.Attributes):
nullable = False
assert Attributes.nillable == False
assert Attributes.nullable == False
class Attributes(ModelBase.Attributes):
nullable = False
class Attributes(Attributes):
pass
assert Attributes.nullable == False
def test_nillable_inheritance_quirks(self):
class Attributes(ModelBase.Attributes):
nullable = False
class AttrMixin:
pass
class NewAttributes(Attributes, AttrMixin):
pass
assert NewAttributes.nullable is False
class AttrMixin:
pass
class NewAttributes(AttrMixin, Attributes):
pass
assert NewAttributes.nullable is False
def test_decimal(self):
assert Decimal(10,4).Attributes.total_digits == 10
assert Decimal(10,4).Attributes.fraction_digits == 4
def test_decimal_format(self):
f = 123456
str_format='${0}'
element = etree.Element('test')
XmlDocument().to_parent(None, Decimal(str_format=str_format), f, element, ns_test)
element = element[0]
self.assertEquals(element.text, '$123456')
def test_string(self):
s = String()
element = etree.Element('test')
XmlDocument().to_parent(None, String, 'value', element, ns_test)
element=element[0]
self.assertEquals(element.text, 'value')
value = XmlDocument().from_element(None, String, element)
self.assertEquals(value, 'value')
def test_datetime(self):
n = datetime.datetime.now(pytz.utc)
element = etree.Element('test')
XmlDocument().to_parent(None, DateTime, n, element, ns_test)
element = element[0]
self.assertEquals(element.text, n.isoformat())
dt = XmlDocument().from_element(None, DateTime, element)
self.assertEquals(n, dt)
def test_datetime_format(self):
n = datetime.datetime.now().replace(microsecond=0)
format = "%Y %m %d %H %M %S"
element = etree.Element('test')
XmlDocument().to_parent(None, DateTime(format=format), n, element, ns_test)
element = element[0]
assert element.text == datetime.datetime.strftime(n, format)
dt = XmlDocument().from_element(None, DateTime(format=format), element)
assert n == dt
def test_date_format(self):
t = datetime.date.today()
format = "%Y %m %d"
element = etree.Element('test')
XmlDocument().to_parent(None, Date(format=format), t, element, ns_test)
assert element[0].text == datetime.date.strftime(t, format)
dt = XmlDocument().from_element(None, Date(format=format), element[0])
assert t == dt
def test_datetime_timezone(self):
import pytz
n = datetime.datetime.now(pytz.timezone('EST'))
element = etree.Element('test')
cls = DateTime(as_timezone=pytz.utc, timezone=False)
XmlDocument().to_parent(None, cls, n, element, ns_test)
element = element[0]
c = n.astimezone(pytz.utc).replace(tzinfo=None)
self.assertEquals(element.text, c.isoformat())
dt = XmlDocument().from_element(None, cls, element)
assert dt.tzinfo is not None
dt = dt.replace(tzinfo=None)
self.assertEquals(c, dt)
def test_date_timezone(self):
elt = etree.Element('wot')
elt.text = '2013-08-09+02:00'
dt = XmlDocument().from_element(None, Date, elt)
print("ok without validation.")
dt = XmlDocument(validator='soft').from_element(None, Date, elt)
print(dt)
def test_time(self):
n = datetime.time(1, 2, 3, 4)
ret = ProtocolBase().to_string(Time, n)
self.assertEquals(ret, n.isoformat())
dt = ProtocolBase().from_string(Time, ret)
self.assertEquals(n, dt)
def test_date(self):
n = datetime.date(2011,12,13)
ret = ProtocolBase().to_string(Date, n)
self.assertEquals(ret, n.isoformat())
dt = ProtocolBase().from_string(Date, ret)
self.assertEquals(n, dt)
def test_utcdatetime(self):
datestring = '2007-05-15T13:40:44Z'
e = etree.Element('test')
e.text = datestring
dt = XmlDocument().from_element(None, DateTime, e)
self.assertEquals(dt.year, 2007)
self.assertEquals(dt.month, 5)
self.assertEquals(dt.day, 15)
datestring = '2007-05-15T13:40:44.003Z'
e = etree.Element('test')
e.text = datestring
dt = XmlDocument().from_element(None, DateTime, e)
self.assertEquals(dt.year, 2007)
self.assertEquals(dt.month, 5)
self.assertEquals(dt.day, 15)
def test_integer(self):
i = 12
integer = Integer()
element = etree.Element('test')
XmlDocument().to_parent(None, Integer, i, element, ns_test)
element = element[0]
self.assertEquals(element.text, '12')
value = XmlDocument().from_element(None, integer, element)
self.assertEquals(value, i)
def test_limits(self):
try:
ProtocolBase().from_string(Integer, "1" * (Integer.__max_str_len__ + 1))
except:
pass
else:
raise Exception("must fail.")
ProtocolBase().from_string(UnsignedInteger, "-1") # This is not supposed to fail.
try:
UnsignedInteger.validate_native(-1) # This is supposed to fail.
except:
pass
else:
raise Exception("must fail.")
def test_large_integer(self):
i = 128375873458473
integer = Integer()
element = etree.Element('test')
XmlDocument().to_parent(None, Integer, i, element, ns_test)
element = element[0]
self.assertEquals(element.text, '128375873458473')
value = XmlDocument().from_element(None, integer, element)
self.assertEquals(value, i)
def test_float(self):
f = 1.22255645
element = etree.Element('test')
XmlDocument().to_parent(None, Float, f, element, ns_test)
element = element[0]
self.assertEquals(element.text, repr(f))
f2 = XmlDocument().from_element(None, Float, element)
self.assertEquals(f2, f)
def test_array(self):
type = Array(String)
type.resolve_namespace(type, "zbank")
values = ['a', 'b', 'c', 'd', 'e', 'f']
element = etree.Element('test')
XmlDocument().to_parent(None, type, values, element, ns_test)
element = element[0]
self.assertEquals(len(values), len(element.getchildren()))
values2 = XmlDocument().from_element(None, type, element)
self.assertEquals(values[3], values2[3])
def test_array_empty(self):
type = Array(String)
type.resolve_namespace(type, "zbank")
values = []
element = etree.Element('test')
XmlDocument().to_parent(None, type, values, element, ns_test)
element = element[0]
self.assertEquals(len(values), len(element.getchildren()))
values2 = XmlDocument().from_element(None, type, element)
self.assertEquals(len(values2), 0)
def test_unicode(self):
s = u'\x34\x55\x65\x34'
self.assertEquals(4, len(s))
element = etree.Element('test')
XmlDocument().to_parent(None, String, s, element, 'test_ns')
element = element[0]
value = XmlDocument().from_element(None, String, element)
self.assertEquals(value, s)
def test_unicode_pattern_mult_cust(self):
assert Unicode(pattern='a').Attributes.pattern == 'a'
assert Unicode(pattern='a')(5).Attributes.pattern == 'a'
def test_unicode_upattern(self):
patt = r'[\w .-]+'
attr = Unicode(unicode_pattern=patt).Attributes
assert attr.pattern == patt
assert attr._pattern_re.flags & re.UNICODE
assert attr._pattern_re.match(u"Ğ Ğ ç .-")
assert attr._pattern_re.match(u"\t") is None
def test_unicode_nullable_mult_cust_false(self):
assert Unicode(nullable=False).Attributes.nullable == False
assert Unicode(nullable=False)(5).Attributes.nullable == False
def test_unicode_nullable_mult_cust_true(self):
assert Unicode(nullable=True).Attributes.nullable == True
assert Unicode(nullable=True)(5).Attributes.nullable == True
def test_null(self):
element = etree.Element('test')
XmlDocument().to_parent(None, Null, None, element, ns_test)
print(etree.tostring(element))
element = element[0]
self.assertTrue( bool(element.attrib.get('{%s}nil' % ns.xsi)) )
value = XmlDocument().from_element(None, Null, element)
self.assertEquals(None, value)
def test_point(self):
from spyne.model.primitive.geo import _get_point_pattern
a=re.compile(_get_point_pattern(2))
assert a.match('POINT (10 40)') is not None
assert a.match('POINT(10 40)') is not None
assert a.match('POINT(10.0 40)') is not None
assert a.match('POINT(1.310e4 40)') is not None
def test_multipoint(self):
from spyne.model.primitive.geo import _get_multipoint_pattern
a=re.compile(_get_multipoint_pattern(2))
assert a.match('MULTIPOINT (10 40, 40 30, 20 20, 30 10)') is not None
# FIXME:
#assert a.match('MULTIPOINT ((10 40), (40 30), (20 20), (30 10))') is not None
def test_linestring(self):
from spyne.model.primitive.geo import _get_linestring_pattern
a=re.compile(_get_linestring_pattern(2))
assert a.match('LINESTRING (30 10, 10 30, 40 40)') is not None
def test_multilinestring(self):
from spyne.model.primitive.geo import _get_multilinestring_pattern
a=re.compile(_get_multilinestring_pattern(2))
assert a.match('''MULTILINESTRING ((10 10, 20 20, 10 40),
(40 40, 30 30, 40 20, 30 10))''') is not None
def test_polygon(self):
from spyne.model.primitive.geo import _get_polygon_pattern
a=re.compile(_get_polygon_pattern(2))
assert a.match('POLYGON ((30 10, 10 20, 20 40, 40 40, 30 10))') is not None
def test_multipolygon(self):
from spyne.model.primitive.geo import _get_multipolygon_pattern
a=re.compile(_get_multipolygon_pattern(2))
assert a.match('''MULTIPOLYGON (((30 20, 10 40, 45 40, 30 20)),
((15 5, 40 10, 10 20, 5 10, 15 5)))''') is not None
assert a.match('''MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)),
((20 35, 45 20, 30 5, 10 10, 10 30, 20 35),
(30 20, 20 25, 20 15, 30 20)))''') is not None
def test_boolean(self):
b = etree.Element('test')
XmlDocument().to_parent(None, Boolean, True, b, ns_test)
b = b[0]
self.assertEquals('true', b.text)
b = etree.Element('test')
XmlDocument().to_parent(None, Boolean, 0, b, ns_test)
b = b[0]
self.assertEquals('false', b.text)
b = etree.Element('test')
XmlDocument().to_parent(None, Boolean, 1, b, ns_test)
b = b[0]
self.assertEquals('true', b.text)
b = XmlDocument().from_element(None, Boolean, b)
self.assertEquals(b, True)
b = etree.Element('test')
XmlDocument().to_parent(None, Boolean, False, b, ns_test)
b = b[0]
self.assertEquals('false', b.text)
b = XmlDocument().from_element(None, Boolean, b)
self.assertEquals(b, False)
b = etree.Element('test')
XmlDocument().to_parent(None, Boolean, None, b, ns_test)
b = b[0]
self.assertEquals('true', b.get('{%s}nil' % ns.xsi))
b = XmlDocument().from_element(None, Boolean, b)
self.assertEquals(b, None)
def test_new_type(self):
"""Customized primitives go into namespace based on module name."""
custom_type = Unicode(pattern='123')
self.assertEqual(custom_type.get_namespace(), custom_type.__module__)
def test_default_nullable(self):
"""Test if default nullable changes nullable attribute."""
try:
self.assertTrue(Unicode.Attributes.nullable)
orig_default = Unicode.Attributes.NULLABLE_DEFAULT
Unicode.Attributes.NULLABLE_DEFAULT = False
self.assertFalse(Unicode.Attributes.nullable)
self.assertFalse(Unicode.Attributes.nillable)
finally:
Unicode.Attributes.NULLABLE_DEFAULT = orig_default
self.assertEqual(Unicode.Attributes.nullable, orig_default)
def test_simple_type_explicit_customization(self):
assert Unicode(max_len=5).__extends__ is not None
assert Unicode.customize(max_len=5).__extends__ is not None
def test_anydict_customization(self):
from spyne.model import json
assert isinstance(AnyDict.customize(store_as='json').Attributes.store_as, json)
def test_uuid_serialize(self):
value = uuid.UUID('12345678123456781234567812345678')
assert ProtocolBase().to_string(Uuid, value) == \
'12345678-1234-5678-1234-567812345678'
assert ProtocolBase().to_string(Uuid(serialize_as='hex'), value) == \
'12345678123456781234567812345678'
assert ProtocolBase().to_string(Uuid(serialize_as='urn'), value) == \
'urn:uuid:12345678-1234-5678-1234-567812345678'
assert ProtocolBase().to_string(Uuid(serialize_as='bytes'), value) == \
'\x124Vx\x124Vx\x124Vx\x124Vx'
assert ProtocolBase().to_string(Uuid(serialize_as='bytes_le'), value) == \
'xV4\x124\x12xV\x124Vx\x124Vx'
assert ProtocolBase().to_string(Uuid(serialize_as='fields'), value) == \
(305419896, 4660, 22136, 18, 52, 95073701484152)
assert ProtocolBase().to_string(Uuid(serialize_as='int'), value) == \
24197857161011715162171839636988778104
def test_uuid_deserialize(self):
value = uuid.UUID('12345678123456781234567812345678')
assert ProtocolBase().from_string(Uuid,
'12345678-1234-5678-1234-567812345678') == value
assert ProtocolBase().from_string(Uuid(serialize_as='hex'),
'12345678123456781234567812345678') == value
assert ProtocolBase().from_string(Uuid(serialize_as='urn'),
'urn:uuid:12345678-1234-5678-1234-567812345678') == value
assert ProtocolBase().from_string(Uuid(serialize_as='bytes'),
'\x124Vx\x124Vx\x124Vx\x124Vx') == value
assert ProtocolBase().from_string(Uuid(serialize_as='bytes_le'),
'xV4\x124\x12xV\x124Vx\x124Vx') == value
assert ProtocolBase().from_string(Uuid(serialize_as='fields'),
(305419896, 4660, 22136, 18, 52, 95073701484152)) == value
assert ProtocolBase().from_string(Uuid(serialize_as='int'),
24197857161011715162171839636988778104) == value
def test_uuid_validate(self):
assert Uuid.validate_string(Uuid,
'12345678-1234-5678-1234-567812345678')
assert Uuid.validate_native(Uuid,
uuid.UUID('12345678-1234-5678-1234-567812345678'))
def test_datetime_serialize_as(self):
i = 1234567890123456
v = datetime.datetime.fromtimestamp(i / 1e6)
assert ProtocolBase().to_string(
DateTime(serialize_as='sec'), v) == i//1e6
assert ProtocolBase().to_string(
DateTime(serialize_as='sec_float'), v) == i/1e6
assert ProtocolBase().to_string(
DateTime(serialize_as='msec'), v) == i//1e3
assert ProtocolBase().to_string(
DateTime(serialize_as='msec_float'), v) == i/1e3
assert ProtocolBase().to_string(
DateTime(serialize_as='usec'), v) == i
def test_datetime_deserialize(self):
i = 1234567890123456
v = datetime.datetime.fromtimestamp(i / 1e6)
assert ProtocolBase().from_string(
DateTime(serialize_as='sec'), i//1e6) == \
datetime.datetime.fromtimestamp(i//1e6)
assert ProtocolBase().from_string(
DateTime(serialize_as='sec_float'), i/1e6) == v
assert ProtocolBase().from_string(
DateTime(serialize_as='msec'), i//1e3) == \
datetime.datetime.fromtimestamp(i/1e3//1000)
assert ProtocolBase().from_string(
DateTime(serialize_as='msec_float'), i/1e3) == v
assert ProtocolBase().from_string(
DateTime(serialize_as='usec'), i) == v
### Duration Data Type
## http://www.w3schools.com/schema/schema_dtypes_date.asp
# Duration Data type
# The time interval is specified in the following form "PnYnMnDTnHnMnS" where:
# P indicates the period (required)
# nY indicates the number of years
# nM indicates the number of months
# nD indicates the number of days
# T indicates the start of a time section (*required* if you are going to
# specify hours, minutes, seconds or microseconds)
# nH indicates the number of hours
# nM indicates the number of minutes
# nS indicates the number of seconds
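# A couple of worked examples of the notation above (illustrative, not from the
# test data): 'P1Y2MT2H' is one year, two months and two hours; 'PT30M' is
# thirty minutes; '-P120D' is minus one hundred and twenty days.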
class SomeBlob(ComplexModel):
__namespace__ = 'myns'
howlong = Duration()
class TestDurationPrimitive(unittest.TestCase):
def test_onehour_oneminute_onesecond(self):
answer = 'PT1H1M1S'
gg = SomeBlob()
gg.howlong = timedelta(hours=1, minutes=1, seconds=1)
element = etree.Element('test')
XmlDocument().to_parent(None, SomeBlob, gg, element, gg.get_namespace())
element = element[0]
print(gg.howlong)
print(etree.tostring(element, pretty_print=True))
assert element[0].text == answer
data = element.find('{%s}howlong' % gg.get_namespace()).text
self.assertEquals(data, answer)
s1 = XmlDocument().from_element(None, SomeBlob, element)
assert total_seconds(s1.howlong) == total_seconds(gg.howlong)
def test_4suite(self):
# borrowed from 4Suite
tests_seconds = [
(0, u'PT0S'),
(1, u'PT1S'),
(59, u'PT59S'),
(60, u'PT1M'),
(3599, u'PT59M59S'),
(3600, u'PT1H'),
(86399, u'PT23H59M59S'),
(86400, u'P1D'),
(86400*60, u'P60D'),
(86400*400, u'P400D')
]
for secs, answer in tests_seconds:
gg = SomeBlob()
gg.howlong = timedelta(seconds=secs)
element = etree.Element('test')
XmlDocument().to_parent(None, SomeBlob, gg, element, gg.get_namespace())
element = element[0]
print(gg.howlong)
print(etree.tostring(element, pretty_print=True))
assert element[0].text == answer
data = element.find('{%s}howlong' % gg.get_namespace()).text
self.assertEquals(data, answer)
s1 = XmlDocument().from_element(None, SomeBlob, element)
assert total_seconds(s1.howlong) == secs
for secs, answer in tests_seconds:
if secs > 0:
secs *= -1
answer = '-' + answer
gg = SomeBlob()
gg.howlong = timedelta(seconds=secs)
element = etree.Element('test')
XmlDocument().to_parent(None, SomeBlob, gg, element, gg.get_namespace())
element = element[0]
print(gg.howlong)
print(etree.tostring(element, pretty_print=True))
assert element[0].text == answer
data = element.find('{%s}howlong' % gg.get_namespace()).text
self.assertEquals(data, answer)
s1 = XmlDocument().from_element(None, SomeBlob, element)
assert total_seconds(s1.howlong) == secs
def test_duration_positive_seconds_only(self):
answer = 'PT35S'
gg = SomeBlob()
gg.howlong = timedelta(seconds=35)
element = etree.Element('test')
XmlDocument().to_parent(None, SomeBlob, gg, element, gg.get_namespace())
element = element[0]
print(gg.howlong)
print(etree.tostring(element, pretty_print=True))
assert element[0].text == answer
data = element.find('{%s}howlong' % gg.get_namespace()).text
self.assertEquals(data, answer)
s1 = XmlDocument().from_element(None, SomeBlob, element)
assert total_seconds(s1.howlong) == total_seconds(gg.howlong)
def test_duration_positive_minutes_and_seconds_only(self):
answer = 'PT5M35S'
gg = SomeBlob()
gg.howlong = timedelta(minutes=5, seconds=35)
element = etree.Element('test')
XmlDocument().to_parent(None, SomeBlob, gg, element, gg.get_namespace())
element = element[0]
print(gg.howlong)
print(etree.tostring(element, pretty_print=True))
assert element[0].text == answer
data = element.find('{%s}howlong' % gg.get_namespace()).text
self.assertEquals(data, answer)
s1 = XmlDocument().from_element(None, SomeBlob, element)
assert total_seconds(s1.howlong) == total_seconds(gg.howlong)
def test_duration_positive_milliseconds_only(self):
answer = 'PT0.666000S'
gg = SomeBlob()
gg.howlong = timedelta(milliseconds=666)
element = etree.Element('test')
XmlDocument().to_parent(None, SomeBlob, gg, element, gg.get_namespace())
element = element[0]
print(gg.howlong)
print(etree.tostring(element, pretty_print=True))
assert element[0].text == answer
data = element.find('{%s}howlong' % gg.get_namespace()).text
self.assertEquals(data, answer)
s1 = XmlDocument().from_element(None, SomeBlob, element)
assert total_seconds(s1.howlong) == total_seconds(gg.howlong)
def test_duration_xml_duration(self):
dur = datetime.timedelta(days=5 + 30 + 365, hours=1, minutes=1,
seconds=12, microseconds=8e5)
str1 = 'P400DT3672.8S'
str2 = 'P1Y1M5DT1H1M12.8S'
self.assertEquals(dur, ProtocolBase().from_string(Duration, str1))
self.assertEquals(dur, ProtocolBase().from_string(Duration, str2))
self.assertEquals(dur, ProtocolBase().from_string(Duration,
ProtocolBase().to_string(Duration, dur)))
if __name__ == '__main__':
unittest.main()
|
import _pickle as pickle
from keras.models import load_model
class BaseModel(object):
def __init__(self, model_size):
self.model_size = model_size
self.model = None
def save(self, filename):
if self.model is not None:
self.model.save(filename + '.model')
d = dict(self.__dict__)
d.pop('model')
f = open(filename, 'wb')
pickle.dump(d, f)
f.close()
@classmethod
def load(cls, filename):
model = load_model(filename + '.model')
f = open(filename, 'rb')
attrs = pickle.load(f)
f.close()
obj = cls(attrs['model_size'])
for key, value in attrs.items():
setattr(obj, key, value)
obj.model = model
return obj
|
__author__ = 'user'
import nltk
import random
import pickle
import os.path
from nltk.corpus import movie_reviews
word_features = []
def find_features(document):
words = set(document)
features = {}
for w in word_features:
features[w] = (w in words)
return features
def train_classifier():
if(os.path.isfile("classifier.pickle")):
return
documents = []
for category in movie_reviews.categories():
for fileid in movie_reviews.fileids(category):
documents.append((list(movie_reviews.words(fileid)),category))
#random.shuffle(documents)
all_words = []
for w in movie_reviews.words():
all_words.append(w.lower())
all_words = nltk.FreqDist(all_words)
for w in all_words.most_common(9000):
if(len(w[0]) >= 3):
word_features.append(w[0])
feature_sets = [(find_features(rev), category) for (rev, category) in documents]
random.shuffle(feature_sets)
#classifier = nltk.NaiveBayesClassifier.train(feature_sets[:2000])
classifier = nltk.NaiveBayesClassifier.train(feature_sets[:2000])
#print(nltk.classify.accuracy(classifier,feature_sets[1500:])*100)
save_classifier = open("classifier.pickle","wb")
pickle.dump(classifier,save_classifier)
save_classifier.close()
|
import uuid
import pytest
from sentry_sdk.envelope import Envelope, Item, PayloadRef
import queue
def _create_transaction_item():
"""
Creates an transaction item that can be added to an envelope
:return: a tuple (transaction_item, trace_id)
"""
trace_id = uuid.uuid4().hex
event_id = uuid.uuid4().hex
item = {
"event_id": event_id,
"type": "transaction",
"transaction": "tr1",
"start_timestamp": 1597976392.6542819,
"timestamp": 1597976400.6189718,
"contexts": {
"trace": {
"trace_id": trace_id,
"span_id": "FA90FDEAD5F74052",
"type": "trace",
}
},
"spans": [],
"extra": {"id": event_id},
}
return item, trace_id, event_id
def _create_event_item(environment=None, release=None):
"""
Creates an event with the specified environment and release
:return: a tuple (event_item, event_id)
"""
event_id = uuid.uuid4().hex
trace_id = uuid.uuid4().hex
item = {
"event_id": event_id,
"message": "Hello, World!",
"extra": {"id": event_id},
}
if environment is not None:
item["environment"] = environment
if release is not None:
item["release"] = release
return item, trace_id, event_id
def _outcomes_enabled_config():
"""
Returns a configuration for Relay that enables outcome generation
"""
return {
"outcomes": {
"emit_outcomes": True,
"batch_size": 1,
"batch_interval": 1,
"source": "relay",
}
}
def _add_sampling_config(
config,
sample_rate,
rule_type,
releases=None,
user_segments=None,
environments=None,
):
"""
Adds a sampling configuration rule to a project configuration
"""
rules = config["config"].setdefault("dynamicSampling", {}).setdefault("rules", [])
if rule_type is None:
rule_type = "trace"
conditions = []
field_prefix = "trace." if rule_type == "trace" else "event."
if releases is not None:
conditions.append(
{"op": "glob", "name": field_prefix + "release", "value": releases,}
)
if user_segments is not None:
conditions.append(
{
"op": "eq",
"name": field_prefix + "user",
"value": user_segments,
"options": {"ignoreCase": True,},
}
)
if environments is not None:
conditions.append(
{
"op": "eq",
"name": field_prefix + "environment",
"value": environments,
"options": {"ignoreCase": True,},
}
)
rule = {
"sampleRate": sample_rate,
"type": rule_type,
"condition": {"op": "and", "inner": conditions},
"id": len(rules) + 1,
}
rules.append(rule)
return rules
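# For reference, a call such as _add_sampling_config(config, sample_rate=0,
# rule_type="trace") with no release/user/environment filters appends a rule of
# the form:
#   {"sampleRate": 0, "type": "trace",
#    "condition": {"op": "and", "inner": []}, "id": 1}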
def _add_trace_info(envelope, trace_id, public_key, release=None, user_segment=None):
"""
Adds trace information to an envelope (to the envelope headers)
"""
if envelope.headers is None:
envelope.headers = {}
trace_info = {"trace_id": trace_id, "public_key": public_key}
envelope.headers["trace"] = trace_info
if release is not None:
trace_info["release"] = release
if user_segment is not None:
trace_info["user"] = user_segment
def test_it_removes_transactions(mini_sentry, relay):
"""
Tests that when sampling is set to 0% for the trace context project the transactions are removed
"""
project_id = 42
relay = relay(mini_sentry, _outcomes_enabled_config())
# create a basic project config
config = mini_sentry.add_basic_project_config(project_id)
# add a sampling rule to project config that removes all transactions (sample_rate=0)
public_key = config["publicKeys"][0]["publicKey"]
rules = _add_sampling_config(config, sample_rate=0, rule_type="trace")
# create an envelope with a trace context that is initiated by this project (for simplicity)
envelope = Envelope()
transaction, trace_id, event_id = _create_transaction_item()
envelope.add_transaction(transaction)
_add_trace_info(envelope, trace_id=trace_id, public_key=public_key)
# send the event, the transaction should be removed.
relay.send_envelope(project_id, envelope)
# the event should be removed by Relay sampling
with pytest.raises(queue.Empty):
mini_sentry.captured_events.get(timeout=1)
outcomes = mini_sentry.captured_outcomes.get(timeout=2)
assert outcomes is not None
outcome = outcomes["outcomes"][0]
assert outcome.get("outcome") == 1
assert outcome.get("reason") == f"Sampled:{rules[0]['id']}"
def test_it_keeps_transactions(mini_sentry, relay):
"""
Tests that when sampling is set to 100% for the trace context project the transactions are kept
"""
project_id = 42
relay = relay(mini_sentry, _outcomes_enabled_config())
# create a basic project config
config = mini_sentry.add_basic_project_config(project_id)
# add a sampling rule to project config that keeps all transactions (sample_rate=1)
public_key = config["publicKeys"][0]["publicKey"]
_add_sampling_config(config, sample_rate=1, rule_type="trace")
# create an envelope with a trace context that is initiated by this project (for simplicity)
envelope = Envelope()
transaction, trace_id, event_id = _create_transaction_item()
envelope.add_transaction(transaction)
_add_trace_info(envelope, trace_id=trace_id, public_key=public_key)
    # send the event, the transaction should be kept
relay.send_envelope(project_id, envelope)
# the event should be left alone by Relay sampling
evt = mini_sentry.captured_events.get(timeout=1).get_transaction_event()
assert evt is not None
# double check that we get back our trace object (check the trace_id from the object)
evt_trace_id = (
evt.setdefault("contexts", {}).setdefault("trace", {}).get("trace_id")
)
assert evt_trace_id == trace_id
# no outcome should be generated since we forward the event to upstream
with pytest.raises(queue.Empty):
mini_sentry.captured_outcomes.get(timeout=2)
def _create_event_envelope(public_key):
envelope = Envelope()
event, trace_id, event_id = _create_event_item()
envelope.add_event(event)
_add_trace_info(envelope, trace_id=trace_id, public_key=public_key)
return envelope, trace_id, event_id
def _create_transaction_envelope(public_key):
envelope = Envelope()
transaction, trace_id, event_id = _create_transaction_item()
envelope.add_transaction(transaction)
_add_trace_info(envelope, trace_id=trace_id, public_key=public_key)
return envelope, trace_id, event_id
@pytest.mark.parametrize(
"rule_type, event_factory",
[("error", _create_event_envelope), ("transaction", _create_transaction_envelope)],
)
def test_it_removes_events(mini_sentry, relay, rule_type, event_factory):
"""
Tests that when sampling is set to 0% for the trace context project the events are removed
"""
project_id = 42
relay = relay(mini_sentry, _outcomes_enabled_config())
# create a basic project config
config = mini_sentry.add_basic_project_config(project_id)
public_key = config["publicKeys"][0]["publicKey"]
    # add a sampling rule to project config that removes all events (sample_rate=0)
rules = _add_sampling_config(config, sample_rate=0, rule_type=rule_type)
# create an envelope with a trace context that is initiated by this project (for simplicity)
envelope, trace_id, event_id = event_factory(public_key)
# send the event, the transaction should be removed.
relay.send_envelope(project_id, envelope)
# the event should be removed by Relay sampling
with pytest.raises(queue.Empty):
mini_sentry.captured_events.get(timeout=1)
outcomes = mini_sentry.captured_outcomes.get(timeout=2)
assert outcomes is not None
outcome = outcomes["outcomes"][0]
assert outcome.get("outcome") == 1
assert outcome.get("reason") == f"Sampled:{rules[0]['id']}"
@pytest.mark.parametrize(
"rule_type, event_factory",
[("error", _create_event_envelope), ("transaction", _create_transaction_envelope)],
)
def test_it_keeps_events(mini_sentry, relay, rule_type, event_factory):
"""
Tests that when sampling is set to 100% for the trace context project the events are kept
"""
project_id = 42
relay = relay(mini_sentry, _outcomes_enabled_config())
# create a basic project config
config = mini_sentry.add_basic_project_config(project_id)
public_key = config["publicKeys"][0]["publicKey"]
# add a sampling rule to project config that keeps all events (sample_rate=1)
_add_sampling_config(config, sample_rate=1, rule_type=rule_type)
# create an envelope with a trace context that is initiated by this project (for simplicity)
envelope, trace_id, event_id = event_factory(public_key)
    # send the event, the event should be kept
relay.send_envelope(project_id, envelope)
# the event should be left alone by Relay sampling
envelope = mini_sentry.captured_events.get(timeout=1)
assert envelope is not None
# double check that we get back our object
# we put the id in extra since Relay overrides the initial event_id
items = [item for item in envelope]
assert len(items) == 1
evt = items[0].payload.json
evt_id = evt.setdefault("extra", {}).get("id")
assert evt_id == event_id
# no outcome should be generated since we forward the event to upstream
with pytest.raises(queue.Empty):
mini_sentry.captured_outcomes.get(timeout=2)
@pytest.mark.parametrize("should_remove", [True, False])
@pytest.mark.parametrize(
"rule_type, event_factory",
[
("error", _create_event_envelope),
("transaction", _create_transaction_envelope),
("trace", _create_transaction_envelope),
],
)
def test_bad_dynamic_rules_in_processing_relays(
mini_sentry,
relay_with_processing,
events_consumer,
transactions_consumer,
should_remove,
rule_type,
event_factory,
):
"""
Configurations that contain bad (unrecognized) rules should be handled by
removing the offending rules and sampling using the correct rules
"""
sample_rate = 0 if should_remove else 1
relay = relay_with_processing()
project_id = 42
config = mini_sentry.add_full_project_config(project_id)
public_key = config["publicKeys"][0]["publicKey"]
if rule_type == "error":
consumer = events_consumer()
else:
consumer = transactions_consumer()
# add a bad condition (with the opposite sample rate to make it evident it is not applied)
rules = _add_sampling_config(
config, sample_rate=1 - sample_rate, rule_type=rule_type
)
last_rule = rules[-1]
last_rule["condition"]["inner"].append(
{"op": "BadOperator", "name": "foo", "value": "bar",}
)
_add_sampling_config(config, sample_rate=sample_rate, rule_type=rule_type)
envelope, trace_id, event_id = event_factory(public_key)
relay.send_envelope(project_id, envelope)
if should_remove:
consumer.assert_empty()
else:
consumer.get_event()
@pytest.mark.parametrize(
"rule_type, event_factory",
[
("error", _create_event_envelope),
("transaction", _create_transaction_envelope),
("trace", _create_transaction_envelope),
],
)
def test_bad_dynamic_rules_in_non_processing_relays(
mini_sentry, relay, rule_type, event_factory
):
"""
Configurations that contain bad (unrecognized) rules should effectively disable
any sampling (everything passes through)
"""
project_id = 42
relay = relay(mini_sentry, _outcomes_enabled_config())
# create a basic project config
config = mini_sentry.add_basic_project_config(project_id)
public_key = config["publicKeys"][0]["publicKey"]
rules = _add_sampling_config(config, sample_rate=0, rule_type=rule_type)
last_rule = rules[-1]
last_rule["condition"]["inner"].append(
{"op": "BadOperator", "name": "foo", "value": "bar",}
)
# add a sampling rule to project config that drops all events (sample_rate=0), it should be ignored
# because there is an invalid rule in the configuration
_add_sampling_config(config, sample_rate=0, rule_type=rule_type)
# create an envelope with a trace context that is initiated by this project (for simplicity)
envelope, trace_id, event_id = event_factory(public_key)
    # send the event; the invalid rule disables sampling, so the event should pass through
relay.send_envelope(project_id, envelope)
# the event should be left alone by Relay sampling
envelope = mini_sentry.captured_events.get(timeout=1)
assert envelope is not None
# double check that we get back our object
# we put the id in extra since Relay overrides the initial event_id
items = [item for item in envelope]
assert len(items) == 1
evt = items[0].payload.json
evt_id = evt.setdefault("extra", {}).get("id")
assert evt_id == event_id
# no outcome should be generated since we forward the event to upstream
with pytest.raises(queue.Empty):
mini_sentry.captured_outcomes.get(timeout=2)
def test_uses_trace_public_key(mini_sentry, relay):
"""
Tests that the public_key from the trace context is used
The project configuration corresponding to the project pointed to
by the context public_key DSN is used (not the dsn of the request)
    Create a trace context for one project and send an event through another
    project using that trace.
Configure project1 to sample out all events (sample_rate=0)
Configure project2 to sample in all events (sample_rate=1)
First:
Send event to project2 with trace from project1
It should be removed (sampled out)
Second:
Send event to project1 with trace from project2
It should pass through
"""
relay = relay(mini_sentry, _outcomes_enabled_config())
# create basic project configs
project_id1 = 42
config1 = mini_sentry.add_basic_project_config(project_id1)
public_key1 = config1["publicKeys"][0]["publicKey"]
_add_sampling_config(config1, sample_rate=0, rule_type="trace")
project_id2 = 43
config2 = mini_sentry.add_basic_project_config(project_id2)
public_key2 = config2["publicKeys"][0]["publicKey"]
_add_sampling_config(config2, sample_rate=1, rule_type="trace")
# First
# send trace with project_id1 context (should be removed)
envelope = Envelope()
transaction, trace_id, event_id = _create_transaction_item()
envelope.add_transaction(transaction)
_add_trace_info(envelope, trace_id=trace_id, public_key=public_key1)
# send the event, the transaction should be removed.
relay.send_envelope(project_id2, envelope)
# the event should be removed by Relay sampling
with pytest.raises(queue.Empty):
mini_sentry.captured_events.get(timeout=1)
# and it should create an outcome
outcomes = mini_sentry.captured_outcomes.get(timeout=2)
assert outcomes is not None
# Second
# send trace with project_id2 context (should go through)
envelope = Envelope()
transaction, trace_id, event_id = _create_transaction_item()
envelope.add_transaction(transaction)
_add_trace_info(envelope, trace_id=trace_id, public_key=public_key2)
# send the event.
relay.send_envelope(project_id1, envelope)
# the event should be passed along to upstream (with the transaction unchanged)
evt = mini_sentry.captured_events.get(timeout=1).get_transaction_event()
assert evt is not None
# no outcome should be generated (since the event is passed along to the upstream)
with pytest.raises(queue.Empty):
mini_sentry.captured_outcomes.get(timeout=2)
@pytest.mark.parametrize(
"rule_type, event_factory",
[
("error", _create_event_envelope),
("transaction", _create_transaction_envelope),
("trace", _create_transaction_envelope),
],
)
def test_multi_item_envelope(mini_sentry, relay, rule_type, event_factory):
"""
Associated items are removed together with event item.
The event is sent twice to account for both fast and slow paths.
When sampling decides to remove a transaction it should also remove all
dependent items (attachments).
"""
project_id = 42
relay = relay(mini_sentry, _outcomes_enabled_config())
# create a basic project config
config = mini_sentry.add_basic_project_config(project_id)
    public_key = config["publicKeys"][0]["publicKey"]
    # add a sampling rule to project config that drops all events (sample_rate=0)
    _add_sampling_config(config, sample_rate=0, rule_type=rule_type)
for i in range(2):
        # create an envelope with a trace context that is initiated by this project (for simplicity)
        envelope, trace_id, event_id = event_factory(public_key)
envelope.add_item(
Item(payload=PayloadRef(json={"x": "some attachment"}), type="attachment")
)
envelope.add_item(
Item(
payload=PayloadRef(json={"y": "some other attachment"}),
type="attachment",
)
)
# send the event, the transaction should be removed.
relay.send_envelope(project_id, envelope)
# the event should be removed by Relay sampling
with pytest.raises(queue.Empty):
mini_sentry.captured_events.get(timeout=1)
outcomes = mini_sentry.captured_outcomes.get(timeout=2)
assert outcomes is not None
|
# coding= utf-8
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ddt
from tests.api import base
from tests.api.utils.schema import admin
@ddt.ddt
class TestServiceStatus(base.TestBase):
def setUp(self):
super(TestServiceStatus, self).setUp()
if self.test_config.run_operator_tests is False:
self.skipTest(
                'Operator function tests are disabled in configuration')
self.flavor_id = self.test_flavor
self.service_urls = []
@ddt.data((1, u'deployed'), (1, u'failed'),
(3, u'deployed'), (3, u'failed'),
(5, u'deployed'), (5, u'failed'))
def test_set_services(self, services_status):
no_of_services, status = services_status
self.service_urls = \
[self._service_limit_create_test_service(client=self.client)
for _ in range(no_of_services)]
        service_ids = [url.rsplit('/', 1)[-1] for url in self.service_urls]
project_id = self.user_project_id
for service_id, service_url in zip(service_ids, self.service_urls):
set_service_resp = self.operator_client.set_service_status(
project_id=project_id,
service_id=service_id,
status=status)
self.assertEqual(set_service_resp.status_code, 201)
service_resp = self.client.get_service(service_url)
resp_body = service_resp.json()
resp_status = resp_body['status']
self.assertEqual(resp_status, status)
get_service_resp = self.operator_client.get_by_service_status(
status=status)
self.assertSchema(get_service_resp.json(),
admin.get_service_project_status)
self.assertIn(service_id, get_service_resp.content)
self.assertIn(project_id, get_service_resp.content)
def tearDown(self):
for service_url in self.service_urls:
self.client.delete_service(location=service_url)
if self.test_config.generate_flavors:
self.client.delete_flavor(flavor_id=self.flavor_id)
super(TestServiceStatus, self).tearDown()
|
from collections import namedtuple
from typing import Any
class FunctionRegisterBorg:
__shared_state = {"register": {}}
def __init__(self):
self.__dict__ = self.__shared_state
def _register(coin: Any, key: str, val: Any) -> None:
borg = FunctionRegisterBorg()
if coin not in borg.register:
borg.register[coin] = {}
borg.register[coin][key] = val
def register_url(coin: Any) -> Any:
    def decorator(func):
        _register(coin, "url_func", func)
        return func
    return decorator
def register_account(coin: Any) -> Any:
    def decorator(func):
        _register(coin, "account_func", func)
        return func
    return decorator
CoinPrice = namedtuple("CoinPrice", ["coin", "fiat", "rate"])
Fiat = namedtuple("Fiat", ["balance", "type"])
Account = namedtuple("Account", ["balance", "type", "address"])
NullFiat = Fiat(type="Null", balance=0)
NullCoinPrice = CoinPrice(coin="Null", fiat="Null", rate=0)
NullAccount = Account(type="Null", balance=0, address="")
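# Minimal usage sketch (illustrative only; "examplecoin" and the decorated functions are
# hypothetical): the decorators stash callables in the shared Borg state, keyed by coin,
# so they can be looked up later without importing the defining module directly.
#   @register_url("examplecoin")
#   def examplecoin_url(address):
#       return "https://explorer.example.invalid/address/" + address
#   @register_account("examplecoin")
#   def examplecoin_account(raw):
#       return Account(balance=raw["balance"], type="examplecoin", address=raw["address"])
#   funcs = FunctionRegisterBorg().register["examplecoin"]
#   funcs["url_func"]("abc123")   # -> "https://explorer.example.invalid/address/abc123"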
|
from construct import *
from construct.lib import *
combine_bytes = Struct(
'bytes_term' / NullTerminated(GreedyBytes, term=b'\x7C', include=False, consume=True),
'bytes_limit' / FixedSized(4, GreedyBytes),
'bytes_eos' / GreedyBytes,
'limit_or_calc' / Computed(lambda this: (this.bytes_limit if False else this.bytes_calc)),
'term_or_limit' / Computed(lambda this: (this.bytes_term if True else this.bytes_limit)),
'limit_or_eos' / Computed(lambda this: (this.bytes_limit if True else this.bytes_eos)),
'eos_or_calc' / Computed(lambda this: (this.bytes_eos if True else this.bytes_calc)),
'term_or_calc' / Computed(lambda this: (this.bytes_term if True else this.bytes_calc)),
'bytes_calc' / Computed(lambda this: b"\x52\x6E\x44"),
'term_or_eos' / Computed(lambda this: (this.bytes_term if False else this.bytes_eos)),
)
_schema = combine_bytes
|
#!/bin/python
# -*- coding: utf-8 -*-
import json
import RestApiz
from flask import Flask, send_file, jsonify
app = Flask(__name__)
@app.route("/admin")
def admin():
return send_file("/home/RestApi/dashboard/public/index.html")
@app.errorhandler(404)
def page_not_found(e):
return jsonify({"error":'404.html'}), 404
def main():
    app.run(debug=True, port=60000, host='127.0.1.1')
if __name__ == "__main__":
    main()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os_vm_expire.model import models
from os_vm_expire.model import repositories
from os_vm_expire.tests import utils
class WhenTestingVmExcludesResource(utils.OsVMExpireAPIBaseTestCase):
def setUp(self):
super(WhenTestingVmExcludesResource, self).setUp()
def tearDown(self):
super(WhenTestingVmExcludesResource, self).tearDown()
repo = repositories.get_vmexpire_repository()
repo.delete_all_entities()
repositories.commit()
repo = repositories.get_vmexclude_repository()
repo.delete_all_entities()
repositories.commit()
def test_can_get_vmexcludes(self):
entity = create_vmexclude_model(exclude_type=0)
create_vmexclude(entity)
exclude_id = entity.exclude_id
exclude_type = entity.exclude_type
_get_resp = self.app.get('/12345project/vmexcludes/')
self.assertEqual(200, _get_resp.status_int)
self.assertIn('vmexcludes', _get_resp.json)
self.assertEqual(len(_get_resp.json['vmexcludes']), 1)
self.assertEqual(
_get_resp.json['vmexcludes'][0]['exclude_id'],
exclude_id
)
self.assertEqual(
_get_resp.json['vmexcludes'][0]['exclude_type'],
exclude_type
)
def test_can_get_vmexclude(self):
entity = create_vmexclude_model()
instance = create_vmexclude(entity)
_get_resp = self.app.get(
'/12345project/vmexcludes/' + instance.id
)
self.assertEqual(200, _get_resp.status_int)
self.assertIn('vmexclude', _get_resp.json)
self.assertEqual(
_get_resp.json['vmexclude']['exclude_id'],
entity.exclude_id)
def test_can_create_vmexclude(self):
entity = create_vmexclude_model(exclude_type=1)
_get_resp = self.app.post_json(
'/12345/vmexcludes/',
{
'id': entity.exclude_id,
'type': 'project'
},
headers={'Content-Type': 'application/json'}
)
self.assertEqual(202, _get_resp.status_int)
self.assertIn('vmexclude', _get_resp.json)
self.assertEqual(
_get_resp.json['vmexclude']['exclude_id'],
entity.exclude_id
)
self.assertEqual(
_get_resp.json['vmexclude']['exclude_type'],
entity.exclude_type
)
_get_existing_resp = self.app.get(
'/12345project/vmexcludes/' + _get_resp.json['vmexclude']['id']
)
self.assertEqual(200, _get_existing_resp.status_int)
self.assertIn('vmexclude', _get_existing_resp.json)
self.assertEqual(
_get_resp.json['vmexclude']['exclude_id'],
entity.exclude_id
)
def test_can_delete_vmexclude(self):
entity = create_vmexclude_model()
instance = create_vmexclude(entity)
_get_resp = self.app.delete(
'/12345project/vmexcludes/' + instance.id,
headers={'Content-Type': 'application/json'}
)
self.assertEqual(204, _get_resp.status_int)
_get_resp = self.app.get('/12345project/vmexcludes/')
self.assertEqual(200, _get_resp.status_int)
self.assertIn('vmexcludes', _get_resp.json)
self.assertEqual(len(_get_resp.json['vmexcludes']), 0)
def create_vmexclude_model(prefix=None, exclude_type=0):
if not prefix:
prefix = '12345'
entity = models.VmExclude()
entity.exclude_id = prefix
entity.exclude_type = exclude_type
return entity
def create_vmexclude(entity):
repo = repositories.get_vmexclude_repository()
instance = repo.create_exclude(entity)
repositories.commit()
return instance
|
# Generated by Django 2.2.13 on 2020-07-20 19:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('viewer', '0036_auto_20200705_1237'),
]
operations = [
migrations.CreateModel(
name='User_Lipid',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_lipid_name', models.CharField(max_length=100, verbose_name='user lipid name')),
('hg_scattering', models.FloatField(default=0, verbose_name='user head group scattering length')),
('hg_electrons', models.FloatField(default=0, verbose_name='user head group electrons')),
('hg_volume', models.FloatField(default=0, verbose_name='user head group volume')),
('tg_scattering', models.FloatField(default=0, verbose_name='user tail group scattering length')),
('tg_electrons', models.FloatField(default=0, verbose_name='user tail group electrons')),
('tm_scattering', models.FloatField(default=0, verbose_name='user terminal methyl scattering length')),
('tm_electrons', models.FloatField(default=0, verbose_name='user terminal methyl electrons')),
('owner', models.ForeignKey(default='admin', on_delete=django.db.models.deletion.CASCADE, related_name='user_lipid_owner', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'user lipid',
'verbose_name_plural': 'user lipids',
},
),
migrations.CreateModel(
name='Project_User_Lipid_Volume',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_lipid_volume', models.FloatField(default=0, verbose_name='user lipid volume')),
('project_title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='project_user_lipid', to='viewer.Project')),
('project_user_lipid_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='project_user_lipid_name', to='viewer.User_Lipid')),
],
options={
'verbose_name': 'user lipid volume',
'verbose_name_plural': 'user lipid volumes',
},
),
]
|
import argparse
import cv2
import os
def extractImages(input, output, frameRate):
if not os.path.exists(output):
os.makedirs(output)
count=1
vidcap = cv2.VideoCapture(input)
def getFrame(sec):
vidcap.set(cv2.CAP_PROP_POS_MSEC,sec*1000)
hasFrames,image = vidcap.read()
if hasFrames:
cv2.imwrite(output+"/"+str(count)+".jpg", image) # Save frame as JPG file
return hasFrames
sec = 0
    frameRate = float(frameRate)  # interval in seconds between frames: 1 for every second, 0.5 for every 500 ms, etc.
# frameRate = 0.5 # Use this line if you want to take 2 images per second
success = getFrame(sec)
while success:
print ('Read a new frame: ', success, '; at ', sec, 's ; frame number: ', count)
count = count + 1
sec = sec + frameRate
sec = round(sec, 2)
success = getFrame(sec)
if __name__=="__main__":
print("Extracting Frames from video")
a = argparse.ArgumentParser()
a.add_argument("--input", help="path to video input")
a.add_argument("--output", help="path to images")
a.add_argument("--frameRate", help="frame rates, set 1 for 1 image per second, 2 for 1 images every 2 seconds, etc...")
args = a.parse_args()
print(args)
extractImages(args.input, args.output, args.frameRate)
# example: python3 extract_frames.py --input ../../dataset/dataset.mp4 --output output --frameRate 2
|
import unittest
from tests.test_utils import get_sample_pdf_with_labels, get_sample_pdf, get_sample_sdf, get_sample_pdf_with_extra_cols, get_sample_pdf_with_no_text_col ,get_sample_spark_dataframe
from nlu import *
class TestToxic(unittest.TestCase):
def test_toxic_model(self):
# nlu.load('en.ner.dl.bert').predict("I like Angela Merkel")
pipe = nlu.load('toxic',verbose=True)
data = ['You are so dumb you goofy dummy', 'You stupid person with an identity that shall remain unnamed, such a filthy identity that you have go to a bad place you person!']
df = pipe.predict(data, output_level='sentence')
print(df)
print(df.columns)
print(df['sentence'], df[['toxic_classes']])
print(df['sentence'], df[['toxic_confidences']])
df = pipe.predict(data, output_level='document',metadata=True)
print(df)
print(df.columns)
print(df['document'], df[['toxic_obscene_confidence']])
print(df['toxic_severe_toxic_confidence'], df[['toxic_insult_confidence']])
print(df['toxic_toxic_confidence'], df[['toxic_obscene_confidence']])
if __name__ == '__main__':
unittest.main()
|
from thumt.data.dataset import get_dataset
from thumt.data.dataset_torchtext import get_dataset as get_dataset_torchtext
from thumt.data.vocab import load_vocabulary, lookup
|
BOX_ACCESS_TOKEN = ""
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Collect error messages.
"""
__author__ = "Sofie & Bernd Krietenstein"
__copyright__ = "Copyright (C) 2018 Sofie & Bernd Krietenstein"
__license__ = "see LICENSE file"
import logging
_LEVEL = logging.DEBUG
LOGGER = logging.getLogger('errors')
LOGGER.setLevel(_LEVEL)
_CONSOLE_HANDLER = logging.StreamHandler()
_CONSOLE_HANDLER.setLevel(logging.ERROR)
_FORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
_CONSOLE_HANDLER.setFormatter(_FORMATTER)
LOGGER.addHandler(_CONSOLE_HANDLER)
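# Minimal usage sketch (illustrative; assumes this module is importable as "errors"):
#   from errors import LOGGER
#   LOGGER.error("could not parse input: %s", "details")   # reaches the console handler
#   LOGGER.debug("accepted by the logger, but below the handler's ERROR threshold")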
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['EndpointVariantArgs', 'EndpointVariant']
@pulumi.input_type
class EndpointVariantArgs:
def __init__(__self__, *,
compute_type: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
workspace_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
environment_image_request: Optional[pulumi.Input['CreateServiceRequestEnvironmentImageRequestArgs']] = None,
is_default: Optional[pulumi.Input[bool]] = None,
keys: Optional[pulumi.Input['CreateServiceRequestKeysArgs']] = None,
kv_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
service_name: Optional[pulumi.Input[str]] = None,
traffic_percentile: Optional[pulumi.Input[float]] = None,
type: Optional[pulumi.Input[Union[str, 'VariantType']]] = None):
"""
The set of arguments for constructing a EndpointVariant resource.
:param pulumi.Input[str] compute_type: The compute environment type for the service.
Expected value is 'Custom'.
:param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.
:param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.
:param pulumi.Input[str] description: The description of the service.
:param pulumi.Input['CreateServiceRequestEnvironmentImageRequestArgs'] environment_image_request: The Environment, models and assets needed for inferencing.
:param pulumi.Input[bool] is_default: Is this the default variant.
:param pulumi.Input['CreateServiceRequestKeysArgs'] keys: The authentication keys.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] kv_tags: The service tag dictionary. Tags are mutable.
:param pulumi.Input[str] location: The name of the Azure location/region.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The service properties dictionary. Properties are immutable.
:param pulumi.Input[str] service_name: Name of the Azure Machine Learning service.
:param pulumi.Input[float] traffic_percentile: The amount of traffic variant receives.
:param pulumi.Input[Union[str, 'VariantType']] type: The type of the variant.
"""
pulumi.set(__self__, "compute_type", 'Custom')
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "workspace_name", workspace_name)
if description is not None:
pulumi.set(__self__, "description", description)
if environment_image_request is not None:
pulumi.set(__self__, "environment_image_request", environment_image_request)
if is_default is not None:
pulumi.set(__self__, "is_default", is_default)
if keys is not None:
pulumi.set(__self__, "keys", keys)
if kv_tags is not None:
pulumi.set(__self__, "kv_tags", kv_tags)
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if service_name is not None:
pulumi.set(__self__, "service_name", service_name)
if traffic_percentile is not None:
pulumi.set(__self__, "traffic_percentile", traffic_percentile)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="computeType")
def compute_type(self) -> pulumi.Input[str]:
"""
The compute environment type for the service.
Expected value is 'Custom'.
"""
return pulumi.get(self, "compute_type")
@compute_type.setter
def compute_type(self, value: pulumi.Input[str]):
pulumi.set(self, "compute_type", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group in which workspace is located.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> pulumi.Input[str]:
"""
Name of Azure Machine Learning workspace.
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "workspace_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the service.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="environmentImageRequest")
def environment_image_request(self) -> Optional[pulumi.Input['CreateServiceRequestEnvironmentImageRequestArgs']]:
"""
The Environment, models and assets needed for inferencing.
"""
return pulumi.get(self, "environment_image_request")
@environment_image_request.setter
def environment_image_request(self, value: Optional[pulumi.Input['CreateServiceRequestEnvironmentImageRequestArgs']]):
pulumi.set(self, "environment_image_request", value)
@property
@pulumi.getter(name="isDefault")
def is_default(self) -> Optional[pulumi.Input[bool]]:
"""
Is this the default variant.
"""
return pulumi.get(self, "is_default")
@is_default.setter
def is_default(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_default", value)
@property
@pulumi.getter
def keys(self) -> Optional[pulumi.Input['CreateServiceRequestKeysArgs']]:
"""
The authentication keys.
"""
return pulumi.get(self, "keys")
@keys.setter
def keys(self, value: Optional[pulumi.Input['CreateServiceRequestKeysArgs']]):
pulumi.set(self, "keys", value)
@property
@pulumi.getter(name="kvTags")
def kv_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The service tag dictionary. Tags are mutable.
"""
return pulumi.get(self, "kv_tags")
@kv_tags.setter
def kv_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "kv_tags", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Azure location/region.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The service properties dictionary. Properties are immutable.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Azure Machine Learning service.
"""
return pulumi.get(self, "service_name")
@service_name.setter
def service_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_name", value)
@property
@pulumi.getter(name="trafficPercentile")
def traffic_percentile(self) -> Optional[pulumi.Input[float]]:
"""
The amount of traffic variant receives.
"""
return pulumi.get(self, "traffic_percentile")
@traffic_percentile.setter
def traffic_percentile(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "traffic_percentile", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'VariantType']]]:
"""
The type of the variant.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'VariantType']]]):
pulumi.set(self, "type", value)
class EndpointVariant(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
environment_image_request: Optional[pulumi.Input[pulumi.InputType['CreateServiceRequestEnvironmentImageRequestArgs']]] = None,
is_default: Optional[pulumi.Input[bool]] = None,
keys: Optional[pulumi.Input[pulumi.InputType['CreateServiceRequestKeysArgs']]] = None,
kv_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
traffic_percentile: Optional[pulumi.Input[float]] = None,
type: Optional[pulumi.Input[Union[str, 'VariantType']]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Machine Learning service object wrapped into ARM resource envelope.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compute_type: The compute environment type for the service.
Expected value is 'Custom'.
:param pulumi.Input[str] description: The description of the service.
:param pulumi.Input[pulumi.InputType['CreateServiceRequestEnvironmentImageRequestArgs']] environment_image_request: The Environment, models and assets needed for inferencing.
:param pulumi.Input[bool] is_default: Is this the default variant.
:param pulumi.Input[pulumi.InputType['CreateServiceRequestKeysArgs']] keys: The authentication keys.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] kv_tags: The service tag dictionary. Tags are mutable.
:param pulumi.Input[str] location: The name of the Azure location/region.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: The service properties dictionary. Properties are immutable.
:param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.
:param pulumi.Input[str] service_name: Name of the Azure Machine Learning service.
:param pulumi.Input[float] traffic_percentile: The amount of traffic variant receives.
:param pulumi.Input[Union[str, 'VariantType']] type: The type of the variant.
:param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: EndpointVariantArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Machine Learning service object wrapped into ARM resource envelope.
:param str resource_name: The name of the resource.
:param EndpointVariantArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EndpointVariantArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_type: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
environment_image_request: Optional[pulumi.Input[pulumi.InputType['CreateServiceRequestEnvironmentImageRequestArgs']]] = None,
is_default: Optional[pulumi.Input[bool]] = None,
keys: Optional[pulumi.Input[pulumi.InputType['CreateServiceRequestKeysArgs']]] = None,
kv_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
traffic_percentile: Optional[pulumi.Input[float]] = None,
type: Optional[pulumi.Input[Union[str, 'VariantType']]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EndpointVariantArgs.__new__(EndpointVariantArgs)
if compute_type is None and not opts.urn:
raise TypeError("Missing required property 'compute_type'")
__props__.__dict__["compute_type"] = 'Custom'
__props__.__dict__["description"] = description
__props__.__dict__["environment_image_request"] = environment_image_request
__props__.__dict__["is_default"] = is_default
__props__.__dict__["keys"] = keys
__props__.__dict__["kv_tags"] = kv_tags
__props__.__dict__["location"] = location
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["service_name"] = service_name
__props__.__dict__["traffic_percentile"] = traffic_percentile
__props__.__dict__["type"] = type
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["identity"] = None
__props__.__dict__["name"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["tags"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200501preview:EndpointVariant"), pulumi.Alias(type_="azure-native:machinelearningservices:EndpointVariant"), pulumi.Alias(type_="azure-nextgen:machinelearningservices:EndpointVariant"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200515preview:EndpointVariant"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200515preview:EndpointVariant"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200901preview:EndpointVariant"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200901preview:EndpointVariant"), pulumi.Alias(type_="azure-native:machinelearningservices/v20210101:EndpointVariant"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210101:EndpointVariant"), pulumi.Alias(type_="azure-native:machinelearningservices/v20210401:EndpointVariant"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210401:EndpointVariant")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(EndpointVariant, __self__).__init__(
'azure-native:machinelearningservices/v20200501preview:EndpointVariant',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'EndpointVariant':
"""
Get an existing EndpointVariant resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = EndpointVariantArgs.__new__(EndpointVariantArgs)
__props__.__dict__["identity"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return EndpointVariant(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
Service properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
|