Dataset schema (one row per source file; ⌀ marks columns that can contain null values):

- hexsha: string, length 40
- size: int64, 3 to 1.03M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 3 to 972
- max_stars_repo_name: string, length 6 to 130
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 3 to 972
- max_issues_repo_name: string, length 6 to 130
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 116k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 3 to 972
- max_forks_repo_name: string, length 6 to 130
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 3 to 1.03M
- avg_line_length: float64, 1.13 to 941k
- max_line_length: int64, 2 to 941k
- alphanum_fraction: float64, 0 to 1
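The rows that follow are individual records matching this schema. As a quick orientation, here is a minimal sketch of how a dump with these columns could be loaded and filtered with the Hugging Face datasets library; the dataset identifier "your-org/python-code-dump" is a placeholder, not the actual source of this export.

# Minimal loading sketch. The dataset name below is a placeholder, not the real source.
from datasets import load_dataset

# Stream the split so the large "content" fields are not all materialized in memory.
ds = load_dataset("your-org/python-code-dump", split="train", streaming=True)

# Keep small, MIT-licensed Python files, using the schema columns listed above.
for row in ds:
    if "MIT" in row["max_stars_repo_licenses"] and row["size"] < 10_000:
        print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
        break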
8c3a3975d43254e1d8881a6fad802c8c224ad325
| 512
|
py
|
Python
|
desafios-py/expert/d0001-latitude-longitude-ip/d0001-latitude-longitude-ip-v0.py
|
Lsitjs/challenges-python
|
fbbfa4d7860ee8efa4d0d3eee4999ea379933224
|
[
"MIT"
] | null | null | null |
desafios-py/expert/d0001-latitude-longitude-ip/d0001-latitude-longitude-ip-v0.py
|
Lsitjs/challenges-python
|
fbbfa4d7860ee8efa4d0d3eee4999ea379933224
|
[
"MIT"
] | null | null | null |
desafios-py/expert/d0001-latitude-longitude-ip/d0001-latitude-longitude-ip-v0.py
|
Lsitjs/challenges-python
|
fbbfa4d7860ee8efa4d0d3eee4999ea379933224
|
[
"MIT"
] | null | null | null |
# In a terminal: pip install requests
import requests
ip = input("Please enter your IP address: ")
# The key prompt is optional: you can instead paste the full URL with the access key
# already included, obtained at https://ipstack.com/quickstart
# e.g. http://api.ipstack.com/{ip}?access_key=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
key = input("Enter your ipstack access key: ")
request = requests.get(f"http://api.ipstack.com/{ip}?access_key={key}")
location = request.json()
print('Latitude:', location['latitude'])
print('Longitude:', location['longitude'])
| 32
| 98
| 0.755859
|
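The three numbers closing the row above (32, 98, 0.755859) are the derived columns avg_line_length, max_line_length and alphanum_fraction for that file. Below is a small sketch of how these statistics could be recomputed from the content field, assuming the conventional definitions; the dump's exact formulas are not given here and may differ slightly.

# Recompute the derived per-file statistics from a "content" string.
# Conventional definitions are assumed; the dataset's exact formulas may differ.
def content_stats(content: str) -> dict:
    lines = content.splitlines()
    lengths = [len(line) for line in lines]
    return {
        "avg_line_length": sum(lengths) / len(lines) if lines else 0.0,
        "max_line_length": max(lengths, default=0),
        # Fraction of all characters that are letters or digits.
        "alphanum_fraction": sum(c.isalnum() for c in content) / len(content) if content else 0.0,
    }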
693b14d6dec4592a82f3ed13bd46020a20d9c410
| 1,227
|
py
|
Python
|
aitlas/base/segmentation.py
|
alex-hayhoe/aitlas-docker
|
57686f9c18f28c884511fc0c84618506cbf61eae
|
[
"MIT"
] | null | null | null |
aitlas/base/segmentation.py
|
alex-hayhoe/aitlas-docker
|
57686f9c18f28c884511fc0c84618506cbf61eae
|
[
"MIT"
] | null | null | null |
aitlas/base/segmentation.py
|
alex-hayhoe/aitlas-docker
|
57686f9c18f28c884511fc0c84618506cbf61eae
|
[
"MIT"
] | null | null | null |
import logging
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from ..metrics import F1ScoreSample
from ..utils import stringify
from .models import BaseModel
from .schemas import BaseSegmentationClassifierSchema
from .metrics import SegmentationRunningScore
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
class BaseSegmentationClassifier(BaseModel):
schema = BaseSegmentationClassifierSchema
def __init__(self, config):
super().__init__(config)
self.running_metrics = SegmentationRunningScore(self.num_classes, self.device)
def get_predicted(self, outputs, threshold=None):
predicted_probs = torch.tanh(outputs)
predicted = (predicted_probs >= self.config.threshold).type(
predicted_probs.dtype
)
return predicted_probs, predicted
def load_optimizer(self):
"""Load the optimizer"""
return optim.Adam([dict(params=self.model.parameters(), lr=self.config.learning_rate), ])
def load_criterion(self):
"""Load the loss function"""
return nn.MSELoss(reduction="mean")
def load_lr_scheduler(self):
return None
| 27.266667
| 97
| 0.720456
|
ab2a955de41b01bfac564fdf7269a8a1ee2b2bc1
| 95
|
py
|
Python
|
archived_projects_(dead)/declare_pyside/typehint/__init__.py
|
likianta/declare-qtquick
|
93c2ce49d841ccdeb0272085c5f731139927f0d7
|
[
"MIT"
] | 3
|
2021-11-02T03:45:27.000Z
|
2022-03-27T05:33:36.000Z
|
declare_pyside/typehint/__init__.py
|
Likianta/pyml
|
b0005b36aa94958a7d3e306a9df65fea46669d18
|
[
"MIT"
] | null | null | null |
declare_pyside/typehint/__init__.py
|
Likianta/pyml
|
b0005b36aa94958a7d3e306a9df65fea46669d18
|
[
"MIT"
] | null | null | null |
from . import pyside
from . import qmlside
from . import widgets_support
from .common import *
| 19
| 29
| 0.778947
|
7fe95693b40eca1abe1d0002f62379ad45bf1155
| 6,703
|
py
|
Python
|
code_complete/code_completer.py
|
berjc/code-complete
|
03c1acb2561cb85e62e791a81ecd624b6297c1c0
|
[
"MIT"
] | 1
|
2017-04-29T12:55:42.000Z
|
2017-04-29T12:55:42.000Z
|
code_complete/code_completer.py
|
berjc/code-complete
|
03c1acb2561cb85e62e791a81ecd624b6297c1c0
|
[
"MIT"
] | null | null | null |
code_complete/code_completer.py
|
berjc/code-complete
|
03c1acb2561cb85e62e791a81ecd624b6297c1c0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Encapsulates Functionality for Testing and Iterating on the Code Complete File. """
import glob
import os
import re
import shutil
import subprocess
from config import READ_OPT
from config import WRITE_OPT
LINE = '=' * 100
# TODO : Fix Task Solution Comment
class CodeCompleter(object):
""" Encapsulates Functionality for Testing and Iterating on the Code Completed File.
:attr _current_code_f: The path to the current code completed version of the original code file.
:type _current_code_f: str
:attr _original_code_f: A path to the file to code complete.
:type _original_code_f: str
:attr _tests_f: A path to the tests to be used to verify code completion.
:type _tests_f: str
:attr _task_descriptors: A list of `TaskDescriptor` objects encapsulating the completion tasks.
:type _task_descriptors: list
:attr _task_solutions:
:type _task_solutions: list
"""
UNITTEST_OK = 'OK'
UNITTEST_FAILURE_REGEX = r'\d+'
PATH_DELIM = '/'
COMMA_DELIM = ','
EQUALS_DELIM = '='
NEW_LINE_DELIM = '\n'
ORIGINAL_EXTENSION = '%s.original'
PREVIOUS_EXTENSION = '%s.previous'
RUN_PYTHON_SCRIPT_CMD = 'python %s'
ALL_PYC_FILES = '%s/*.pyc'
APPEND_FUNCTION_STUB_TEMPLATE = '\n%s'
FUNCTION_STUB_TEMPLATE = '%s%s(%s)'
SET_VARIABLE_TEMPLATE = '%s %s '
def __init__(self, code_f, tests_f, task_descriptors, task_solutions):
""" Initializes the `CodeCompleter` object.
:param code_f: A path to the file to code complete.
:type code_f: str
:param tests_f: A path to the tests to be used to verify code completion.
:type tests_f: str
:param task_descriptors: A list of `TaskDescriptor` objects encapsulating the completion tasks.
:type task_descriptors: list
:param task_solutions: ...
:type task_solutions: list
"""
self._current_code_f = code_f
self._original_code_f = code_f
self._tests_f = tests_f
self._task_descriptors = task_descriptors
self._task_solutions = task_solutions
@staticmethod
def _parse_unittest_results(results):
""" Parses the number of test failures from `unittest.main()` output.
Examples of the last line of the `unittest.main()` output.
.. code-block:: python
OK # <-- Signals all tests passed.
FAILED (failures=1) # <-- Signals 1 test failed.
"""
print results
last_line = results.split(CodeCompleter.NEW_LINE_DELIM)[-1].strip()
if last_line == CodeCompleter.UNITTEST_OK:
return 0
elif not last_line.startswith('FAILED'):
return None
else:
return sum([
int(count) for count in re.findall(CodeCompleter.UNITTEST_FAILURE_REGEX, last_line, re.M | re.I)
])
def _remove_compiled_code(self):
""" Removes compiled python files from the code file's location. """
path_to_code_file = CodeCompleter.PATH_DELIM.join(self._current_code_f.split(CodeCompleter.PATH_DELIM)[:-1])
for f in glob.glob(CodeCompleter.ALL_PYC_FILES % path_to_code_file):
os.remove(f)
def _run_tests(self):
""" Returns the number of tests that failed.
:return: The number of tests that failed.
:rtype: int
"""
process = subprocess.Popen(
CodeCompleter.RUN_PYTHON_SCRIPT_CMD % self._tests_f,
shell=True,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
return CodeCompleter._parse_unittest_results(stderr.strip())
def _attempt_to_solve_task(self, task_descriptor, task_solution):
""" Attempt to solve task with the given task descriptor using the given task solution.
:param task_descriptor: The `TaskDescriptor` object to use to solve the task.
:type task_descriptor: TaskDescriptor
:param task_solution:
:type task_solution:
"""
stub_function, input_list, output_list, function_name = task_solution
current_code_f_contents = open(self._current_code_f, READ_OPT).read()
current_code_f_contents += CodeCompleter.APPEND_FUNCTION_STUB_TEMPLATE % stub_function
current_code_f_contents = current_code_f_contents.replace(
task_descriptor.get_task_id(),
CodeCompleter.FUNCTION_STUB_TEMPLATE % (
CodeCompleter.SET_VARIABLE_TEMPLATE % (
CodeCompleter.COMMA_DELIM.join(output_list),
CodeCompleter.EQUALS_DELIM,
) if output_list else '',
function_name,
CodeCompleter.COMMA_DELIM.join(input_list),
),
)
open(self._current_code_f, WRITE_OPT).write(current_code_f_contents)
def complete(self):
""" Complete all tasks. """
print '%s\n\nStarting with Code... \n\n%s' % (LINE, open(self._current_code_f, READ_OPT).read().replace('\n', '\n\t'))
# Make a copy of the original code file.
shutil.copyfile(self._original_code_f, CodeCompleter.ORIGINAL_EXTENSION % self._original_code_f)
num_of_tests_failed = self._run_tests()
for task_descriptor, task_solutions in zip(self._task_descriptors, self._task_solutions):
# Make a copy of the current code file so that we can revert to it if the test verification fails.
shutil.copyfile(self._current_code_f, CodeCompleter.PREVIOUS_EXTENSION % self._current_code_f)
for task_solution in task_solutions:
self._attempt_to_solve_task(task_descriptor, task_solution)
print '%s\n\nTrying to Reduce %d Errors with...\n\n\t%s' % (
LINE,
num_of_tests_failed,
open(self._current_code_f, READ_OPT).read().replace('\n', '\n\t'),
)
raw_input()
self._remove_compiled_code()
updated_num_tests_failed = self._run_tests()
if updated_num_tests_failed is None:
updated_num_tests_failed = num_of_tests_failed
if updated_num_tests_failed < num_of_tests_failed:
num_of_tests_failed = updated_num_tests_failed
break
else:
shutil.copyfile(CodeCompleter.PREVIOUS_EXTENSION % self._current_code_f, self._current_code_f)
os.remove(CodeCompleter.PREVIOUS_EXTENSION % self._current_code_f)
print '%s\n\nFINISHED with %d Errors!!! %s\n\n%s' % (LINE, num_of_tests_failed, ':(' if num_of_tests_failed else ':)', LINE)
| 41.122699
| 132
| 0.650306
|
7510edb25dfae870dce6245b3400b4fd71cc6f96
| 3,425
|
py
|
Python
|
tests/ex/test_shell_out.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | 6
|
2017-04-01T05:30:08.000Z
|
2017-04-05T14:17:40.000Z
|
tests/ex/test_shell_out.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | 1
|
2017-04-04T06:47:13.000Z
|
2017-04-04T14:26:32.000Z
|
tests/ex/test_shell_out.py
|
trishume/VintageousPlus
|
1dd62435138234979fe5bb413e1731119b017daf
|
[
"MIT"
] | null | null | null |
import unittest
import os
import sublime
from VintageousPlus.tests import set_text
from VintageousPlus.tests import add_sel
from VintageousPlus.tests import get_sel
from VintageousPlus.tests import ViewTest
import VintageousPlus.ex.plat as plat
class Test_ex_shell_out_no_input(ViewTest):
@unittest.skipIf(os.name == 'nt', 'not supported on Windows')
def testCommandOutput(self):
test_string = 'Testing!'
test_command_line = '!echo "' + test_string + '"'
output_panel = self.view.window().get_output_panel('vi_out')
self.view.window().run_command('ex_shell_out', {'command_line': test_command_line})
actual = output_panel.substr(self.R(0, output_panel.size()))
expected = test_string + '\n'
self.assertEqual(expected, actual)
@unittest.skipIf(os.name != 'nt', 'Windows')
def testCommandOutput(self):
test_string = 'Testing!'
test_command_line = '!echo "' + test_string + '"'
output_panel = self.view.window().get_output_panel('vi_out')
self.view.window().run_command('ex_shell_out', {'command_line': test_command_line})
actual = output_panel.substr(self.R(0, output_panel.size()))
expected = '\\"{0}\\"\n'.format(test_string)
self.assertEqual(expected, actual)
def tearDown(self):
# XXX: Ugly hack to make sure that the output panels created in these
# tests don't hide the overall progress panel.
self.view.window().run_command('show_panel', {
'panel': 'output.vintageous.tests'
})
super().tearDown()
class Test_ex_shell_out_filter_through_shell(ViewTest):
@staticmethod
def getWordCountCommand():
if plat.HOST_PLATFORM == plat.WINDOWS:
return None
else:
return 'wc -w'
@unittest.skipIf(sublime.platform() == 'windows' or sublime.platform() == "osx", 'Windows or OSX')
def testSimpleFilterThroughShell(self):
word_count_command = self.__class__.getWordCountCommand()
# TODO implement test for Windows.
if not word_count_command:
return True
self.view.sel().clear()
self.write('''aaa
bbb
ccc''')
self.add_sel(self.R((0, 2), (0, 2)))
test_command_line = ".!" + word_count_command
self.view.run_command('ex_shell_out', {
'command_line': test_command_line
})
actual = self.view.substr(self.R(0, self.view.size()))
expected = '''1
bbb
ccc'''
self.assertEqual(expected, actual)
@unittest.skipIf(sublime.platform() == 'windows' or sublime.platform() == "osx", 'Windows or OSX')
def testMultipleFilterThroughShell(self):
word_count_command = self.__class__.getWordCountCommand()
# TODO implement test for Windows.
if not word_count_command:
return True
self.view.sel().clear()
self.write('''aaa
bbb
ccc
''')
# Two selections touching all numeric word lines.
self.add_sel(self.R((1, 0), (1, 0)))
test_command_line = ".!" + word_count_command
self.view.run_command('ex_shell_out', {
'command_line': test_command_line
})
actual = self.view.substr(self.R(0, self.view.size()))
expected = '''aaa
1
ccc
'''
self.assertEqual(expected, actual)
| 33.252427
| 102
| 0.625985
|
5357d2e914a16f2c92bf5055eb46dd94188b1347
| 6,267
|
py
|
Python
|
v3_baseline/.ipynb_checkpoints/model-checkpoint.py
|
egeersu/Multihop-GNN
|
1638eb2408030004112b8c19d4a2f68571eb4011
|
[
"MIT"
] | 2
|
2021-09-18T17:59:56.000Z
|
2022-03-03T12:26:26.000Z
|
v3_baseline/.ipynb_checkpoints/model-checkpoint.py
|
egeersu/Multihop-GNN
|
1638eb2408030004112b8c19d4a2f68571eb4011
|
[
"MIT"
] | null | null | null |
v3_baseline/.ipynb_checkpoints/model-checkpoint.py
|
egeersu/Multihop-GNN
|
1638eb2408030004112b8c19d4a2f68571eb4011
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from dgl.nn.pytorch import RelGraphConv
from hyperpara import *
import torch.nn.functional as F
import dgl
from functools import partial
import dgl
class RGCN(nn.Module):
def __init__(self, num_nodes, gnn_h_dim, out_dim, num_rels,
num_gcn_hidden_layers, dropout_rate,
use_self_loop=False, use_cuda=True):
super(RGCN, self).__init__()
self.num_nodes = num_nodes
self.gnn_h_dim = gnn_h_dim
self.out_dim = out_dim
self.num_rels = num_rels
self.num_gcn_hidden_layers = num_gcn_hidden_layers
self.dropout_rate = dropout_rate
self.use_self_loop = use_self_loop
self.use_cuda = use_cuda
self.bias = True
self.query_embed_dim = 1024 * 3
self.query_lstm_out_dim = 128
# create rgcn layers
self.build_model()
def build_model(self):
# 2-layer bi-LSTM for query embedding: 3*1024 -> 256 -> 128
self.query_lstm_1 = nn.LSTM(self.query_embed_dim, 256, num_layers=1, bidirectional=True, batch_first=True)
self.query_lstm_2 = nn.LSTM(256 * 2, 128, num_layers=1, bidirectional=True, batch_first=True)
# 1-layer FFN 256 dim for ElMo embedding
self.elmo_fnn = nn.Linear(1024 * 3, 256)
# 2-layer query-aware mention encoding
self.q_h_encoding1 = nn.Linear(512, 1024)
self.q_h_encoding2 = nn.Linear(1024, 512)
self.dropout = nn.Dropout(self.dropout_rate)
# h2h 512 to 512
self.gcn_layers = nn.ModuleList()
self.shared_gcn = RGCNLayer(self.gnn_h_dim, self.gnn_h_dim, self.num_rels)
# concatenation h_G with q: 512+256 = 768
# 3 layers output FF [256, 128, 1]
self.output_fnn1 = nn.Linear(512 + 256, 256)
self.output_fnn2 = nn.Linear(256, 128)
self.output_fnn3 = nn.Linear(128, 1)
def forward(self, h, b_norm_adj, query, node_num, answer_mask):
batch_size = node_num.shape[0]
# initialize 500 nodes index for creating mask (mask out padding nodes)
if args.use_gpu:
self.node_mask = torch.arange(max_nodes,dtype=torch.int).unsqueeze(0).cuda()
else:
self.node_mask = torch.arange(max_nodes,dtype=torch.int).unsqueeze(0)
# Make node_mask to mask out the padding nodes
node_num = node_num.unsqueeze(-1) # (bs, 1)
self.node_mask = self.node_mask.repeat((batch_size, 1)) < node_num
# flat the embed query
query = query.contiguous().view(query.shape[0], query.shape[1], 3 * 1024)
# Query embedding, 2-layer LSTM
lstm_q_outputs, (hn, cn) = self.query_lstm_1(query)
lstm_q_outputs, (hn, cn) = self.query_lstm_2(lstm_q_outputs)
# take output state as encoding state
query_compress = torch.cat((hn[0], hn[-1]), dim=-1)
query_compress = self.dropout(query_compress)
# 1-layer FFN for nodes ElMo embedding (dimension reduction from 3072 -> 256)
h = self.elmo_fnn(h)
h = F.tanh(h)
h = self.dropout(h)
# prepare for concatenation of q and node embeddings
query_compress = query_compress.unsqueeze(1).expand(h.shape)
# Concatenation between q and nodes (256+256=512)
h = torch.cat((h, query_compress), dim=-1) # bs, 500, 512
h = h * self.node_mask.unsqueeze(-1)
# 2-layer FF [1024, 512]
h = self.q_h_encoding1(h)
h = F.tanh(h)
h = self.q_h_encoding2(h)
h = F.tanh(h)
for i in range(self.num_gcn_hidden_layers):
h = self.shared_gcn(h, b_norm_adj, self.node_mask)
h = self.dropout(h)
# Concatenation with query again # bs, 500, 768
h = torch.cat((h, query_compress), dim=-1) * self.node_mask.unsqueeze(-1)
# Graph-level Attention Layer ()
# 3-layer output layers
h = self.output_fnn1(h) # bs, 500, 256
h = F.tanh(h)
h = self.output_fnn2(h) # bs, 500, 128
h = F.tanh(h)
h = self.output_fnn3(h) # bs, 500, 1
# Apply answer mask: mask out the information that not belong to any classes
h = h.view(batch_size, 1, max_nodes).masked_fill(answer_mask, float("-inf"))
# Max reduce over 500 nodes, see Eqt 1. for detail
out = h.max(dim = -1).values
out = F.log_softmax(out, dim=-1)
# print(out)
return out
class RGCNLayer(nn.Module):
def __init__(self, in_feat, out_feat, num_rels, bias=None,
activation=None, is_input_layer=False):
super(RGCNLayer, self).__init__()
self.in_feat = in_feat
self.out_feat = out_feat
self.num_rels = num_rels
self.bias = bias
self.activation = activation
self.is_input_layer = is_input_layer
self.rel_weights = nn.ModuleList()
for _ in range(self.num_rels):
rel_W = nn.Linear(self.in_feat, self.out_feat)
self.rel_weights.append(rel_W)
self.weight_hid = nn.Linear(self.in_feat, self.out_feat)
self.weight_gate = nn.Linear(2 * self.out_feat, self.out_feat)
def forward(self, h, norm_adj, node_mask):
# 1. message aggregation
# copy along relational dimension
h_i = torch.stack([W_r(h) for W_r in self.rel_weights], dim=1) # (bs, rels, num_nodes, hdim)
# Apply node mask: node_mask(bs, 500), h_i: (bs, num_rels, 500, dim)
h_i = h_i * node_mask.unsqueeze(-1).unsqueeze(1)
msg = torch.matmul(norm_adj, h_i).sum(1) # bs, num_nodes, out_dim
update = msg + self.weight_hid(h) * node_mask.unsqueeze(-1) # bs, num_nodes, out_dim
# Gate mechanism
gate = self.weight_gate(torch.cat((update, h), -1))
gate = F.sigmoid(gate)
gate = gate * node_mask.unsqueeze(-1)
# 2. Update
h = gate * F.tanh(update) + (1 - gate) * h
return h
| 35.607955
| 114
| 0.588001
|
ca937b749aebd56405abf312b06faea782dc646e
| 247
|
py
|
Python
|
tools/pbox.py
|
tlk/nintendo-box
|
721eab32d078329302c2a3be3bcfa1d5d6d203f6
|
[
"MIT"
] | null | null | null |
tools/pbox.py
|
tlk/nintendo-box
|
721eab32d078329302c2a3be3bcfa1d5d6d203f6
|
[
"MIT"
] | null | null | null |
tools/pbox.py
|
tlk/nintendo-box
|
721eab32d078329302c2a3be3bcfa1d5d6d203f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from game import ngame
print('digraph {')
print('graph [pad="2", nodesep="5", ranksep="10"];')
for j in range(32):
for k in range(32):
if ((ngame.pbox[j]>>k)&1):
print(f'c{j} -> d{k};')
print('}')
| 19
| 52
| 0.538462
|
a97330b478151a7dc9838c10fa445db1c57746aa
| 355
|
py
|
Python
|
wireless/sims/instance-get-example-1/instance-get-example-1.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | 3
|
2020-05-05T10:01:02.000Z
|
2021-02-06T14:23:13.000Z
|
wireless/sims/instance-get-example-1/instance-get-example-1.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | null | null | null |
wireless/sims/instance-get-example-1/instance-get-example-1.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | 1
|
2019-10-02T14:36:36.000Z
|
2019-10-02T14:36:36.000Z
|
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# required for all twilio access tokens
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
sim = client.wireless.sims('DEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA').fetch()
print(sim)
| 27.307692
| 72
| 0.814085
|
bdab157cb101116d0f14c00e3af8d1bbe500f7fb
| 5,348
|
py
|
Python
|
tests/aggregate/test_as_dict.py
|
proteanhq/protean
|
2006832265435cad8d4f9b86d1a789d8828d2707
|
[
"BSD-3-Clause"
] | 6
|
2018-09-26T04:54:09.000Z
|
2022-03-30T01:01:45.000Z
|
tests/aggregate/test_as_dict.py
|
proteanhq/protean
|
2006832265435cad8d4f9b86d1a789d8828d2707
|
[
"BSD-3-Clause"
] | 261
|
2018-09-20T09:53:33.000Z
|
2022-03-08T17:43:04.000Z
|
tests/aggregate/test_as_dict.py
|
proteanhq/protean
|
2006832265435cad8d4f9b86d1a789d8828d2707
|
[
"BSD-3-Clause"
] | 6
|
2018-07-22T07:09:15.000Z
|
2021-02-02T05:17:23.000Z
|
from datetime import datetime
from protean import BaseAggregate, BaseEntity, BaseValueObject
from protean.fields import (
DateTime,
HasMany,
HasOne,
Integer,
Reference,
String,
Text,
ValueObject,
)
class TestAggregateWithNoEnclosedEntitiesOrValueObjects:
def test_basic_as_dict(self):
class Post(BaseAggregate):
title = String(required=True, max_length=1000)
slug = String(required=True, max_length=1024)
content = Text(required=True)
post = Post(title="Test Post", slug="test-post", content="Do Re Mi Fa")
assert post.to_dict() == {
"id": post.id,
"title": "Test Post",
"slug": "test-post",
"content": "Do Re Mi Fa",
}
def test_as_dict_with_date_fields(self):
class Post(BaseAggregate):
title = String(required=True, max_length=1000)
slug = String(required=True, max_length=1024)
content = Text(required=True)
posted_at = DateTime(required=True, default=datetime.utcnow)
current_time = datetime.utcnow()
post = Post(
title="Test Post",
slug="test-post",
content="Do Re Mi Fa",
posted_at=current_time,
)
assert post.to_dict() == {
"id": post.id,
"title": "Test Post",
"slug": "test-post",
"content": "Do Re Mi Fa",
"posted_at": str(current_time),
}
def test_as_dict_with_aggregate_that_has_many_entities(self, test_domain):
class Comment(BaseEntity):
content = Text(required=True)
post = Reference("Post")
class Meta:
aggregate_cls = "Post"
class Post(BaseAggregate):
title = String(required=True, max_length=1000)
slug = String(required=True, max_length=1024)
content = Text(required=True)
comments = HasMany(Comment)
test_domain.register(Post)
test_domain.register(Comment)
post = Post(title="Test Post", slug="test-post", content="Do Re Mi Fa")
comment1 = Comment(content="first comment")
comment2 = Comment(content="second comment")
post.add_comments([comment1, comment2])
assert post.to_dict() == {
"id": post.id,
"title": "Test Post",
"slug": "test-post",
"content": "Do Re Mi Fa",
"comments": [
{"id": comment1.id, "content": "first comment"},
{"id": comment2.id, "content": "second comment"},
],
}
def test_as_dict_with_aggregate_that_has_many_entities_with_reference(
self, test_domain
):
class Comment(BaseEntity):
content = Text(required=True)
post = Reference("Post")
class Meta:
aggregate_cls = "Post"
class Post(BaseAggregate):
title = String(required=True, max_length=1000)
slug = String(required=True, max_length=1024)
content = Text(required=True)
comments = HasMany(Comment)
test_domain.register(Post)
test_domain.register(Comment)
post = Post(title="Test Post", slug="test-post", content="Do Re Mi Fa")
comment1 = Comment(content="first comment", post=post)
comment2 = Comment(content="second comment", post=post)
post.add_comments([comment1, comment2])
assert post.to_dict() == {
"id": post.id,
"title": "Test Post",
"slug": "test-post",
"content": "Do Re Mi Fa",
"comments": [
{"id": comment1.id, "content": "first comment"},
{"id": comment2.id, "content": "second comment"},
],
}
def test_as_dict_with_aggregate_that_has_one_entity(self, test_domain):
class Post(BaseAggregate):
title = String(required=True, max_length=1000)
slug = String(required=True, max_length=1024)
content = Text(required=True)
meta = HasOne("PostMeta")
class PostMeta(BaseEntity):
likes = Integer(default=0)
class Meta:
aggregate_cls = Post
test_domain.register(Post)
test_domain.register(PostMeta)
meta = PostMeta(likes=27)
post = Post(
title="Test Post", slug="test-post", content="Do Re Mi Fa", meta=meta
)
assert post.to_dict() == {
"id": post.id,
"title": "Test Post",
"slug": "test-post",
"content": "Do Re Mi Fa",
"meta": {"id": meta.id, "likes": 27},
}
def test_as_dict_with_aggregate_that_has_a_value_object(self, test_domain):
class Email(BaseValueObject):
address = String(max_length=254, required=True)
class User(BaseAggregate):
email = ValueObject(Email, required=True)
password = String(required=True, max_length=255)
user = User(email=Email(address="john.doe@gmail.com"), password="secret")
assert user.to_dict() == {
"id": user.id,
"email": {"address": "john.doe@gmail.com",},
"password": "secret",
}
| 31.458824
| 81
| 0.55647
|
c60ce6382422ace6155a357750ca1b5069a32976
| 70
|
py
|
Python
|
diving_in_python/week_1/mypackage/utils.py
|
assassinen/coursera_mfti_python
|
eee7b3c55256f391c1be32924fa1ad3364b307f2
|
[
"Apache-2.0"
] | null | null | null |
diving_in_python/week_1/mypackage/utils.py
|
assassinen/coursera_mfti_python
|
eee7b3c55256f391c1be32924fa1ad3364b307f2
|
[
"Apache-2.0"
] | null | null | null |
diving_in_python/week_1/mypackage/utils.py
|
assassinen/coursera_mfti_python
|
eee7b3c55256f391c1be32924fa1ad3364b307f2
|
[
"Apache-2.0"
] | null | null | null |
def multiplay(a, b):
return a * b
def sum(a, b):
return a + b
| 14
| 20
| 0.542857
|
71c08bef134cda57430c5509009f248dbc2ae007
| 217
|
py
|
Python
|
tests/drf/mocks.py
|
taxidriver77/django-angular
|
f8d9dc58e05c99b6abe694e6be4b4de769664657
|
[
"0BSD"
] | 8
|
2016-01-11T11:01:29.000Z
|
2019-05-26T18:00:06.000Z
|
tests/drf/mocks.py
|
sv1jsb/django-angular
|
d527e812e02227c0b7151b9211c8248e6298c4df
|
[
"0BSD"
] | 3
|
2020-02-11T23:41:57.000Z
|
2021-06-10T19:13:54.000Z
|
tests/drf/mocks.py
|
taxidriver77/django-angular
|
f8d9dc58e05c99b6abe694e6be4b4de769664657
|
[
"0BSD"
] | 8
|
2016-12-19T00:02:26.000Z
|
2022-01-11T13:52:43.000Z
|
# -*- coding: utf-8 -*-
mock_user1 = {
"email": "test@test.com",
"username": "test",
"password": "test",
}
mock_user2 = {
"email": "test2@test.com",
"username": "test2",
"password": "test2",
}
| 18.083333
| 30
| 0.520737
|
af76e407253faaa8bd8f764bb474e358c162a7ba
| 1,477
|
py
|
Python
|
setup.py
|
victoriamorris/nielsenTools
|
51981f0becc3ba93213bd116aac580b51dbdabe6
|
[
"MIT"
] | null | null | null |
setup.py
|
victoriamorris/nielsenTools
|
51981f0becc3ba93213bd116aac580b51dbdabe6
|
[
"MIT"
] | null | null | null |
setup.py
|
victoriamorris/nielsenTools
|
51981f0becc3ba93213bd116aac580b51dbdabe6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""setup.py file for nielsenTools."""
# Import required modules
from distutils.core import setup
__author__ = 'Victoria Morris'
__license__ = 'MIT License'
__version__ = '1.0.0'
__status__ = '4 - Beta Development'
# Version
version = '1.0.0'
# Long description
long_description = ''
# List requirements.
# All other requirements should be contained in the standard library
requirements = [
'regex',
'pyperclip',
'sqlite3',
'csv',
]
# Setup
setup(
console=[
'bin/nielsen2marc_products.py',
'bin/nielsen2marc_organisations.py',
'bin/nielsen_isbn_analysis.py',
'bin/nielsen2marc_clusters.py',
],
zipfile=None,
name='nielsenTools',
version=version,
author='Victoria Morris',
url='',
license='MIT',
description='Tools for working with files from Nielsen.',
long_description=long_description,
packages=['nielsenTools'],
scripts=[
'bin/nielsen2marc_products.py',
'bin/nielsen2marc_organisations.py',
'bin/nielsen_isbn_analysis.py',
'bin/nielsen2marc_clusters.py',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python'
],
requires=requirements,
)
| 24.213115
| 73
| 0.618145
|
9a70054536516987cad963a4d144b776699f5747
| 1,845
|
py
|
Python
|
tf2onnx/constants.py
|
garymm/tensorflow-onnx
|
a8f78ac7903493dee579304b7b1717aa9ec9706f
|
[
"Apache-2.0"
] | null | null | null |
tf2onnx/constants.py
|
garymm/tensorflow-onnx
|
a8f78ac7903493dee579304b7b1717aa9ec9706f
|
[
"Apache-2.0"
] | null | null | null |
tf2onnx/constants.py
|
garymm/tensorflow-onnx
|
a8f78ac7903493dee579304b7b1717aa9ec9706f
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
"""
common constants
"""
from onnx import helper
TF2ONNX_PACKAGE_NAME = __name__.split('.')[0]
# Built-in supported domains
ONNX_DOMAIN = ""
AI_ONNX_ML_DOMAIN = "ai.onnx.ml"
MICROSOFT_DOMAIN = "com.microsoft"
CONTRIB_OPS_DOMAIN = "ai.onnx.contrib"
# Default opset version for onnx domain.
# The current update policy is that the default should be set to
# the latest released version as of 18 months ago.
# Opset 13 was released in ONNX v1.8.0 (Nov, 2020).
PREFERRED_OPSET = 13
# Default opset for custom ops
TENSORFLOW_OPSET = helper.make_opsetid("ai.onnx.converters.tensorflow", 1)
# Built-in supported opset
AI_ONNX_ML_OPSET = helper.make_opsetid(AI_ONNX_ML_DOMAIN, 2)
# Target for the generated onnx graph. Its possible targets:
# onnx-1.1 = onnx at v1.1 (winml in rs4 is based on this)
# caffe2 = include some workarounds for caffe2 and winml
TARGET_RS4 = "rs4"
TARGET_RS5 = "rs5"
TARGET_RS6 = "rs6"
TARGET_CAFFE2 = "caffe2"
TARGET_TENSORRT = "tensorrt"
TARGET_CHANNELS_LAST = "nhwc"
TARGET_CHANNELS_FIRST = "nchw"
POSSIBLE_TARGETS = [TARGET_RS4, TARGET_RS5, TARGET_RS6, TARGET_CAFFE2, TARGET_TENSORRT, TARGET_CHANNELS_LAST]
DEFAULT_TARGET = []
NCHW_TO_NHWC = [0, 2, 3, 1]
NHWC_TO_NCHW = [0, 3, 1, 2]
NDHWC_TO_NCDHW = [0, 4, 1, 2, 3]
NCDHW_TO_NDHWC = [0, 2, 3, 4, 1]
HWCN_TO_NCHW = [3, 2, 0, 1]
NCHW_TO_HWCN = [2, 3, 1, 0]
# Environment variables
ENV_TF2ONNX_DEBUG_MODE = "TF2ONNX_DEBUG_MODE"
ENV_TF2ONNX_CATCH_ERRORS = "TF2ONNX_CATCH_ERRORS"
# Mapping opset to IR version.
# Note: opset 7 and opset 8 came out with IR3 but we need IR4 because of PlaceholderWithDefault
# Refer from https://github.com/onnx/onnx/blob/main/docs/Versioning.md#released-versions
OPSET_TO_IR_VERSION = {
1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 4, 8: 4, 9: 4, 10: 5, 11: 6, 12: 7, 13: 7, 14: 7, 15: 8, 16: 8
}
| 30.75
| 109
| 0.731707
|
469d4b0f5da498128deff58c466200e0aa41d9a7
| 2,992
|
py
|
Python
|
tests/test_read_user_dict.py
|
rgalbo/cookiecutter
|
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
|
[
"BSD-3-Clause"
] | 7
|
2019-11-03T03:30:43.000Z
|
2021-01-03T04:04:36.000Z
|
tests/test_read_user_dict.py
|
rgalbo/cookiecutter
|
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
|
[
"BSD-3-Clause"
] | 1
|
2019-09-05T09:50:32.000Z
|
2019-09-05T09:59:01.000Z
|
tests/test_read_user_dict.py
|
rgalbo/cookiecutter
|
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
|
[
"BSD-3-Clause"
] | 2
|
2017-10-20T13:12:25.000Z
|
2022-02-14T05:30:32.000Z
|
# -*- coding: utf-8 -*-
"""
test_read_user_dict
-------------------
"""
from __future__ import unicode_literals
import click
import pytest
from cookiecutter.prompt import (
process_json,
read_user_dict,
)
def test_process_json_invalid_json():
with pytest.raises(click.UsageError) as exc_info:
process_json('nope]')
assert str(exc_info.value) == 'Unable to decode to JSON.'
def test_process_json_non_dict():
with pytest.raises(click.UsageError) as exc_info:
process_json('[1, 2]')
assert str(exc_info.value) == 'Requires JSON dict.'
def test_process_json_valid_json():
user_value = '{"name": "foobar", "bla": ["a", 1, "b", false]}'
assert process_json(user_value) == {
'name': 'foobar',
'bla': ['a', 1, 'b', False],
}
def test_process_json_deep_dict():
user_value = '''{
"key": "value",
"integer_key": 37,
"dict_key": {
"deep_key": "deep_value",
"deep_integer": 42,
"deep_list": [
"deep value 1",
"deep value 2",
"deep value 3"
]
},
"list_key": [
"value 1",
"value 2",
"value 3"
]
}'''
assert process_json(user_value) == {
"key": "value",
"integer_key": 37,
"dict_key": {
"deep_key": "deep_value",
"deep_integer": 42,
"deep_list": [
"deep value 1",
"deep value 2",
"deep value 3",
]
},
"list_key": [
"value 1",
"value 2",
"value 3",
]
}
def test_should_raise_type_error(mocker):
prompt = mocker.patch('click.prompt')
with pytest.raises(TypeError):
read_user_dict('name', 'russell')
assert not prompt.called
def test_should_call_prompt_with_process_json(mocker):
"""Test to make sure that process_jon is actually being used
to generate a processor for the user input."""
mock_prompt = mocker.patch(
'cookiecutter.prompt.click.prompt',
autospec=True,
)
read_user_dict('name', {'project_slug': 'pytest-plugin'})
assert mock_prompt.call_args == mocker.call(
'name',
type=click.STRING,
default='default',
value_proc=process_json,
)
def test_read_user_dict_default_value(mocker):
"""Test to make sure that read_user_dict returns the default value for a
dict variable rather than the display value.
"""
mock_prompt = mocker.patch(
'cookiecutter.prompt.click.prompt',
autospec=True,
return_value='default',
)
val = read_user_dict('name', {'project_slug': 'pytest-plugin'})
assert mock_prompt.call_args == mocker.call(
'name',
type=click.STRING,
default='default',
value_proc=process_json,
)
assert val == {'project_slug': 'pytest-plugin'}
| 23.015385
| 76
| 0.561497
|
053e1bd84b802d9bcb66c57bbed3b77c1ec79d38
| 3,808
|
py
|
Python
|
Psychoemotional_simulator/old/2_im_em_ok.py
|
Izabella13/Trainer_for_facial_muscles
|
b3ce8e413605d766bd0141da18ffb1ea6a7e49ae
|
[
"Apache-2.0"
] | null | null | null |
Psychoemotional_simulator/old/2_im_em_ok.py
|
Izabella13/Trainer_for_facial_muscles
|
b3ce8e413605d766bd0141da18ffb1ea6a7e49ae
|
[
"Apache-2.0"
] | null | null | null |
Psychoemotional_simulator/old/2_im_em_ok.py
|
Izabella13/Trainer_for_facial_muscles
|
b3ce8e413605d766bd0141da18ffb1ea6a7e49ae
|
[
"Apache-2.0"
] | null | null | null |
# Import the required libraries
from time import sleep
import numpy as np
import csv
import cv2
import os
import dlib
import glob
import math
dat_path = '../windows/Data/'
dis_path = '../discript/'
dlib_face = 'dlib_face_recognition_resnet_model_v1.dat'
shape = 'shape_predictor_68_face_landmarks.dat'
# Location of the images
faces_folder_path = 'dataset/'
# Each emotion gets its own id
face_id = input('\n enter the emotion number and press enter ==> ')
Em_num = 'Em_' + face_id
# Function that computes two kinds of face descriptors
def get_landmarks(image):
# Detect faces as bounding rectangles
detections = detector(image, 1)
# Loop over all faces found in the image
for k, d in enumerate(detections):
# Compute the facial landmarks using the predictor class
# (returns the coordinates of points on the face)
shape = predictor(image, d)
# Get the face descriptors
face_descriptor = facerec.compute_face_descriptor(image, shape)
# List for the X coordinates of the facial points
xlist = []
# List for the Y coordinates of the facial points
ylist = []
# Store the X and Y coordinates in two lists
for i in range(0, 68):
# X list
xlist.append(float(shape.part(i).x))
# Y list
ylist.append(float(shape.part(i).y))
# Take point 30 on the nose as the central point of the face
meannp = np.asarray((shape.part(30).x, shape.part(30).y))
# List for the first set of descriptors
landmarks_vectorised = []
# Compute the first set of descriptors
for w, z in zip(xlist, ylist):
# Build an array from the point coordinates
coornp = np.asarray((z, w))
# Compute the distance from the central point to this point
dist = np.linalg.norm(coornp - meannp)
# Append the distance to the first descriptor list
landmarks_vectorised.append(dist)
# Scale the image parameters
landmarks_vectorised[:] = landmarks_vectorised[:] / landmarks_vectorised[27]
# Set default values if no faces were detected in the frame
if len(detections) == 0:
xlist = 0
ylist = 0
meannp = np.asarray((0, 0))
landmarks_vectorised = 0
face_descriptor = 0
# Return the first descriptors and the detected face data
return xlist, ylist, meannp, landmarks_vectorised, face_descriptor
nn = 0
# Model that extracts the 128 face descriptors
facerec = dlib.face_recognition_model_v1(dat_path + dlib_face)
# Create an object that detects faces as rectangles
detector = dlib.get_frontal_face_detector()
# Load the data for extracting the 68 facial landmark points
predictor = dlib.shape_predictor(dat_path + shape)
# List for the 68 descriptors
im_par = []
# List for the 128 descriptors
im_par1 = []
# Scan the whole directory,
# build a list of the files in it and start iterating over them
for f in glob.glob(os.path.join(faces_folder_path + Em_num, "*.jpg")):
img = dlib.load_rgb_image(f)
l_mx, l_my, mn, l_mv, f_d = get_landmarks(img)
if l_mx != 0:
im_par.append(l_mv)
im_par1.append(f_d)
nn += 1
# Open the text file for the 68 descriptors for writing
f = open(dis_path + Em_num + '.txt', 'w', newline='')
# Create an object for writing csv files
writer = csv.writer(f, delimiter=',')
for it in im_par:
# Write each of the 68 descriptors
writer.writerow(it)
f.close()
# Open the text file for the 128 descriptors for writing
f = open(dis_path + Em_num + '_128.txt', 'w', newline='')
# Create an object for writing csv files
writer = csv.writer(f, delimiter=',')
for it in im_par1:
# Write each of the 128 descriptors
writer.writerow(it)
f.close()
print(nn)
| 36.615385
| 84
| 0.685924
|
3ac2c7b45ee0bc1300013b0b5649c65b077d2f6c
| 465
|
py
|
Python
|
data/scripts/templates/object/tangible/wearables/wookiee/shared_wke_gloves_s01.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/wearables/wookiee/shared_wke_gloves_s01.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/wearables/wookiee/shared_wke_gloves_s01.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/wookiee/shared_wke_gloves_s01.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","wke_gloves_s01")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.352941
| 80
| 0.737634
|
92b5c1e08b2274e7861f30b1dbe5ba3ccfc3875f
| 762
|
py
|
Python
|
openprocurement/auction/esco/tests/unit/test_bidders.py
|
ProzorroUKR/openprocurement.auction.esco
|
16a127ac7fc47cacaaf5f2eb708ea8b273e57e56
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auction/esco/tests/unit/test_bidders.py
|
ProzorroUKR/openprocurement.auction.esco
|
16a127ac7fc47cacaaf5f2eb708ea8b273e57e56
|
[
"Apache-2.0"
] | 3
|
2017-10-26T12:42:01.000Z
|
2017-11-06T10:41:49.000Z
|
openprocurement/auction/esco/tests/unit/test_bidders.py
|
ProzorroUKR/openprocurement.auction.esco
|
16a127ac7fc47cacaaf5f2eb708ea8b273e57e56
|
[
"Apache-2.0"
] | 4
|
2017-07-10T12:03:38.000Z
|
2017-09-08T10:19:46.000Z
|
# -*- coding: utf-8 -*-
from openprocurement.auction.esco.tests.unit.constants import AUCTIONS
def test_set_auction_and_participation_urls(universal_auction, mocker):
mock_prepare_auction_and_participation_urls = mocker.MagicMock()
base = 'openprocurement.auction.worker.auctions.{}.prepare_auction_and_participation_urls'
mocker.patch(base.format('simple'),
mock_prepare_auction_and_participation_urls)
mocker.patch(base.format('multilot'),
mock_prepare_auction_and_participation_urls)
universal_auction.set_auction_and_participation_urls()
mock_prepare_auction_and_participation_urls.assert_called_once_with(universal_auction)
assert mock_prepare_auction_and_participation_urls.call_count == 1
| 44.823529
| 94
| 0.799213
|
d1dd15a7d63a7e7a1cb587b686515734eb8d89b6
| 7,731
|
py
|
Python
|
interface.py
|
ironman5366/W.I.L.L-Telegram
|
3ca8f420ccef4ffd31c44f16a86df357f7f9866c
|
[
"MIT"
] | null | null | null |
interface.py
|
ironman5366/W.I.L.L-Telegram
|
3ca8f420ccef4ffd31c44f16a86df357f7f9866c
|
[
"MIT"
] | null | null | null |
interface.py
|
ironman5366/W.I.L.L-Telegram
|
3ca8f420ccef4ffd31c44f16a86df357f7f9866c
|
[
"MIT"
] | null | null | null |
#Builtin imports
import logging
#External imports
import dataset
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove, InlineKeyboardButton, InlineKeyboardMarkup)
from telegram.ext import (
Updater, CommandHandler, MessageHandler, Filters, Job, CallbackQueryHandler, RegexHandler, ConversationHandler, Handler
)
#Internal imports
import parser
import plugin_handler
log = logging.getLogger()
events = {}
nlp = None
help_str = '''
Commands:
/help: Print this string
/start: Start the bot and create a userdata table with your username
/settings: Change user settings
If not given a telegram command, W.I.L.L will try to interpret your command as a personal assistant
'''
#TODO: add a /settings function!
db = dataset.connect('sqlite:///will.db')
def help(bot, update):
'''Print help message'''
update.message.reply_text(help_str)
def send_message(bot, chat_id, message_text):
'''Send a text message'''
bot.sendMessage(chat_id, message_text)
def check_plugin(plugins, event):
'''Check which plugin the user wants to run'''
#Use the in place button conversation handler
keyboard = []
def add_to_keyboard(plugin):
keyboard.append(
InlineKeyboardButton(plugin["name"], callback_data=
{"type": "plugin_selection", "event": event, "plugin_function": plugin["function"], "name": plugin["name"]})
)
#Add all the possible plugins to an inline keyboard
map(add_to_keyboard, plugins)
plugin_choice_inline = InlineKeyboardMarkup(keyboard)
event["bot"].sendMessage(event["update"].message.chat.id,text="Please select a plugin to run", reply_markup=plugin_choice_inline)
def alarm(bot, job):
"""Function to send the alarm message"""
alarm_text = job.context["alarm_text"]
chat_id = job.context["chat_id"]
keyboard = [[InlineKeyboardButton("Snooze", callback_data={"type":"snooze_1","job":job.context['job'],'snooze':True}),
InlineKeyboardButton("Dismiss", callback_data={"type":"snooze_1","job":job.context['job'],'snooze':False})]]
snooze_inline = InlineKeyboardMarkup(keyboard)
bot.sendMessage(chat_id, text=alarm_text, reply_markup=snooze_inline)
def set_job(bot, update, args, job_queue, chat_data, response_text, alarm_text):
'''Adds a job to the job queue'''
chat_id = update.message.chat_id
#Time for the timer in seconds
due = int(args[0])
#Put relevant alarm data in context and set the alarm
chat_data["chat_id"] = chat_id
chat_data["alarm_text"] = alarm_text
job = Job(alarm, due, repeat=False, context=chat_data)
chat_data['job'] = job
job_queue.put(job)
update.message.reply_text(response_text)
def button(bot, update, job_queue, chat_data):
'''Button response'''
query = update.callback_query
data = query.data
data_type = data["type"]
if data_type == "snooze_1":
snooze = data["snooze"]
if snooze:
keyboard = [[InlineKeyboardButton("5 minutes", callback_data={"type": "snooze_2", "job": data['job'],
'length': 300}),
InlineKeyboardButton("15 minutes", callback_data={"type": "snooze_2", "job": data['job'],
'length': 900}),
InlineKeyboardButton("1 hour", callback_data={"type": "snooze_2", "job": data['job'],
'length': 3600}),
InlineKeyboardButton("6 hours", callback_data={"type": "snooze_2", "job": data['job'],
'length': 21600}),
InlineKeyboardButton("1 day", callback_data={"type": "snooze_2", "job": data['job'],
'length': 86400})
]]
snooze_inline = InlineKeyboardMarkup(keyboard)
bot.sendMessage(update.message.chat_id, text="How long would you like to snooze?", reply_markup=snooze_inline)
else:
update.message.reply_text("Dismissed.")
elif data_type == "snooze_2":
due = data["length"]
job = Job(alarm, due, repeat=False, context=chat_data)
chat_data["job"] = job
job_queue.put(job)
update.message.reply_text("Snoozed")
elif data_type == "plugin_selection":
event_data = data['event']
plugin_function = data["function"]
log.info("Calling plugin {0}".format(
data["plugin_name"]
))
#Call the plugin
plugin_handler.subscriptions().call_plugin(plugin_function,event_data)
def start(bot,update):
'''First run commands'''
log.info("Setting up bot")
db = dataset.connect('sqlite:///will.db')
userdata = db['userdata']
admin_username = "willbeddow"
log.info("Admin username is {0}".format(admin_username))
username = update.message.from_user.username
first_name = update.message.from_user.first_name
chat_id = update.message.chat_id
#Determine whether the user is the admin user
user_is_admin = username == admin_username
log.info("User data is as follows: username is {0}, first_name is {1}, user_is_admin is {2}, chat_id is {3}".format(
username,first_name,user_is_admin, chat_id
))
userdata.upsert(dict(
first_name=update.message.from_user.first_name,
username=update.message.from_user.username,
admin=user_is_admin,
default_plugin="search"
), ['username'])
update.message.reply_text(
"In order to use the search functions, you need a wolframalpha api key. Please paste one in:"
)
def accept_wolfram_key(bot, update):
'''Store wolfram key given in setup'''
#If I want to add more steps to setup, add them here
username = update.message.from_user.username
db = dataset.connect('sqlite:///will.db')
userdata = db['userdata']
data = dict(username=username, wolfram_key=update.message.text)
userdata.upsert(data, ['username'])
log.info("In accept wolfram, table is {0}".format(
userdata
))
def error(bot, update, error):
'''Log an error'''
log.warn('Update "%s" caused error "%s"' % (update, error))
def cancel(bot, update):
'''Cancel startup conversation'''
update.message.reply_text("Cancelled.")
def initialize(bot_token):
'''Start the bot'''
updater = Updater(bot_token)
# Get the dispatcher to register handlers
dp = updater.dispatcher
#Use regex to match strings of text that look like wolfram keys (long alphanumeric strings)
# on different commands - answer in Telegram
dp.add_handler(RegexHandler('[\s\S]* [\s\S]*', parser.parse,pass_job_queue=True, pass_chat_data=True))
#dp.add_handler(MessageHandler(Filters.text, parser.parse, pass_job_queue=True, pass_chat_data=True))
dp.add_handler(RegexHandler('^[A-Z0-9]{6}-[A-Z0-9]{10}$', accept_wolfram_key))
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
dp.add_handler(CallbackQueryHandler(button, pass_chat_data=True, pass_job_queue=True))
#dp.add_handler(MessageHandler(
# Filters.text, parser.parse,pass_job_queue=True,pass_chat_data=True
#))
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
| 41.342246
| 133
| 0.646747
|
b52232d2a8e490802bc2f84bd05f32a46648d40b
| 11,950
|
py
|
Python
|
catkin_ws/devel/.private/baxter_interface/lib/python2.7/dist-packages/baxter_interface/cfg/PositionFFJointTrajectoryActionServerConfig.py
|
roop-pal/robotic-folding
|
a0e062ac6d23cd07fe10e3f45abc4ba50e533141
|
[
"RSA-MD"
] | null | null | null |
catkin_ws/devel/.private/baxter_interface/lib/python2.7/dist-packages/baxter_interface/cfg/PositionFFJointTrajectoryActionServerConfig.py
|
roop-pal/robotic-folding
|
a0e062ac6d23cd07fe10e3f45abc4ba50e533141
|
[
"RSA-MD"
] | null | null | null |
catkin_ws/devel/.private/baxter_interface/lib/python2.7/dist-packages/baxter_interface/cfg/PositionFFJointTrajectoryActionServerConfig.py
|
roop-pal/robotic-folding
|
a0e062ac6d23cd07fe10e3f45abc4ba50e533141
|
[
"RSA-MD"
] | null | null | null |
## *********************************************************
##
## File autogenerated for the baxter_interface package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 246, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 293, 'description': 'Amount of time (s) controller is permitted to be late achieving goal', 'max': 120.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'goal_time', 'edit_method': '', 'default': 0.1, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 293, 'description': 'Maximum velocity (m/s) at end of trajectory to be considered stopped', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'stopped_velocity_tolerance', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_s0 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_s0_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_s1 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_s1_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_e0 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_e0_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_e1 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_e1_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_w0 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_w0_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_w1 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_w1_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_w2 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_w2_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_s0 - maximum 
final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_s0_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_s1 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_s1_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_e0 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_e0_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_e1 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_e1_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_w0 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_w0_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_w1 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_w1_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_w2 - maximum final error', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_w2_goal', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_s0 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_s0_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_s1 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_s1_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_e0 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_e0_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_e1 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 
'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_e1_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_w0 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_w0_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_w1 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_w1_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'left_w2 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'left_w2_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_s0 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_s0_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_s1 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_s1_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_e0 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_e0_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_e1 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_e1_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_w0 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_w0_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}, {'srcline': 293, 'description': 'right_w1 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_w1_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 
'type': 'double'}, {'srcline': 293, 'description': 'right_w2 - maximum error during trajectory execution', 'max': 1.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'right_w2_trajectory', 'edit_method': '', 'default': 0.35, 'level': 0, 'min': -1.0, 'type': 'double'}], 'type': '', 'id': 0}
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
# params = []
# params.extend(config['parameters'])
# for group in config['groups']:
# params.extend(extract_params(group))
# return params
for param in extract_params(config_description):
min[param['name']] = param['min']
max[param['name']] = param['max']
defaults[param['name']] = param['default']
level[param['name']] = param['level']
type[param['name']] = param['type']
all_level = all_level | param['level']
| 322.972973
| 11,063
| 0.677657
|
7936dffca2625fa5413907654152bd8a3dc38f70
| 31,716
|
py
|
Python
|
mvregfus/imaris.py
|
m-albert/MVRegFus
|
dc55537bdf87a60a0a023644a042f1a398db7c9c
|
[
"BSD-3-Clause"
] | 7
|
2020-04-27T01:00:38.000Z
|
2021-12-05T01:04:52.000Z
|
mvregfus/imaris.py
|
m-albert/MVRegFus
|
dc55537bdf87a60a0a023644a042f1a398db7c9c
|
[
"BSD-3-Clause"
] | 7
|
2020-07-21T14:15:08.000Z
|
2021-02-24T12:21:33.000Z
|
mvregfus/imaris.py
|
m-albert/MVRegFus
|
dc55537bdf87a60a0a023644a042f1a398db7c9c
|
[
"BSD-3-Clause"
] | 3
|
2020-07-21T14:44:01.000Z
|
2022-03-13T16:58:21.000Z
|
"""
file taken and modified from https://github.com/tlambert03/imarispy/blob/master/imarispy
(added functions from utils.py for easy single file import)
"""
# from .util import h5str, make_thumbnail, subsample_data
import logging
import os
import re
import h5py
import numpy as np
logger = logging.getLogger(__name__)
def da_to_ims(array, fname='myfile.ims',
# subsamp=((1, 1, 1), (1, 2, 2)),
# chunks=((16, 128, 128), (64, 64, 64)),
subsamp=((1, 1, 1), (2, 2, 2), (4, 4, 4), (8, 8, 8)),
chunks=((16, 128, 128), (64, 64, 64), (32, 32, 32), (16, 16, 16)),
compression='gzip',
thumbsize=256,
dx=0.1, dz=0.25,
overwrite = False,
origin=[0., 0., 0.],
scheduler='threads',
):
"""
:param array: Supports numpy and dask arrays
:param fname:
:param subsamp:
:param chunks:
:param compression:
:param thumbsize:
:param dx:
:param dz:
:return:
"""
assert len(subsamp) == len(chunks)
assert all([len(i) == 3 for i in subsamp]), 'Only deal with 3D chunks'
assert all([len(i) == len(x) for i, x in zip(subsamp, chunks)])
assert compression in (None, 'gzip', 'lzf', 'szip'), 'Unknown compression type'
if not fname.endswith('.ims'):
fname = fname + '.ims'
if overwrite:
if os.path.exists(fname):
os.remove(fname)
# force 5D
if not array.ndim == 5:
array = array.reshape(tuple([1] * (5 - array.ndim)) + array.shape)
nt, nc, nz, ny, nx = array.shape
nr = len(subsamp)
GROUPS = [
'DataSetInfo',
'Thumbnail',
'DataSetTimes',
'DataSetInfo/Imaris',
'DataSetInfo/Image',
'DataSetInfo/TimeInfo'
]
ATTRS = [
('/', ('ImarisDataSet', 'ImarisDataSet')),
('/', ('ImarisVersion', '5.5.0')),
('/', ('DataSetInfoDirectoryName', 'DataSetInfo')),
('/', ('ThumbnailDirectoryName', 'Thumbnail')),
('/', ('DataSetDirectoryName', 'DataSet')),
('DataSetInfo/Imaris', ('Version', '8.0')),
('DataSetInfo/Imaris', ('ThumbnailMode', 'thumbnailMIP')),
('DataSetInfo/Imaris', ('ThumbnailSize', thumbsize)),
('DataSetInfo/Image', ('X', nx)),
('DataSetInfo/Image', ('Y', ny)),
('DataSetInfo/Image', ('Z', nz)),
('DataSetInfo/Image', ('NumberOfChannels', nc)),
('DataSetInfo/Image', ('Noc', nc)),
('DataSetInfo/Image', ('Unit', 'um')),
('DataSetInfo/Image', ('Description', 'description not specified')),
('DataSetInfo/Image', ('MicroscopeModality', '',)),
('DataSetInfo/Image', ('RecordingDate', '2018-05-24 20:36:07.000')),
('DataSetInfo/Image', ('Name', 'name not specified')),
('DataSetInfo/Image', ('ExtMin0', origin[0])),
('DataSetInfo/Image', ('ExtMin1', origin[1])),
('DataSetInfo/Image', ('ExtMin2', origin[2])),
('DataSetInfo/Image', ('ExtMax0', origin[0] + nx * dx)),
('DataSetInfo/Image', ('ExtMax1', origin[1] + ny * dx)),
('DataSetInfo/Image', ('ExtMax2', origin[2] + nz * dz)),
('DataSetInfo/Image', ('LensPower', '63x')),
('DataSetInfo/TimeInfo', ('DatasetTimePoints', nt)),
('DataSetInfo/TimeInfo', ('FileTimePoints', nt)),
]
COLORS = ('0 1 0', '1 0 1', '1 1 0', '0 0 1')
for c in range(nc):
grp = 'DataSetInfo/Channel %s' % c
GROUPS.append(grp)
ATTRS.append((grp, ('ColorOpacity', 1)))
ATTRS.append((grp, ('ColorMode', 'BaseColor')))
ATTRS.append((grp, ('Color', COLORS[c % len(COLORS)])))
ATTRS.append((grp, ('GammaCorrection', 1)))
ATTRS.append((grp, ('ColorRange', '0 255')))
ATTRS.append((grp, ('Name', 'Channel %s' % c)))
# ATTRS.append(grp, ('LSMEmissionWavelength', 0))
# ATTRS.append(grp, ('LSMExcitationWavelength', ''))
# ATTRS.append(grp, ('Description', '(description not specified)'))
# TODO: create accurate timestamps
for t in range(nt):
m, s = divmod(t, 60)
h, m = divmod(m, 60)
strr = '2018-05-24 {:02d}:{:02d}:{:02d}.000'.format(h, m, s)
ATTRS.append(('DataSetInfo/TimeInfo', ('TimePoint{}'.format(t + 1), strr)))
with h5py.File(fname, 'a') as hf:
for grp in GROUPS:
hf.create_group(grp)
for grp, (key, value) in ATTRS:
hf[grp].attrs.create(key, h5str(value))
if type(array) == np.ndarray:
is_numpy = True
else:
import dask
if type(array) == dask.array.core.Array:
is_numpy = False
dset_map = dict()
else:
raise(Exception('array type not supported'))
# try:
# thumb = make_thumbnail(array[0], thumbsize)
# hf.create_dataset('Thumbnail/Data', data=thumb, dtype='u1')
# except Exception:
# logger.warn('Failed to generate Imaris thumbnail')
# add data
fmt = '/DataSet/ResolutionLevel {r}/TimePoint {t}/Channel {c}/'
for t in range(nt):
for c in range(nc):
data = np.squeeze(array[t, c])
for r in range(nr):
if any([i > 1 for i in subsamp[r]]):
data = subsample_data(data, subsamp[r])
grp = hf.create_group(fmt.format(r=r, t=t, c=c))
curr_chunks = tuple(min(*n) for n in zip(chunks[r], data.shape))
if is_numpy:
# if array is a np.array, write to file immediately
print("Writing: %s" % grp)
hist, edges = np.histogram(data, 256)
grp.create_dataset('Data',
data=data,
chunks=curr_chunks,
compression=compression)
else:
# if array is da.array, only prepare hdf5 dsets
# and write after dask optimized chunk calculation
# for the different resolutions and use
# dask.array.core.store to stream the data to disk.
hist, edges = np.histogram(np.zeros(1), 256)
dset = grp.require_dataset('Data',
shape=data.shape,
dtype=data.dtype,
chunks=curr_chunks,
compression=compression)
dset_map[dset] = data
grp.create_dataset('Histogram', data=hist.astype(np.uint64))
grp.attrs.create('HistogramMin', h5str(edges[0]))
grp.attrs.create('HistogramMax', h5str(edges[-1]))
grp.attrs.create('ImageSizeX', h5str(data.shape[2]))
grp.attrs.create('ImageSizeY', h5str(data.shape[1]))
grp.attrs.create('ImageSizeZ', h5str(data.shape[0]))
# stream dask array into file
if not is_numpy:
print("Writing into %s" %fname)
# alternatively to manually executing tasks associated to each chunk,
# use dask optimizations. ave-width seems to work for large dataset with dct weights.
# however it's not clear how dask optimizes
# with dask.config.set({'optimization.fuse.ave-width': 100}):
stored = dask.array.core.store(list(dset_map.values()),
list(dset_map.keys()),
# scheduler=scheduler,
scheduler='single-threaded',
compute=False,
)
dsk = stored.dask
keys = [k for k in dsk.keys() if (type(k) == tuple and k[0].startswith('store'))]
nblocks = [len(c) for c in array.chunks][2:]
delayed_chunks = []
from dask.optimization import cull
for x, y, z in np.ndindex(*nblocks):
chunk_keys = [k for k in keys if k[1:] == (x, y, z)]
cdsk = cull(dsk, chunk_keys)[0]
delayed_chunks.append(dask.delayed(dask.get)(cdsk, chunk_keys, scheduler='single-threaded'))
dask.compute(delayed_chunks, scheduler=scheduler)
return fname
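# Illustrative usage sketch (not part of the original module): writing a dask
# array with da_to_ims. The shape, chunking and output path below are made up
# for demonstration; any numpy or dask volume should work analogously, provided
# dask and h5py are installed.
def _example_write_dask_to_ims():
    import dask.array as da
    # lazily evaluated 5D volume (t, c, z, y, x); chunks match the first resolution level
    vol = da.zeros((1, 1, 64, 256, 256), chunks=(1, 1, 16, 128, 128), dtype='uint16')
    # the dask branch above prepares the HDF5 datasets and then streams the chunks to disk
    return da_to_ims(vol, fname='/tmp/example_dask.ims', overwrite=True)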
def np_to_ims(array, fname='myfile.ims',
subsamp=((1, 1, 1), (2, 2, 2), (4,4,4), (8,8,8)),
chunks=((16, 128, 128), (64, 64, 64), (32, 32, 32), (16, 16, 16)),
compression='gzip',
thumbsize=256,
dx=1, dz=1,
overwrite= False,
origin=[0.,0.,0.],
):
"""
modified by malbert:
- include nonzero origin
"""
assert len(subsamp) == len(chunks)
assert all([len(i) == 3 for i in subsamp]), 'Only deal with 3D chunks'
assert all([len(i) == len(x) for i, x in zip(subsamp, chunks)])
assert compression in (None, 'gzip', 'lzf', 'szip'), 'Unknown compression type'
if not fname.endswith('.ims'):
fname = fname + '.ims'
if overwrite:
if os.path.exists(fname):
os.remove(fname)
# force 5D
if not array.ndim == 5:
array = array.reshape(tuple([1] * (5 - array.ndim)) + array.shape)
nt, nc, nz, ny, nx = array.shape
nr = len(subsamp)
GROUPS = [
'DataSetInfo',
'Thumbnail',
'DataSetTimes',
'DataSetInfo/Imaris',
'DataSetInfo/Image',
'DataSetInfo/TimeInfo'
]
ATTRS = [
('/', ('ImarisDataSet', 'ImarisDataSet')),
('/', ('ImarisVersion', '5.5.0')),
('/', ('DataSetInfoDirectoryName', 'DataSetInfo')),
('/', ('ThumbnailDirectoryName', 'Thumbnail')),
('/', ('DataSetDirectoryName', 'DataSet')),
('DataSetInfo/Imaris', ('Version', '8.0')),
('DataSetInfo/Imaris', ('ThumbnailMode', 'thumbnailMIP')),
('DataSetInfo/Imaris', ('ThumbnailSize', thumbsize)),
('DataSetInfo/Image', ('X', nx)),
('DataSetInfo/Image', ('Y', ny)),
('DataSetInfo/Image', ('Z', nz)),
('DataSetInfo/Image', ('NumberOfChannels', nc)),
('DataSetInfo/Image', ('Noc', nc)),
('DataSetInfo/Image', ('Unit', 'um')),
('DataSetInfo/Image', ('Description', 'description not specified')),
('DataSetInfo/Image', ('MicroscopeModality', '',)),
('DataSetInfo/Image', ('RecordingDate', '2018-05-24 20:36:07.000')),
('DataSetInfo/Image', ('Name', 'name not specified')),
('DataSetInfo/Image', ('ExtMin0', origin[0])),
('DataSetInfo/Image', ('ExtMin1', origin[1])),
('DataSetInfo/Image', ('ExtMin2', origin[2])),
('DataSetInfo/Image', ('ExtMax0', origin[0] + nx * dx)),
('DataSetInfo/Image', ('ExtMax1', origin[1] + ny * dx)),
('DataSetInfo/Image', ('ExtMax2', origin[2] + nz * dz)),
('DataSetInfo/Image', ('LensPower', '63x')),
('DataSetInfo/TimeInfo', ('DatasetTimePoints', nt)),
('DataSetInfo/TimeInfo', ('FileTimePoints', nt)),
]
COLORS = ('0 1 0', '1 0 1', '1 1 0', '0 0 1')
for c in range(nc):
grp = 'DataSetInfo/Channel %s' % c
GROUPS.append(grp)
ATTRS.append((grp, ('ColorOpacity', 1)))
ATTRS.append((grp, ('ColorMode', 'BaseColor')))
ATTRS.append((grp, ('Color', COLORS[c % len(COLORS)])))
ATTRS.append((grp, ('GammaCorrection', 1)))
ATTRS.append((grp, ('ColorRange', '0 255')))
ATTRS.append((grp, ('Name', 'Channel %s' % c)))
# ATTRS.append(grp, ('LSMEmissionWavelength', 0))
# ATTRS.append(grp, ('LSMExcitationWavelength', ''))
# ATTRS.append(grp, ('Description', '(description not specified)'))
# TODO: create accurate timestamps
for t in range(nt):
m, s = divmod(t, 60)
h, m = divmod(m, 60)
strr = '2018-05-24 {:02d}:{:02d}:{:02d}.000'.format(h, m, s)
ATTRS.append(('DataSetInfo/TimeInfo', ('TimePoint{}'.format(t + 1), strr)))
with h5py.File(fname, 'a') as hf:
for grp in GROUPS:
hf.create_group(grp)
for grp, (key, value) in ATTRS:
hf[grp].attrs.create(key, h5str(value))
try:
thumb = make_thumbnail(array[0], thumbsize)
hf.create_dataset('Thumbnail/Data', data=thumb, dtype='u1')
except Exception:
            logger.warning('Failed to generate Imaris thumbnail')
# add data
fmt = '/DataSet/ResolutionLevel {r}/TimePoint {t}/Channel {c}/'
for t in range(nt):
for c in range(nc):
data = np.squeeze(array[t, c])
for r in range(nr):
if any([i > 1 for i in subsamp[r]]):
data = subsample_data(data, subsamp[r])
hist, edges = np.histogram(data, 256)
grp = hf.create_group(fmt.format(r=r, t=t, c=c))
print("Writing: %s" % grp)
grp.create_dataset('Histogram', data=hist.astype(np.uint64))
grp.attrs.create('HistogramMin', h5str(edges[0]))
grp.attrs.create('HistogramMax', h5str(edges[-1]))
grp.create_dataset('Data', data=data,
chunks=tuple(min(*n) for n in zip(chunks[r], data.shape)),
compression=compression)
grp.attrs.create('ImageSizeX', h5str(data.shape[2]))
grp.attrs.create('ImageSizeY', h5str(data.shape[1]))
grp.attrs.create('ImageSizeZ', h5str(data.shape[0]))
return fname
def empty_to_ims(shape, fname='myfile.ims',
subsamp=((1, 1, 1), (2, 2, 2), (4,4,4), (8,8,8)),
chunks=((16, 128, 128), (64, 64, 64), (32, 32, 32), (16, 16, 16)),
compression='gzip',
thumbsize=256,
dx=1, dz=1,
overwrite= False,
origin=[0.,0.,0.],
):
"""
create empty imaris file to stream data into later
"""
assert len(subsamp) == len(chunks)
assert all([len(i) == 3 for i in subsamp]), 'Only deal with 3D chunks'
assert all([len(i) == len(x) for i, x in zip(subsamp, chunks)])
assert compression in (None, 'gzip', 'lzf', 'szip'), 'Unknown compression type'
if not fname.endswith('.ims'):
fname = fname + '.ims'
if overwrite:
if os.path.exists(fname):
os.remove(fname)
# force 5D
# if not array.ndim == 5:
# array = array.reshape(tuple([1] * (5 - array.ndim)) + array.shape)
# nt, nc, nz, ny, nx = array.shape
nt, nc, nz, ny, nx = (1, 1) + tuple(shape)
nr = len(subsamp)
GROUPS = [
'DataSetInfo',
'Thumbnail',
'DataSetTimes',
'DataSetInfo/Imaris',
'DataSetInfo/Image',
'DataSetInfo/TimeInfo'
]
ATTRS = [
('/', ('ImarisDataSet', 'ImarisDataSet')),
('/', ('ImarisVersion', '5.5.0')),
('/', ('DataSetInfoDirectoryName', 'DataSetInfo')),
('/', ('ThumbnailDirectoryName', 'Thumbnail')),
('/', ('DataSetDirectoryName', 'DataSet')),
('DataSetInfo/Imaris', ('Version', '8.0')),
('DataSetInfo/Imaris', ('ThumbnailMode', 'thumbnailMIP')),
('DataSetInfo/Imaris', ('ThumbnailSize', thumbsize)),
('DataSetInfo/Image', ('X', nx)),
('DataSetInfo/Image', ('Y', ny)),
('DataSetInfo/Image', ('Z', nz)),
('DataSetInfo/Image', ('NumberOfChannels', nc)),
('DataSetInfo/Image', ('Noc', nc)),
('DataSetInfo/Image', ('Unit', 'um')),
('DataSetInfo/Image', ('Description', 'description not specified')),
('DataSetInfo/Image', ('MicroscopeModality', '',)),
('DataSetInfo/Image', ('RecordingDate', '2018-05-24 20:36:07.000')),
('DataSetInfo/Image', ('Name', 'name not specified')),
('DataSetInfo/Image', ('ExtMin0', origin[0])),
('DataSetInfo/Image', ('ExtMin1', origin[1])),
('DataSetInfo/Image', ('ExtMin2', origin[2])),
('DataSetInfo/Image', ('ExtMax0', origin[0] + nx * dx)),
('DataSetInfo/Image', ('ExtMax1', origin[1] + ny * dx)),
('DataSetInfo/Image', ('ExtMax2', origin[2] + nz * dz)),
('DataSetInfo/Image', ('LensPower', '63x')),
('DataSetInfo/TimeInfo', ('DatasetTimePoints', nt)),
('DataSetInfo/TimeInfo', ('FileTimePoints', nt)),
]
COLORS = ('0 1 0', '1 0 1', '1 1 0', '0 0 1')
for c in range(nc):
grp = 'DataSetInfo/Channel %s' % c
GROUPS.append(grp)
ATTRS.append((grp, ('ColorOpacity', 1)))
ATTRS.append((grp, ('ColorMode', 'BaseColor')))
ATTRS.append((grp, ('Color', COLORS[c % len(COLORS)])))
ATTRS.append((grp, ('GammaCorrection', 1)))
ATTRS.append((grp, ('ColorRange', '0 255')))
ATTRS.append((grp, ('Name', 'Channel %s' % c)))
# ATTRS.append(grp, ('LSMEmissionWavelength', 0))
# ATTRS.append(grp, ('LSMExcitationWavelength', ''))
# ATTRS.append(grp, ('Description', '(description not specified)'))
# TODO: create accurate timestamps
for t in range(nt):
m, s = divmod(t, 60)
h, m = divmod(m, 60)
strr = '2018-05-24 {:02d}:{:02d}:{:02d}.000'.format(h, m, s)
ATTRS.append(('DataSetInfo/TimeInfo', ('TimePoint{}'.format(t + 1), strr)))
with h5py.File(fname, 'a') as hf:
for grp in GROUPS:
hf.create_group(grp)
for grp, (key, value) in ATTRS:
hf[grp].attrs.create(key, h5str(value))
# create thumbnail later
# try:
# thumb = make_thumbnail(array[0], thumbsize)
# hf.create_dataset('Thumbnail/Data', data=thumb, dtype='u1')
# except Exception:
# logger.warn('Failed to generate Imaris thumbnail')
# add data
fmt = '/DataSet/ResolutionLevel {r}/TimePoint {t}/Channel {c}/'
for t in range(nt):
for c in range(nc):
# data = np.squeeze(array[t, c])
# data = np.zeros(shape)
for r in range(nr):
if any([i > 1 for i in subsamp[r]]):
# data = subsample_data(data, subsamp[r])
nz, ny, nx = [[nz, ny, nx][dim] // subsamp[r][dim] for dim in range(3)]
hist, edges = np.histogram(np.zeros(1), 256)
# hist, edges = np.histogram(data, 256)
grp = hf.create_group(fmt.format(r=r, t=t, c=c))
print("Writing: %s" % grp)
grp.create_dataset('Histogram', data=hist.astype(np.uint64))
grp.attrs.create('HistogramMin', h5str(edges[0]))
grp.attrs.create('HistogramMax', h5str(edges[-1]))
# grp.create_dataset('Data', data=data,
# chunks=tuple(min(*n) for n in zip(chunks[r], data.shape)),
# compression=compression)
grp.attrs.create('ImageSizeX', h5str(nx))
grp.attrs.create('ImageSizeY', h5str(ny))
grp.attrs.create('ImageSizeZ', h5str(nz))
return fname
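# Illustrative usage sketch (not part of the original module): empty_to_ims only
# writes the metadata skeleton, so the 'Data' datasets still have to be created
# and filled afterwards, e.g. directly with h5py. Paths and shapes are examples.
def _example_fill_empty_ims():
    fname = empty_to_ims((64, 256, 256), '/tmp/example_empty.ims', overwrite=True)
    with h5py.File(fname, 'a') as hf:
        grp = hf['/DataSet/ResolutionLevel 0/TimePoint 0/Channel 0']
        grp.create_dataset('Data',
                           data=np.zeros((64, 256, 256), dtype='uint16'),
                           chunks=(16, 128, 128), compression='gzip')
    return fname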
def im_to_ims(filepattern, channels, tps, fname='myfile.ims', overwrite = True, copy_or_link = 'link'):
"""
- take imaris files of individual timepoints and channels and create a
master file which links to (or copies the data in) the individual files
- don't recalculate any thumbnails or histograms
- function added by malbert
- add rotation attribute
PROBLEM:
Fiji's hdf5 cannot load external links (https://forum.image.sc/t/does-hdf5-vibez-support-external-links-in-hdf5-files/10318)
Imaris however should be fine with it
filepattern example: 'mv_000_%(t)03d_c%(c)02d.ims'
"""
if not fname.endswith('.ims'):
fname = fname + '.ims'
if overwrite:
if os.path.exists(fname):
os.remove(fname)
# need: nr, nx, ny, nz, nt, nc, thumbsize, dx, dz
reffilepath = filepattern %{'t': tps[0], 'c': channels[0]}
reffile = h5py.File(reffilepath,mode='r')
nr = len(reffile['/DataSet'].keys())
nz, ny, nx = reffile['DataSet/ResolutionLevel 0/TimePoint 0/Channel 0/Data'].shape
nt = len(tps)
nc = len(channels)
# thumbsize = reffile['Thumbnail/Data'].shape[0]
thumbsize = 256
# dx = float(''.join([str(i)[-2] for i in reffile['DataSetInfo/Image'].attrs['ExtMax0']]))/nx
# dz = float(''.join([str(i)[-2] for i in reffile['DataSetInfo/Image'].attrs['ExtMax2']]))/nx
dx = 1
dz = 1
# thumbsize = float(''.join([str(i)[-2] for i in reffile['DataSetInfo/Image'].attrs['ExtMax2']]))/nx
# ('DataSetInfo/Imaris', ('ThumbnailSize', thumbsize)),
GROUPS = [
'DataSetInfo',
'Thumbnail',
'DataSetTimes',
'DataSetInfo/Imaris',
'DataSetInfo/Image',
'DataSetInfo/TimeInfo'
]
ATTRS = [
('/', ('ImarisDataSet', 'ImarisDataSet')),
('/', ('ImarisVersion', '5.5.0')),
('/', ('DataSetInfoDirectoryName', 'DataSetInfo')),
('/', ('ThumbnailDirectoryName', 'Thumbnail')),
('/', ('DataSetDirectoryName', 'DataSet')),
('DataSetInfo/Imaris', ('Version', '8.0')),
('DataSetInfo/Imaris', ('ThumbnailMode', 'thumbnailMIP')),
('DataSetInfo/Imaris', ('ThumbnailSize', thumbsize)),
('DataSetInfo/Image', ('X', nx)),
('DataSetInfo/Image', ('Y', ny)),
('DataSetInfo/Image', ('Z', nz)),
('DataSetInfo/Image', ('NumberOfChannels', nc)),
('DataSetInfo/Image', ('Noc', nc)),
('DataSetInfo/Image', ('Unit', 'um')),
('DataSetInfo/Image', ('Description', 'description not specified')),
('DataSetInfo/Image', ('MicroscopeModality', '',)),
('DataSetInfo/Image', ('RecordingDate', '2018-05-24 20:36:07.000')),
('DataSetInfo/Image', ('Name', 'name not specified')),
('DataSetInfo/Image', ('ExtMin0', '0')),
('DataSetInfo/Image', ('ExtMin1', '0')),
('DataSetInfo/Image', ('ExtMin2', '0')),
('DataSetInfo/Image', ('ExtMax0', nx * dx)),
('DataSetInfo/Image', ('ExtMax1', ny * dx)),
('DataSetInfo/Image', ('ExtMax2', nz * dz)),
('DataSetInfo/Image', ('LensPower', '63x')),
('DataSetInfo/TimeInfo', ('DatasetTimePoints', nt)),
('DataSetInfo/TimeInfo', ('FileTimePoints', nt)),
]
COLORS = ('0 1 0', '1 0 1', '1 1 0', '0 0 1')
for c in range(nc):
grp = 'DataSetInfo/Channel %s' % c
GROUPS.append(grp)
ATTRS.append((grp, ('ColorOpacity', 1)))
ATTRS.append((grp, ('ColorMode', 'BaseColor')))
ATTRS.append((grp, ('Color', COLORS[c % len(COLORS)])))
ATTRS.append((grp, ('GammaCorrection', 1)))
ATTRS.append((grp, ('ColorRange', '0 255')))
ATTRS.append((grp, ('Name', 'Channel %s' % c)))
# ATTRS.append(grp, ('LSMEmissionWavelength', 0))
# ATTRS.append(grp, ('LSMExcitationWavelength', ''))
# ATTRS.append(grp, ('Description', '(description not specified)'))
# TODO: create accurate timestamps
for t in range(nt):
m, s = divmod(t, 60)
h, m = divmod(m, 60)
strr = '2018-05-24 {:02d}:{:02d}:{:02d}.000'.format(h, m, s)
ATTRS.append(('DataSetInfo/TimeInfo', ('TimePoint{}'.format(t + 1), strr)))
with h5py.File(fname, 'a') as hf:
for grp in GROUPS:
hf.create_group(grp)
for grp, (key, value) in ATTRS:
hf[grp].attrs.create(key, h5str(value))
# try:
# # thumb = make_thumbnail(array[0], thumbsize)
# # thumb = h5py.SoftLink(filepattern)
# # hf.create_dataset('Thumbnail/Data', data=thumb, dtype='u1')
# hf['Thumbnail/Data'] = h5py.ExternalLink(reffilepath,'/Thumbnail/Data')
# except Exception:
# logger.warn('Failed to generate Imaris thumbnail')
# subsamp = subsamp=((1, 1, 1), (2, 2, 2), (4,4,4), (8,8,8))
# chunks = ((16, 128, 128), (64, 64, 64), (32, 32, 32), (16, 16, 16))
# compression = 'gzip'
# if copy_or_link == 'copy':
#
# # add data
# fmt = '/DataSet/ResolutionLevel {r}/TimePoint {t}/Channel {c}/'
# for t in range(nt):
# for c in range(nc):
# # data = np.squeeze(array[t, c])
# filepath = filepattern % {'t': t, 'c': c}
# srcfile = h5py.File(filepath)
# # data = np.squeeze(h5py.File(filepath)[fmt.format(r=0, t=0, c=0) + 'Data'][()])
# for r in range(nr):
#
# if any([i > 1 for i in subsamp[r]]):
# data = subsample_data(data, subsamp[r])
#
# hist, edges = np.histogram(data, 256)
#
# for key in ['Histogram','Dataset']:
# srcfmt = fmt.format(r=r, t=0, c=0)
# grp[key] = srcfile[srcfmt+key][()]
#
# grp = hf.create_group(fmt.format(r=r, t=t, c=c))
# print("Writing: %s" % grp)
# grp.create_dataset('Histogram', data=hist.astype(np.uint64))
# grp.attrs.create('HistogramMin', h5str(edges[0]))
# grp.attrs.create('HistogramMax', h5str(edges[-1]))
# grp['Data'] = h5py.ExternalLink(filepath, fmt.format(r=r, t=0, c=0) + 'Data')
# else:
# grp.create_dataset('Data', data=data,
# chunks=tuple(min(*n) for n in zip(chunks[r], data.shape)),
# compression=compression)
#
# grp.attrs.create('ImageSizeX', h5str(data.shape[2]))
# grp.attrs.create('ImageSizeY', h5str(data.shape[1]))
# grp.attrs.create('ImageSizeZ', h5str(data.shape[0]))
# # add data
# fmt = '/DataSet/ResolutionLevel {r}/TimePoint {t}/Channel {c}/'
# for t in range(nt):
# for c in range(nc):
# # data = np.squeeze(array[t, c])
# filepath = filepattern % {'t': t, 'c': c}
# data = np.squeeze(h5py.File(filepath)[fmt.format(r=0, t=0, c=0)+'Data'][()])
# for r in range(nr):
#
# if any([i > 1 for i in subsamp[r]]):
# data = subsample_data(data, subsamp[r])
#
# hist, edges = np.histogram(data, 256)
# grp = hf.create_group(fmt.format(r=r, t=t, c=c))
# print("Writing: %s" % grp)
# grp.create_dataset('Histogram', data=hist.astype(np.uint64))
# grp.attrs.create('HistogramMin', h5str(edges[0]))
# grp.attrs.create('HistogramMax', h5str(edges[-1]))
# if r>1:
# grp['Data'] = h5py.ExternalLink(filepath,fmt.format(r=r, t=0, c=0)+'Data')
# else:
# grp.create_dataset('Data', data=data,
# chunks=tuple(min(*n) for n in zip(chunks[r], data.shape)),
# compression=compression)
#
# grp.attrs.create('ImageSizeX', h5str(data.shape[2]))
# grp.attrs.create('ImageSizeY', h5str(data.shape[1]))
# grp.attrs.create('ImageSizeZ', h5str(data.shape[0]))
# elif copy_or_link == 'link':
# add data
fmt = '/DataSet/ResolutionLevel {r}/TimePoint {t}/Channel {c}'
for t in range(nt):
for c in range(nc):
for r in range(nr):
grppath = fmt.format(r=r, t=t, c=c)
dirpath = os.path.dirname(grppath)
# pdb.set_trace()
try:
hf.create_group(dirpath)
                    except Exception:
pass
filepath = filepattern % {'t': t, 'c': c}
if copy_or_link == 'link':
print("Linking: %s" % grppath)
hf[grppath] = h5py.ExternalLink(filepath,fmt.format(r=r, t=0, c=0))
elif copy_or_link == 'copy':
print("Copying: %s" % grppath)
srcfile = h5py.File(filepath)
srcfile.copy(fmt.format(r=r, t=0, c=0),hf,grppath)
# hf.copy(filepath+':'+fmt.format(r=r, t=0, c=0),grppath)
# hf[grppath] = h5py.File(filepath)[fmt.format(r=r, t=0, c=0)]
else:
raise(Exception('copy or link?'))
# hf.close()
return fname
def unmap_bdv_from_imaris(hf):
for i in hf:
if re.match(r'^t\d{5}$', i) or re.match(r'^s\d{2}$', i):
del hf[i]
return
def make_thumbnail(array, size=256):
""" array should be 4D array """
# TODO: don't just crop to the upper left corner
    mip = np.array(array).max(1)[:3, :size, :size].astype(float)
for i in range(mip.shape[0]):
mip[i] -= np.min(mip[i])
mip[i] *= 255 / np.max(mip[i])
mip = np.pad(mip, ((0, 3 - mip.shape[0]),
(0, size - mip.shape[1]),
(0, size - mip.shape[2])
), 'constant', constant_values=0)
mip = np.pad(mip, ((0, 1), (0, 0), (0, 0)), 'constant',
constant_values=255).astype('|u1')
return np.squeeze(mip.T.reshape(1, size, size * 4)).astype('|u1')
def h5str(s, coding='ASCII', dtype='S1'):
return np.frombuffer(str(s).encode(coding), dtype=dtype)
def get_meta_from_ims(filename):
"""
read metadata from imaris file
:param filename:
:return:
"""
f = h5py.File(filename)
meta_dict = dict()
def get_attr_string(file_obj,attr):
return float(''.join([i.decode('UTF-8') for i in file_obj['DataSetInfo/Image'].attrs[attr]]))
ns = np.zeros(3,dtype=np.float32)
ns[0] = get_attr_string(f,'X')#.astype(np.int64)
ns[1] = get_attr_string(f,'Y')#.astype(np.int64)
ns[2] = get_attr_string(f,'Z')#.astype(np.int64)
extmin = np.zeros(3,dtype=np.float32)
for i in range(3):
extmin[i] = get_attr_string(f,'ExtMin%s' %i)
extmax = np.zeros(3,dtype=np.float32)
for i in range(3):
extmax[i] = get_attr_string(f,'ExtMax%s' %i)
dx = (extmax[0]-extmin[0])/ns[0]
dz = (extmax[2]-extmin[2])/ns[2]
meta_dict['spacing'] = np.array([dx,dx,dz])
meta_dict['origin'] = extmin
return meta_dict
def subsample_data(data, subsamp):
return data[0::int(subsamp[0]), 0::int(subsamp[1]), 0::int(subsamp[2])]
if __name__ == "__main__":
tps = range(30)
channels = range(3)
file_pattern = '/tmp/im_%(t)03d_c%(c)02d.ims'
for t in tps:
for c in channels:
im = np.random.randint(0, 100, (1, 1, 100, 101, 102)).astype(np.float32)
np_to_ims(im, file_pattern %{'t':t,'c':c}, overwrite=True)
im_to_ims(file_pattern, channels, tps, '/tmp/im.ims', overwrite=True, copy_or_link='copy')
| 40.713736
| 128
| 0.512864
|
22b512610cade77302af75c26f968ff9d65a06bb
| 1,232
|
py
|
Python
|
home_application/views.py
|
xieyag/testA
|
97a2e762497e90ecf14676a90f9b65950736f312
|
[
"Apache-2.0"
] | null | null | null |
home_application/views.py
|
xieyag/testA
|
97a2e762497e90ecf14676a90f9b65950736f312
|
[
"Apache-2.0"
] | 4
|
2020-02-12T03:00:16.000Z
|
2021-06-10T21:45:39.000Z
|
home_application/views.py
|
xieyag/testA
|
97a2e762497e90ecf14676a90f9b65950736f312
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from common.mymako import render_mako_context
def home(request):
"""
    Home page
"""
return render_mako_context(request, '/home_application/home.html')
def dev_guide(request):
"""
    Development guide
"""
return render_mako_context(request, '/home_application/dev_guide.html')
def contactus(request):
"""
    Contact us
"""
return render_mako_context(request, '/home_application/contact.html')
def hello(request):
"""
helloWorld
"""
return render_mako_context(request, '/home_application/hello.html')
| 30.8
| 115
| 0.734578
|
56d5903059aa69dc7ac8641d9ccebadcbb2e3146
| 8,873
|
py
|
Python
|
src/openprocurement/tender/location/models.py
|
BohdanBorkivskyi/openprocurement.api
|
9f4134a15243f8a82a28de72c5ffac6f16e7f06b
|
[
"Apache-2.0"
] | null | null | null |
src/openprocurement/tender/location/models.py
|
BohdanBorkivskyi/openprocurement.api
|
9f4134a15243f8a82a28de72c5ffac6f16e7f06b
|
[
"Apache-2.0"
] | null | null | null |
src/openprocurement/tender/location/models.py
|
BohdanBorkivskyi/openprocurement.api
|
9f4134a15243f8a82a28de72c5ffac6f16e7f06b
|
[
"Apache-2.0"
] | null | null | null |
from schematics.transforms import whitelist
from schematics.types import StringType, URLType, BooleanType, IntType, ValidationError
from schematics.types.compound import ModelType, ListType
from zope.interface import implementer
from openprocurement.agreement.core.models.agreement import Agreement
from openprocurement.api.models import Period, Guarantee, Item, Value, IsoDurationType, Model
from openprocurement.api.validation import validate_items_uniq, validate_classification_id
from openprocurement.tender.cfaua.validation import validate_max_awards_number
from openprocurement.tender.core.models import Tender as BaseTender, ITender, TenderAuctionPeriod, Award, Bid, \
Cancellation, ComplaintModelType, Complaint, EUDocument, EnquiryPeriod, Feature, validate_features_uniq, Lot, \
ProcuringEntity, PeriodStartEndRequired, validate_lots_uniq, Question
class Location(Model):
longitude = StringType(required=True)
latitude = StringType(required=True)
def validate_coord(self, data, value):
if not value.replace(".", "").isdigit():
raise ValidationError(u"Should be proper geographic coordinate system value")
validate_longitude = validate_coord
validate_latitude = validate_coord
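# Illustrative sketch (not from the original module): how the coordinate check
# above behaves. schematics field validators take (data, value), so they can be
# exercised directly; the sample coordinates below are arbitrary.
def _example_location_validation():
    loc = Location({"longitude": "30.5234", "latitude": "50.4501"})
    loc.validate_longitude(None, "30.5234")  # plain decimal string: accepted
    try:
        loc.validate_longitude(None, "not-a-number")
    except ValidationError:
        pass  # rejected by the isdigit() check above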
class ILocationTender(ITender):
""" Marker interface for location tenders """
@implementer(ILocationTender)
class Tender(BaseTender):
class Options:
namespace = "Tender"
_core_roles = BaseTender.Options.roles
_procurement_method_details = whitelist("procurementMethodDetails")
_edit_fields = _core_roles["edit"] + whitelist(
"tenderPeriod",
"features",
"complaintPeriod",
"agreementDuration",
"next_check",
"procuringEntity",
"guarantee",
"serializable_enquiryPeriod",
"minimalStep",
"items",
"qualificationPeriod",
"value",
"maxAwardsCount",
"agreements",
"numberOfBidders",
"hasEnquiries",
"serializable_guarantee",
"serializable_value",
"serializable_minimalStep",
"location"
)
_edit_role = _edit_fields + whitelist("numberOfBids")
_edit_qualification = whitelist("status") + _procurement_method_details
_tendering_view_role = (
_core_roles["view"]
+ _edit_fields
+ whitelist(
"auctionPeriod",
"lots",
"enquiryPeriod",
"complaints",
"auctionUrl",
"awardPeriod",
"qualifications",
"questions",
"cancellations",
"awards",
)
)
_view_role = _tendering_view_role + whitelist("numberOfBids", "bids")
_complete_view_role = _view_role + whitelist("contractPeriod")
roles = {
"create": _edit_role + whitelist("mode", "procurementMethodType", "lots", "coords"),
"edit_draft": _edit_role,
"edit": _edit_role,
"edit_active.tendering": _edit_role + whitelist("coords"),
"edit_active.pre-qualification": _edit_qualification,
"edit_active.qualification": _edit_qualification,
"edit_cancelled": _procurement_method_details,
"edit_complete": _procurement_method_details,
"edit_unsuccessful": _procurement_method_details,
"edit_active.awarded": _procurement_method_details,
"edit_active.auction": _procurement_method_details,
"edit_active.pre-qualification.stand-still": _procurement_method_details,
"draft": _tendering_view_role + whitelist("contractPeriod"),
"active.tendering": _tendering_view_role,
"cancelled": _view_role,
"active.auction": _view_role,
"active.pre-qualification.stand-still": _view_role,
"active.qualification.stand-still": _view_role,
"view": _complete_view_role,
"active.qualification": _complete_view_role,
"active.pre-qualification": _complete_view_role,
"complete": _complete_view_role,
"active.awarded": _complete_view_role,
"unsuccessful": _complete_view_role,
"contracting": _core_roles["contracting"] + _procurement_method_details,
"chronograph": _core_roles["chronograph"] + _procurement_method_details,
"chronograph_view": _core_roles["chronograph_view"] + _procurement_method_details,
"auction_view": _core_roles["auction_view"]
+ _procurement_method_details
+ whitelist("milestones", "mainProcurementCategory"),
"Administrator": _core_roles["Administrator"] + _procurement_method_details,
"auction_post": _core_roles["auction_post"] + _procurement_method_details,
"auction_patch": _core_roles["auction_patch"] + _procurement_method_details,
"listing": _core_roles["listing"] + _procurement_method_details,
"embedded": _core_roles["embedded"],
"plain": _core_roles["plain"],
"default": _core_roles["default"],
}
procurementMethodType = StringType(default="location")
procuring_entity_kinds = ["general", "special", "defense", "central"]
block_tender_complaint_status = ["claim", "pending", "accepted", "satisfied", "stopping"]
block_complaint_status = ["pending", "accepted", "satisfied", "stopping"]
auctionPeriod = ModelType(TenderAuctionPeriod, default={})
auctionUrl = URLType()
awards = ListType(ModelType(Award, required=True), default=list())
    awardPeriod = ModelType(Period)  # The date or period on which an award is anticipated to be made.
bids = ListType(
ModelType(Bid, required=True), default=list()
) # A list of all the companies who entered submissions for the tender.
cancellations = ListType(ModelType(Cancellation, required=True), default=list())
complaints = ListType(ComplaintModelType(Complaint, required=True), default=list())
#contractPeriod = ModelType(ContractPeriod, required=False)
agreements = ListType(ModelType(Agreement, required=True), default=list())
documents = ListType(
ModelType(EUDocument, required=True), default=list()
) # All documents and attachments related to the tender.
enquiryPeriod = ModelType(EnquiryPeriod, required=False)
guarantee = ModelType(Guarantee)
hasEnquiries = BooleanType() # A Yes/No field as to whether enquiries were part of tender process.
items = ListType(
ModelType(Item, required=True),
required=True,
min_size=1,
validators=[validate_items_uniq, validate_classification_id],
) # The goods and services to be purchased, broken into line items wherever possible. Items should not be duplicated, but a quantity of 2 specified instead.
features = ListType(ModelType(Feature, required=True), validators=[validate_features_uniq])
minimalStep = ModelType(Value, required=True)
numberOfBidders = IntType() # The number of unique tenderers who participated in the tender
maxAwardsCount = IntType(required=True, validators=[validate_max_awards_number])
lots = ListType(
ModelType(Lot, required=True), min_size=1, max_size=1, default=list(), validators=[validate_lots_uniq]
)
#procurementMethodType = StringType(default="closeFrameworkAgreementUA")
procuringEntity = ModelType(
ProcuringEntity, required=True
) # The entity managing the procurement, which may be different from the buyer who is paying / using the items being procured.
qualificationPeriod = ModelType(Period)
#qualifications = ListType(ModelType(Qualification, required=True), default=list())
questions = ListType(ModelType(Question, required=True), default=list())
status = StringType(
choices=[
"draft",
"active.tendering",
"active.pre-qualification",
"active.pre-qualification.stand-still",
"active.auction",
"active.qualification",
"active.qualification.stand-still",
"active.awarded",
"complete",
"cancelled",
"unsuccessful",
],
default="active.tendering",
)
tenderPeriod = ModelType(PeriodStartEndRequired, required=True)
title_en = StringType(required=True, min_length=1)
value = ModelType(Value, required=True) # The total estimated value of the procurement.
#agreementDuration = IsoDurationType(required=True, validators=[validate_max_agreement_duration_period])
mainProcurementCategory = StringType(choices=["goods", "services"])
location = ModelType(Location, required=True)
| 47.704301
| 161
| 0.672377
|
4007723856584db9008635ae01654a61b09ae55e
| 1,939
|
py
|
Python
|
urbit/cue.py
|
laanwj/urbit-tools
|
b3823d50d5ab84c0852593e3255c0d7c51de6d1c
|
[
"MIT"
] | 18
|
2015-02-03T19:27:18.000Z
|
2021-04-04T03:03:57.000Z
|
urbit/cue.py
|
laanwj/urbit-tools
|
b3823d50d5ab84c0852593e3255c0d7c51de6d1c
|
[
"MIT"
] | null | null | null |
urbit/cue.py
|
laanwj/urbit-tools
|
b3823d50d5ab84c0852593e3255c0d7c51de6d1c
|
[
"MIT"
] | 2
|
2015-10-02T01:37:13.000Z
|
2017-06-04T03:41:49.000Z
|
# jam format
# 0 10
# 1 1100
# 2 100.1000
# 3 110.1000
# 4 1001.1000
# 5 1011.1000
# 7 1111.1000
# 8 100.0001.0000
# 15 111.1001.0000
# 31 1111.1011.0000
# 63 1.1111.1101.0000
#127 11.1111.1111.0000
#128 1.0000.0000.0010.0000
#255 1.1111.1110.0010.0000
# [0 0] 10.1001
# [1 0] 1011.0001
# [0 1] 1100.1001
# [1 1] 11.0011.0001
# [0 [0 0]] 10.1001.1001
# [[0 0] 0] 10.1010.0101
# (&3)==1 -> cell
# (&3)==2 -> 0
# (&3)==0 -> count trailing zeros
# 2 0 count bits, 1 bit
# 3 1 count bit, 2+x bits
# 4 2 count bits, 4+x bits
def _read_varint(a, ofs):
count = -1
while (a&1) == 0: # number of trailing zeros -> length of length
a >>= 1
ofs += 1
count += 1
a >>= 1
ofs += 1
if count < 0:
return (a, ofs, 0)
b = (1<<count) + (a & ((1 << count)-1))
a >>= count
ofs += count
val = (a & ((1 << b)-1))
a >>= b
ofs += b
return (a, ofs, val)
def _read_value(m, ofs, a):
ostart = ofs
if a == 0:
raise ValueError('cue: invalid value zero')
if (a&1) == 0: # varint
a >>= 1
ofs += 1
(a, ofs, val) = _read_varint(a, ofs)
m[ostart] = val # memorize w/ bit offset
return (a, ofs, val)
elif (a&3) == 1: # cell
a >>= 2
ofs += 2
(a, ofs, val1) = _read_value(m, ofs, a)
(a, ofs, val2) = _read_value(m, ofs, a)
val = (val1,val2)
m[ostart] = val # memorize w/ bit offset
return (a, ofs, val)
else: # (a&3) == 3 seems to mean 'repeat'
a >>= 2
ofs += 2
(a, ofs, val1) = _read_varint(a, ofs)
return (a, ofs, m[val1])
def cue(a):
'''unpack noun'''
# naive recursive interpretation
return _read_value({}, 0, a)[2]
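# Illustrative sketch (not part of the original module): a few values worked
# through against the jam table in the comments above.
def _example_cue():
    assert cue(0b10) == 0            # atom 0
    assert cue(0b1100) == 1          # atom 1
    assert cue(0b1001000) == 2       # atom 2
    assert cue(0b101001) == (0, 0)   # cell [0 0]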
| 24.858974
| 68
| 0.449716
|
2e458d9398af5df935cd63e9a6e96a7b84080382
| 928
|
py
|
Python
|
xlsxwriter/test/comparison/test_data_validation07.py
|
Rippling/XlsxWriter-1
|
be8d1cb8f8b156cf87bbe5d591f1f5475804be44
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_data_validation07.py
|
Rippling/XlsxWriter-1
|
be8d1cb8f8b156cf87bbe5d591f1f5475804be44
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_data_validation07.py
|
Rippling/XlsxWriter-1
|
be8d1cb8f8b156cf87bbe5d591f1f5475804be44
|
[
"BSD-2-Clause"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('data_validation07.xlsx')
def test_create_file(self):
"""Test the creation of a XlsxWriter file with data validation."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.data_validation(
'C2', {'validate': 'list',
'value': ['coffee', 'café'],
}
)
workbook.close()
self.assertExcelEqual()
| 24.421053
| 79
| 0.579741
|
679e8fc43d493624920bad7587534c018aa769bd
| 3,735
|
py
|
Python
|
superviselySDK/supervisely_lib/nn/hosted/inference_batch.py
|
nicehuster/mmdetection-supervisely-person-datasets
|
ff1b57e16a71378510571dbb9cebfdb712656927
|
[
"Apache-2.0"
] | 40
|
2019-05-05T08:08:18.000Z
|
2021-10-17T00:07:58.000Z
|
superviselySDK/supervisely_lib/nn/hosted/inference_batch.py
|
nicehuster/mmdetection-supervisely-person-datasets
|
ff1b57e16a71378510571dbb9cebfdb712656927
|
[
"Apache-2.0"
] | 8
|
2019-06-13T06:00:08.000Z
|
2021-07-24T05:25:33.000Z
|
superviselySDK/supervisely_lib/nn/hosted/inference_batch.py
|
nicehuster/mmdetection-supervisely-person-datasets
|
ff1b57e16a71378510571dbb9cebfdb712656927
|
[
"Apache-2.0"
] | 6
|
2019-07-30T06:36:27.000Z
|
2021-06-03T11:57:36.000Z
|
# coding: utf-8
from copy import deepcopy
import os
from supervisely_lib import logger
from supervisely_lib.annotation.annotation import Annotation
from supervisely_lib.imaging import image as sly_image
from supervisely_lib.io.json import load_json_file
from supervisely_lib.nn.config import AlwaysPassingConfigValidator
from supervisely_lib.project.project import Project, read_single_project, OpenMode
from supervisely_lib.task.paths import TaskPaths
from supervisely_lib.task.progress import report_inference_finished
from supervisely_lib.nn.hosted.inference_single_image import SingleImageInferenceBase
from supervisely_lib.nn.hosted.inference_modes import MODE, InferenceModeFactory, get_effective_inference_mode_config
from supervisely_lib.nn.hosted.legacy.inference_config import maybe_convert_from_v1_inference_task_config
from supervisely_lib.task.progress import Progress
class BatchInferenceApplier:
"""Runs a given single image inference model over all images in a project; saves results to a new project."""
def __init__(self, single_image_inference: SingleImageInferenceBase, default_inference_mode_config: dict,
config_validator=None):
self._single_image_inference = single_image_inference
self._config_validator = config_validator or AlwaysPassingConfigValidator()
self._inference_mode_config = self._determine_inference_mode_config(deepcopy(default_inference_mode_config))
self._determine_input_data()
logger.info('Dataset inference preparation done.')
def _determine_input_data(self):
# TODO support multiple input projects.
self._in_project = read_single_project(TaskPaths.DATA_DIR)
logger.info('Project structure has been read. Samples: {}.'.format(self._in_project.total_items))
def _determine_inference_mode_config(self, default_inference_mode_config):
raw_task_config = load_json_file(TaskPaths.TASK_CONFIG_PATH)
task_config = maybe_convert_from_v1_inference_task_config(raw_task_config)
logger.info('Input task config', extra={'config': task_config})
result_config = get_effective_inference_mode_config(
task_config.get(MODE, {}), default_inference_mode_config)
logger.info('Full inference mode config', extra={'config': result_config})
return result_config
def run_inference(self):
inference_mode = InferenceModeFactory.create(
self._inference_mode_config, self._in_project.meta, self._single_image_inference)
out_project = Project(os.path.join(TaskPaths.RESULTS_DIR, self._in_project.name), OpenMode.CREATE)
out_project.set_meta(inference_mode.out_meta)
progress_bar = Progress('Model applying: ', self._in_project.total_items)
for in_dataset in self._in_project:
out_dataset = out_project.create_dataset(in_dataset.name)
for in_item_name in in_dataset:
# Use output project meta so that we get an annotation that is already in the context of the output
# project (with added object classes etc).
in_item_paths = in_dataset.get_item_paths(in_item_name)
in_img = sly_image.read(in_item_paths.img_path)
in_ann = Annotation.load_json_file(in_item_paths.ann_path, inference_mode.out_meta)
logger.trace('Will process image', extra={'dataset_name': in_dataset.name, 'image_name': in_item_name})
inference_annotation = inference_mode.infer_annotate(in_img, in_ann)
out_dataset.add_item_file(in_item_name, in_item_paths.img_path, ann=inference_annotation)
progress_bar.iter_done_report()
report_inference_finished()
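# Illustrative usage sketch (not part of the original module): wiring the applier
# up. `my_single_image_model` stands for any concrete SingleImageInferenceBase
# subclass instance and `default_mode_config` for that model's default inference
# mode config dict; both names are placeholders rather than Supervisely APIs.
def _example_apply_model(my_single_image_model, default_mode_config):
    applier = BatchInferenceApplier(
        single_image_inference=my_single_image_model,
        default_inference_mode_config=default_mode_config,
    )
    applier.run_inference()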
| 54.926471
| 119
| 0.763855
|
47f64ae59b0dc5d4b6929e272da450e15e06bf3b
| 218
|
py
|
Python
|
FIAP - python/Capitulo3-Listas/CriandoListas.py
|
AlamoVinicius/code-pratice
|
924a3ff782caf3695bbeeac39fa02fb23781cd75
|
[
"MIT"
] | null | null | null |
FIAP - python/Capitulo3-Listas/CriandoListas.py
|
AlamoVinicius/code-pratice
|
924a3ff782caf3695bbeeac39fa02fb23781cd75
|
[
"MIT"
] | null | null | null |
FIAP - python/Capitulo3-Listas/CriandoListas.py
|
AlamoVinicius/code-pratice
|
924a3ff782caf3695bbeeac39fa02fb23781cd75
|
[
"MIT"
] | null | null | null |
# List populated statically
lista_estaticamente = ['xpto', True]
# List populated dynamically
lista_dinamica = [input('Enter a user: '), bool(int(input('Logged in? (0/1) ')))]
# Empty list:
lista_vazia = []
| 27.25
| 81
| 0.733945
|
49c2cd91b1010c1748480034454ef625005ce078
| 1,906
|
py
|
Python
|
test/test_pyCLIP.py
|
guma44/pyCLIP
|
9fb21100aa5f40c5cf6a4964ca7f4192e623e575
|
[
"MIT"
] | null | null | null |
test/test_pyCLIP.py
|
guma44/pyCLIP
|
9fb21100aa5f40c5cf6a4964ca7f4192e623e575
|
[
"MIT"
] | null | null | null |
test/test_pyCLIP.py
|
guma44/pyCLIP
|
9fb21100aa5f40c5cf6a4964ca7f4192e623e575
|
[
"MIT"
] | null | null | null |
import os
import sys
from Bio import SeqIO
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(script_dir, ".."))
import ShortRead
for rec in SeqIO.parse(os.path.join(script_dir, 'test.fa'), 'fasta'):
actual = rec.id.split('|')[0].split(':')
read = ShortRead.ShortRead(chrom='chrN',
start=0,
end=len(actual[2]),
strand=actual[3],
seq=actual[2],
clipz_cigar=actual[1],
name=actual[0])
print "*" * 80
print
print actual[0], actual[1], actual[2]
print 'Features positions: ', read.clipz_cigar.features
i = 0
j = 0
feat_count = 0
feats = read.clipz_cigar.features
seq1 = []
seq2 = []
align = []
while len(seq1) <= len(actual[2]):
if feat_count not in feats.keys():
seq1.append(actual[2][i])
seq2.append(rec.seq[j])
align.append('|')
i += 1
j += 1
feat_count += 1
else:
if feats[feat_count].startswith('M'):
seq1.append(actual[2][i])
seq2.append(rec.seq[j])
align.append(' ')
i += 1
j += 1
feat_count += 1
elif feats[feat_count].startswith('D'):
seq1.append('-')
seq2.append(rec.seq[j])
align.append(' ')
j += 1
feat_count += 1
elif feats[feat_count].startswith('I'):
seq1.append(actual[2][i])
seq2.append('-')
align.append(' ')
i += 1
feat_count += 1
print "".join(seq1)
print "".join(align)
print "".join(seq2)
print
print "*" * 80
| 30.741935
| 69
| 0.449108
|
b5e5e998561d16deb6e1425f400d92650d74d40b
| 24,981
|
py
|
Python
|
tests/www/views/test_views_acl.py
|
kevin0120/airflow
|
fa263cbf0ac002bdb26239ce36d5dc2a1b6251fd
|
[
"Apache-2.0"
] | 1
|
2021-03-03T07:00:02.000Z
|
2021-03-03T07:00:02.000Z
|
tests/www/views/test_views_acl.py
|
kevin0120/airflow
|
fa263cbf0ac002bdb26239ce36d5dc2a1b6251fd
|
[
"Apache-2.0"
] | 36
|
2021-11-26T00:08:49.000Z
|
2021-11-26T00:09:33.000Z
|
tests/www/views/test_views_acl.py
|
kevin0120/airflow
|
fa263cbf0ac002bdb26239ce36d5dc2a1b6251fd
|
[
"Apache-2.0"
] | 3
|
2020-06-30T02:38:17.000Z
|
2022-01-19T06:14:08.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import json
import urllib.parse
import pytest
from airflow.security import permissions
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.api_connexion_utils import create_user
from tests.test_utils.db import clear_db_runs
from tests.test_utils.www import check_content_in_response, check_content_not_in_response, client_with_login
NEXT_YEAR = datetime.datetime.now().year + 1
DEFAULT_DATE = timezone.datetime(NEXT_YEAR, 6, 1)
USER_DATA = {
"dag_tester": (
"dag_acl_tester",
{
"first_name": 'dag_test',
"last_name": 'dag_test',
"email": 'dag_test@fab.org',
"password": 'dag_test',
},
),
"dag_faker": ( # User without permission.
"dag_acl_faker",
{
"first_name": 'dag_faker',
"last_name": 'dag_faker',
"email": 'dag_fake@fab.org',
"password": 'dag_faker',
},
),
"dag_read_only": ( # User with only read permission.
"dag_acl_read_only",
{
"first_name": 'dag_read_only',
"last_name": 'dag_read_only',
"email": 'dag_read_only@fab.org',
"password": 'dag_read_only',
},
),
"all_dag_user": ( # User has all dag access.
"all_dag_role",
{
"first_name": 'all_dag_user',
"last_name": 'all_dag_user',
"email": 'all_dag_user@fab.org',
"password": 'all_dag_user',
},
),
}
@pytest.fixture(scope="module")
def acl_app(app):
security_manager = app.appbuilder.sm
for username, (role_name, kwargs) in USER_DATA.items():
if not security_manager.find_user(username=username):
role = security_manager.add_role(role_name)
security_manager.add_user(
role=role,
username=username,
**kwargs,
)
# FIXME: Clean up this block of code.....
website_permission = security_manager.find_permission_view_menu(
permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE
)
dag_tester_role = security_manager.find_role('dag_acl_tester')
edit_perm_on_dag = security_manager.find_permission_view_menu(
permissions.ACTION_CAN_EDIT, 'DAG:example_bash_operator'
)
security_manager.add_permission_role(dag_tester_role, edit_perm_on_dag)
read_perm_on_dag = security_manager.find_permission_view_menu(
permissions.ACTION_CAN_READ, 'DAG:example_bash_operator'
)
security_manager.add_permission_role(dag_tester_role, read_perm_on_dag)
security_manager.add_permission_role(dag_tester_role, website_permission)
all_dag_role = security_manager.find_role('all_dag_role')
edit_perm_on_all_dag = security_manager.find_permission_view_menu(
permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG
)
security_manager.add_permission_role(all_dag_role, edit_perm_on_all_dag)
read_perm_on_all_dag = security_manager.find_permission_view_menu(
permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG
)
security_manager.add_permission_role(all_dag_role, read_perm_on_all_dag)
read_perm_on_task_instance = security_manager.find_permission_view_menu(
permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE
)
security_manager.add_permission_role(all_dag_role, read_perm_on_task_instance)
security_manager.add_permission_role(all_dag_role, website_permission)
role_user = security_manager.find_role('User')
security_manager.add_permission_role(role_user, read_perm_on_all_dag)
security_manager.add_permission_role(role_user, edit_perm_on_all_dag)
security_manager.add_permission_role(role_user, website_permission)
read_only_perm_on_dag = security_manager.find_permission_view_menu(
permissions.ACTION_CAN_READ, 'DAG:example_bash_operator'
)
dag_read_only_role = security_manager.find_role('dag_acl_read_only')
security_manager.add_permission_role(dag_read_only_role, read_only_perm_on_dag)
security_manager.add_permission_role(dag_read_only_role, website_permission)
dag_acl_faker_role = security_manager.find_role('dag_acl_faker')
security_manager.add_permission_role(dag_acl_faker_role, website_permission)
yield app
for username, _ in USER_DATA.items():
user = security_manager.find_user(username=username)
if user:
security_manager.del_register_user(user)
@pytest.fixture(scope="module")
def reset_dagruns():
"""Clean up stray garbage from other tests."""
clear_db_runs()
@pytest.fixture(autouse=True)
def init_dagruns(acl_app, reset_dagruns):
acl_app.dag_bag.get_dag("example_bash_operator").create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING,
)
acl_app.dag_bag.get_dag("example_subdag_operator").create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING,
)
yield
clear_db_runs()
@pytest.fixture()
def dag_test_client(acl_app):
return client_with_login(acl_app, username="dag_test", password="dag_test")
@pytest.fixture()
def dag_faker_client(acl_app):
return client_with_login(acl_app, username="dag_faker", password="dag_faker")
@pytest.fixture()
def all_dag_user_client(acl_app):
return client_with_login(
acl_app,
username="all_dag_user",
password="all_dag_user",
)
@pytest.fixture(scope="module")
def user_edit_one_dag(acl_app):
return create_user(
acl_app,
username="user_edit_one_dag",
role_name="role_edit_one_dag",
permissions=[
(permissions.ACTION_CAN_READ, 'DAG:example_bash_operator'),
(permissions.ACTION_CAN_EDIT, 'DAG:example_bash_operator'),
],
)
@pytest.mark.usefixtures("user_edit_one_dag")
def test_permission_exist(acl_app):
perms_views = acl_app.appbuilder.sm.find_permissions_view_menu(
acl_app.appbuilder.sm.find_view_menu('DAG:example_bash_operator'),
)
assert len(perms_views) == 2
perms = {str(perm) for perm in perms_views}
assert "can read on DAG:example_bash_operator" in perms
assert "can edit on DAG:example_bash_operator" in perms
@pytest.mark.usefixtures("user_edit_one_dag")
def test_role_permission_associate(acl_app):
test_role = acl_app.appbuilder.sm.find_role('role_edit_one_dag')
perms = {str(perm) for perm in test_role.permissions}
assert 'can edit on DAG:example_bash_operator' in perms
assert 'can read on DAG:example_bash_operator' in perms
@pytest.fixture(scope="module")
def user_all_dags(acl_app):
return create_user(
acl_app,
username="user_all_dags",
role_name="role_all_dags",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
],
)
@pytest.fixture()
def client_all_dags(acl_app, user_all_dags):
return client_with_login(
acl_app,
username="user_all_dags",
password="user_all_dags",
)
def test_index_for_all_dag_user(client_all_dags):
# The all dag user can access/view all dags.
resp = client_all_dags.get('/', follow_redirects=True)
check_content_in_response('example_subdag_operator', resp)
check_content_in_response('example_bash_operator', resp)
def test_index_failure(dag_test_client):
# This user can only access/view example_bash_operator dag.
resp = dag_test_client.get('/', follow_redirects=True)
check_content_not_in_response('example_subdag_operator', resp)
def test_dag_autocomplete_success(client_all_dags):
resp = client_all_dags.get(
'dagmodel/autocomplete?query=example_bash',
follow_redirects=False,
)
check_content_in_response('example_bash_operator', resp)
check_content_not_in_response('example_subdag_operator', resp)
@pytest.fixture(scope="module")
def user_all_dags_dagruns(acl_app):
return create_user(
acl_app,
username="user_all_dags_dagruns",
role_name="role_all_dags_dagruns",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
],
)
@pytest.fixture()
def client_all_dags_dagruns(acl_app, user_all_dags_dagruns):
return client_with_login(
acl_app,
username="user_all_dags_dagruns",
password="user_all_dags_dagruns",
)
def test_dag_stats_success(client_all_dags_dagruns):
resp = client_all_dags_dagruns.post('dag_stats', follow_redirects=True)
check_content_in_response('example_bash_operator', resp)
assert set(list(resp.json.items())[0][1][0].keys()) == {'state', 'count'}
def test_task_stats_failure(dag_test_client):
resp = dag_test_client.post('task_stats', follow_redirects=True)
check_content_not_in_response('example_subdag_operator', resp)
def test_dag_stats_success_for_all_dag_user(client_all_dags_dagruns):
resp = client_all_dags_dagruns.post('dag_stats', follow_redirects=True)
check_content_in_response('example_subdag_operator', resp)
check_content_in_response('example_bash_operator', resp)
@pytest.fixture(scope="module")
def user_all_dags_dagruns_tis(acl_app):
return create_user(
acl_app,
username="user_all_dags_dagruns_tis",
role_name="role_all_dags_dagruns_tis",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
],
)
@pytest.fixture()
def client_all_dags_dagruns_tis(acl_app, user_all_dags_dagruns_tis):
return client_with_login(
acl_app,
username="user_all_dags_dagruns_tis",
password="user_all_dags_dagruns_tis",
)
def test_task_stats_empty_success(client_all_dags_dagruns_tis):
resp = client_all_dags_dagruns_tis.post('task_stats', follow_redirects=True)
check_content_in_response('example_bash_operator', resp)
check_content_in_response('example_subdag_operator', resp)
@pytest.mark.parametrize(
"dags_to_run, unexpected_dag_ids",
[
(
["example_subdag_operator"],
["example_bash_operator", "example_xcom"],
),
(
["example_subdag_operator", "example_bash_operator"],
["example_xcom"],
),
],
ids=["single", "multi"],
)
def test_task_stats_success(
client_all_dags_dagruns_tis,
dags_to_run,
unexpected_dag_ids,
):
resp = client_all_dags_dagruns_tis.post(
'task_stats', data={'dag_ids': dags_to_run}, follow_redirects=True
)
assert resp.status_code == 200
for dag_id in unexpected_dag_ids:
check_content_not_in_response(dag_id, resp)
stats = json.loads(resp.data.decode())
for dag_id in dags_to_run:
assert dag_id in stats
@pytest.fixture(scope="module")
def user_all_dags_codes(acl_app):
return create_user(
acl_app,
username="user_all_dags_codes",
role_name="role_all_dags_codes",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
],
)
@pytest.fixture()
def client_all_dags_codes(acl_app, user_all_dags_codes):
return client_with_login(
acl_app,
username="user_all_dags_codes",
password="user_all_dags_codes",
)
def test_code_success(client_all_dags_codes):
url = 'code?dag_id=example_bash_operator'
resp = client_all_dags_codes.get(url, follow_redirects=True)
check_content_in_response('example_bash_operator', resp)
def test_code_failure(dag_test_client):
url = 'code?dag_id=example_bash_operator'
resp = dag_test_client.get(url, follow_redirects=True)
check_content_not_in_response('example_bash_operator', resp)
@pytest.mark.parametrize(
"dag_id",
["example_bash_operator", "example_subdag_operator"],
)
def test_code_success_for_all_dag_user(client_all_dags_codes, dag_id):
url = f'code?dag_id={dag_id}'
resp = client_all_dags_codes.get(url, follow_redirects=True)
check_content_in_response(dag_id, resp)
def test_dag_details_success(client_all_dags_dagruns):
"""User without RESOURCE_DAG_CODE can see the page, just not the ID."""
url = 'dag_details?dag_id=example_bash_operator'
resp = client_all_dags_dagruns.get(url, follow_redirects=True)
check_content_in_response('DAG Details', resp)
def test_dag_details_failure(dag_faker_client):
url = 'dag_details?dag_id=example_bash_operator'
resp = dag_faker_client.get(url, follow_redirects=True)
check_content_not_in_response('DAG Details', resp)
@pytest.mark.parametrize(
"dag_id",
["example_bash_operator", "example_subdag_operator"],
)
def test_dag_details_success_for_all_dag_user(client_all_dags_dagruns, dag_id):
url = f'dag_details?dag_id={dag_id}'
resp = client_all_dags_dagruns.get(url, follow_redirects=True)
check_content_in_response(dag_id, resp)
@pytest.fixture(scope="module")
def user_all_dags_tis(acl_app):
return create_user(
acl_app,
username="user_all_dags_tis",
role_name="role_all_dags_tis",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
],
)
@pytest.fixture()
def client_all_dags_tis(acl_app, user_all_dags_tis):
return client_with_login(
acl_app,
username="user_all_dags_tis",
password="user_all_dags_tis",
)
@pytest.fixture(scope="module")
def user_all_dags_tis_xcom(acl_app):
return create_user(
acl_app,
username="user_all_dags_tis_xcom",
role_name="role_all_dags_tis_xcom",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
],
)
@pytest.fixture()
def client_all_dags_tis_xcom(acl_app, user_all_dags_tis_xcom):
return client_with_login(
acl_app,
username="user_all_dags_tis_xcom",
password="user_all_dags_tis_xcom",
)
@pytest.fixture(scope="module")
def user_dags_tis_logs(acl_app):
return create_user(
acl_app,
username="user_dags_tis_logs",
role_name="role_dags_tis_logs",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
],
)
@pytest.fixture()
def client_dags_tis_logs(acl_app, user_dags_tis_logs):
return client_with_login(
acl_app,
username="user_dags_tis_logs",
password="user_dags_tis_logs",
)
RENDERED_TEMPLATES_URL = (
f'rendered-templates?task_id=runme_0&dag_id=example_bash_operator&'
f'execution_date={urllib.parse.quote_plus(str(DEFAULT_DATE))}'
)
TASK_URL = (
f'task?task_id=runme_0&dag_id=example_bash_operator&'
f'execution_date={urllib.parse.quote_plus(str(DEFAULT_DATE))}'
)
XCOM_URL = (
f'xcom?task_id=runme_0&dag_id=example_bash_operator&'
f'execution_date={urllib.parse.quote_plus(str(DEFAULT_DATE))}'
)
DURATION_URL = "duration?days=30&dag_id=example_bash_operator"
TRIES_URL = "tries?days=30&dag_id=example_bash_operator"
LANDING_TIMES_URL = "landing_times?days=30&dag_id=example_bash_operator"
GANTT_URL = "gantt?dag_id=example_bash_operator"
TREE_URL = "tree?dag_id=example_bash_operator"
LOG_URL = (
f"log?task_id=runme_0&dag_id=example_bash_operator&"
f"execution_date={urllib.parse.quote_plus(str(DEFAULT_DATE))}"
)
@pytest.mark.parametrize(
"client, url, expected_content",
[
("client_all_dags_tis", RENDERED_TEMPLATES_URL, "Rendered Template"),
("all_dag_user_client", RENDERED_TEMPLATES_URL, "Rendered Template"),
("client_all_dags_tis", TASK_URL, "Task Instance Details"),
("client_all_dags_tis_xcom", XCOM_URL, "XCom"),
("client_all_dags_tis", DURATION_URL, "example_bash_operator"),
("client_all_dags_tis", TRIES_URL, "example_bash_operator"),
("client_all_dags_tis", LANDING_TIMES_URL, "example_bash_operator"),
("client_all_dags_tis", GANTT_URL, "example_bash_operator"),
("client_dags_tis_logs", TREE_URL, "runme_1"),
("viewer_client", TREE_URL, "runme_1"),
("client_dags_tis_logs", LOG_URL, "Log by attempts"),
("user_client", LOG_URL, "Log by attempts"),
],
ids=[
"rendered-templates",
"rendered-templates-all-dag-user",
"task",
"xcom",
"duration",
"tries",
"landing-times",
"gantt",
"tree-for-readonly-role",
"tree-for-viewer",
"log",
"log-for-user",
],
)
def test_success(request, client, url, expected_content):
resp = request.getfixturevalue(client).get(url, follow_redirects=True)
check_content_in_response(expected_content, resp)
@pytest.mark.parametrize(
"url, unexpected_content",
[
(RENDERED_TEMPLATES_URL, "Rendered Template"),
(TASK_URL, "Task Instance Details"),
(XCOM_URL, "XCom"),
(DURATION_URL, "example_bash_operator"),
(TRIES_URL, "example_bash_operator"),
(LANDING_TIMES_URL, "example_bash_operator"),
(GANTT_URL, "example_bash_operator"),
(LOG_URL, "Log by attempts"),
],
ids=[
"rendered-templates",
"task",
"xcom",
"duration",
"tries",
"landing-times",
"gantt",
"log",
],
)
def test_failure(dag_faker_client, url, unexpected_content):
resp = dag_faker_client.get(url, follow_redirects=True)
check_content_not_in_response(unexpected_content, resp)
@pytest.mark.parametrize("client", ["dag_test_client", "all_dag_user_client"])
def test_run_success(request, client):
form = dict(
task_id="runme_0",
dag_id="example_bash_operator",
ignore_all_deps="false",
ignore_ti_state="true",
execution_date=DEFAULT_DATE,
)
resp = request.getfixturevalue(client).post('run', data=form)
assert resp.status_code == 302
def test_blocked_success(client_all_dags_dagruns):
resp = client_all_dags_dagruns.post('blocked', follow_redirects=True)
check_content_in_response('example_bash_operator', resp)
def test_blocked_success_for_all_dag_user(all_dag_user_client):
resp = all_dag_user_client.post('blocked', follow_redirects=True)
check_content_in_response('example_bash_operator', resp)
check_content_in_response('example_subdag_operator', resp)
@pytest.mark.parametrize(
"dags_to_block, unexpected_dag_ids",
[
(
["example_subdag_operator"],
["example_bash_operator", "example_xcom"],
),
(
["example_subdag_operator", "example_bash_operator"],
["example_xcom"],
),
],
ids=["single", "multi"],
)
def test_blocked_success_when_selecting_dags(
admin_client,
dags_to_block,
unexpected_dag_ids,
):
resp = admin_client.post(
'blocked',
data={'dag_ids': dags_to_block},
follow_redirects=True,
)
assert resp.status_code == 200
for dag_id in unexpected_dag_ids:
check_content_not_in_response(dag_id, resp)
blocked_dags = {blocked['dag_id'] for blocked in json.loads(resp.data.decode())}
for dag_id in dags_to_block:
assert dag_id in blocked_dags
@pytest.fixture(scope="module")
def user_all_dags_edit_tis(acl_app):
return create_user(
acl_app,
username="user_all_dags_edit_tis",
role_name="role_all_dags_edit_tis",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
],
)
@pytest.fixture()
def client_all_dags_edit_tis(acl_app, user_all_dags_edit_tis):
return client_with_login(
acl_app,
username="user_all_dags_edit_tis",
password="user_all_dags_edit_tis",
)
def test_failed_success(client_all_dags_edit_tis):
form = dict(
task_id="run_this_last",
dag_id="example_bash_operator",
execution_date=DEFAULT_DATE,
upstream="false",
downstream="false",
future="false",
past="false",
)
resp = client_all_dags_edit_tis.post('failed', data=form)
check_content_in_response('example_bash_operator', resp)
@pytest.mark.parametrize(
"url, expected_content",
[
("paused?dag_id=example_bash_operator&is_paused=false", "OK"),
("refresh?dag_id=example_bash_operator", ""),
],
ids=[
"paused",
"refresh",
],
)
def test_post_success(dag_test_client, url, expected_content):
    # POST request failure is not tested here.
resp = dag_test_client.post(url, follow_redirects=True)
check_content_in_response(expected_content, resp)
@pytest.fixture(scope="module")
def user_only_dags_tis(acl_app):
return create_user(
acl_app,
username="user_only_dags_tis",
role_name="role_only_dags_tis",
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
],
)
@pytest.fixture()
def client_only_dags_tis(acl_app, user_only_dags_tis):
return client_with_login(
acl_app,
username="user_only_dags_tis",
password="user_only_dags_tis",
)
def test_success_fail_for_read_only_task_instance_access(client_only_dags_tis):
form = dict(
task_id="run_this_last",
dag_id="example_bash_operator",
execution_date=DEFAULT_DATE,
upstream="false",
downstream="false",
future="false",
past="false",
)
resp = client_only_dags_tis.post('success', data=form)
check_content_not_in_response('Wait a minute', resp, resp_code=302)
GET_LOGS_WITH_METADATA_URL = (
f"get_logs_with_metadata?task_id=runme_0&dag_id=example_bash_operator&"
f"execution_date={urllib.parse.quote_plus(str(DEFAULT_DATE))}&"
f"try_number=1&metadata=null"
)
@pytest.mark.parametrize("client", ["client_dags_tis_logs", "user_client"])
def test_get_logs_with_metadata_success(request, client):
resp = request.getfixturevalue(client).get(
GET_LOGS_WITH_METADATA_URL,
follow_redirects=True,
)
check_content_in_response('"message":', resp)
check_content_in_response('"metadata":', resp)
def test_get_logs_with_metadata_failure(dag_faker_client):
resp = dag_faker_client.get(
GET_LOGS_WITH_METADATA_URL,
follow_redirects=True,
)
check_content_not_in_response('"message":', resp)
check_content_not_in_response('"metadata":', resp)
def test_refresh_failure_for_viewer(viewer_client):
# viewer role can't refresh
resp = viewer_client.post('refresh?dag_id=example_bash_operator')
check_content_in_response('Redirecting', resp, resp_code=302)
| 32.740498
| 108
| 0.708819
|
a0b30a7cae8a6f4033ac7c9d10908fa716a8f3d3
| 8,208
|
py
|
Python
|
utest/running/test_userhandlers.py
|
phil-davis/robotframework
|
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-09-13T08:56:49.000Z
|
2021-01-10T11:21:34.000Z
|
utest/running/test_userhandlers.py
|
phil-davis/robotframework
|
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-10-15T19:39:58.000Z
|
2020-10-15T19:41:03.000Z
|
utest/running/test_userhandlers.py
|
phil-davis/robotframework
|
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2016-02-29T15:42:22.000Z
|
2018-05-08T08:58:18.000Z
|
import sys
import unittest
from robot.errors import DataError
from robot.model import Keywords
from robot.running.userkeyword import EmbeddedArgumentsHandler
from robot.running.arguments import EmbeddedArguments, UserKeywordArgumentParser
from robot.utils.asserts import (assert_equal, assert_true, assert_raises,
assert_raises_with_msg)
class Fake(object):
value = ''
message = ''
def __iter__(self):
return iter([])
class FakeArgs(object):
def __init__(self, args):
self.value = args
def __nonzero__(self):
return bool(self.value)
def __iter__(self):
return iter(self.value)
class HandlerDataMock:
def __init__(self, name, args=[]):
self.name = name
self.args = FakeArgs(args)
self.metadata = {}
self.keywords = Keywords()
self.defaults = []
self.varargs = None
self.minargs = 0
self.maxargs = 0
self.return_value = None
self.doc = Fake()
self.timeout = Fake()
self.return_ = Fake()
self.tags = ()
self.teardown = None
def EAT(name, args=[]):
handler = HandlerDataMock(name, args)
embedded = EmbeddedArguments(name)
return EmbeddedArgumentsHandler(handler, 'resource', embedded)
class TestEmbeddedArgs(unittest.TestCase):
def setUp(self):
self.tmp1 = EAT('User selects ${item} from list')
self.tmp2 = EAT('${x} * ${y} from "${z}"')
def test_no_embedded_args(self):
assert_true(not EmbeddedArguments('No embedded args here'))
assert_true(EmbeddedArguments('${Yes} embedded args here'))
def test_get_embedded_arg_and_regexp(self):
assert_equal(self.tmp1.embedded_args, ['item'])
assert_equal(self.tmp1.embedded_name.pattern,
'^User\\ selects\\ (.*?)\\ from\\ list$')
assert_equal(self.tmp1.name, 'User selects ${item} from list')
def test_get_multiple_embedded_args_and_regexp(self):
assert_equal(self.tmp2.embedded_args, ['x', 'y', 'z'])
quote = '"' if sys.version_info[:2] >= (3, 7) else '\\"'
assert_equal(self.tmp2.embedded_name.pattern,
'^(.*?)\\ \\*\\ (.*?)\\ from\\ {0}(.*?){0}$'.format(quote))
def test_create_runner_when_no_match(self):
assert_raises(ValueError, self.tmp1.create_runner, 'Not matching')
def test_create_runner_with_one_embedded_arg(self):
runner = self.tmp1.create_runner('User selects book from list')
assert_equal(runner.embedded_args, [('item', 'book')])
assert_equal(runner.name, 'User selects book from list')
assert_equal(runner.longname, 'resource.User selects book from list')
runner = self.tmp1.create_runner('User selects radio from list')
assert_equal(runner.embedded_args, [('item', 'radio')])
assert_equal(runner.name, 'User selects radio from list')
assert_equal(runner.longname, 'resource.User selects radio from list')
def test_create_runner_with_many_embedded_args(self):
runner = self.tmp2.create_runner('User * book from "list"')
assert_equal(runner.embedded_args,
[('x', 'User'), ('y', 'book'), ('z', 'list')])
def test_create_runner_with_empty_embedded_arg(self):
runner = self.tmp1.create_runner('User selects from list')
assert_equal(runner.embedded_args, [('item', '')])
def test_create_runner_with_special_characters_in_embedded_args(self):
runner = self.tmp2.create_runner('Janne & Heikki * "enjoy" from """')
assert_equal(runner.embedded_args,
[('x', 'Janne & Heikki'), ('y', '"enjoy"'), ('z', '"')])
def test_embedded_args_without_separators(self):
template = EAT('This ${does}${not} work so well')
runner = template.create_runner('This doesnot work so well')
assert_equal(runner.embedded_args, [('does', ''), ('not', 'doesnot')])
def test_embedded_args_with_separators_in_values(self):
template = EAT('This ${could} ${work}-${OK}')
runner = template.create_runner("This doesn't really work---")
assert_equal(runner.embedded_args,
[('could', "doesn't"), ('work', 'really work'), ('OK', '--')])
def test_creating_runners_is_case_insensitive(self):
runner = self.tmp1.create_runner('User SELECts book frOm liST')
assert_equal(runner.embedded_args, [('item', 'book')])
assert_equal(runner.name, 'User SELECts book frOm liST')
assert_equal(runner.longname, 'resource.User SELECts book frOm liST')
class TestGetArgSpec(unittest.TestCase):
def test_no_args(self):
self._verify('')
def test_args(self):
self._verify('${arg1}', ['arg1',])
self._verify('${a1} ${a2}', ['a1', 'a2'])
def test_defaults(self):
self._verify('${arg1} ${arg2}=default @{varargs}',
args=['arg1', 'arg2'],
defaults={'arg2': 'default'},
varargs='varargs')
self._verify('${arg1} ${arg2}= @{varargs}',
args=['arg1', 'arg2'],
defaults={'arg2': ''},
varargs='varargs')
self._verify('${arg1}=d1 ${arg2}=d2 ${arg3}=d3',
args=['arg1', 'arg2', 'arg3'],
defaults={'arg1': 'd1', 'arg2': 'd2', 'arg3': 'd3'})
def test_vararg(self):
self._verify('@{varargs}', varargs='varargs')
self._verify('${arg} @{varargs}', ['arg'], varargs='varargs')
def test_kwonly(self):
self._verify('@{} ${ko1} ${ko2}',
kwonlyargs=['ko1', 'ko2'])
self._verify('@{vars} ${ko1} ${ko2}',
varargs='vars',
kwonlyargs=['ko1', 'ko2'])
def test_kwonlydefaults(self):
self._verify('@{} ${ko1} ${ko2}=xxx',
kwonlyargs=['ko1', 'ko2'],
defaults={'ko2': 'xxx'})
self._verify('@{} ${ko1}=xxx ${ko2}',
kwonlyargs=['ko1', 'ko2'],
defaults={'ko1': 'xxx'})
self._verify('@{v} ${ko1}=foo ${ko2} ${ko3}=',
varargs='v',
kwonlyargs=['ko1', 'ko2', 'ko3'],
defaults={'ko1': 'foo', 'ko3': ''})
def test_kwargs(self):
self._verify('&{kwargs}', kwargs='kwargs')
self._verify('${arg} &{kwargs}',
args=['arg'],
kwargs='kwargs')
self._verify('@{} ${arg} &{kwargs}',
kwonlyargs=['arg'],
kwargs='kwargs')
self._verify('${a1} ${a2}=ad @{vars} ${k1} ${k2}=kd &{kws}',
args=['a1', 'a2'],
varargs='vars',
kwonlyargs=['k1', 'k2'],
defaults={'a2': 'ad', 'k2': 'kd'},
kwargs='kws')
def _verify(self, in_args, args=[], defaults={}, varargs=None,
kwonlyargs=[], kwargs=None):
argspec = self._parse(in_args)
assert_equal(argspec.positional, args)
assert_equal(argspec.defaults, defaults)
assert_equal(argspec.varargs, varargs)
assert_equal(argspec.kwonlyargs, kwonlyargs)
assert_equal(argspec.kwargs, kwargs)
def _parse(self, in_args):
return UserKeywordArgumentParser().parse(in_args.split())
def test_arg_after_defaults(self):
self._verify_error('${arg1}=default ${arg2}',
'Non-default argument after default arguments.')
def test_multiple_varargs(self):
for spec in ['@{v1} @{v2}', '@{} @{v}', '@{v} @{}', '@{} @{}']:
self._verify_error(spec, 'Cannot have multiple varargs.')
def test_args_after_kwargs(self):
self._verify_error('&{kws} ${arg}',
'Only last argument can be kwargs.')
def _verify_error(self, in_args, exp_error):
assert_raises_with_msg(DataError,
'Invalid argument specification: ' + exp_error,
self._parse, in_args)
if __name__ == '__main__':
unittest.main()
| 38
| 83
| 0.568226
|
b55bd52ebdec79ff1d4ced8480ebb02c9cea1d93
| 3,148
|
py
|
Python
|
cen/experiment/utils.py
|
crodriguez1a/cen
|
f03397a0bf4ac24162e270907d623f8658179e88
|
[
"Apache-2.0"
] | 6
|
2020-02-23T04:53:08.000Z
|
2022-01-10T18:13:37.000Z
|
cen/experiment/utils.py
|
crodriguez1a/cen
|
f03397a0bf4ac24162e270907d623f8658179e88
|
[
"Apache-2.0"
] | null | null | null |
cen/experiment/utils.py
|
crodriguez1a/cen
|
f03397a0bf4ac24162e270907d623f8658179e88
|
[
"Apache-2.0"
] | 5
|
2020-09-27T23:46:33.000Z
|
2021-10-14T07:42:54.000Z
|
# Copyright 2020 Maruan Al-Shedivat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Experiment utils."""
import os
import tensorflow as tf
from cen import losses
from cen import metrics
from cen import models
from cen import networks
class ModeKeys(object):
TRAIN = "train"
EVAL = "eval"
INFER = "infer"
def get_input_dtypes(data):
"""Returns input shapes."""
return {k: str(v.dtype) for k, v in data[0].items()}
def get_input_shapes(data):
"""Returns input shapes."""
return {k: v.shape[1:] for k, v in data[0].items()}
def get_output_shape(data):
"""Returns output shapes."""
return data[1].shape[1:]
def build(
cfg,
input_dtypes,
input_shapes,
output_shape,
mode=ModeKeys.TRAIN,
working_dir=None,
):
"""Builds model and callbacks for training or evaluation."""
tf.keras.backend.clear_session()
if working_dir is None:
working_dir = os.getcwd()
# Build model.
net = networks.get(**cfg.network)
model, info = models.get(
cfg.model.name,
encoder=net,
input_dtypes=input_dtypes,
input_shapes=input_shapes,
output_shape=output_shape,
**cfg.model.kwargs,
)
# Build loss and optimizer.
loss = losses.get(**cfg.train.loss)
opt = tf.keras.optimizers.get(dict(**cfg.optimizer))
# Build metrics.
metrics_list = None
if cfg.eval.metrics:
metrics_list = [metrics.get(**v) for _, v in cfg.eval.metrics.items()]
# Compile model for training.
if mode == ModeKeys.TRAIN:
model.compile(optimizer=opt, loss=loss, metrics=metrics_list)
callbacks = []
if cfg.train.checkpoint_kwargs:
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(working_dir, "checkpoint"),
**cfg.train.checkpoint_kwargs,
)
)
if cfg.train.tensorboard:
callbacks.append(
tf.keras.callbacks.TensorBoard(
log_dir=os.path.join(working_dir, "tensorboard"),
**cfg.train.tensorboard,
)
)
info["callbacks"] = callbacks
return model, info
# Compile model for evaluation or inference.
else:
model.compile(loss=loss, optimizer=opt, metrics=metrics_list)
checkpoint_path = os.path.join(working_dir, "checkpoint")
model.load_weights(checkpoint_path).expect_partial()
return model, info
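# Illustrative usage sketch (assumes a Hydra/OmegaConf-style `cfg` exposing the fields
# accessed above; `train_data` is an (inputs_dict, targets) pair as expected by the
# helpers at the top of this module; placeholder names, not part of the original code):
#
#   model, info = build(
#       cfg,
#       get_input_dtypes(train_data),
#       get_input_shapes(train_data),
#       get_output_shape(train_data),
#       mode=ModeKeys.TRAIN,
#   )
#   model.fit(train_data[0], train_data[1], callbacks=info["callbacks"])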
| 29.148148
| 80
| 0.6223
|
3b916aa30d7f2f4d0b7e6b29de6b6c4b05c11878
| 2,855
|
py
|
Python
|
Nancy/strat/comparisons/make_readme.py
|
SalishSeaCast/analysis
|
5964628f08ca1f36121a5d8430ad5b4ae7756c7a
|
[
"Apache-2.0"
] | null | null | null |
Nancy/strat/comparisons/make_readme.py
|
SalishSeaCast/analysis
|
5964628f08ca1f36121a5d8430ad5b4ae7756c7a
|
[
"Apache-2.0"
] | null | null | null |
Nancy/strat/comparisons/make_readme.py
|
SalishSeaCast/analysis
|
5964628f08ca1f36121a5d8430ad5b4ae7756c7a
|
[
"Apache-2.0"
] | null | null | null |
"""Salish Sea NEMO IPython Notebook collection README generator
Copyright 2013-2016 The Salish Sea MEOPAR Contributors
and The University of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import json
import os
import re
nbviewer = 'http://nbviewer.ipython.org/urls'
repo = 'bitbucket.org/salishsea/analysis/raw/tip'
repo_dir = 'Nancy/strat/comparisons'
url = os.path.join(nbviewer, repo, repo_dir)
title_pattern = re.compile('#{1,6} ?')
readme = """The IPython Notebooks in this directory are made by Nancy for
quick sharing of results. Most of the notebooks and files are used for
comparisons of stratification data with the model.
The links below are to static renderings of the notebooks via
[nbviewer.ipython.org](http://nbviewer.ipython.org/).
Descriptions below the links are from the first cell of the notebooks
(if that cell contains Markdown or raw text).
"""
notebooks = (fn for fn in os.listdir('./') if fn.endswith('ipynb'))
for fn in notebooks:
readme += '* ##[{fn}]({url}/{fn}) \n \n'.format(fn=fn, url=url)
with open(fn, 'rt') as notebook:
contents = json.load(notebook)
try:
first_cell = contents['worksheets'][0]['cells'][0]
except KeyError:
first_cell = contents['cells'][0]
first_cell_type = first_cell['cell_type']
if first_cell_type in 'markdown raw'.split():
desc_lines = first_cell['source']
for line in desc_lines:
suffix = ''
if title_pattern.match(line):
line = title_pattern.sub('**', line)
suffix = '**'
if line.endswith('\n'):
readme += (
' {line}{suffix} \n'
.format(line=line[:-1], suffix=suffix))
else:
readme += (
' {line}{suffix} '.format(line=line, suffix=suffix))
readme += '\n' * 2
license = """
##License
These notebooks and files are copyright 2013-{this_year}
by the Salish Sea MEOPAR Project Contributors
and The University of British Columbia.
They are licensed under the Apache License, Version 2.0.
http://www.apache.org/licenses/LICENSE-2.0
Please see the LICENSE file for details of the license.
""".format(this_year=datetime.date.today().year)
with open('README.md', 'wt') as f:
f.writelines(readme)
f.writelines(license)
| 36.139241
| 76
| 0.678459
|
db74d525da6294ebb40993b40634ba9bfffd3ce3
| 2,489
|
py
|
Python
|
numpy-pandas-matplotlib/functions_vectorised.py
|
AxoyTO/ML-DL-DS-Python-Studies
|
ffef653190d1106e01244a4ea7f3f953b9d97882
|
[
"Unlicense"
] | null | null | null |
numpy-pandas-matplotlib/functions_vectorised.py
|
AxoyTO/ML-DL-DS-Python-Studies
|
ffef653190d1106e01244a4ea7f3f953b9d97882
|
[
"Unlicense"
] | null | null | null |
numpy-pandas-matplotlib/functions_vectorised.py
|
AxoyTO/ML-DL-DS-Python-Studies
|
ffef653190d1106e01244a4ea7f3f953b9d97882
|
[
"Unlicense"
] | 1
|
2021-12-08T13:00:41.000Z
|
2021-12-08T13:00:41.000Z
|
import numpy
import numpy as np
import pickle
from itertools import groupby
def prod_non_zero_diag(X: np.ndarray) -> int:
    if 0 not in X.diagonal():
return X.diagonal().prod()
if len(X.diagonal().nonzero()[0]) == 0:
return -1
else:
prod = 1
for i in X[X.diagonal().nonzero(), X.diagonal().nonzero()]:
for j in i:
prod *= j
break
return prod
pass
def are_multisets_equal(x: np.ndarray, y: np.ndarray) -> bool:
"""
Return True if both 1-d arrays create equal multisets, False if not.
Return type: bool / np.bool_
"""
x.sort()
y.sort()
return True if False not in (x == y) else False
pass
def max_after_zero(x: np.ndarray) -> int:
    """Return the largest element that immediately follows a zero, or -1 if none exists."""
    y = np.where(x == 0)[0] + 1
    y = y[y < x.size]  # a trailing zero has no element after it
    if y.size == 0:
        return -1
    return x[y].max()
pass
def convert_image(image: np.ndarray, weights: np.ndarray) -> np.ndarray:
"""
#TOO SLOW
ret = [[]] * len(image)
v = 0
for i in image:
tmp = []
for j in i:
c = 0
t = 0
for k in j:
if c == len(j):
break
t += k * weights[c]
c += 1
tmp.append(t)
ret[v] = tmp
v += 1
return ret
#----------
#EVEN SLOWER
ret = numpy.zeros(shape=(len(image), len(image)))
for i in range(0, len(image)):
for j in range(0, len(image[i])):
t = 0
for k in range(0, len(image[i][j])):
t += weights[k] * image[i][j][k]
ret[i][j] = t
return ret
"""
ret = np.dot(image, weights)
return ret
pass
def run_length_encoding(x: np.ndarray) -> (np.ndarray, np.ndarray):
if x.ndim != 1:
return -1
n = x.size
if n == 0:
return -1
else:
loc_run_start = np.empty(n, dtype=bool)
loc_run_start[0] = True
np.not_equal(x[:-1], x[1:], out=loc_run_start[1:])
run_starts = np.nonzero(loc_run_start)[0]
run_values = x[loc_run_start]
run_lengths = np.diff(np.append(run_starts, n))
return run_values, run_lengths
def pairwise_distance(X: np.ndarray, Y: np.ndarray) -> np.ndarray:
Z = np.linalg.norm(X[:, None, :] - Y[None, :, :], ord=None, axis=-1)
return Z
pass
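# Minimal self-check of two helpers above (illustrative addition, not part of the
# original assignment):
if __name__ == "__main__":
    values, lengths = run_length_encoding(np.array([1, 1, 2, 2, 2, 3]))
    print(values, lengths)                  # -> [1 2 3] [2 3 1]
    X = np.array([[0.0, 0.0], [3.0, 4.0]])
    Y = np.array([[0.0, 0.0]])
    print(pairwise_distance(X, Y).ravel())  # -> [0. 5.]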
| 23.481132
| 72
| 0.506227
|
04ffbb54417c472b7e6ccffe19048cce86b858cd
| 1,574
|
py
|
Python
|
tools/find_includes.py
|
meghanto/tiny_vm
|
da19e9f4bff3b48349bf13f8dc725d387d01e1f5
|
[
"CC-BY-3.0"
] | 2
|
2022-01-12T03:16:07.000Z
|
2022-01-15T07:52:48.000Z
|
tools/find_includes.py
|
meghanto/tiny_vm
|
da19e9f4bff3b48349bf13f8dc725d387d01e1f5
|
[
"CC-BY-3.0"
] | 2
|
2022-01-25T20:52:54.000Z
|
2022-01-31T02:46:45.000Z
|
tools/find_includes.py
|
meghanto/tiny_vm
|
da19e9f4bff3b48349bf13f8dc725d387d01e1f5
|
[
"CC-BY-3.0"
] | 10
|
2022-01-04T04:36:07.000Z
|
2022-01-15T00:55:27.000Z
|
"""
Scan #include references in C/C++ code
"""
import argparse
import pathlib
import logging
import re
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
C_SUFFIXES = ["h", "c"] # FIXME: Need comprehensive suffix list
def cli() -> object:
"""Command line arguments"""
parser = argparse.ArgumentParser("Scan C/C++ for #include relation")
parser.add_argument("dirs", help="Directories to scan; defaults to .",
nargs="?", default=["."])
args = parser.parse_args()
return args
def scan_includes(p: pathlib.Path):
"""Emits an "f -> g" line for each #include "g" """
f_name = p.name
with open(p, "r") as f:
#print()
for line in f:
matched = re.match(r"""#include\s*["](?P<included>.*)["].*""",
line)
if matched:
log.debug(f"Matched line '{line.strip()}'")
included = matched.groupdict()["included"]
log.debug(f"Included '{included}'")
print(f"{f_name}\t->\t{included}")
def main():
print("digraph depends {")
args = cli()
for d in args.dirs:
p = pathlib.Path(d)
log.debug(f"Scanning directory {p}")
for f in p.iterdir():
log.debug(f"Considering file {f}")
suffix = f.name.split(".")[-1]
if suffix in C_SUFFIXES:
log.debug(f"{f.name} appears to be a C source file")
scan_includes(f)
print("}")
if __name__ == "__main__":
main()
| 24.984127
| 74
| 0.543837
|
7ad85af44829b972b33aad6a596ad03693c1a8e8
| 333
|
py
|
Python
|
app.py
|
ptrkdy/weather_bot
|
0b68e74421a026ff59d17f730eb87cb2dea7cacf
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
ptrkdy/weather_bot
|
0b68e74421a026ff59d17f730eb87cb2dea7cacf
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
ptrkdy/weather_bot
|
0b68e74421a026ff59d17f730eb87cb2dea7cacf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from flask import Flask
app = Flask(__name__)
# tell our app which pages it will accept
# check conversation
# check slots
# imagine endpoint design -> /chatbot /conversation /slots /user
@app.route('/') # 'http://www.ourapi.com/' root directory
def home():
return "Hello, world!"
app.run(port=8888)
| 15.857143
| 64
| 0.702703
|
aa266f4d379a013665e4e63bae7821b118b83baa
| 1,149
|
py
|
Python
|
day3/python/main.py
|
freidrichen/advent-of-code-2019
|
08aca50e86700504d35c934a308a640a95de586e
|
[
"MIT"
] | null | null | null |
day3/python/main.py
|
freidrichen/advent-of-code-2019
|
08aca50e86700504d35c934a308a640a95de586e
|
[
"MIT"
] | null | null | null |
day3/python/main.py
|
freidrichen/advent-of-code-2019
|
08aca50e86700504d35c934a308a640a95de586e
|
[
"MIT"
] | null | null | null |
import numpy as np
def get_sections(line):
return [(s[0], int(s[1:])) for s in line.split(',')]
def get_coords(sections):
coords = set()
current_coord = np.array([0, 0])
for direction, length in sections:
if direction == 'U':
dir_unit = np.array([0, -1])
elif direction == 'D':
dir_unit = np.array([0, 1])
elif direction == 'R':
dir_unit = np.array([1, 0])
elif direction == 'L':
dir_unit = np.array([-1, 0])
else:
assert(False)
for _ in range(length):
current_coord += dir_unit
coords.add(tuple(current_coord))
return coords
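# Illustrative: get_coords([('R', 2), ('U', 1)]) returns {(1, 0), (2, 0), (2, -1)},
# i.e. every grid point the wire passes through ('U' maps to negative y in this code).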
input_file = "../input.txt"
with open(input_file, 'r') as f:
input_text = f.read()
wire_sections = [
get_sections(input_line) for input_line in input_text.splitlines()]
wire_coords = [get_coords(s) for s in wire_sections]
intersections = set.intersection(*wire_coords)
min_distance = min(sum(np.absolute(coords)) for coords in intersections)
print(f"Distance (part 1): {min_distance}")
# print(f"Inputs that produce {target} (part 2): {100*noun + verb}")
| 28.725
| 72
| 0.604874
|
c95000c188c1f4473e5b7cc123b641675e66706e
| 2,112
|
py
|
Python
|
kolibri/core/auth/management/commands/resumesync.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 545
|
2016-01-19T19:26:55.000Z
|
2022-03-20T00:13:04.000Z
|
kolibri/core/auth/management/commands/resumesync.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 8,329
|
2016-01-19T19:32:02.000Z
|
2022-03-31T21:23:12.000Z
|
kolibri/core/auth/management/commands/resumesync.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 493
|
2016-01-19T19:26:48.000Z
|
2022-03-28T14:35:05.000Z
|
from kolibri.core.auth.constants.morango_sync import DATA_PORTAL_SYNCING_BASE_URL
from kolibri.core.auth.management.utils import get_network_connection
from kolibri.core.auth.management.utils import MorangoSyncCommand
class Command(MorangoSyncCommand):
help = "Allow the syncing of facility data with Kolibri Data Portal or another Kolibri device."
def add_arguments(self, parser):
parser.add_argument(
"--id", type=str, help="ID of an incomplete session to resume sync"
)
parser.add_argument(
"--baseurl", type=str, default=DATA_PORTAL_SYNCING_BASE_URL, dest="baseurl"
)
parser.add_argument("--noninteractive", action="store_true")
parser.add_argument(
"--chunk-size",
type=int,
default=500,
help="Chunk size of records to send/retrieve per request",
)
parser.add_argument(
"--no-push", action="store_true", help="Do not push data to the server"
)
parser.add_argument(
"--no-pull", action="store_true", help="Do not pull data from the server"
)
parser.add_argument(
"--no-provision",
action="store_true",
help="do not create a facility and temporary superuser",
)
parser.add_argument(
"--user",
type=str,
help="for single-user syncing, the user ID of the account to be synced",
)
parser.add_argument(
"--keep-alive",
action="store_true",
help="do not close the sync session",
)
def handle_async(self, *args, **options):
(baseurl, sync_session_id, chunk_size,) = (
options["baseurl"],
options["id"],
options["chunk_size"],
)
# try to connect to server
network_connection = get_network_connection(baseurl)
sync_session_client = network_connection.resume_sync_session(
sync_session_id, chunk_size=chunk_size
)
self._sync(sync_session_client, **options)
| 36.413793
| 99
| 0.608902
|
821ad0604faf35c7e7ce9deddf3a4740ee57a73f
| 2,697
|
py
|
Python
|
venv/lib/python3.8/site-packages/applicationinsights/channel/contracts/MetricData.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/applicationinsights/channel/contracts/MetricData.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/applicationinsights/channel/contracts/MetricData.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
import collections
import copy
from .Utils import _write_complex_object
class MetricData(object):
"""Data contract class for type MetricData.
"""
ENVELOPE_TYPE_NAME = 'Microsoft.ApplicationInsights.Metric'
DATA_TYPE_NAME = 'MetricData'
_defaults = collections.OrderedDict([
('ver', 2),
('metrics', []),
('properties', {})
])
def __init__(self):
"""Initializes a new instance of the class.
"""
self._values = {
'ver': 2,
'metrics': [],
}
self._initialize()
@property
def ver(self):
"""The ver property.
Returns:
(int). the property value. (defaults to: 2)
"""
return self._values['ver']
@ver.setter
def ver(self, value):
"""The ver property.
Args:
value (int). the property value.
"""
self._values['ver'] = value
@property
def metrics(self):
"""The metrics property.
Returns:
(list). the property value. (defaults to: [])
"""
return self._values['metrics']
@metrics.setter
def metrics(self, value):
"""The metrics property.
Args:
value (list). the property value.
"""
self._values['metrics'] = value
@property
def properties(self):
"""The properties property.
Returns:
(hash). the property value. (defaults to: {})
"""
if 'properties' in self._values:
return self._values['properties']
self._values['properties'] = copy.deepcopy(self._defaults['properties'])
return self._values['properties']
@properties.setter
def properties(self, value):
"""The properties property.
Args:
value (hash). the property value.
"""
if value == self._defaults['properties'] and 'properties' in self._values:
del self._values['properties']
else:
self._values['properties'] = value
def _initialize(self):
"""Initializes the current instance of the object.
"""
pass
def write(self):
"""Writes the contents of this object and returns the content as a dict object.
Returns:
(dict). the object that represents the same data as the current instance.
"""
return _write_complex_object(self._defaults, self._values)
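# Illustrative usage (a minimal sketch; constructing the DataPoint entries that go into
# `metrics` is elided, and the variable names below are placeholders):
#
#   data = MetricData()
#   data.metrics.append(point)            # `point` would be a DataPoint contract object
#   data.properties["host"] = "worker-1"
#   body = data.write()                   # dict with 'ver', 'metrics' and 'properties'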
| 26.70297
| 88
| 0.510197
|
bf3796eba5c4a475c58de564c37841c056fce88e
| 16,732
|
py
|
Python
|
app/calculadora.py
|
mylesdonaldson/calculadora-tk
|
1679aa57035fbba10a381b7804afbb8d37c5b325
|
[
"MIT"
] | 1
|
2022-01-17T00:29:43.000Z
|
2022-01-17T00:29:43.000Z
|
app/calculadora.py
|
mylesdonaldson/calculadora-tk
|
1679aa57035fbba10a381b7804afbb8d37c5b325
|
[
"MIT"
] | null | null | null |
app/calculadora.py
|
mylesdonaldson/calculadora-tk
|
1679aa57035fbba10a381b7804afbb8d37c5b325
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @autor: Matheus Felipe
# @github: github.com/matheusfelipeog
# Builtins
import sys
import os
import platform
import tkinter as tk
from tkinter import Menu, FALSE
from functools import partial
from json import load as json_load
from json import dump as json_dump
from copy import deepcopy
# Módulos próprios
from .calculador import Calculador
class Calculadora(object):
"""Classe para criação do layout da calculadora, distribuição dos botões
e a adição de suas funcionalidades.
Os botões distríbuidos no layout estão conforme o exemplo abaixo:
C | ( | ) | <
7 | 8 | 9 | x
4 | 5 | 6 | -
1 | 2 | 3 | +
. | 0 | = | /
| | ^ | √
OBS: É necessário importar o modulo style contido na pacote view,
e selecionar uma de suas classes de estilo.
"""
"""Class for creating the layout of the calculator, distribution of buttons
and adding its features.
The buttons distributed in the layout are as shown in the example below:
C | ( | ) | <
7 | 8 | 9 | x
4 | 5 | 6 | -
1 | 2 | 3 | +
. | 0 | = | /
| | ^ | √
NOTE: It is necessary to import the style module contained in the view package,
and select one of your style classes.
"""
def __init__(self, master):
self.master = master
self.calc = Calculador()
self.settings = self._load_settings()
# Define estilo padrão para macOS, caso seja o sistema operacional utilizado
# Sets default style for macOS, if it's the operating system used
if platform.system() == 'Darwin':
self.theme = self._get_theme('Default Theme For MacOS')
else:
self.theme = self._get_theme(self.settings['current_theme'])
# Edição da Top-Level
# Top Level Master
self.master.title('Calculadora Tk')
self.master.maxsize(width=335, height=415)
self.master.minsize(width=335, height=415)
self.master.geometry('-150+100')
self.master['bg'] = self.theme['master_bg']
# Área do input
# Input
self._frame_input = tk.Frame(self.master, bg=self.theme['frame_bg'], pady=4)
self._frame_input.pack()
# Área dos botões
# Buttons
self._frame_buttons = tk.Frame(self.master, bg=self.theme['frame_bg'], padx=2)
self._frame_buttons.pack()
# Funções de inicialização
# Startup functions
self._create_input(self._frame_input)
self._create_buttons(self._frame_buttons)
self._create_menu(self.master)
@staticmethod
def _load_settings():
"""Utilitário para carregar o arquivo de confirgurações da calculadora."""
"""Utility to load the calculator settings file."""
with open('./app/settings/settings.json', mode='r', encoding='utf-8') as f:
settings = json_load(f)
return settings
def _get_theme(self, name='Dark'):
"""Retorna as configurações de estilo para o theme especificado."""
"""Returns the style settings for the specified theme."""
list_of_themes = self.settings['themes']
found_theme = None
for t in list_of_themes:
if name == t['name']:
found_theme = deepcopy(t)
break
return found_theme
def _create_input(self, master):
self._entrada = tk.Entry(master, cnf=self.theme['INPUT'])
self._entrada.insert(0,0)
self._entrada.pack()
def _create_menu(self, master):
self.master.option_add('*tearOff', FALSE)
calc_menu = Menu(self.master)
self.master.config(menu=calc_menu)
        #Configuração
        #Configuration
config = Menu(calc_menu)
theme = Menu(config)
        #Menu tema
        #Theme menu
theme_incompatible = ['Default Theme For MacOS']
for t in self.settings['themes']:
name = t['name']
# Ignora os temas não compatíveis.
# Ignores unsupported themes.
if name in theme_incompatible:
continue
else:
theme.add_command(label=name, command=partial(self._change_theme_to, name))
#Configuração
#Configuration
calc_menu.add_cascade(label='Configuração', menu=config)
config.add_cascade(label='Tema', menu=theme)
config.add_separator()
config.add_command(label='Sair', command=self._exit)
def _change_theme_to(self, name='Dark'):
self.settings['current_theme'] = name
with open('./app/settings/settings.json', 'w') as outfile:
json_dump(self.settings, outfile, indent=4)
        self._reload_app()
def _create_buttons(self, master):
""""Metódo responsável pela criação de todos os botões da calculadora,
indo desde adição de eventos em cada botão à distribuição no layout grid.
"""
""""Method responsible for creating all calculator buttons,
ranging from adding events on each button to distributing them on the grid layout.
"""
        # Seta configurações globais (width, height font etc) no botão especificado.
        # Sets global settings (width, height, font, etc.) on the specified button.
self.theme['BTN_NUMERICO'].update(self.settings['global'])
self._BTN_NUM_0 = tk.Button(master, text=0, cnf=self.theme['BTN_NUMERICO'])
self._BTN_NUM_1 = tk.Button(master, text=1, cnf=self.theme['BTN_NUMERICO'])
self._BTN_NUM_2 = tk.Button(master, text=2, cnf=self.theme['BTN_NUMERICO'])
self._BTN_NUM_3 = tk.Button(master, text=3, cnf=self.theme['BTN_NUMERICO'])
self._BTN_NUM_4 = tk.Button(master, text=4, cnf=self.theme['BTN_NUMERICO'])
self._BTN_NUM_5 = tk.Button(master, text=5, cnf=self.theme['BTN_NUMERICO'])
self._BTN_NUM_6 = tk.Button(master, text=6, cnf=self.theme['BTN_NUMERICO'])
self._BTN_NUM_7 = tk.Button(master, text=7, cnf=self.theme['BTN_NUMERICO'])
self._BTN_NUM_8 = tk.Button(master, text=8, cnf=self.theme['BTN_NUMERICO'])
self._BTN_NUM_9 = tk.Button(master, text=9, cnf=self.theme['BTN_NUMERICO'])
# Seta configurações globais (width, height font etc) no botão especificado.
# Sets Global configurations
self.theme['BTN_OPERADOR'].update(self.settings['global'])
# Instânciação dos botões dos operadores númericos
# Creates instances of number operator buttons
self._BTN_SOMA = tk.Button(master, text='+', cnf=self.theme['BTN_OPERADOR'])
self._BTN_SUB = tk.Button(master, text='-', cnf=self.theme['BTN_OPERADOR'])
self._BTN_DIV = tk.Button(master, text='/', cnf=self.theme['BTN_OPERADOR'])
self._BTN_MULT = tk.Button(master, text='*', cnf=self.theme['BTN_OPERADOR'])
self._BTN_EXP = tk.Button(master, text='^', cnf=self.theme['BTN_OPERADOR'])
self._BTN_RAIZ = tk.Button(master, text='√', cnf=self.theme['BTN_OPERADOR'])
# Seta configurações globais (width, height font etc) no botão especificado.
# Sets Global configurations
self.theme['BTN_DEFAULT'].update(self.settings['global'])
self.theme['BTN_CLEAR'].update(self.settings['global'])
# Instânciação dos botões de funcionalidades da calculadora
# Instance of calculator feature buttons
self._BTN_ABRE_PARENTESE = tk.Button(master, text='(', cnf=self.theme['BTN_DEFAULT'])
self._BTN_FECHA_PARENTESE = tk.Button(master, text=')', cnf=self.theme['BTN_DEFAULT'])
self._BTN_CLEAR = tk.Button(master, text='C', cnf=self.theme['BTN_DEFAULT'])
self._BTN_DEL = tk.Button(master, text='<', cnf=self.theme['BTN_CLEAR'])
self._BTN_RESULT = tk.Button(master, text='=', cnf=self.theme['BTN_OPERADOR'])
self._BTN_DOT = tk.Button(master, text='.', cnf=self.theme['BTN_DEFAULT'])
# Instânciação dos botões vazios, para futura implementação
# Instantiation of empty buttons, for future implementation
self._BTN_VAZIO1 = tk.Button(master, text='', cnf=self.theme['BTN_OPERADOR'])
self._BTN_VAZIO2 = tk.Button(master, text='', cnf=self.theme['BTN_OPERADOR'])
# Distribuição dos botões em um gerenciador de layout grid
# Linha 0
# Distribution of buttons in a grid layout manager
# Line 0
self._BTN_CLEAR.grid(row=0, column=0, padx=1, pady=1)
self._BTN_ABRE_PARENTESE.grid(row=0, column=1, padx=1, pady=1)
self._BTN_FECHA_PARENTESE.grid(row=0, column=2, padx=1, pady=1)
self._BTN_DEL.grid(row=0, column=3, padx=1, pady=1)
# Linha 1
# Line 1
self._BTN_NUM_7.grid(row=1, column=0, padx=1, pady=1)
self._BTN_NUM_8.grid(row=1, column=1, padx=1, pady=1)
self._BTN_NUM_9.grid(row=1, column=2, padx=1, pady=1)
self._BTN_MULT.grid(row=1, column=3, padx=1, pady=1)
# Linha 2
# Line 2
self._BTN_NUM_4.grid(row=2, column=0, padx=1, pady=1)
self._BTN_NUM_5.grid(row=2, column=1, padx=1, pady=1)
self._BTN_NUM_6.grid(row=2, column=2, padx=1, pady=1)
self._BTN_SUB.grid(row=2, column=3, padx=1, pady=1)
        # Linha 3
        # Line 3
self._BTN_NUM_1.grid(row=3, column=0, padx=1, pady=1)
self._BTN_NUM_2.grid(row=3, column=1, padx=1, pady=1)
self._BTN_NUM_3.grid(row=3, column=2, padx=1, pady=1)
self._BTN_SOMA.grid(row=3, column=3, padx=1, pady=1)
        # Linha 4
        # Line 4
self._BTN_DOT.grid(row=4, column=0, padx=1, pady=1)
self._BTN_NUM_0.grid(row=4, column=1, padx=1, pady=1)
self._BTN_RESULT.grid(row=4, column=2, padx=1, pady=1)
self._BTN_DIV.grid(row=4, column=3, padx=1, pady=1)
        # Linha 5
        # Line 5
self._BTN_VAZIO1.grid(row=5, column=0, padx=1, pady=1)
self._BTN_VAZIO2.grid(row=5, column=1, padx=1, pady=1)
self._BTN_EXP.grid(row=5, column=2, padx=1, pady=1)
self._BTN_RAIZ.grid(row=5, column=3, padx=1, pady=1)
# Eventos dos botões númericos
# Button Number events
self._BTN_NUM_0['command'] = partial(self._set_values_in_input, 0)
self._BTN_NUM_1['command'] = partial(self._set_values_in_input, 1)
self._BTN_NUM_2['command'] = partial(self._set_values_in_input, 2)
self._BTN_NUM_3['command'] = partial(self._set_values_in_input, 3)
self._BTN_NUM_4['command'] = partial(self._set_values_in_input, 4)
self._BTN_NUM_5['command'] = partial(self._set_values_in_input, 5)
self._BTN_NUM_6['command'] = partial(self._set_values_in_input, 6)
self._BTN_NUM_7['command'] = partial(self._set_values_in_input, 7)
self._BTN_NUM_8['command'] = partial(self._set_values_in_input, 8)
self._BTN_NUM_9['command'] = partial(self._set_values_in_input, 9)
# Eventos dos botões de operação matemática
# Button Math operator events
self._BTN_SOMA['command'] = partial(self._set_operator_in_input, '+')
self._BTN_SUB['command'] = partial(self._set_operator_in_input, '-')
self._BTN_MULT['command'] = partial(self._set_operator_in_input, '*')
self._BTN_DIV['command'] = partial(self._set_operator_in_input, '/')
self._BTN_EXP['command'] = partial(self._set_operator_in_input, '**')
self._BTN_RAIZ['command'] = partial(self._set_operator_in_input, '**(1/2)')
# Eventos dos botões de funcionalidades da calculadora
# Calculator Feature Buttons
self._BTN_DOT['command'] = partial(self._set_dot_in_input, '.')
self._BTN_ABRE_PARENTESE['command'] = self._set_open_parent
self._BTN_FECHA_PARENTESE['command'] = self._set_close_parent
self._BTN_DEL['command'] = self._del_last_value_in_input
self._BTN_CLEAR['command'] = self._clear_input
self._BTN_RESULT['command'] = self._get_data_in_input
def _set_values_in_input(self, value):
"""Metódo responsável por captar o valor númerico clicado e setar no input"""
"""Method responsible for capturing the numerical value clicked and setting the input"""
if self._entrada.get() == 'Erro':
self._entrada.delete(0, len(self._entrada.get()))
if self._entrada.get() == '0':
self._entrada.delete(0)
self._entrada.insert(0 ,value)
elif self._lenght_max(self._entrada.get()):
self._entrada.insert(len(self._entrada.get()) ,value)
def _set_dot_in_input(self, dot):
"""Metódo responsável por setar o ponto de separação decimal no valor"""
"""Method responsible for setting the decimal point in the value"""
if self._entrada.get() == 'Erro':
return
if self._entrada.get()[-1] not in '.+-/*' and self._lenght_max(self._entrada.get()):
self._entrada.insert(len(self._entrada.get()) ,dot)
def _set_open_parent(self):
"""Metódo para setar a abertura de parenteses no input"""
"""Method to set the opening of parentheses in the input"""
if self._entrada.get() == 'Erro':
return
if self._entrada.get() == '0':
self._entrada.delete(0)
self._entrada.insert(len(self._entrada.get()), '(')
elif self._entrada.get()[-1] in '+-/*' and self._lenght_max(self._entrada.get()):
self._entrada.insert(len(self._entrada.get()), '(')
def _set_close_parent(self):
"""Metódo para setar o fechamento de parenteses no input"""
"""Method to set the closing of parentheses in the input"""
if self._entrada.get() == 'Erro':
return
if self._entrada.get().count('(') <= self._entrada.get().count(')'):
return
if self._entrada.get()[-1] not in '+-/*(' and self._lenght_max(self._entrada.get()):
self._entrada.insert(len(self._entrada.get()), ')')
def _clear_input(self):
"""Reseta o input da calculadora, limpando-o por completo e inserindo o valor 0"""
"""Resets the calculator input, clearing it completely and entering the value 0"""
self._entrada.delete(0, len(self._entrada.get()))
self._entrada.insert(0,0)
def _del_last_value_in_input(self):
"""Apaga o último digito contido dentro do input"""
"""Erases the last digit contained within the input"""
if self._entrada.get() == 'Erro':
return
if len(self._entrada.get()) == 1:
self._entrada.delete(0)
self._entrada.insert(0,0)
else:
self._entrada.delete(len(self._entrada.get()) - 1)
def _set_operator_in_input(self, operator):
"""Metódo responsável por captar o operador matemático clicado e setar no input"""
"""Method responsible for capturing the clicked mathematical operator and setting the input"""
if self._entrada.get() == 'Erro':
return
if self._entrada.get() == '':
# print('\33[91mOperação inválida.\33[m')
return
        # Evita casos de operadores repetidos sequêncialmente, para evitar erros
        # Avoids sequentially repeated operators, to prevent errors
if self._entrada.get()[-1] not in '+-*/' and self._lenght_max(self._entrada.get()):
self._entrada.insert(len(self._entrada.get()) ,operator)
def _get_data_in_input(self):
"""Pega os dados com todas as operações contidos dentro do input
para realizar o calculo"""
"""Get the data with all operations contained within the input
to perform the calculation"""
if self._entrada.get() == 'Erro':
return
result = self.calc.calculation(self._entrada.get())
self._set_result_in_input(result=result)
def _set_result_in_input(self, result=0):
"""Seta o resultado de toda a operação dentro do input"""
"""Set the result of the entire operation inside the input"""
if self._entrada.get() == 'Erro':
return
self._entrada.delete(0, len(self._entrada.get()))
self._entrada.insert(0, result)
def _lenght_max(self, data_in_input):
"""Para verificar se o input atingiu a quantidade de caracteres máxima"""
"""To check if the input has reached the maximum number of characters"""
if len(str(data_in_input)) >= 15:
return False
return True
def start(self):
        print('\33[92mCalculadora Tk Iniciada. . .]\33[m\n')
self.master.mainloop()
def _reload_app(self):
"""Reinicia o aplicativo and recupera o path do executável do python """
"""Restart the Calculator and retrieve path of executable"""
python = sys.executable
os.execl(python, python, * sys.argv)
def _exit(self):
exit()
| 42.040201
| 102
| 0.629094
|
5f1d2da412e53a57b593a9370cf1f8cec76d4da2
| 3,687
|
py
|
Python
|
pymodule/rule.py
|
ymktw/SDNProbe
|
46dee9737951012dc378f4d71675844402093569
|
[
"Apache-2.0"
] | 3
|
2017-07-17T04:12:27.000Z
|
2017-07-22T06:37:21.000Z
|
pymodule/rule.py
|
ymktw/SDNProbe
|
46dee9737951012dc378f4d71675844402093569
|
[
"Apache-2.0"
] | null | null | null |
pymodule/rule.py
|
ymktw/SDNProbe
|
46dee9737951012dc378f4d71675844402093569
|
[
"Apache-2.0"
] | null | null | null |
import cPickle
def dump_rules(filename, rules):
with open(filename, 'w') as f:
cPickle.dump(rules, f, cPickle.HIGHEST_PROTOCOL)
def load_rules(filename):
with open(filename, 'r') as f:
rules = cPickle.load(f)
return rules
class Rule():
APPLY_ACTION = 0
CLEAR_ACTION = 1
WRITE_ACTION = 2
GOTO_TABLE = 3
INSTRUCTION = [APPLY_ACTION, CLEAR_ACTION, WRITE_ACTION, GOTO_TABLE]
SET_FIELD = 0
GROUP = 1
OUTPUT = 2
ACTION = [SET_FIELD, GROUP, OUTPUT]
EDGE_PORT = 1000
MAX_PRIORITY = 30000
def __init__(self, id, switch_id, prefix, in_port, out_port, priority=MAX_PRIORITY):
self.id = id;
self.switch_id = switch_id;
self.priority = priority
self.prefix = prefix
self.header_space, self.ip, self.match_length = self.to_header_space(prefix)
self.out_port = out_port
self.in_port = in_port
self.is_path_start = False
self.is_path_end = False
self.timeout = 0
self.path_index = None
self.inst_actions = {}
self.table_id = 0
self.group_id = None
self.modify_field = None
self.all_pair_path_index = None
self.is_incremental = False
self.is_sendback = False
self.is_deleted = False
self.is_modified_input = False
self.is_modified_output = False
def to_header_space(self, prefix):
ip = prefix.split('/')[0]
match_length = 32 if len(prefix.split('/')) < 2 else int(prefix.split('/')[1] )
hs = ''.join([bin(int(x)+256)[3:] for x in ip.split('.')])
hs = hs[:match_length]
hs += 'x'*(32-len(hs))
return hs, ip, match_length
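    # Illustrative: to_header_space('10.0.0.0/8') returns ('00001010' + 'x' * 24, '10.0.0.0', 8),
    # i.e. the first match_length bits of the address followed by 'x' wildcards up to 32 bits.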
def is_match(self, last_rule):
        return self.header_space[:self.header_space.index('x')] == last_rule.get_header_space()[:self.header_space.index('x')]
def serialize(self):
return cPickle.dumps(self)
def get_id(self):
return self.id
def get_switch_id(self):
return self.switch_id
def set_in_port(self, in_port):
self.in_port = in_port
def get_in_port(self):
return self.in_port
def set_out_port(self, out_port):
self.out_port = out_port
def get_out_port(self):
return self.out_port
def set_inst_actions(self, inst, actions):
self.inst_actions[inst] = actions
def set_table_id(self, table_id):
self.table_id = table_id
def get_table_id(self):
return self.table_id
def set_path_index(self, index):
self.path_index = index
def get_path_index(self):
return self.path_index
def set_all_pair_path_index(self, index):
self.all_pair_path_index = index;
def set_priority(self, priority):
self.priority = priority
def get_priority(self):
return self.priority
def set_prefix(self, prefix):
self.prefix = prefix
self.header_space, self.ip, self.match_length = self.to_header_space(prefix)
def get_prefix(self):
return self.prefix
def get_header_space(self):
return self.header_space
def get_all_pair_path_index(self):
return self.all_pair_path_index;
def __str__(self):
string = 'Rule ID: ' + str(self.id) + ', ' + "Switch ID: " + str(self.switch_id) + ', ' + \
'Priority: ' + str(self.priority) + ', ' + 'Prefix: ' + self.prefix + ', ' + 'HeaderSpace: ' + self.header_space + ', ' + \
'Inport: ' + str(self.in_port) + ', ' + 'Outport: ' + str(self.out_port) + ', ' + 'Inst_actions: ' + str(self.inst_actions)
return string
if __name__ == '__main__':
pass
| 27.514925
| 139
| 0.616762
|
01d7e6124fa080baf4b7e1bf8116ac821b6cbf4a
| 14,672
|
py
|
Python
|
test/e2e/v1beta1/argo_workflow.py
|
jardon/katib
|
9c88bbce8e0b3b43f0e791de6983d6d10a207a1b
|
[
"Apache-2.0"
] | null | null | null |
test/e2e/v1beta1/argo_workflow.py
|
jardon/katib
|
9c88bbce8e0b3b43f0e791de6983d6d10a207a1b
|
[
"Apache-2.0"
] | null | null | null |
test/e2e/v1beta1/argo_workflow.py
|
jardon/katib
|
9c88bbce8e0b3b43f0e791de6983d6d10a207a1b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script creates Argo Workflow for the e2e Katib tests.
from kubeflow.testing import argo_build_util
# Main worker image to execute Workflow.
IMAGE_WORKER = "public.ecr.aws/j1r0q0g6/kubeflow-testing:latest"
# Kaniko image to build Katib images.
IMAGE_KANIKO = "gcr.io/kaniko-project/executor:v1.0.0"
# Volume to store test data among the Workflow tasks.
VOLUME_TEST_DATA = "kubeflow-test-volume"
# Volume mount path to store test data among the Workflow tasks.
MOUNT_PATH = "/mnt/test-data-volume"
# Volume to store GitHub token to clone repos.
VOLUME_GITHUB_TOKEN = "github-token"
# Volume to store AWS secret for the Kaniko build.
VOLUME_AWS_SECRET = "aws-secret"
# Volume to store Docker config for Kaniko build.
VOLUME_DOCKER_CONFIG = "docker-config"
# Entrypoint for the Argo Workflow.
ENTRYPOINT = "e2e"
# The template that should always run when the Workflow is complete.
EXIT_HANDLER = "exit-handler"
# Dict with all Katib images.
# Key - image name, Value - dockerfile location.
KATIB_IMAGES = {
"katib-controller": "cmd/katib-controller/v1beta1/Dockerfile",
"katib-db-manager": "cmd/db-manager/v1beta1/Dockerfile",
# TODO (andreyvelich): Change it to /cmd/ui/v1beta1/Dockerfile once old UI is deprecated.
"katib-ui": "cmd/new-ui/v1beta1/Dockerfile",
"cert-generator": "cmd/cert-generator/v1beta1/Dockerfile",
"file-metrics-collector": "cmd/metricscollector/v1beta1/file-metricscollector/Dockerfile",
"tfevent-metrics-collector": "cmd/metricscollector/v1beta1/tfevent-metricscollector/Dockerfile",
"suggestion-hyperopt": "cmd/suggestion/hyperopt/v1beta1/Dockerfile",
"suggestion-chocolate": "cmd/suggestion/chocolate/v1beta1/Dockerfile",
"suggestion-skopt": "cmd/suggestion/skopt/v1beta1/Dockerfile",
"suggestion-hyperband": "cmd/suggestion/hyperband/v1beta1/Dockerfile",
"suggestion-goptuna": "cmd/suggestion/goptuna/v1beta1/Dockerfile",
"suggestion-optuna": "cmd/suggestion/optuna/v1beta1/Dockerfile",
"suggestion-enas": "cmd/suggestion/nas/enas/v1beta1/Dockerfile",
"suggestion-darts": "cmd/suggestion/nas/darts/v1beta1/Dockerfile",
"earlystopping-medianstop": "cmd/earlystopping/medianstop/v1beta1/Dockerfile",
"trial-mxnet-mnist": "examples/v1beta1/trial-images/mxnet-mnist/Dockerfile",
"trial-pytorch-mnist": "examples/v1beta1/trial-images/pytorch-mnist/Dockerfile",
"trial-tf-mnist-with-summaries": "examples/v1beta1/trial-images/tf-mnist-with-summaries/Dockerfile",
"trial-enas-cnn-cifar10-gpu": "examples/v1beta1/trial-images/enas-cnn-cifar10/Dockerfile.gpu",
"trial-enas-cnn-cifar10-cpu": "examples/v1beta1/trial-images/enas-cnn-cifar10/Dockerfile.cpu",
"trial-darts-cnn-cifar10": "examples/v1beta1/trial-images/darts-cnn-cifar10/Dockerfile",
}
# Dict with Katib Experiments to run during the test.
# Key - Experiment name, Value - Experiment YAML location.
KATIB_EXPERIMENTS = {
"random": "examples/v1beta1/hp-tuning/random.yaml",
"grid": "examples/v1beta1/hp-tuning/grid.yaml",
"bayesianoptimization": "examples/v1beta1/hp-tuning/bayesian-optimization.yaml",
"tpe": "examples/v1beta1/hp-tuning/tpe.yaml",
"multivariate-tpe": "examples/v1beta1/hp-tuning/multivariate-tpe.yaml",
"cmaes": "examples/v1beta1/hp-tuning/cma-es.yaml",
"hyperband": "examples/v1beta1/hp-tuning/hyperband.yaml",
"enas": "examples/v1beta1/nas/enas-cpu.yaml",
"darts": "examples/v1beta1/nas/darts-cpu.yaml",
"pytorchjob": "examples/v1beta1/kubeflow-training-operator/pytorchjob-mnist.yaml",
"tfjob": "examples/v1beta1/kubeflow-training-operator/tfjob-mnist-with-summaries.yaml",
"file-metricscollector": "examples/v1beta1/metrics-collector/file-metrics-collector.yaml",
"never-resume": "examples/v1beta1/resume-experiment/never-resume.yaml",
"from-volume-resume": "examples/v1beta1/resume-experiment/from-volume-resume.yaml",
"median-stop": "examples/v1beta1/early-stopping/median-stop.yaml"
}
# How many Experiments are running in parallel.
PARALLEL_EXECUTION = 5
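# Illustrative note: with the 15 Experiments listed above and PARALLEL_EXECUTION = 5,
# the DAG built in create_workflow() runs them in three waves of five, each wave
# depending on completion of the previous one.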
class WorkflowBuilder(object):
def __init__(self, workflow_name, workflow_namespace, test_dir, ecr_registry):
"""WorkflowBuilder constructor.
:param workflow_name: Argo Workflow name.
:param workflow_namespace: Argo Workflow namespace.
:param test_dir: Root directory to store all data for a particular test run.
:param ecr_registry: ECR registry to push the test images.
"""
self.workflow_name = workflow_name
self.workflow_namespace = workflow_namespace
self.test_dir = test_dir
self.katib_dir = test_dir + "/src/github.com/kubeflow/katib"
self.manifest_dir = test_dir + "/src/github.com/kubeflow/manifests"
self.ecr_registry = ecr_registry
def create_task_template(self, task_name, exec_image, command):
"""Creates template for all the Workflow tasks.
:param task_name: Template name for the task.
:param exec_image: Container image to execute the task.
:param command: List of container commands.
:return: Created task template.
"""
# Container environment variables.
# TODO (andreyvelich): Add PYTHONPATH ?
env = [
{
"name": "AWS_ACCESS_KEY_ID",
"valueFrom": {
"secretKeyRef": {
"name": "aws-credentials",
"key": "AWS_ACCESS_KEY_ID"
}
}
},
{
"name": "AWS_SECRET_ACCESS_KEY",
"valueFrom": {
"secretKeyRef": {
"name": "aws-credentials",
"key": "AWS_SECRET_ACCESS_KEY"
}
}
},
{
"name": "AWS_REGION",
"value": "us-west-2"
},
{
"name": "CLUSTER_NAME",
"value": self.workflow_name
},
{
"name": "EKS_CLUSTER_VERSION",
"value": "1.19"
},
{
"name": "ECR_REGISTRY",
"value": self.ecr_registry
},
{
"name": "GIT_TOKEN",
"valueFrom": {
"secretKeyRef": {
"name": "github-token",
"key": "github_token"
}
}
},
{
"name": "MANIFESTS_DIR",
"value": self.manifest_dir
},
{
"name": "EXTRA_REPOS",
"value": "kubeflow/testing@HEAD;kubeflow/manifests@v1.5-branch"
},
# Set GOPATH to test_dir because Katib repo is located under /src/github.com/kubeflow/katib
{
"name": "GOPATH",
"value": self.test_dir
}
]
# Container volume mounts.
volume_mounts = [
{
"name": VOLUME_TEST_DATA,
"mountPath": MOUNT_PATH
},
{
"name": VOLUME_GITHUB_TOKEN,
"mountPath": "/secret/github-token"
},
{
"name": VOLUME_AWS_SECRET,
"mountPath": "/root/.aws/"
},
{
"name": VOLUME_DOCKER_CONFIG,
"mountPath": "/kaniko/.docker/"
},
]
task_template = {
"name": task_name,
# Each container can be alive for 40 minutes.
"retryStrategy": {
"limit": "3",
"retryPolicy": "Always",
"backoff": {
"duration": "1",
"factor": "2",
"maxDuration": "1m",
},
},
"container": {
"command": command,
"image": exec_image,
"workingDir": self.katib_dir,
"env": env,
"volumeMounts": volume_mounts,
}
}
# Add prow env to the task template.
prow_env_dict = argo_build_util.get_prow_dict()
for k, v in prow_env_dict.items():
task_template["container"]["env"].append({"name": k, "value": v})
return task_template
def create_init_workflow(self):
"""Creates initial structure for the Argo Workflow.
:return: Initial Argo Workflow.
"""
# Volumes which are used in Argo Workflow.
volumes = [
{
"name": VOLUME_TEST_DATA,
"persistentVolumeClaim": {
"claimName": "nfs-external"
},
},
{
"name": VOLUME_GITHUB_TOKEN,
"secret": {
"secretName": VOLUME_GITHUB_TOKEN
},
},
{
"name": VOLUME_AWS_SECRET,
"secret": {
"secretName": VOLUME_AWS_SECRET
},
},
{
"name": VOLUME_DOCKER_CONFIG,
"configMap": {
"name": VOLUME_DOCKER_CONFIG
},
},
]
workflow = {
"apiVersion": "argoproj.io/v1alpha1",
"kind": "Workflow",
"metadata": {
"name": self.workflow_name,
"namespace": self.workflow_namespace,
},
"spec": {
"entrypoint": ENTRYPOINT,
"volumes": volumes,
"templates": [
{
"name": ENTRYPOINT,
"dag": {
"tasks": []
}
},
{
"name": EXIT_HANDLER,
"dag": {
"tasks": []
}
}
],
"onExit": EXIT_HANDLER
},
}
return workflow
def create_workflow(name, namespace, **kwargs):
"""Main function which returns Argo Workflow.
:param name: Argo Workflow name.
:param namespace: Argo Workflow namespace.
:param kwargs: Argo Workflow additional arguments.
:return: Created Argo Workflow.
"""
test_dir = MOUNT_PATH + "/" + name
ecr_registry = kwargs["registry"]
builder = WorkflowBuilder(name, namespace, test_dir, ecr_registry)
# Build initial structure for the Workflow.
workflow = builder.create_init_workflow()
# Delete AWS Cluster in the exit handler step.
delete_cluster = builder.create_task_template(
task_name="delete-cluster",
exec_image=IMAGE_WORKER,
command=[
"/usr/local/bin/delete-eks-cluster.sh",
]
)
argo_build_util.add_task_to_dag(workflow, EXIT_HANDLER, delete_cluster, [])
# Step 1. Checkout GitHub repositories.
checkout = builder.create_task_template(
task_name="checkout",
exec_image=IMAGE_WORKER,
command=[
"/usr/local/bin/checkout.sh",
test_dir + "/src/github.com"
]
)
argo_build_util.add_task_to_dag(workflow, ENTRYPOINT, checkout, [])
# Step 2.1 Build all Katib images.
depends = []
for image, dockerfile in KATIB_IMAGES.items():
build_image = builder.create_task_template(
task_name="build-"+image,
exec_image=IMAGE_KANIKO,
command=[
"/kaniko/executor",
"--dockerfile={}/{}".format(builder.katib_dir, dockerfile),
"--context=dir://" + builder.katib_dir,
"--destination={}/katib/v1beta1/{}:$(PULL_PULL_SHA)".format(ecr_registry, image)
]
)
argo_build_util.add_task_to_dag(workflow, ENTRYPOINT, build_image, [checkout["name"]])
depends.append(build_image["name"])
# Step 2.2 Create AWS cluster.
create_cluster = builder.create_task_template(
task_name="create-cluster",
exec_image=IMAGE_WORKER,
command=[
"/usr/local/bin/create-eks-cluster.sh",
]
)
argo_build_util.add_task_to_dag(workflow, ENTRYPOINT, create_cluster, [checkout["name"]])
depends.append(create_cluster["name"])
# Step 3. Setup Katib on AWS cluster.
setup_katib = builder.create_task_template(
task_name="setup-katib",
exec_image=IMAGE_WORKER,
command=[
"test/e2e/v1beta1/scripts/setup-katib.sh"
]
)
# Installing Katib after cluster is created and images are built.
argo_build_util.add_task_to_dag(workflow, ENTRYPOINT, setup_katib, depends)
# Step 4. Run Katib Experiments.
depends = [setup_katib["name"]]
tmp_depends = []
for index, (experiment, location) in enumerate(KATIB_EXPERIMENTS.items()):
run_experiment = builder.create_task_template(
task_name="run-e2e-experiment-"+experiment,
exec_image=IMAGE_WORKER,
command=[
"test/e2e/v1beta1/scripts/run-e2e-experiment.sh",
location
]
)
argo_build_util.add_task_to_dag(workflow, ENTRYPOINT, run_experiment, depends)
tmp_depends.append(run_experiment["name"])
        # Run at most PARALLEL_EXECUTION Experiments at the same time (index starts at 0)
if (index+1) % PARALLEL_EXECUTION == 0:
depends, tmp_depends = tmp_depends, []
return workflow
| 37.814433
| 110
| 0.562432
|
18e8f6dbec7ca0229b29a754c039d4b6a722f128
| 988
|
py
|
Python
|
src/train/train_logistic_reg.py
|
CristianViorelPopa/transformers-dialect-identification
|
75edbc39c4b91967b99b63d97f7cd6644c48f8a7
|
[
"MIT"
] | 3
|
2021-11-04T09:26:19.000Z
|
2022-01-13T00:20:41.000Z
|
src/train/train_logistic_reg.py
|
CristianViorelPopa/transformers-dialect-identification
|
75edbc39c4b91967b99b63d97f7cd6644c48f8a7
|
[
"MIT"
] | null | null | null |
src/train/train_logistic_reg.py
|
CristianViorelPopa/transformers-dialect-identification
|
75edbc39c4b91967b99b63d97f7cd6644c48f8a7
|
[
"MIT"
] | null | null | null |
import argparse
import pickle
from sklearn.datasets import load_svmlight_file
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--data-file", required=True,
help="Location of the data to predict on.")
parser.add_argument("--output-file", required=True,
help="Location where the model will be stored.")
args = parser.parse_args()
X, y = load_svmlight_file(args.data_file)
parameters = {
'C': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'intercept_scaling': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'fit_intercept': (True, False)
}
model = LogisticRegression(penalty='l2', solver='liblinear', tol=1e-8, max_iter=500,
random_state=42)
clf = GridSearchCV(model, parameters, n_jobs=20, cv=5, refit=True, iid=False)
clf.fit(X, y)
pickle.dump(clf.best_estimator_, open(args.output_file, 'wb'))
if __name__ == '__main__':
main()
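# A minimal usage sketch (not part of the original script): loading the pickled
# estimator back and scoring a feature matrix. The path and data are illustrative
# placeholders; note that the iid argument passed to GridSearchCV above requires
# an older scikit-learn (the parameter was removed in 0.24).
def load_and_predict(model_path, X):
    with open(model_path, 'rb') as handle:
        estimator = pickle.load(handle)
    return estimator.predict(X)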
| 29.058824
| 85
| 0.708502
|
be78367fd6cd713d44ccf34a37009c995c3eb564
| 667
|
py
|
Python
|
2021/puzzle_07.py
|
ericvw/advent-of-code
|
dec645d778fe5532f1b7796220bb6c89a156911f
|
[
"MIT"
] | null | null | null |
2021/puzzle_07.py
|
ericvw/advent-of-code
|
dec645d778fe5532f1b7796220bb6c89a156911f
|
[
"MIT"
] | null | null | null |
2021/puzzle_07.py
|
ericvw/advent-of-code
|
dec645d778fe5532f1b7796220bb6c89a156911f
|
[
"MIT"
] | null | null | null |
import fileinput
from itertools import chain
import math
import statistics
# Parse
crab_positions = [
int(x)
for x in chain.from_iterable(line.rstrip().split(",") for line in fileinput.input())
]
# Main
# Part 1
median = int(statistics.median(crab_positions))
deltas_to_align = [abs(p - median) for p in crab_positions]
# Result 1
print(f"Fuel spent for part 1: {sum(deltas_to_align)}")
# Part 2
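# Moving a crab a distance of d costs 1 + 2 + ... + d = d * (d + 1) // 2 fuel,
# and that total is minimised near the mean position, so it suffices to test
# the two integers bracketing the mean.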
mean = statistics.mean(crab_positions)
means = (math.ceil(mean), math.floor(mean))
min_fuel_spent = min(
sum((d * (d + 1) // 2 for d in (abs(c - m) for c in crab_positions))) for m in means
)
# Result 2
print(f"Fuel spent for part 2: {min_fuel_spent}")
| 22.233333
| 88
| 0.701649
|
4289505d1d8212c09167280e2efbe257f8fbe7c0
| 1,269
|
py
|
Python
|
django_modules/home/forms.py
|
Mehdi6/djangoModules
|
b6e8fc578933675d0d087e87e1bdc99d12f440c1
|
[
"MIT"
] | null | null | null |
django_modules/home/forms.py
|
Mehdi6/djangoModules
|
b6e8fc578933675d0d087e87e1bdc99d12f440c1
|
[
"MIT"
] | null | null | null |
django_modules/home/forms.py
|
Mehdi6/djangoModules
|
b6e8fc578933675d0d087e87e1bdc99d12f440c1
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Contact
from django.core.mail import EmailMessage
from django.conf import settings
class ContactForm(forms.Form):
name = forms.CharField(required=True, min_length=3, max_length=50, label='Name', widget=forms.TextInput(attrs={'placeholder': 'Name'}))
email = forms.EmailField(required=True, label='Email address', widget=forms.TextInput(attrs={'placeholder': 'Email Address'}))
subject = forms.CharField(required=False, min_length=3, max_length=140, label='Subject', widget=forms.TextInput(attrs={'placeholder': 'Subject'}))
content = forms.CharField(required=True, max_length=2500, label='Content', widget=forms.Textarea)
    def send_email(self):
        email = EmailMessage(
            self.cleaned_data['name'] + ' - SUBJECT:' + self.cleaned_data['subject'],
            'EMAIL: ' + self.cleaned_data['email'] + '\nCONTENT: ' + self.cleaned_data['content'],
            to=settings.ADMINS_EMAILS)
        email.send()
def save(self):
contact = Contact(name=self.cleaned_data['name'], email=self.cleaned_data['email'],\
subject=self.cleaned_data['subject'], content=self.cleaned_data['content'])
contact.save()
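# A minimal usage sketch (not part of the original module), assuming a plain
# function-based view and a hypothetical 'contact.html' template:
#
#     from django.shortcuts import render
#
#     def contact_view(request):
#         form = ContactForm(request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             form.save()        # persist the Contact record
#             form.send_email()  # notify settings.ADMINS_EMAILS
#         return render(request, 'contact.html', {'form': form})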
| 52.875
| 150
| 0.666667
|
e18e025d49122b2c18067565af8a4e0ef569e2e6
| 972
|
py
|
Python
|
src/cdumay_rest_client/client.py
|
cdumay/cdumay-rest-client
|
ba0c997c418a88a096c030a2892595904a49b817
|
[
"Apache-2.0"
] | null | null | null |
src/cdumay_rest_client/client.py
|
cdumay/cdumay-rest-client
|
ba0c997c418a88a096c030a2892595904a49b817
|
[
"Apache-2.0"
] | null | null | null |
src/cdumay_rest_client/client.py
|
cdumay/cdumay-rest-client
|
ba0c997c418a88a096c030a2892595904a49b817
|
[
"Apache-2.0"
] | 1
|
2018-01-12T15:27:27.000Z
|
2018-01-12T15:27:27.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Cédric Dumay <cedric.dumay@gmail.com>
"""
import json
import logging
from cdumay_http_client.client import HttpClient
logger = logging.getLogger(__name__)
class RESTClient(HttpClient):
"""RestClient"""
def __init__(self, server, timeout=None, headers=None, username=None,
password=None, ssl_verify=True, retry_number=None,
retry_delay=None):
_headers = headers or dict()
_headers.update({
'Content-Type': 'application/json',
'Accept': 'application/json',
})
HttpClient.__init__(
self, server, timeout, _headers, username, password, ssl_verify,
retry_number, retry_delay
)
def _format_data(self, data):
return json.dumps(data) if data else None
# noinspection PyMethodMayBeStatic
def _parse_response(self, response):
return response.json()
| 25.578947
| 76
| 0.635802
|
98492bd36d6ec868d291aa5e0150c43cadf6c115
| 631
|
py
|
Python
|
frappe_renovation_docsync/__init__.py
|
mainul94/frappe_renovation_docsync
|
5d49655b373cdd891e594feed6825952e0b7e089
|
[
"MIT"
] | null | null | null |
frappe_renovation_docsync/__init__.py
|
mainul94/frappe_renovation_docsync
|
5d49655b373cdd891e594feed6825952e0b7e089
|
[
"MIT"
] | null | null | null |
frappe_renovation_docsync/__init__.py
|
mainul94/frappe_renovation_docsync
|
5d49655b373cdd891e594feed6825952e0b7e089
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__version__ = '0.0.1'
# Patches
import frappe.event_streaming.doctype.event_producer.event_producer
from frappe.event_streaming.doctype.event_producer.event_producer import EventProducer
from frappe_renovation_docsync.utils.event_producer import get_request_data as _get_request_data, \
update_event_consumer as _update_event_consumer, get_updates as _get_updates
EventProducer.get_request_data = _get_request_data
EventProducer.update_event_consumer = _update_event_consumer
frappe.event_streaming.doctype.event_producer.event_producer.get_updates = _get_updates
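# Note: assigning to attributes on the EventProducer class and on the
# event_producer module above monkey-patches the upstream implementations at
# import time, so Frappe's event streaming transparently uses the overridden
# get_request_data / update_event_consumer / get_updates from this app.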
| 48.538462
| 99
| 0.868463
|
a14a57f02d6fb6ee2048149ab284a23c420fcf48
| 1,381
|
py
|
Python
|
api/views.py
|
darlannakamura/secompp-2018
|
1dd7edef642f56769a1c606ca3de566429f0d39c
|
[
"MIT",
"Unlicense"
] | null | null | null |
api/views.py
|
darlannakamura/secompp-2018
|
1dd7edef642f56769a1c606ca3de566429f0d39c
|
[
"MIT",
"Unlicense"
] | null | null | null |
api/views.py
|
darlannakamura/secompp-2018
|
1dd7edef642f56769a1c606ca3de566429f0d39c
|
[
"MIT",
"Unlicense"
] | null | null | null |
from rest_framework import generics
from rest_framework import mixins
from rest_framework import status  # used by PostList.post below
from rest_framework.response import Response  # used by PostList.post below
from blog.models import Post
from rest_framework import permissions
from api.serializers import PostSerializer
from api.permissions import IsAuthorOrReadOnly
from rest_framework.permissions import IsAuthenticatedOrReadOnly
class PostList(generics.ListAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
permission_classes = (IsAuthenticatedOrReadOnly,)
def post(self, request, format=None):
serializer = PostSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class PostDetail(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
generics.GenericAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
permission_classes = (IsAuthorOrReadOnly,)
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
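# Hedged wiring sketch (not part of this module): these views would typically
# be routed in a urls.py such as
#
#     from django.urls import path
#     from api import views
#
#     urlpatterns = [
#         path('posts/', views.PostList.as_view()),
#         path('posts/<int:pk>/', views.PostDetail.as_view()),
#     ]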
| 36.342105
| 78
| 0.714699
|
1532dcd4b5c644e5378fe5ae0193b0cac8aecdb3
| 2,413
|
py
|
Python
|
lib/medloaders/covid_ct_dataset.py
|
McMasterAI/RadiologyandAI-MedicalZooPytorch
|
606a1654f08b8bae7c265608694d55fecc1001ed
|
[
"MIT"
] | 995
|
2019-07-23T11:34:22.000Z
|
2022-03-30T21:10:52.000Z
|
lib/medloaders/covid_ct_dataset.py
|
pyushkevich/MedicalZooPytorch
|
c6831d8ddebfbc1b33c04f8cec0d01c2ceb828f6
|
[
"MIT"
] | 18
|
2020-04-27T03:38:22.000Z
|
2022-01-18T20:55:20.000Z
|
lib/medloaders/covid_ct_dataset.py
|
pyushkevich/MedicalZooPytorch
|
c6831d8ddebfbc1b33c04f8cec0d01c2ceb828f6
|
[
"MIT"
] | 209
|
2019-08-21T13:41:13.000Z
|
2022-03-30T08:01:52.000Z
|
import os
import torch
from torch.utils.data import Dataset
from lib.utils.covid_utils import read_txt
from PIL import Image
import torchvision.transforms as transforms
class CovidCTDataset(Dataset):
def __init__(self,mode, root_dir, txt_COVID, txt_NonCOVID, transform=None):
"""
Args:
txt_path (string): Path to the txt file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
File structure:
- root_dir
- CT_COVID
- img1.png
- img2.png
- ......
- CT_NonCOVID
- img1.png
- img2.png
- ......
"""
self.root_dir = root_dir
self.txt_path = [txt_COVID, txt_NonCOVID]
self.classes = ['CT_COVID', 'CT_NonCOVID']
self.num_cls = len(self.classes)
self.img_list = []
self.full_volume = None
self.affine = None
for c in range(self.num_cls):
cls_list = [[os.path.join(self.root_dir, self.classes[c], item), c] for item in read_txt(self.txt_path[c])]
self.img_list += cls_list
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_transformer = transforms.Compose([
transforms.Resize(256),
transforms.RandomResizedCrop((224), scale=(0.5, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
val_transformer = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
if(mode == 'train'):
self.transform = train_transformer
else:
self.transform = val_transformer
print('samples = ', len(self.img_list))
def __len__(self):
return len(self.img_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_path = self.img_list[idx][0]
image = Image.open(img_path).convert('RGB')
if self.transform:
image = self.transform(image)
return image, torch.tensor(int(self.img_list[idx][1]), dtype=torch.long)
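# A minimal usage sketch (not part of the original module); the directory and
# txt-file paths passed in are illustrative placeholders.
def build_loader(root_dir, txt_covid, txt_noncovid, batch_size=8, mode='train'):
    from torch.utils.data import DataLoader
    dataset = CovidCTDataset(mode=mode, root_dir=root_dir,
                             txt_COVID=txt_covid, txt_NonCOVID=txt_noncovid)
    return DataLoader(dataset, batch_size=batch_size, shuffle=(mode == 'train'))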
| 30.1625
| 119
| 0.5661
|
9cba9eae7ae64b8752071a584a4e0870153f0f7f
| 5,555
|
py
|
Python
|
ciphey/languageCheckerMod/dictionaryChecker.py
|
decalage2/Ciphey
|
ebe22af0a2ab5c21aaaa3913f8ff20e10149ca9e
|
[
"MIT"
] | 1
|
2020-10-28T18:37:23.000Z
|
2020-10-28T18:37:23.000Z
|
ciphey/languageCheckerMod/dictionaryChecker.py
|
decalage2/Ciphey
|
ebe22af0a2ab5c21aaaa3913f8ff20e10149ca9e
|
[
"MIT"
] | null | null | null |
ciphey/languageCheckerMod/dictionaryChecker.py
|
decalage2/Ciphey
|
ebe22af0a2ab5c21aaaa3913f8ff20e10149ca9e
|
[
"MIT"
] | 1
|
2021-09-18T13:21:00.000Z
|
2021-09-18T13:21:00.000Z
|
import string
import os
import sys
from loguru import logger
import cipheydists
sys.path.append("..")
try:
import ciphey.mathsHelper as mh
except ModuleNotFoundError:
import mathsHelper as mh
class dictionaryChecker:
"""
Class designed to confirm whether something is **language** based on how many words of **language** appears
Call confirmlanguage(text, language)
* text: the text you want to confirm
* language: the language you want to confirm
Find out what language it is by using chisquared.py, the highest chisquared score is the language
languageThreshold = 45
if a string is 45% **language** words, then it's confirmed to be english
"""
def __init__(self):
self.mh = mh.mathsHelper()
self.languagePercentage: float = 0.0
self.languageWordsCounter: float = 0.0
self.languageThreshold = 55
        # this is hard-coded because I don't want to use a library or rely on reading from files, as it's slow.
# dictionary because lookup is O(1)
self.top1000Words = dict.fromkeys(cipheydists.get_list("english1000"))
def cleanText(self, text: str) -> list:
"""Cleans the texy ready to be checked
Strips punucation, makes it lower case, turns it into a list seperated by spaces, remvoes duplicate words
Args:
text -> The text we use to perform analysis on
Returns:
text -> the text as a list, now cleaned
"""
# makes the text unique words and readable
text = text.lower()
text = self.mh.strip_puncuation(text)
text = text.split(" ")
text = list(set(text))
return text
def check1000Words(self, text: str) -> bool:
"""Checks to see if word is in the list of 1000 words
the 1000words is a dict, so lookup is O(1)
Args:
            text -> The text we test (a word)
Returns:
bool -> whether it's in the dict or not.
"""
if text is None:
return False
check = dict.fromkeys(self.top1000Words)
logger.debug(f"text before cleaning is {text}")
text: str = self.cleanText(text)
logger.debug(f"Check 1000 words text is {text}")
# If any of the top 1000 words in the text appear
# return true
for word in text:
logger.debug(f"Word in check1000 is {word}")
# I was debating using any() here, but I think they're the
# same speed so it doesn't really matter too much
if word in check:
logger.debug(f"Check 1000 words returns True for word {word}")
return True
return False
def checkDictionary(self, text: str, language: str) -> int:
"""Sorts & searches the dict
        Compares each word of the text with the dictionary
The dictionary is sorted and the text is sorted
for every single word in main dictionary
if that word == text[0] then +1 to counter
then +1 to text[0 + i]
so say the dict is ordered
we just loop through dict
and eventually we'll reach a point where word in dict = word in text
at that point, we move to the next text point
both text and dict are sorted
so we only loop once, we can do this in O(n log n) time
Args:
text -> The text we use to perform analysis on
language -> the language we want to check
Returns:
counter -> how many words in text, are in the dict of language
"""
# reads through most common words / passwords
# and calculates how much of that is in language
text: str = self.cleanText(text)
text.sort()
f = cipheydists.get_list(language)
# so this should loop until it gets to the point in the @staticmethod
# that equals the word :)
counter: int = 0
counter_percent: int = 0
for dictLengthCounter, word in enumerate(f):
# if there is more words counted than there is text
# it is 100%, sometimes it goes over
# so this stops that
if counter >= len(text):
break
# if the dictionary word is contained in the text somewhere
# counter + 1
if word in text:
counter = counter + 1
counter_percent = counter_percent + 1
self.languageWordsCounter = counter
self.languagePercentage = self.mh.percentage(
float(self.languageWordsCounter), float(len(text))
)
return counter
    def confirmlanguage(self, text: str, language: str) -> bool:
        """Confirms whether given text is language
        If the languagePercentage (taken from checkDictionary) is higher than the language threshold, return True
Args:
            text -> The text we test
language -> the language we use to check
Returns:
bool -> whether it's written in Language or not
"""
self.checkDictionary(text, language)
if self.languagePercentage >= self.languageThreshold:
logger.debug(
f"The language percentange {self.languagePercentage} is over the threshold {self.languageThreshold}"
)
return True
else:
return False
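# A minimal usage sketch (not part of the original module); the "english"
# dictionary name and the sample sentence are purely illustrative.
def _example():
    checker = dictionaryChecker()
    return checker.confirmlanguage(
        "hello this is an example sentence written in plain english", "english"
    )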
| 35.382166
| 117
| 0.590999
|
70dc138daeeff711e87ee3bc28a66d178d6c3f01
| 4,325
|
py
|
Python
|
app/src/Poem_On_a_topic.py
|
dgw0015/AI-Backend
|
0b377504585bf807171569cece382e5a140b2142
|
[
"Apache-2.0"
] | null | null | null |
app/src/Poem_On_a_topic.py
|
dgw0015/AI-Backend
|
0b377504585bf807171569cece382e5a140b2142
|
[
"Apache-2.0"
] | null | null | null |
app/src/Poem_On_a_topic.py
|
dgw0015/AI-Backend
|
0b377504585bf807171569cece382e5a140b2142
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Load data
import io
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from IPython import get_ipython
sns.set(context='talk', style='ticks')
get_ipython().run_line_magic('matplotlib', 'inline')
# In[50]:
# Read text file into a dataframe and split poems into rows (the column is still named 'lyrics')
df = pd.DataFrame({'lyrics': io.open('poemdata.txt', 'r', encoding='ascii', errors='ignore').read().split('\n\n')})
# In[51]:
# Derive text related metrics (number of characters, words, lines, unique words) and lexical density for each poem
# characters, words, lines
df['#characters'] = df.lyrics.str.len()
df['#words'] = df.lyrics.str.split().str.len()
df['#lines'] = df.lyrics.str.split('\n').str.len()
df['#uniq_words'] = df.lyrics.apply(lambda x: len(set(x.split())))
df['lexical_density'] = df['#uniq_words'] / df['#words']
# In[52]:
# Now that we have text metrics, a quick histogram spread on all metrics.
df.hist(sharey=True, layout=(2, 3), figsize=(15, 8))
# In[53]:
# Word length distribution
pd.Series(len(x) for x in ' '.join(df.lyrics).split()).value_counts().sort_index().plot(kind='bar', figsize=(12, 3))
# In[54]:
# top words
pd.Series(' '.join(df.lyrics).lower().split()).value_counts()[:20][::-1].plot(kind='barh')
# In[55]:
# top long words
pd.Series([w for w in ' '.join(df.lyrics).lower().split() if len(w) > 7]).value_counts()[:20][::-1].plot(kind='barh')
# In[56]:
from nltk import ngrams
# In[57]:
def get_ngrams_from_series(series, n=2):
# using nltk.ngrams
lines = ' '.join(series).lower().split('\n')
lgrams = [ngrams(l.split(), n) for l in lines]
grams = [[' '.join(g) for g in list(lg)] for lg in lgrams]
return [item for sublist in grams for item in sublist]
# In[58]:
# Top bi-grams
pd.Series(get_ngrams_from_series(df.lyrics, 2)).value_counts()[:20][::-1].plot(kind='barh')
# In[59]:
# Top tri-grams
pd.Series(get_ngrams_from_series(df.lyrics, 3)).value_counts()[:20][::-1].plot(kind='barh')
# In[60]:
# Top four-grams
pd.Series(get_ngrams_from_series(df.lyrics, 4)).value_counts()[:20][::-1].plot(kind='barh')
# In[67]:
# sentiment
import nltk
from nltk import sentiment
nltk.download('vader_lexicon')
# In[68]:
senti_analyze = sentiment.vader.SentimentIntensityAnalyzer()
senti_analyze.polarity_scores(df.lyrics[0])
# In[69]:
df['sentiment_score'] = pd.DataFrame(df.lyrics.apply(senti_analyze.polarity_scores).tolist())['compound']
df['sentiment'] = pd.cut(df['sentiment_score'], [-np.inf, -0.35, 0.35, np.inf],
labels=['negative', 'neutral', 'positive'])
# In[70]:
# generating via markov chain
# Machine-generated poetry using a Markov chain
import re
import random
from collections import defaultdict
class GeneratePoetry:
def __init__(self, corpus='', order=2, length=8):
self.order = order
self.length = length
self.words = re.findall("[a-z']+", corpus.lower())
self.states = defaultdict(list)
for i in range(len(self.words) - self.order):
self.states[tuple(self.words[i:i + self.order])].append(self.words[i + order])
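        # After this loop, self.states maps every `order`-word window from the
        # corpus to the list of words observed to follow it; gen_sentence()
        # samples from those lists to extend a seed window word by word.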
def gen_sentence(self, length=8, startswith=None):
terms = None
if startswith:
start_seed = [x for x in self.states.keys() if startswith in x]
if start_seed:
terms = list(start_seed[0])
if terms is None:
start_seed = random.randint(0, len(self.words) - self.order)
terms = self.words[start_seed:start_seed + self.order]
for _ in range(length):
terms.append(random.choice(self.states[tuple(terms[-self.order:])]))
return ' '.join(terms)
def gen_song(self, lines=10, length=8, length_range=None, startswith=None):
song = []
if startswith:
song.append(self.gen_sentence(length=length, startswith=startswith))
lines -= 1
for _ in range(lines):
sent_len = random.randint(*length_range) if length_range else length
song.append(self.gen_sentence(length=sent_len))
return '\n'.join(song)
# In[71]:
poetrygeneration = GeneratePoetry(corpus=' '.join(df.lyrics))
# In[73]:
poetrygeneration.gen_song(lines=10, length_range=[5, 10], startswith='bird')
# In[ ]:
| 25
| 117
| 0.648786
|
ab0ffe46af397d359ef1163e7015786500dc42d3
| 2,357
|
py
|
Python
|
SpiderForCollege/spiders/city.py
|
Rafael-Luo/spider-college
|
20869fbd916db199273bbf84310593c5a593d2cb
|
[
"MIT"
] | 2
|
2021-03-17T16:08:19.000Z
|
2021-05-23T11:12:02.000Z
|
SpiderForCollege/spiders/city.py
|
Rafael-Luo/spider-college
|
20869fbd916db199273bbf84310593c5a593d2cb
|
[
"MIT"
] | null | null | null |
SpiderForCollege/spiders/city.py
|
Rafael-Luo/spider-college
|
20869fbd916db199273bbf84310593c5a593d2cb
|
[
"MIT"
] | 2
|
2020-05-18T18:32:33.000Z
|
2021-06-26T07:37:14.000Z
|
# Fetch the provinces and their links
import re
from multiprocessing.dummy import Pool
import requests
from bs4 import BeautifulSoup
from SpiderForCollege.db.DBUtil import DBUtil
pro_link = []
def get_provice(url):
web_data = requests.get(url, headers=header)
soup = BeautifulSoup(web_data.content, 'lxml')
provice_link = soup.select('.area_box > a')
for link in provice_link:
href = link['href']
provice = link.select('span')[0].text
data = {
'href': href,
'provice': provice
}
        # provice_href.insert_one(data)  # store into the database
pro_link.append(href)
# Fetch the admission score lines
def get_score(url):
web_data = requests.get(url, headers=header)
soup = BeautifulSoup(web_data.content, 'lxml')
    # Get the province information
provice = soup.select('.col-nav span')[0].text[0:-5]
    # Get the arts/science track categories
categories = soup.select('h3.ft14')
category_list = []
for item in categories:
        category_list.append(item.text.strip().replace(' ', ''))  # strip spaces
    # Get the scores
tables = soup.select('h3 ~ table')
for index, table in enumerate(tables):
        tr = table.find_all('tr', attrs={'class': re.compile('^c_\S*')})  # match row classes with a regex
for j in tr:
td = j.select('td')
score_list = []
for k in td:
                # Get the score for each year
if 'class' not in k.attrs:
score = k.text.strip()
score_list.append(score)
                # Get the score-line category
elif 'class' in k.attrs:
score_line = k.text.strip()
score_data = (
                provice.strip(),  # province
                category_list[index],  # arts/science category
                score_line,  # score-line category
                str(score_list)  # list of scores
)
sql = """INSERT INTO score(city, category,score_line, score_list)
VALUES (%s,%s,%s,%s)"""
db = DBUtil()
db.insert(sql, score_data)
            # score_detail.insert_one(score_data)  # insert into the database
if __name__ == '__main__':
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
'Connection': 'keep - alive'
}
url = 'http://www.gaokao.com/guangdong/fsx/'
get_provice(url)
pool = Pool()
    pool.map(get_score, [i for i in pro_link])  # use multiple threads (multiprocessing.dummy provides a thread pool)
| 31.851351
| 103
| 0.540518
|
6d4d41bca05d724ee8cca6308cd1327e3fb11464
| 106,236
|
py
|
Python
|
tests/exchange/test_IbetExchange.py
|
BoostryJP/ibet-SmartContract
|
dc3f73a708ef145e7200ce58fce4e8171e21d3c2
|
[
"Apache-2.0"
] | 10
|
2021-06-12T08:43:50.000Z
|
2022-02-17T14:24:48.000Z
|
tests/exchange/test_IbetExchange.py
|
BoostryJP/ibet-SmartContract
|
dc3f73a708ef145e7200ce58fce4e8171e21d3c2
|
[
"Apache-2.0"
] | 44
|
2021-04-11T06:43:10.000Z
|
2022-03-30T12:42:32.000Z
|
tests/exchange/test_IbetExchange.py
|
BoostryJP/ibet-SmartContract
|
dc3f73a708ef145e7200ce58fce4e8171e21d3c2
|
[
"Apache-2.0"
] | 1
|
2022-03-09T07:27:57.000Z
|
2022-03-09T07:27:57.000Z
|
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import brownie
from eth_utils import to_checksum_address
from brownie import IbetStandardToken
def init_args(tradable_exchange):
name = 'test_token'
symbol = 'MEM'
initial_supply = 2 ** 256 - 1
tradable_exchange = tradable_exchange
contact_information = 'some_contact_information'
privacy_policy = 'some_privacy_policy'
deploy_args = [
name,
symbol,
initial_supply,
tradable_exchange,
contact_information,
privacy_policy
]
return deploy_args
def deploy(users, deploy_args):
token = users['issuer'].deploy(
IbetStandardToken,
*deploy_args
)
return token
def deploy_share(users, deploy_args):
from brownie import IbetShare
token = users["issuer"].deploy(
IbetShare,
*deploy_args
)
return token
# TEST_deploy
class TestDeploy:
#######################################
# Normal
#######################################
# Normal_1
def test_normal_1(self, users, exchange,
exchange_storage, payment_gateway):
# assertion
owner = exchange.owner()
payment_gateway_address = exchange.paymentGatewayAddress()
storage_address = exchange.storageAddress()
assert owner == users['admin']
assert payment_gateway_address == to_checksum_address(payment_gateway.address)
assert storage_address == to_checksum_address(exchange_storage.address)
# TEST_tokenFallback
class TestTokenFallback:
#######################################
# Normal
#######################################
# Normal_1
def test_normal_1(self, users, exchange):
_issuer = users['issuer']
_value = 100
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to exchange contract
token.transfer.transact(
exchange.address,
_value,
{'from': _issuer}
)
# assertion
balance_token = token.balanceOf(_issuer)
balance_exchange = exchange.balanceOf(_issuer, token.address)
assert balance_token == deploy_args[2] - _value
assert balance_exchange == _value
# Normal_2
# Multiple deposit
def test_normal_2(self, users, exchange):
_issuer = users['issuer']
_value = 100
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to exchange contract (1)
token.transfer.transact(
exchange.address,
_value,
{'from': _issuer}
)
# transfer to exchange contract (2)
token.transfer.transact(
exchange.address,
_value,
{'from': _issuer}
)
# assertion
balance_token = token.balanceOf(_issuer)
balance_exchange = exchange.balanceOf(_issuer, token.address)
assert balance_token == deploy_args[2] - _value * 2
assert balance_exchange == _value * 2
# TEST_withdraw
class TestWithdraw:
#######################################
# Normal
#######################################
# Normal_1
def test_normal_1(self, users, exchange):
_issuer = users['issuer']
_value = 2 ** 256 - 1
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to exchange contract
token.transfer.transact(exchange.address, _value, {'from': _issuer})
# withdraw
tx = exchange.withdraw.transact(token.address, {'from': _issuer})
# assertion
balance_token = token.balanceOf(_issuer)
balance_exchange = exchange.balanceOf(_issuer, token.address)
assert balance_token == deploy_args[2]
assert balance_exchange == 0
assert tx.events["Withdrawn"]["token"] == token.address
assert tx.events["Withdrawn"]["account"] == _issuer
#######################################
# Error
#######################################
# Error_1
# The balance must be greater than zero.
def test_error_1(self, users, exchange):
_issuer = users['issuer']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# withdraw
with brownie.reverts(revert_msg="The balance must be greater than zero."):
exchange.withdraw.transact(
token.address,
{'from': _issuer}
)
# assertion
balance_token = token.balanceOf(_issuer)
balance_exchange = exchange.balanceOf(_issuer, token.address)
assert balance_token == deploy_args[2]
assert balance_exchange == 0
# Error_2
# Must be transferable.
def test_error_2(self, users, exchange):
_issuer = users['issuer']
_value = 2 ** 256 - 1
# issue token
deploy_args = [
'test_share',
'test_symbol',
2 ** 256 - 1,
2 ** 256 - 1,
2 ** 256 - 1,
'20200829',
'20200831',
'20191231',
2 ** 256 - 1
]
token = deploy_share(users, deploy_args)
# set to transferable
token.setTransferable(
True,
{'from': _issuer}
)
# set to tradable contract
token.setTradableExchange(
exchange.address,
{'from': _issuer}
)
# transfer to exchange contract
token.transfer(
exchange.address,
_value,
{'from': _issuer}
)
# set to not transferable
token.setTransferable(
False,
{'from': _issuer}
)
# withdraw
with brownie.reverts(revert_msg="Must be transferable."):
exchange.withdraw(
token.address,
{'from': _issuer}
)
# assertion
assert token.balanceOf(_issuer) == 0
assert token.balanceOf(exchange.address) == deploy_args[3]
assert exchange.balanceOf(_issuer, token.address) == deploy_args[3]
# TEST_createOrder
class TestCreateOrder:
#######################################
# Normal
#######################################
# Normal_1
# Make order: BUY
def test_normal_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make order: BUY
_amount = 2 ** 256 - 1
_price = 123
_isBuy = True
tx = exchange.createOrder.transact(
token.address,
_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# assertion
order_id = exchange.latestOrderId()
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_amount,
_price,
_isBuy,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert tx.events["NewOrder"]["tokenAddress"] == token.address
assert tx.events["NewOrder"]["orderId"] == order_id
assert tx.events["NewOrder"]["accountAddress"] == trader
assert tx.events["NewOrder"]["isBuy"] is True
assert tx.events["NewOrder"]["price"] == _price
assert tx.events["NewOrder"]["amount"] == _amount
assert tx.events["NewOrder"]["agentAddress"] == agent
# Normal_2
# Make order: SELL
def test_normal_2(self, users, exchange):
issuer = users['issuer']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make order: SELL
_amount = 2 ** 256 - 1
_price = 123
_isBuy = False
token.transfer.transact(
exchange.address,
_amount,
{'from': issuer}
)
tx = exchange.createOrder.transact(
token.address,
_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# assertion
order_id = exchange.latestOrderId()
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_amount,
_price,
_isBuy,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _amount
assert exchange.commitmentOf(issuer, token.address) == _amount
assert tx.events["NewOrder"]["tokenAddress"] == token.address
assert tx.events["NewOrder"]["orderId"] == order_id
assert tx.events["NewOrder"]["accountAddress"] == issuer
assert tx.events["NewOrder"]["isBuy"] is False
assert tx.events["NewOrder"]["price"] == _price
assert tx.events["NewOrder"]["amount"] == _amount
assert tx.events["NewOrder"]["agentAddress"] == agent
#######################################
# Error
#######################################
# Error_1_1
# Make order: BUY
# Amount must be greater than zero
def test_error_1_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make order: BUY
_amount = 0
_price = 123
_isBuy = True
order_id_before = exchange.latestOrderId()
with brownie.reverts():
exchange.createOrder.transact(
token.address,
_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# assertion
order_id_after = exchange.latestOrderId()
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert order_id_before == order_id_after
# Error_1_2
# Make order: BUY
# Status must be True
def test_error_1_2(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# change token status
token.setStatus.transact(False, {'from': issuer})
# make order: BUY
_amount = 100
_price = 123
_isBuy = True
order_id_before = exchange.latestOrderId()
with brownie.reverts():
exchange.createOrder.transact(
token.address,
_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# assertion
order_id_after = exchange.latestOrderId()
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert order_id_before == order_id_after
# Error_1_3
# Make order: BUY
# Agent must be valid
def test_error_1_3(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make order: BUY
_amount = 0
_price = 123
_isBuy = True
order_id_before = exchange.latestOrderId()
with brownie.reverts():
exchange.createOrder.transact(
token.address,
_amount,
_price,
_isBuy,
users['user1'], # invalid
{'from': trader}
)
# assertion
order_id_after = exchange.latestOrderId()
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert order_id_before == order_id_after
# Error_2_1
# Make order: SELL
# Amount must be greater than zero
def test_error_2_1(self, users, exchange):
issuer = users['issuer']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make order: SELL
_amount = 2 ** 256 - 1
_price = 123
_isBuy = False
token.transfer.transact(
exchange.address,
_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
0, # zero
_price,
_isBuy,
agent,
{'from': issuer}
)
# assertion
assert exchange.commitmentOf(issuer, token.address) == 0
assert token.balanceOf(issuer) == deploy_args[2]
# Error_2_2
# Make order: SELL
# Insufficient balance
def test_error_2_2(self, users, exchange):
issuer = users['issuer']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make order: SELL
_amount = 100
_price = 123
_isBuy = False
token.transfer.transact(
exchange.address,
_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
101, # greater than deposit amount
_price,
_isBuy,
agent,
{'from': issuer}
)
# assertion
assert exchange.commitmentOf(issuer, token.address) == 0
assert token.balanceOf(issuer) == deploy_args[2]
# Error_2_3
# Make order: SELL
# Status must be True
def test_error_2_3(self, users, exchange):
issuer = users['issuer']
agent = users['agent']
        # issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# change token status
token.setStatus.transact(False, {'from': issuer})
# transfer to contract -> make order: SELL
_amount = 100
_price = 123
_isBuy = False
token.transfer.transact(
exchange.address,
_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# assertion
assert exchange.commitmentOf(issuer, token.address) == 0
assert token.balanceOf(issuer) == deploy_args[2]
# Error_2_4
# Make order: SELL
# Agent must be valid
def test_error_2_4(self, users, exchange):
issuer = users['issuer']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make order: SELL
_amount = 100
_price = 123
_isBuy = False
token.transfer.transact(
exchange.address,
_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_amount,
_price,
_isBuy,
users['user1'], # invalid agent
{'from': issuer}
)
# assertion
assert exchange.commitmentOf(issuer, token.address) == 0
assert token.balanceOf(issuer) == deploy_args[2]
# Error_2_5
# Make order: SELL
# REVERT: Must be transferable.
def test_error_2_5(self, users, exchange):
issuer = users['issuer']
# issue token
deploy_args = [
'test_share',
'test_symbol',
2 ** 256 - 1,
2 ** 256 - 1,
2 ** 256 - 1,
'20200829',
'20200831',
'20191231',
2 ** 256 - 1
]
token = deploy_share(users, deploy_args)
token.setTransferable(
True,
{'from': issuer}
)
token.setTradableExchange(
exchange.address,
{'from': issuer}
)
# transfer to exchange contract
_amount = 2 ** 256 - 1
token.transfer(
exchange.address,
_amount,
{'from': issuer}
)
# set to not transferable
token.setTransferable(
False,
{'from': issuer}
)
# make sell order
_price = 123
_isBuy = False
with brownie.reverts(revert_msg="Must be transferable."):
exchange.createOrder(
token.address,
_amount,
_price,
_isBuy,
users['user1'], # invalid agent
{'from': issuer}
)
# assertion
assert token.balanceOf(issuer) == deploy_args[3] - _amount
assert token.balanceOf(exchange.address) == _amount
assert exchange.balanceOf(issuer, token.address) == _amount
assert exchange.commitmentOf(issuer, token.address) == 0
# TEST_cancelOrder
class TestCancelOrder:
#######################################
# Normal
#######################################
# Normal_1
# Cancel order: BUY
def test_normal_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make order: BUY
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# cancel order
order_id = exchange.latestOrderId()
tx = exchange.cancelOrder.transact(
order_id,
{'from': trader}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_amount,
_price,
_isBuy,
agent.address,
True
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert tx.events["CancelOrder"]["tokenAddress"] == token.address
assert tx.events["CancelOrder"]["orderId"] == order_id
assert tx.events["CancelOrder"]["accountAddress"] == trader
assert tx.events["CancelOrder"]["isBuy"] is True
assert tx.events["CancelOrder"]["price"] == _price
assert tx.events["CancelOrder"]["amount"] == _amount
assert tx.events["CancelOrder"]["agentAddress"] == agent
# Normal_2
# Cancel order: SELL
def test_normal_2(self, users, exchange):
issuer = users['issuer']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make order: SELL
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# cancel order
order_id = exchange.latestOrderId()
tx = exchange.cancelOrder.transact(
order_id,
{'from': issuer}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_amount,
_price,
_isBuy,
agent.address,
True
]
assert token.balanceOf(issuer) == deploy_args[2]
assert exchange.commitmentOf(issuer, token.address) == 0
assert tx.events["CancelOrder"]["tokenAddress"] == token.address
assert tx.events["CancelOrder"]["orderId"] == order_id
assert tx.events["CancelOrder"]["accountAddress"] == issuer
assert tx.events["CancelOrder"]["isBuy"] is False
assert tx.events["CancelOrder"]["price"] == _price
assert tx.events["CancelOrder"]["amount"] == _amount
assert tx.events["CancelOrder"]["agentAddress"] == agent
#######################################
# Error
#######################################
# Error_1
# REVERT: The orderId must be less than or equal to the latest order ID.
def test_error_1(self, users, exchange):
issuer = users['issuer']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make order: SELL
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# cancel order
latest_order_id = exchange.latestOrderId()
with brownie.reverts(revert_msg="The orderId must be less than or equal to the latest order ID."):
exchange.cancelOrder.transact(
latest_order_id + 1,
{'from': issuer}
)
# assertion
assert exchange.getOrder(latest_order_id) == [
issuer.address,
token.address,
_amount,
_price,
_isBuy,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _amount
assert exchange.commitmentOf(issuer, token.address) == _amount
# Error_2
# REVERT: The remaining amount of the original order must be greater than zero.
def test_error_2(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
exchange.createOrder.transact(
token.address,
_amount,
_price,
True,
agent,
{'from': trader}
)
# take SELL order by issuer
order_id = exchange.latestOrderId()
token.transfer.transact(
exchange.address,
_amount,
{'from': issuer}
)
exchange.executeOrder.transact(
order_id,
_amount,
False,
{'from': issuer}
)
# confirm agreement by agent
agreement_id = exchange.latestAgreementId(order_id)
exchange.confirmAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
assert exchange.getOrder(order_id)[2] == 0
# cancel order
with brownie.reverts(revert_msg="The remaining amount of the original order must be greater than zero."):
exchange.cancelOrder.transact(
order_id,
{'from': issuer}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
0,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _amount
assert token.balanceOf(trader) == _amount
# Error_3
# REVERT: The order to be cancelled must not have been cancelled.
def test_error_3(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
exchange.createOrder.transact(
token.address,
_amount,
_price,
True,
agent,
{'from': trader}
)
# cancel order (1)
order_id = exchange.latestOrderId()
exchange.cancelOrder.transact(order_id, {'from': trader})
# cancel order (2)
with brownie.reverts(revert_msg="The order to be cancelled must not have been cancelled."):
exchange.cancelOrder.transact(order_id, {'from': trader})
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_amount,
_price,
True,
agent.address,
True
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
# Error_4
# REVERT: msg.sender must be an orderer.
def test_error_4(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
exchange.createOrder.transact(
token.address,
_amount,
_price,
True,
agent,
{'from': trader}
)
# cancel order
order_id = exchange.latestOrderId()
with brownie.reverts(revert_msg="msg.sender must be an orderer."):
exchange.cancelOrder.transact(
order_id,
{'from': users['user1']}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_amount,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
# Error_5
# Cancel order: SELL
# REVERT: Must be transferable.
def test_error_5(self, users, exchange):
issuer = users['issuer']
agent = users['agent']
# issue token
deploy_args = [
'test_share',
'test_symbol',
2 ** 256 - 1,
2 ** 256 - 1,
2 ** 256 - 1,
'20200829',
'20200831',
'20191231',
2 ** 256 - 1
]
token = deploy_share(users, deploy_args)
token.setTransferable(
True,
{'from': issuer}
)
token.setTradableExchange(
exchange.address,
{'from': issuer}
)
# transfer to exchange contract
_amount = 2 ** 256 - 1
token.transfer(
exchange.address,
_amount,
{'from': issuer}
)
# make SELL order
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
exchange.createOrder(
token.address,
_amount,
_price,
False,
agent,
{'from': issuer}
)
# set to not transferable
token.setTransferable(
False,
{'from': issuer}
)
# cancel order
order_id = exchange.latestOrderId()
with brownie.reverts(revert_msg="Must be transferable."):
exchange.cancelOrder(
order_id,
{'from': issuer}
)
# assertion
assert token.balanceOf(issuer) == 0
assert token.balanceOf(exchange.address) == _amount
assert exchange.balanceOf(issuer, token.address) == 0
assert exchange.commitmentOf(issuer, token.address) == _amount
# TEST_forceCancelOrder
class TestForceCancelOrder:
#######################################
# Normal
#######################################
# Normal_1
# Force cancel order: BUY
def test_normal_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make order: BUY
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder(
token.address,
_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# force cancel order
order_id = exchange.latestOrderId()
tx = exchange.forceCancelOrder(
order_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_amount,
_price,
_isBuy,
agent.address,
True
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert tx.events["ForceCancelOrder"]["tokenAddress"] == token.address
assert tx.events["ForceCancelOrder"]["orderId"] == order_id
assert tx.events["ForceCancelOrder"]["accountAddress"] == trader
assert tx.events["ForceCancelOrder"]["isBuy"] is True
assert tx.events["ForceCancelOrder"]["price"] == _price
assert tx.events["ForceCancelOrder"]["amount"] == _amount
assert tx.events["ForceCancelOrder"]["agentAddress"] == agent
# Normal_2
# Force cancel order: SELL
def test_normal_2(self, users, exchange):
issuer = users['issuer']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make order: SELL
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer(
exchange.address,
_amount,
{'from': issuer}
)
exchange.createOrder(
token.address,
_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# force cancel order
order_id = exchange.latestOrderId()
tx = exchange.forceCancelOrder(
order_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_amount,
_price,
_isBuy,
agent.address,
True
]
assert token.balanceOf(issuer) == deploy_args[2]
assert exchange.commitmentOf(issuer, token.address) == 0
assert tx.events["ForceCancelOrder"]["tokenAddress"] == token.address
assert tx.events["ForceCancelOrder"]["orderId"] == order_id
assert tx.events["ForceCancelOrder"]["accountAddress"] == issuer
assert tx.events["ForceCancelOrder"]["isBuy"] is False
assert tx.events["ForceCancelOrder"]["price"] == _price
assert tx.events["ForceCancelOrder"]["amount"] == _amount
assert tx.events["ForceCancelOrder"]["agentAddress"] == agent
#######################################
# Error
#######################################
# Error_1
# REVERT: The orderId must be less than or equal to the latest order ID.
def test_error_1(self, users, exchange):
issuer = users['issuer']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make order: SELL
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer(
exchange.address,
_amount,
{'from': issuer}
)
exchange.createOrder(
token.address,
_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# cancel order
latest_order_id = exchange.latestOrderId()
with brownie.reverts(revert_msg="The orderId must be less than or equal to the latest order ID."):
exchange.forceCancelOrder(
latest_order_id + 1,
{'from': agent}
)
# assertion
assert exchange.getOrder(latest_order_id) == [
issuer.address,
token.address,
_amount,
_price,
_isBuy,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _amount
assert exchange.commitmentOf(issuer, token.address) == _amount
# Error_2
# REVERT: The remaining amount of the original order must be greater than zero.
def test_error_2(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
exchange.createOrder(
token.address,
_amount,
_price,
True,
agent,
{'from': trader}
)
# take SELL order by issuer
order_id = exchange.latestOrderId()
token.transfer(
exchange.address,
_amount,
{'from': issuer}
)
exchange.executeOrder(
order_id,
_amount,
False,
{'from': issuer}
)
# confirm agreement by agent
agreement_id = exchange.latestAgreementId(order_id)
exchange.confirmAgreement(
order_id,
agreement_id,
{'from': agent}
)
assert exchange.getOrder(order_id)[2] == 0
# cancel order
with brownie.reverts(revert_msg="The remaining amount of the original order must be greater than zero."):
exchange.forceCancelOrder(
order_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
0,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _amount
assert token.balanceOf(trader) == _amount
# Error_3
# REVERT: The order to be cancelled must not have been cancelled.
def test_error_3(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
exchange.createOrder(
token.address,
_amount,
_price,
True,
agent,
{'from': trader}
)
# cancel order (1)
order_id = exchange.latestOrderId()
exchange.cancelOrder(order_id, {'from': trader})
# cancel order (2)
with brownie.reverts(revert_msg="The order to be cancelled must not have been cancelled."):
exchange.forceCancelOrder(
order_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_amount,
_price,
True,
agent.address,
True
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
# Error_4
# REVERT: msg.sender must be an agent.
def test_error_4(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
exchange.createOrder(
token.address,
_amount,
_price,
True,
agent,
{'from': trader}
)
# cancel order
order_id = exchange.latestOrderId()
with brownie.reverts(revert_msg="msg.sender must be an agent."):
exchange.forceCancelOrder(
order_id,
{'from': trader}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_amount,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
# Error_5
    # Force cancel order: SELL
# REVERT: Must be transferable.
def test_error_5(self, users, exchange):
issuer = users['issuer']
agent = users['agent']
# issue token
deploy_args = [
'test_share',
'test_symbol',
2 ** 256 - 1,
2 ** 256 - 1,
2 ** 256 - 1,
'20200829',
'20200831',
'20191231',
2 ** 256 - 1
]
token = deploy_share(users, deploy_args)
token.setTransferable(
True,
{'from': issuer}
)
token.setTradableExchange(
exchange.address,
{'from': issuer}
)
# transfer to exchange contract
_amount = 2 ** 256 - 1
token.transfer(
exchange.address,
_amount,
{'from': issuer}
)
# make SELL order
_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
exchange.createOrder(
token.address,
_amount,
_price,
False,
agent,
{'from': issuer}
)
# set to not transferable
token.setTransferable(
False,
{'from': issuer}
)
# cancel order
order_id = exchange.latestOrderId()
with brownie.reverts(revert_msg="Must be transferable."):
exchange.forceCancelOrder(
order_id,
{'from': agent}
)
# assertion
assert token.balanceOf(issuer) == 0
assert token.balanceOf(exchange.address) == _amount
assert exchange.balanceOf(issuer, token.address) == 0
assert exchange.commitmentOf(issuer, token.address) == _amount
# TEST_executeOrder
class TestExecuteOrder:
#######################################
# Normal
#######################################
# Normal_1
# Take order: BUY
def test_normal_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount - _take_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _take_amount
agreement_id = exchange.latestAgreementId(order_id)
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader.address,
_take_amount,
_price,
False,
False
]
assert exchange.lastPrice(token.address) == 0
# Normal_2
# Take order: SELL
def test_normal_2(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# take SELL order by issuer
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
token.transfer.transact(
exchange.address,
_take_amount,
{'from': issuer}
)
exchange.executeOrder.transact(
order_id,
_take_amount,
False,
{'from': issuer}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_make_amount - _take_amount,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _take_amount
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _take_amount
agreement_id = exchange.latestAgreementId(order_id)
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
issuer,
_take_amount,
_price,
False,
False
]
assert exchange.lastPrice(token.address) == 0
#######################################
# Error
#######################################
# Error_1
# Order ID must be less than or equal to the latest order ID
def test_error_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make order: SELL
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
with brownie.reverts():
exchange.executeOrder.transact(
order_id + 1,
_take_amount,
True,
{'from': trader}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.lastPrice(token.address) == 0
# Error_2_1
# Take order: BUY
# Take amount must be greater than 0
def test_error_2_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make order: SELL
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
order_id = exchange.latestOrderId()
with brownie.reverts():
exchange.executeOrder.transact(
order_id,
0,
True,
{'from': trader}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.lastPrice(token.address) == 0
# Error_2_2
# Take order: BUY
# The BUY/SELL type must be different from the original order
def test_error_2_2(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
with brownie.reverts():
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.lastPrice(token.address) == 0
# Error_2_3
# Take order: BUY
    # The maker and the taker must be different
def test_error_2_3(self, users, exchange):
issuer = users['issuer']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
with brownie.reverts():
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': issuer}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.lastPrice(token.address) == 0
# Error_2_4
# Take order: BUY
# Orders that have already been canceled cannot be taken
def test_error_2_4(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# cancel order
order_id = exchange.latestOrderId()
exchange.cancelOrder.transact(order_id, {'from': issuer})
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
with brownie.reverts():
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
False,
agent.address,
True
]
assert token.balanceOf(issuer) == deploy_args[2]
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.lastPrice(token.address) == 0
# Error_2_5
# Take order: BUY
# Status must be True
def test_error_2_5(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# change token status
token.setStatus.transact(False, {'from': issuer})
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
with brownie.reverts():
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.lastPrice(token.address) == 0
# Error_2_6
# Take order: BUY
# The amount must be within the remaining amount of the make order
def test_error_2_6(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 100
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 101
order_id = exchange.latestOrderId()
with brownie.reverts():
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.lastPrice(token.address) == 0
# Error_3_1
# Take order: SELL
# Take amount must be greater than 0
def test_error_3_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# take SELL order by issuer
order_id = exchange.latestOrderId()
token.transfer.transact(
exchange.address,
2 ** 256 - 1,
{'from': issuer}
)
exchange.executeOrder.transact(
order_id,
0,
False,
{'from': issuer}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_make_amount,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.lastPrice(token.address) == 0
# Error_3_2
# Take order: SELL
# The BUY/SELL type must be different from the original order
def test_error_3_2(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make order: SELL
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take order: SELL
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
False,
{'from': trader}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.lastPrice(token.address) == 0
# Error_3_3
# Take order: SELL
    # The maker and the taker must be different
def test_error_3_3(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take SELL order by issuer
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
token.transfer.transact(
exchange.address,
_take_amount,
{'from': issuer}
)
exchange.executeOrder.transact(
order_id,
_take_amount,
False,
{'from': issuer}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.lastPrice(token.address) == 0
# Error_3_4
# Take order: SELL
# Orders that have already been canceled cannot be taken
def test_error_3_4(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# cancel order
order_id = exchange.latestOrderId()
exchange.cancelOrder.transact(
order_id,
{'from': trader}
)
# take SELL order by issuer
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
token.transfer.transact(
exchange.address,
_take_amount,
{'from': issuer}
)
exchange.executeOrder.transact(
order_id,
_take_amount,
False,
{'from': issuer}
)
# Assert: orderbook
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_make_amount,
_price,
True,
agent.address,
True
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.lastPrice(token.address) == 0
# Error_3_5
# Take order: SELL
# Status must be True
def test_error_3_5(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# change token status
token.setStatus.transact(False, {'from': issuer})
# take SELL order by issuer
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
token.transfer.transact(
exchange.address,
_take_amount,
{'from': issuer}
)
exchange.executeOrder.transact(
order_id,
_take_amount,
False,
{'from': issuer}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_make_amount,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.lastPrice(token.address) == 0
# Error_3_6
# Take order: SELL
    # The deposited balance must be at least the take amount
def test_error_3_6(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# take SELL order by issuer
_take_amount = 100
order_id = exchange.latestOrderId()
token.transfer.transact(
exchange.address,
_take_amount,
{'from': issuer}
)
exchange.executeOrder.transact(
order_id,
_take_amount + 1,
False,
{'from': issuer}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_make_amount,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.lastPrice(token.address) == 0
# Error_3_7
# Take order: SELL
# The amount must be within the remaining amount of the make order
def test_error_3_7(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_make_amount = 100
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# take SELL order by issuer
_take_amount = 101
order_id = exchange.latestOrderId()
token.transfer.transact(
exchange.address,
_take_amount,
{'from': issuer}
)
exchange.executeOrder.transact(
order_id,
_take_amount,
False,
{'from': issuer}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_make_amount,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.lastPrice(token.address) == 0
# Error_3_8
# Take order: SELL
# REVERT: Must be transferable.
def test_error_3_8(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = [
'test_share',
'test_symbol',
2 ** 256 - 1,
2 ** 256 - 1,
2 ** 256 - 1,
'20200829',
'20200831',
'20191231',
2 ** 256 - 1
]
token = deploy_share(users, deploy_args)
token.setTransferable(
True,
{'from': issuer}
)
token.setTradableExchange(
exchange.address,
{'from': issuer}
)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# transfer to exchange contract
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
token.transfer(
exchange.address,
_take_amount,
{'from': issuer}
)
# set to not transferable
token.setTransferable(
False,
{'from': issuer}
)
# take SELL
with brownie.reverts(revert_msg="Must be transferable."):
exchange.executeOrder.transact(
order_id,
_take_amount,
False,
{'from': trader} # invalid msg.sender
)
# assertion
assert token.balanceOf(issuer) == deploy_args[2] - _take_amount
assert token.balanceOf(trader) == 0
assert token.balanceOf(exchange.address) == _take_amount
assert exchange.balanceOf(issuer, token.address) == _take_amount
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.balanceOf(trader, token.address) == 0
assert exchange.commitmentOf(trader, token.address) == 0
# TEST_confirmAgreement
class TestConfirmAgreement:
#######################################
# Normal
#######################################
# Normal_1
# Take order: BUY
def test_normal_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# confirm agreement
agreement_id = exchange.latestAgreementId(order_id)
tx = exchange.confirmAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount - _take_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == _take_amount
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader,
_take_amount,
_price,
False,
True
]
assert exchange.lastPrice(token.address) == _price
assert tx.events["SettlementOK"]["tokenAddress"] == token.address
assert tx.events["SettlementOK"]["orderId"] == order_id
assert tx.events["SettlementOK"]["agreementId"] == agreement_id
assert tx.events["SettlementOK"]["buyAddress"] == trader.address
assert tx.events["SettlementOK"]["sellAddress"] == issuer.address
assert tx.events["SettlementOK"]["price"] == _price
assert tx.events["SettlementOK"]["amount"] == _take_amount
assert tx.events["SettlementOK"]["agentAddress"] == agent.address
assert tx.events["HolderChanged"]["token"] == token.address
assert tx.events["HolderChanged"]["from"] == issuer.address
assert tx.events["HolderChanged"]["to"] == trader.address
assert tx.events["HolderChanged"]["value"] == _take_amount
# Normal_2
# Take order: SELL
def test_normal_2(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# take SELL order by issuer
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
token.transfer.transact(
exchange.address,
_take_amount,
{'from': issuer}
)
exchange.executeOrder.transact(
order_id,
_take_amount,
False,
{'from': issuer}
)
# confirm agreement
agreement_id = exchange.latestAgreementId(order_id)
tx = exchange.confirmAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_make_amount - _take_amount,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _take_amount
assert token.balanceOf(trader) == _make_amount
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
issuer.address,
_take_amount,
_price,
False,
True
]
assert exchange.lastPrice(token.address) == _price
assert tx.events["SettlementOK"]["tokenAddress"] == token.address
assert tx.events["SettlementOK"]["orderId"] == order_id
assert tx.events["SettlementOK"]["agreementId"] == agreement_id
assert tx.events["SettlementOK"]["buyAddress"] == trader.address
assert tx.events["SettlementOK"]["sellAddress"] == issuer.address
assert tx.events["SettlementOK"]["price"] == _price
assert tx.events["SettlementOK"]["amount"] == _take_amount
assert tx.events["SettlementOK"]["agentAddress"] == agent.address
assert tx.events["HolderChanged"]["token"] == token.address
assert tx.events["HolderChanged"]["from"] == issuer.address
assert tx.events["HolderChanged"]["to"] == trader.address
assert tx.events["HolderChanged"]["value"] == _take_amount
#######################################
# Error
#######################################
# Error_1
# Order ID must be less than or equal to the latest order ID
def test_error_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# confirm agreement
agreement_id = exchange.latestAgreementId(order_id)
with brownie.reverts():
exchange.confirmAgreement.transact(
order_id + 1,
agreement_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount - _take_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader,
_take_amount,
_price,
False,
False
]
assert exchange.lastPrice(token.address) == 0
# Error_2
# Agreement ID must be less than or equal to the latest agreement ID
def test_error_2(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# confirm agreement
agreement_id = exchange.latestAgreementId(order_id)
with brownie.reverts():
exchange.confirmAgreement.transact(
order_id,
agreement_id + 1,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount - _take_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader,
_take_amount,
_price,
False,
False
]
assert exchange.lastPrice(token.address) == 0
# Error_3
    # If it has already been confirmed, it cannot be confirmed again
def test_error_3(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# confirm agreement (1)
agreement_id = exchange.latestAgreementId(order_id)
exchange.confirmAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# confirm agreement (2)
with brownie.reverts():
exchange.confirmAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount - _take_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == _take_amount
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader,
_take_amount,
_price,
False,
True
]
assert exchange.lastPrice(token.address) == _price
# Error_4
# If it has already been cancelled, it cannot be confirmed
def test_error_4(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# cancel agreement
agreement_id = exchange.latestAgreementId(order_id)
exchange.cancelAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# confirm agreement
with brownie.reverts():
exchange.confirmAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == 0
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader,
_take_amount,
_price,
True,
False
]
assert exchange.lastPrice(token.address) == 0
# Error_5
# The executor must be the agent specified in the make order
def test_error_5(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# confirm agreement
agreement_id = exchange.latestAgreementId(order_id)
with brownie.reverts():
exchange.confirmAgreement.transact(
order_id,
                agreement_id,
{'from': users['user1']}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount - _take_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader,
_take_amount,
_price,
False,
False
]
assert exchange.lastPrice(token.address) == 0
# Error_6
# Take order: SELL
# REVERT: Must be transferable.
def test_error_6(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = [
'test_share',
'test_symbol',
2 ** 256 - 1,
2 ** 256 - 1,
2 ** 256 - 1,
'20200829',
'20200831',
'20191231',
2 ** 256 - 1
]
token = deploy_share(users, deploy_args)
token.setTransferable(
True,
{'from': issuer}
)
token.setTradableExchange(
exchange.address,
{'from': issuer}
)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# take SELL order by issuer
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
token.transfer.transact(
exchange.address,
_take_amount,
{'from': issuer}
)
exchange.executeOrder.transact(
order_id,
_take_amount,
False,
{'from': issuer}
)
# set to not transferable
token.setTransferable(
False,
{'from': issuer}
)
# confirm agreement
agreement_id = exchange.latestAgreementId(order_id)
with brownie.reverts(revert_msg="Must be transferable."):
exchange.confirmAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# assertion
assert token.balanceOf(issuer) == deploy_args[3] - _take_amount
assert token.balanceOf(trader) == 0
assert token.balanceOf(exchange.address) == _take_amount
assert exchange.balanceOf(issuer, token.address) == 0
assert exchange.balanceOf(trader, token.address) == 0
assert exchange.commitmentOf(issuer, token.address) == _take_amount
assert exchange.commitmentOf(trader, token.address) == 0
# TEST_cancelAgreement
class TestCancelAgreement:
#######################################
# Normal
#######################################
# Normal_1
# Make SELL & Take BUY
def test_normal_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# cancel agreement
agreement_id = exchange.latestAgreementId(order_id)
tx = exchange.cancelAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == 0
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader,
_take_amount,
_price,
True,
False
]
assert exchange.lastPrice(token.address) == 0
assert tx.events["SettlementNG"]["tokenAddress"] == token.address
assert tx.events["SettlementNG"]["orderId"] == order_id
assert tx.events["SettlementNG"]["agreementId"] == agreement_id
assert tx.events["SettlementNG"]["buyAddress"] == trader.address
assert tx.events["SettlementNG"]["sellAddress"] == issuer.address
assert tx.events["SettlementNG"]["price"] == _price
assert tx.events["SettlementNG"]["amount"] == _take_amount
assert tx.events["SettlementNG"]["agentAddress"] == agent.address
# Normal_2
# Make BUY & Take SELL
def test_normal_2(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# take SELL order by issuer
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
token.transfer.transact(
exchange.address,
_take_amount,
{'from': issuer}
)
exchange.executeOrder.transact(
order_id,
_take_amount,
False,
{'from': issuer}
)
# cancel agreement
agreement_id = exchange.latestAgreementId(order_id)
tx = exchange.cancelAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
trader.address,
token.address,
_make_amount,
_price,
True,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2]
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
issuer.address,
_take_amount,
_price,
True,
False
]
assert exchange.lastPrice(token.address) == 0
assert tx.events["SettlementNG"]["tokenAddress"] == token.address
assert tx.events["SettlementNG"]["orderId"] == order_id
assert tx.events["SettlementNG"]["agreementId"] == agreement_id
assert tx.events["SettlementNG"]["buyAddress"] == trader.address
assert tx.events["SettlementNG"]["sellAddress"] == issuer.address
assert tx.events["SettlementNG"]["price"] == _price
assert tx.events["SettlementNG"]["amount"] == _take_amount
assert tx.events["SettlementNG"]["agentAddress"] == agent.address
#######################################
# Error
#######################################
# Error_1
# Order ID must be less than or equal to the latest order ID
def test_error_1(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# cancel agreement
agreement_id = exchange.latestAgreementId(order_id)
with brownie.reverts():
exchange.cancelAgreement.transact(
order_id + 1,
agreement_id,
{'from': agent}
)
# assert
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount - _take_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader.address,
_take_amount,
_price,
False,
False
]
assert exchange.lastPrice(token.address) == 0
# Error_2
# Agreement ID must be less than or equal to the latest agreement ID
def test_error_2(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# cancel agreement
agreement_id = exchange.latestAgreementId(order_id)
with brownie.reverts():
exchange.cancelAgreement.transact(
order_id,
agreement_id + 1,
{'from': agent}
)
# assert
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount - _take_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader.address,
_take_amount,
_price,
False,
False
]
assert exchange.lastPrice(token.address) == 0
# Error_3
    # If it has already been confirmed, it cannot be cancelled
def test_error_3(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# confirm agreement
agreement_id = exchange.latestAgreementId(order_id)
exchange.confirmAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# cancel agreement
with brownie.reverts():
            exchange.cancelAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount - _take_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == _take_amount
assert exchange.commitmentOf(issuer, token.address) == 0
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader,
_take_amount,
_price,
False,
True
]
assert exchange.lastPrice(token.address) == _price
# Error_4
    # If it has already been cancelled, it cannot be cancelled again
def test_error_4(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# cancel agreement (1)
agreement_id = exchange.latestAgreementId(order_id)
exchange.cancelAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# cancel agreement (2)
with brownie.reverts():
exchange.cancelAgreement.transact(
order_id,
agreement_id,
{'from': agent}
)
# assertion
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == 0
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader,
_take_amount,
_price,
True,
False
]
assert exchange.lastPrice(token.address) == 0
# Error_5
# The executor must be the agent specified in the make order
def test_error_5(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# make SELL order by issuer
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# take BUY order by trader
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
exchange.executeOrder.transact(
order_id,
_take_amount,
True,
{'from': trader}
)
# cancel agreement
agreement_id = exchange.latestAgreementId(order_id)
with brownie.reverts():
exchange.cancelAgreement.transact(
order_id,
agreement_id,
{'from': users['user1']}
)
# assert
assert exchange.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount - _take_amount,
_price,
False,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert token.balanceOf(trader) == 0
assert exchange.commitmentOf(issuer, token.address) == _make_amount
assert exchange.getAgreement(order_id, agreement_id)[0:5] == [
trader.address,
_take_amount,
_price,
False,
False
]
assert exchange.lastPrice(token.address) == 0
# Error_6
# Make BUY & Take SELL
# REVERT: Must be transferable.
def test_error_6(self, users, exchange):
issuer = users['issuer']
trader = users['trader']
agent = users['agent']
# issue token
deploy_args = [
'test_share',
'test_symbol',
2 ** 256 - 1,
2 ** 256 - 1,
2 ** 256 - 1,
'20200829',
'20200831',
'20191231',
2 ** 256 - 1
]
token = deploy_share(users, deploy_args)
token.setTransferable(
True,
{'from': issuer}
)
token.setTradableExchange(
exchange.address,
{'from': issuer}
)
# make BUY order by trader
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = True
exchange.createOrder(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': trader}
)
# take SELL order by issuer
_take_amount = 2 ** 256 - 1
order_id = exchange.latestOrderId()
token.transfer(
exchange.address,
_take_amount,
{'from': issuer}
)
exchange.executeOrder(
order_id,
_take_amount,
False,
{'from': issuer}
)
# set to not transferable
token.setTransferable(
False,
{'from': issuer}
)
# cancel agreement
agreement_id = exchange.latestAgreementId(order_id)
with brownie.reverts(revert_msg="Must be transferable."):
exchange.cancelAgreement(
order_id,
agreement_id,
{'from': agent}
)
# assertion
assert token.balanceOf(issuer) == deploy_args[3] - _take_amount
assert token.balanceOf(trader) == 0
assert token.balanceOf(exchange.address) == _take_amount
assert exchange.balanceOf(issuer, token.address) == 0
assert exchange.balanceOf(trader, token.address) == 0
assert exchange.commitmentOf(issuer, token.address) == _take_amount
assert exchange.commitmentOf(trader, token.address) == 0
# TEST_updateExchange
class TestUpdateExchange:
#######################################
# Normal
#######################################
# Normal_1
def test_normal_1(self, users,
exchange, exchange_storage, payment_gateway,
IbetExchange):
issuer = users['issuer']
agent = users['agent']
admin = users['admin']
# issue token
deploy_args = init_args(exchange.address)
token = deploy(users, deploy_args)
# transfer to contract -> make SELL order
_make_amount = 2 ** 256 - 1
_price = 2 ** 256 - 1
_isBuy = False
token.transfer.transact(
exchange.address,
_make_amount,
{'from': issuer}
)
exchange.createOrder.transact(
token.address,
_make_amount,
_price,
_isBuy,
agent,
{'from': issuer}
)
# deploy new exchange contract
exchange_new = admin.deploy(
IbetExchange,
payment_gateway.address,
exchange_storage.address
)
exchange_storage.upgradeVersion.transact(
exchange_new.address,
{'from': admin}
)
# assertion
order_id = exchange_new.latestOrderId()
assert exchange_new.getOrder(order_id) == [
issuer.address,
token.address,
_make_amount,
_price,
_isBuy,
agent.address,
False
]
assert token.balanceOf(issuer) == deploy_args[2] - _make_amount
assert exchange_new.balanceOf(issuer, token.address) == 0
assert exchange_new.commitmentOf(issuer, token.address) == _make_amount
| 27.949487 | 113 | 0.52021 |
b6c6cb7d24d753390113f278f3d9a4eb0a34afed | 150 | py | Python | Grundgeruest/urls.py | wmles/olymp | 97b1a256982c2a75c39ba3a855b63a147d4409c5 | ["MIT"] | null | null | null | Grundgeruest/urls.py | wmles/olymp | 97b1a256982c2a75c39ba3a855b63a147d4409c5 | ["MIT"] | null | null | null | Grundgeruest/urls.py | wmles/olymp | 97b1a256982c2a75c39ba3a855b63a147d4409c5 | ["MIT"] | null | null | null |
from django.conf.urls import url
from . import views
app_name = 'Grundgeruest'
urlpatterns = [
url(r'^$', views.index, name='index'),
]
| 16.666667 | 43 | 0.64 |
f202ce2a09fb4c8f112002ad0407840ebdc2baab | 139,734 | py | Python | emcc.py | yishengjiang99/emscripten | add7cf2f0b76abb20460f0742c5402ca9b46f8d8 | ["MIT"] | null | null | null | emcc.py | yishengjiang99/emscripten | add7cf2f0b76abb20460f0742c5402ca9b46f8d8 | ["MIT"] | null | null | null | emcc.py | yishengjiang99/emscripten | add7cf2f0b76abb20460f0742c5402ca9b46f8d8 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright 2011 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""emcc - compiler helper script
=============================
emcc is a drop-in replacement for a compiler like gcc or clang.
See emcc --help for details.
emcc can be influenced by a few environment variables:
EMCC_DEBUG - "1" will log out useful information during compilation, as well as
save each compiler step as an emcc-* file in the temp dir
(by default /tmp/emscripten_temp). "2" will save additional emcc-*
steps, that would normally not be separately produced (so this
slows down compilation).
EMMAKEN_NO_SDK - Will tell emcc *not* to use the emscripten headers. Instead
your system headers will be used.
"""
import json
import logging
import os
import re
import shlex
import stat
import sys
import time
import base64
from enum import Enum
from subprocess import PIPE
import emscripten
from tools import shared, system_libs
from tools import colored_logger, diagnostics, building
from tools.shared import unsuffixed, unsuffixed_basename, WINDOWS, safe_move, safe_copy
from tools.shared import run_process, asbytes, read_and_preprocess, exit_with_error, DEBUG
from tools.shared import do_replace
from tools.response_file import substitute_response_files
from tools.minimal_runtime_shell import generate_minimal_runtime_html
import tools.line_endings
from tools.toolchain_profiler import ToolchainProfiler
from tools import js_manipulation
from tools import wasm2c
from tools import webassembly
from tools import config
if __name__ == '__main__':
ToolchainProfiler.record_process_start()
try:
from urllib.parse import quote
except ImportError:
# Python 2 compatibility
from urllib import quote
logger = logging.getLogger('emcc')
# endings = dot + a suffix, safe to test by filename.endswith(endings)
C_ENDINGS = ('.c', '.i')
CXX_ENDINGS = ('.cpp', '.cxx', '.cc', '.c++', '.CPP', '.CXX', '.C', '.CC', '.C++', '.ii')
OBJC_ENDINGS = ('.m', '.mi')
OBJCXX_ENDINGS = ('.mm', '.mii')
ASSEMBLY_CPP_ENDINGS = ('.S',)
SPECIAL_ENDINGLESS_FILENAMES = (os.devnull,)
SOURCE_ENDINGS = C_ENDINGS + CXX_ENDINGS + OBJC_ENDINGS + OBJCXX_ENDINGS + SPECIAL_ENDINGLESS_FILENAMES + ASSEMBLY_CPP_ENDINGS
C_ENDINGS = C_ENDINGS + SPECIAL_ENDINGLESS_FILENAMES # consider the special endingless filenames like /dev/null to be C
EXECUTABLE_ENDINGS = ('.wasm', '.html', '.js', '.mjs', '.out', '')
DYNAMICLIB_ENDINGS = ('.dylib', '.so') # Windows .dll suffix is not included in this list, since those are never linked to directly on the command line.
STATICLIB_ENDINGS = ('.a',)
ASSEMBLY_ENDINGS = ('.ll', '.s')
HEADER_ENDINGS = ('.h', '.hxx', '.hpp', '.hh', '.H', '.HXX', '.HPP', '.HH')
# Supported LLD flags which we will pass through to the linker.
SUPPORTED_LINKER_FLAGS = (
'--start-group', '--end-group',
'-(', '-)',
'--whole-archive', '--no-whole-archive',
'-whole-archive', '-no-whole-archive'
)
# Unsupported LLD flags which we will ignore.
# Maps to true if the flag takes an argument.
UNSUPPORTED_LLD_FLAGS = {
  # macOS-specific linker flag that libtool (ltmain.sh) will add if macOS is detected.
'-bind_at_load': False,
'-M': False,
# wasm-ld doesn't support soname or other dynamic linking flags (yet). Ignore them
# in order to aid build systems that want to pass these flags.
'-soname': True,
'-allow-shlib-undefined': False,
'-rpath': True,
'-rpath-link': True,
'-version-script': True,
}
LIB_PREFIXES = ('', 'lib')
DEFAULT_ASYNCIFY_IMPORTS = [
'emscripten_sleep', 'emscripten_wget', 'emscripten_wget_data', 'emscripten_idb_load',
'emscripten_idb_store', 'emscripten_idb_delete', 'emscripten_idb_exists',
'emscripten_idb_load_blob', 'emscripten_idb_store_blob', 'SDL_Delay',
'emscripten_scan_registers', 'emscripten_lazy_load_code',
'emscripten_fiber_swap',
'wasi_snapshot_preview1.fd_sync', '__wasi_fd_sync', '_emval_await']
# Mapping of emcc opt levels to llvm opt levels. We use llvm opt level 3 in emcc
# opt levels 2 and 3 (emcc 3 is unsafe opts, so unsuitable for the only level to
# get llvm opt level 3, and speed-wise emcc level 2 is already the slowest/most
# optimizing level)
LLVM_OPT_LEVEL = {
0: ['-O0'],
1: ['-O1'],
2: ['-O3'],
3: ['-O3'],
}
# Target options
final_js = None
UBSAN_SANITIZERS = {
'alignment',
'bool',
'builtin',
'bounds',
'enum',
'float-cast-overflow',
'float-divide-by-zero',
'function',
'implicit-unsigned-integer-truncation',
'implicit-signed-integer-truncation',
'implicit-integer-sign-change',
'integer-divide-by-zero',
'nonnull-attribute',
'null',
'nullability-arg',
'nullability-assign',
'nullability-return',
'object-size',
'pointer-overflow',
'return',
'returns-nonnull-attribute',
'shift',
'signed-integer-overflow',
'unreachable',
'unsigned-integer-overflow',
'vla-bound',
'vptr',
'undefined',
'undefined-trap',
'implicit-integer-truncation',
'implicit-integer-arithmetic-value-change',
'implicit-conversion',
'integer',
'nullability',
}
VALID_ENVIRONMENTS = ('web', 'webview', 'worker', 'node', 'shell')
# this function uses the global 'final_js' variable, which contains the current
# final output file. if a method alters final_js, and calls this method, then it
# must modify final_js globally (i.e. it can't receive final_js as a param and
# return it)
# TODO: refactor all this, a singleton that abstracts over the final output
# and saving of intermediates
def save_intermediate(name, suffix='js'):
if not DEBUG:
return
if not final_js:
logger.debug('(not saving intermediate %s because not generating JS)' % name)
return
building.save_intermediate(final_js, name + '.' + suffix)
def save_intermediate_with_wasm(name, wasm_binary):
if not DEBUG:
return
save_intermediate(name) # save the js
building.save_intermediate(wasm_binary, name + '.wasm')
class TimeLogger(object):
last = time.time()
@staticmethod
def update():
TimeLogger.last = time.time()
def log_time(name):
"""Log out times for emcc stages"""
if DEBUG:
now = time.time()
logger.debug('emcc step "%s" took %.2f seconds', name, now - TimeLogger.last)
TimeLogger.update()
def base64_encode(b):
b64 = base64.b64encode(b)
if type(b64) == bytes:
return b64.decode('ascii')
else:
return b64
class OFormat(Enum):
WASM = 1
JS = 2
MJS = 3
HTML = 4
BARE = 5
class EmccOptions(object):
def __init__(self):
self.output_file = None
self.post_link = False
self.executable = False
self.compiler_wrapper = None
self.oformat = None
self.requested_debug = ''
self.profiling = False
self.profiling_funcs = False
self.tracing = False
self.emit_symbol_map = False
self.llvm_opts = None
self.use_closure_compiler = None
self.closure_args = []
self.js_transform = None
self.pre_js = '' # before all js
self.post_js = '' # after all js
self.extern_pre_js = '' # before all js, external to optimized code
self.extern_post_js = '' # after all js, external to optimized code
self.preload_files = []
self.embed_files = []
self.exclude_files = []
self.ignore_dynamic_linking = False
self.shell_path = shared.path_from_root('src', 'shell.html')
self.source_map_base = ''
self.emrun = False
self.cpu_profiler = False
self.thread_profiler = False
self.memory_profiler = False
self.memory_init_file = None
self.use_preload_cache = False
self.use_preload_plugins = False
self.proxy_to_worker = False
self.default_object_extension = '.o'
self.valid_abspaths = []
self.cfi = False
# Specifies the line ending format to use for all generated text files.
# Defaults to using the native EOL on each platform (\r\n on Windows, \n on
# Linux & MacOS)
self.output_eol = os.linesep
self.no_entry = False
self.shared = False
self.relocatable = False
def will_metadce():
# The metadce JS parsing code does not currently support the JS that gets generated
# when assertions are enabled.
if shared.Settings.ASSERTIONS:
return False
return shared.Settings.OPT_LEVEL >= 3 or shared.Settings.SHRINK_LEVEL >= 1
def setup_environment_settings():
# Environment setting based on user input
environments = shared.Settings.ENVIRONMENT.split(',')
if any([x for x in environments if x not in VALID_ENVIRONMENTS]):
exit_with_error('Invalid environment specified in "ENVIRONMENT": ' + shared.Settings.ENVIRONMENT + '. Should be one of: ' + ','.join(VALID_ENVIRONMENTS))
shared.Settings.ENVIRONMENT_MAY_BE_WEB = not shared.Settings.ENVIRONMENT or 'web' in environments
shared.Settings.ENVIRONMENT_MAY_BE_WEBVIEW = not shared.Settings.ENVIRONMENT or 'webview' in environments
shared.Settings.ENVIRONMENT_MAY_BE_NODE = not shared.Settings.ENVIRONMENT or 'node' in environments
shared.Settings.ENVIRONMENT_MAY_BE_SHELL = not shared.Settings.ENVIRONMENT or 'shell' in environments
# The worker case also includes Node.js workers when pthreads are
# enabled and Node.js is one of the supported environments for the build to
# run on. Node.js workers are detected as a combination of
# ENVIRONMENT_IS_WORKER and ENVIRONMENT_IS_NODE.
shared.Settings.ENVIRONMENT_MAY_BE_WORKER = \
not shared.Settings.ENVIRONMENT or \
'worker' in environments or \
(shared.Settings.ENVIRONMENT_MAY_BE_NODE and shared.Settings.USE_PTHREADS)
if not shared.Settings.ENVIRONMENT_MAY_BE_WORKER and shared.Settings.PROXY_TO_WORKER:
exit_with_error('If you specify --proxy-to-worker and specify a "-s ENVIRONMENT=" directive, it must include "worker" as a target! (Try e.g. -s ENVIRONMENT=web,worker)')
if not shared.Settings.ENVIRONMENT_MAY_BE_WORKER and shared.Settings.USE_PTHREADS:
exit_with_error('When building with multithreading enabled and a "-s ENVIRONMENT=" directive is specified, it must include "worker" as a target! (Try e.g. -s ENVIRONMENT=web,worker)')
def minify_whitespace():
return shared.Settings.OPT_LEVEL >= 2 and shared.Settings.DEBUG_LEVEL == 0
def embed_memfile():
return (shared.Settings.SINGLE_FILE or
(shared.Settings.MEM_INIT_METHOD == 0 and
(not shared.Settings.MAIN_MODULE and
not shared.Settings.SIDE_MODULE and
not shared.Settings.GENERATE_SOURCE_MAP)))
def expand_byte_size_suffixes(value):
"""Given a string with KB/MB size suffixes, such as "32MB", computes how
many bytes that is and returns it as an integer.
"""
value = value.strip()
match = re.match(r'^(\d+)\s*([kmgt]?b)?$', value, re.I)
if not match:
exit_with_error("invalid byte size `%s`. Valid suffixes are: kb, mb, gb, tb" % value)
value, suffix = match.groups()
value = int(value)
if suffix:
size_suffixes = {suffix: 1024 ** i for i, suffix in enumerate(['b', 'kb', 'mb', 'gb', 'tb'])}
value *= size_suffixes[suffix.lower()]
return value
def apply_settings(changes):
"""Take a list of settings in form `NAME=VALUE` and apply them to the global
Settings object.
"""
def standardize_setting_change(key, value):
# boolean NO_X settings are aliases for X
# (note that *non*-boolean setting values have special meanings,
# and we can't just flip them, so leave them as-is to be
# handled in a special way later)
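    # For example (illustrative, assumed values): ('NO_EXIT_RUNTIME', '1') becomes
    # ('EXIT_RUNTIME', '0'), while ('NO_FILESYSTEM', '2') is left untouched since
    # '2' is not a plain boolean value.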
if key.startswith('NO_') and value in ('0', '1'):
key = key[3:]
value = str(1 - int(value))
return key, value
for change in changes:
key, value = change.split('=', 1)
key, value = standardize_setting_change(key, value)
if key in shared.Settings.internal_settings:
exit_with_error('%s is an internal setting and cannot be set from command line', key)
# map legacy settings which have aliases to the new names
# but keep the original key so errors are correctly reported via the `setattr` below
user_key = key
if key in shared.Settings.legacy_settings and key in shared.Settings.alt_names:
key = shared.Settings.alt_names[key]
# In those settings fields that represent amount of memory, translate suffixes to multiples of 1024.
if key in ('TOTAL_STACK', 'INITIAL_MEMORY', 'MEMORY_GROWTH_LINEAR_STEP', 'MEMORY_GROWTH_GEOMETRIC_STEP',
'GL_MAX_TEMP_BUFFER_SIZE', 'MAXIMUM_MEMORY', 'DEFAULT_PTHREAD_STACK_SIZE'):
value = str(expand_byte_size_suffixes(value))
if value and value[0] == '@':
filename = value[1:]
if not os.path.exists(filename):
exit_with_error('%s: file not found parsing argument: %s' % (filename, change))
value = open(filename).read()
else:
value = value.replace('\\', '\\\\')
try:
value = parse_value(value)
except Exception as e:
exit_with_error('a problem occurred in evaluating the content after a "-s", specifically "%s": %s', change, str(e))
# Do some basic type checking by comparing to the existing settings.
# Sadly we can't do this generically in the SettingsManager since there are settings
    # that do change types internally over time.
existing = getattr(shared.Settings, user_key, None)
if existing is not None:
# We only currently worry about lists vs non-lists.
if (type(existing) == list) != (type(value) == list):
exit_with_error('setting `%s` expects `%s` but got `%s`' % (user_key, type(existing), type(value)))
setattr(shared.Settings, user_key, value)
if key == 'EXPORTED_FUNCTIONS':
# used for warnings in emscripten.py
shared.Settings.USER_EXPORTED_FUNCTIONS = shared.Settings.EXPORTED_FUNCTIONS[:]
# TODO(sbc): Remove this legacy way.
if key == 'WASM_OBJECT_FILES':
shared.Settings.LTO = 0 if value else 'full'
def is_ar_file_with_missing_index(archive_file):
  # We parse the archive header ourselves because llvm-nm --print-armap is slower and less
# reliable.
# See: https://github.com/emscripten-core/emscripten/issues/10195
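  # Sketch of the ar layout this relies on: an archive begins with the 8-byte magic
  # "!<arch>\n", followed by 60-byte member headers; when a symbol index is present it
  # is the first member and its name field starts with "/".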
archive_header = b'!<arch>\n'
file_header_size = 60
with open(archive_file, 'rb') as f:
header = f.read(len(archive_header))
if header != archive_header:
# This is not even an ar file
return False
file_header = f.read(file_header_size)
if len(file_header) != file_header_size:
      # We don't have any file entries at all so we don't consider the index missing
return False
name = file_header[:16].strip()
# If '/' is the name of the first file we have an index
return name != b'/'
def ensure_archive_index(archive_file):
# Fastcomp linking works without archive indexes.
if not shared.Settings.AUTO_ARCHIVE_INDEXES:
return
if is_ar_file_with_missing_index(archive_file):
diagnostics.warning('emcc', '%s: archive is missing an index; Use emar when creating libraries to ensure an index is created', archive_file)
diagnostics.warning('emcc', '%s: adding index', archive_file)
run_process([shared.LLVM_RANLIB, archive_file])
def get_all_js_syms():
# Runs the js compiler to generate a list of all symbols available in the JS
  # libraries. This must be done separately for each linker invocation since the
# list of symbols depends on what settings are used.
# TODO(sbc): Find a way to optimize this. Potentially we could add a super-set
# mode of the js compiler that would generate a list of all possible symbols
# that could be checked in.
old_full = shared.Settings.INCLUDE_FULL_LIBRARY
try:
# Temporarily define INCLUDE_FULL_LIBRARY since we want a full list
# of all available JS library functions.
shared.Settings.INCLUDE_FULL_LIBRARY = True
shared.Settings.ONLY_CALC_JS_SYMBOLS = True
emscripten.generate_struct_info()
glue, forwarded_data = emscripten.compile_settings()
forwarded_json = json.loads(forwarded_data)
library_fns = forwarded_json['Functions']['libraryFunctions']
library_fns_list = []
for name in library_fns:
if shared.is_c_symbol(name):
name = shared.demangle_c_symbol_name(name)
library_fns_list.append(name)
finally:
shared.Settings.ONLY_CALC_JS_SYMBOLS = False
shared.Settings.INCLUDE_FULL_LIBRARY = old_full
return library_fns_list
def filter_link_flags(flags, using_lld):
def is_supported(f):
if using_lld:
for flag, takes_arg in UNSUPPORTED_LLD_FLAGS.items():
# lld allows various flags to have either a single -foo or double --foo
if f.startswith(flag) or f.startswith('-' + flag):
diagnostics.warning('linkflags', 'ignoring unsupported linker flag: `%s`', f)
return False, takes_arg
return True, False
else:
if f in SUPPORTED_LINKER_FLAGS:
return True, False
      # Silently ignore -l/-L flags when not using lld. When using lld, allow
      # them to pass through to the linker.
if f.startswith('-l') or f.startswith('-L'):
return False, False
diagnostics.warning('linkflags', 'ignoring unsupported linker flag: `%s`', f)
return False, False
results = []
skip_next = False
for f in flags:
if skip_next:
skip_next = False
continue
keep, skip_next = is_supported(f[1])
if keep:
results.append(f)
return results
def fix_windows_newlines(text):
# Avoid duplicating \r\n to \r\r\n when writing out text.
if WINDOWS:
text = text.replace('\r\n', '\n')
return text
def cxx_to_c_compiler(cxx):
# Convert C++ compiler name into C compiler name
dirname, basename = os.path.split(cxx)
basename = basename.replace('clang++', 'clang').replace('g++', 'gcc').replace('em++', 'emcc')
return os.path.join(dirname, basename)
def get_binaryen_passes():
# run the binaryen optimizer in -O2+. in -O0 we don't need it obviously, while
# in -O1 we don't run it as the LLVM optimizer has been run, and it does the
# great majority of the work; not running the binaryen optimizer in that case
# keeps -O1 mostly-optimized while compiling quickly and without rewriting
# DWARF etc.
run_binaryen_optimizer = shared.Settings.OPT_LEVEL >= 2
passes = []
# safe heap must run before post-emscripten, so post-emscripten can apply the sbrk ptr
if shared.Settings.SAFE_HEAP:
passes += ['--safe-heap']
if shared.Settings.MEMORY64 == 2:
passes += ['--memory64-lowering']
if run_binaryen_optimizer:
passes += ['--post-emscripten']
if not shared.Settings.EXIT_RUNTIME:
passes += ['--no-exit-runtime']
if run_binaryen_optimizer:
passes += [building.opt_level_to_str(shared.Settings.OPT_LEVEL, shared.Settings.SHRINK_LEVEL)]
elif shared.Settings.STANDALONE_WASM:
# even if not optimizing, make an effort to remove all unused imports and
# exports, to make the wasm as standalone as possible
passes += ['--remove-unused-module-elements']
# when optimizing, use the fact that low memory is never used (1024 is a
# hardcoded value in the binaryen pass)
if run_binaryen_optimizer and shared.Settings.GLOBAL_BASE >= 1024:
passes += ['--low-memory-unused']
if shared.Settings.AUTODEBUG:
# adding '--flatten' here may make these even more effective
passes += ['--instrument-locals']
passes += ['--log-execution']
passes += ['--instrument-memory']
if shared.Settings.LEGALIZE_JS_FFI:
# legalize it again now, as the instrumentation may need it
passes += ['--legalize-js-interface']
if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS:
# note that this pass must run before asyncify, as if it runs afterwards we only
# generate the byn$fpcast_emu functions after asyncify runs, and so we wouldn't
# be able to further process them.
passes += ['--fpcast-emu']
if shared.Settings.ASYNCIFY:
passes += ['--asyncify']
if shared.Settings.ASSERTIONS:
passes += ['--pass-arg=asyncify-asserts']
if shared.Settings.ASYNCIFY_ADVISE:
passes += ['--pass-arg=asyncify-verbose']
if shared.Settings.ASYNCIFY_IGNORE_INDIRECT:
passes += ['--pass-arg=asyncify-ignore-indirect']
passes += ['--pass-arg=asyncify-imports@%s' % ','.join(shared.Settings.ASYNCIFY_IMPORTS)]
# shell escaping can be confusing; try to emit useful warnings
def check_human_readable_list(items):
for item in items:
if item.count('(') != item.count(')'):
logger.warning('''emcc: ASYNCIFY list contains an item without balanced parentheses ("(", ")"):''')
logger.warning(''' ''' + item)
logger.warning('''This may indicate improper escaping that led to splitting inside your names.''')
logger.warning('''Try to quote the entire argument, like this: -s 'ASYNCIFY_ONLY=["foo(int, char)", "bar"]' ''')
break
if shared.Settings.ASYNCIFY_REMOVE:
check_human_readable_list(shared.Settings.ASYNCIFY_REMOVE)
passes += ['--pass-arg=asyncify-removelist@%s' % ','.join(shared.Settings.ASYNCIFY_REMOVE)]
if shared.Settings.ASYNCIFY_ADD:
check_human_readable_list(shared.Settings.ASYNCIFY_ADD)
passes += ['--pass-arg=asyncify-addlist@%s' % ','.join(shared.Settings.ASYNCIFY_ADD)]
if shared.Settings.ASYNCIFY_ONLY:
check_human_readable_list(shared.Settings.ASYNCIFY_ONLY)
passes += ['--pass-arg=asyncify-onlylist@%s' % ','.join(shared.Settings.ASYNCIFY_ONLY)]
if shared.Settings.BINARYEN_IGNORE_IMPLICIT_TRAPS:
passes += ['--ignore-implicit-traps']
# normally we can assume the memory, if imported, has not been modified
# beforehand (in fact, in most cases the memory is not even imported anyhow,
# but it is still safe to pass the flag), and is therefore filled with zeros.
# the one exception is dynamic linking of a side module: the main module is ok
# as it is loaded first, but the side module may be assigned memory that was
# previously used.
if run_binaryen_optimizer and not shared.Settings.SIDE_MODULE:
passes += ['--zero-filled-memory']
if shared.Settings.BINARYEN_EXTRA_PASSES:
# BINARYEN_EXTRA_PASSES is comma-separated, and we support both '-'-prefixed and
# unprefixed pass names
extras = shared.Settings.BINARYEN_EXTRA_PASSES.split(',')
passes += [('--' + p) if p[0] != '-' else p for p in extras if p]
return passes
def make_js_executable(script):
src = open(script).read()
cmd = shared.shlex_join(config.JS_ENGINE)
if not os.path.isabs(config.JS_ENGINE[0]):
# TODO: use whereis etc. And how about non-*NIX?
cmd = '/usr/bin/env -S ' + cmd
logger.debug('adding `#!` to JavaScript file: %s' % cmd)
# add shebang
with open(script, 'w') as f:
f.write('#!%s\n' % cmd)
f.write(src)
try:
os.chmod(script, stat.S_IMODE(os.stat(script).st_mode) | stat.S_IXUSR) # make executable
except OSError:
pass # can fail if e.g. writing the executable to /dev/null
def do_split_module(wasm_file):
os.rename(wasm_file, wasm_file + '.orig')
args = ['--instrument']
building.run_binaryen_command('wasm-split', wasm_file + '.orig', outfile=wasm_file, args=args)
def is_dash_s_for_emcc(args, i):
# -s OPT=VALUE or -s OPT or -sOPT are all interpreted as emscripten flags.
# -s by itself is a linker option (alias for --strip-all)
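  # Illustrative examples (assumed, not from the original comments):
  #   ['-s', 'ASSERTIONS=1'] at i=0 -> True ('-s' is followed by an uppercase identifier)
  #   ['-s', 'foo.c'] at i=0        -> False ('foo.c' is not an uppercase identifier)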
if args[i] == '-s':
if len(args) <= i + 1:
return False
arg = args[i + 1]
else:
arg = args[i][2:]
arg = arg.split('=')[0]
return arg.isidentifier() and arg.isupper()
def parse_s_args(args):
settings_changes = []
for i in range(len(args)):
if args[i].startswith('-s'):
if is_dash_s_for_emcc(args, i):
if args[i] == '-s':
key = args[i + 1]
args[i + 1] = ''
else:
key = args[i][2:]
args[i] = ''
        # If no '=' is specified, default to 1
if '=' not in key:
key += '=1'
# Special handling of browser version targets. A version -1 means that the specific version
# is not supported at all. Replace those with INT32_MAX to make it possible to compare e.g.
# #if MIN_FIREFOX_VERSION < 68
if re.match(r'MIN_.*_VERSION(=.*)?', key):
try:
if int(key.split('=')[1]) < 0:
key = key.split('=')[0] + '=0x7FFFFFFF'
except Exception:
pass
settings_changes.append(key)
newargs = [a for a in args if a]
return (settings_changes, newargs)
def calc_cflags(options):
# Flags we pass to the compiler when building C/C++ code
# We add these to the user's flags (newargs), but not when building .s or .S assembly files
cflags = []
if options.tracing:
cflags.append('-D__EMSCRIPTEN_TRACING__=1')
if shared.Settings.USE_PTHREADS:
cflags.append('-D__EMSCRIPTEN_PTHREADS__=1')
if not shared.Settings.STRICT:
# The preprocessor define EMSCRIPTEN is deprecated. Don't pass it to code
# in strict mode. Code should use the define __EMSCRIPTEN__ instead.
cflags.append('-DEMSCRIPTEN')
# if exception catching is disabled, we can prevent that code from being
# generated in the frontend
if shared.Settings.DISABLE_EXCEPTION_CATCHING == 1 and not shared.Settings.EXCEPTION_HANDLING:
cflags.append('-fignore-exceptions')
if shared.Settings.INLINING_LIMIT:
cflags.append('-fno-inline-functions')
if shared.Settings.RELOCATABLE:
cflags.append('-fPIC')
cflags.append('-fvisibility=default')
if shared.Settings.LTO:
cflags.append('-flto=' + shared.Settings.LTO)
else:
# With LTO mode these args get passed instead
# at link time when the backend runs.
for a in building.llvm_backend_args():
cflags += ['-mllvm', a]
return cflags
def get_file_suffix(filename):
"""Parses the essential suffix of a filename, discarding Unix-style version
numbers in the name. For example for 'libz.so.1.2.8' returns '.so'"""
if filename in SPECIAL_ENDINGLESS_FILENAMES:
return filename
while filename:
filename, suffix = os.path.splitext(filename)
if not suffix[1:].isdigit():
return suffix
return ''
def in_temp(name):
temp_dir = shared.get_emscripten_temp_dir()
return os.path.join(temp_dir, os.path.basename(name))
run_via_emxx = False
#
# Main run() function
#
def run(args):
target = None
# Additional compiler flags that we treat as if they were passed to us on the
# commandline
EMCC_CFLAGS = os.environ.get('EMCC_CFLAGS')
if DEBUG:
cmd = shared.shlex_join(args)
if EMCC_CFLAGS:
cmd += ' + ' + EMCC_CFLAGS
logger.warning('invocation: ' + cmd + ' (in ' + os.getcwd() + ')')
if EMCC_CFLAGS:
args.extend(shlex.split(EMCC_CFLAGS))
# Strip args[0] (program name)
args = args[1:]
misc_temp_files = shared.configuration.get_temp_files()
# Handle some global flags
# read response files very early on
try:
args = substitute_response_files(args)
except IOError as e:
exit_with_error(e)
if '--help' in args:
# Documentation for emcc and its options must be updated in:
# site/source/docs/tools_reference/emcc.rst
# This then gets built (via: `make -C site text`) to:
# site/build/text/docs/tools_reference/emcc.txt
# This then needs to be copied to its final home in docs/emcc.txt from where
    # we read it here. We have CI rules that ensure it's always up-to-date.
with open(shared.path_from_root('docs', 'emcc.txt'), 'r') as f:
print(f.read())
print('''
------------------------------------------------------------------
emcc: supported targets: llvm bitcode, WebAssembly, NOT elf
(autoconf likes to see elf above to enable shared object support)
''')
return 0
if '--version' in args:
# if the emscripten folder is not a git repo, don't run git show - that can
# look up and find the revision in a parent directory that is a git repo
revision = ''
if os.path.exists(shared.path_from_root('.git')):
revision = run_process(['git', 'rev-parse', 'HEAD'], stdout=PIPE, stderr=PIPE, cwd=shared.path_from_root()).stdout.strip()
elif os.path.exists(shared.path_from_root('emscripten-revision.txt')):
revision = open(shared.path_from_root('emscripten-revision.txt')).read().strip()
if revision:
revision = ' (%s)' % revision
print('''%s%s
Copyright (C) 2014 the Emscripten authors (see AUTHORS.txt)
This is free and open source software under the MIT license.
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
''' % (version_string(), revision))
return 0
if run_via_emxx:
clang = shared.CLANG_CXX
else:
clang = shared.CLANG_CC
if len(args) == 1 and args[0] == '-v': # -v with no inputs
# autoconf likes to see 'GNU' in the output to enable shared object support
print(version_string(), file=sys.stderr)
return shared.check_call([clang, '-v'] + shared.get_clang_flags(), check=False).returncode
if '-dumpmachine' in args:
print(shared.get_llvm_target())
return 0
if '-dumpversion' in args: # gcc's doc states "Print the compiler version [...] and don't do anything else."
print(shared.EMSCRIPTEN_VERSION)
return 0
if '--cflags' in args:
# fake running the command, to see the full args we pass to clang
args = [x for x in args if x != '--cflags']
with misc_temp_files.get_file(suffix='.o') as temp_target:
input_file = 'hello_world.c'
cmd = [shared.PYTHON, sys.argv[0], shared.path_from_root('tests', input_file), '-v', '-c', '-o', temp_target] + args
proc = run_process(cmd, stderr=PIPE, check=False)
if proc.returncode != 0:
print(proc.stderr)
exit_with_error('error getting cflags')
lines = [x for x in proc.stderr.splitlines() if clang in x and input_file in x]
parts = shlex.split(lines[0].replace('\\', '\\\\'))
parts = [x for x in parts if x not in ['-c', '-o', '-v', '-emit-llvm'] and input_file not in x and temp_target not in x]
print(shared.shlex_join(parts[1:]))
return 0
shared.check_sanity()
def get_language_mode(args):
return_next = False
for item in args:
if return_next:
return item
if item == '-x':
return_next = True
continue
if item.startswith('-x'):
return item[2:]
return ''
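  # Illustrative behaviour (assumed): both ['-x', 'c++'] and ['-xc++'] yield 'c++',
  # while argument lists without any -x flag yield ''.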
language_mode = get_language_mode(args)
EMMAKEN_CFLAGS = os.environ.get('EMMAKEN_CFLAGS')
if EMMAKEN_CFLAGS:
args += shlex.split(EMMAKEN_CFLAGS)
# ---------------- Utilities ---------------
seen_names = {}
def uniquename(name):
if name not in seen_names:
seen_names[name] = str(len(seen_names))
return unsuffixed(name) + '_' + seen_names[name] + shared.suffix(name)
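  # Illustrative example (assumed): the first call uniquename('foo.c') returns 'foo_0.c',
  # and a later uniquename('bar.c') returns 'bar_1.c'; repeated names reuse their suffix.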
# ---------------- End configs -------------
def optimizing(opts):
return '-O0' not in opts
def need_llvm_debug_info():
return shared.Settings.DEBUG_LEVEL >= 3
with ToolchainProfiler.profile_block('parse arguments and setup'):
## Parse args
newargs = list(args)
# Scan and strip emscripten specific cmdline warning flags.
# This needs to run before other cmdline flags have been parsed, so that
# warnings are properly printed during arg parse.
newargs = diagnostics.capture_warnings(newargs)
if not config.config_file:
diagnostics.warning('deprecated', 'Specifying EM_CONFIG as a python literal is deprecated. Please use a file instead.')
for i in range(len(newargs)):
if newargs[i] in ('-l', '-L', '-I'):
# Scan for individual -l/-L/-I arguments and concatenate the next arg on
# if there is no suffix
newargs[i] += newargs[i + 1]
newargs[i + 1] = ''
options, settings_changes, user_js_defines, newargs = parse_args(newargs)
if options.post_link or options.oformat == OFormat.BARE:
      diagnostics.warning('experimental', '--oformat=bare/--post-link are experimental and subject to change.')
if '-print-search-dirs' in newargs:
return run_process([clang, '-print-search-dirs'], check=False).returncode
if options.emrun:
options.pre_js += open(shared.path_from_root('src', 'emrun_prejs.js')).read() + '\n'
options.post_js += open(shared.path_from_root('src', 'emrun_postjs.js')).read() + '\n'
# emrun mode waits on program exit
shared.Settings.EXIT_RUNTIME = 1
if options.cpu_profiler:
options.post_js += open(shared.path_from_root('src', 'cpuprofiler.js')).read() + '\n'
if options.memory_profiler:
shared.Settings.MEMORYPROFILER = 1
if options.thread_profiler:
options.post_js += open(shared.path_from_root('src', 'threadprofiler.js')).read() + '\n'
if options.llvm_opts is None:
options.llvm_opts = LLVM_OPT_LEVEL[shared.Settings.OPT_LEVEL]
elif type(options.llvm_opts) == int:
options.llvm_opts = ['-O%d' % options.llvm_opts]
if options.memory_init_file is None:
options.memory_init_file = shared.Settings.OPT_LEVEL >= 2
# TODO: support source maps with js_transform
if options.js_transform and shared.Settings.GENERATE_SOURCE_MAP:
logger.warning('disabling source maps because a js transform is being done')
shared.Settings.DEBUG_LEVEL = 3
explicit_settings_changes, newargs = parse_s_args(newargs)
settings_changes += explicit_settings_changes
settings_key_changes = {}
for s in settings_changes:
key, value = s.split('=', 1)
settings_key_changes[key] = value
# Find input files
# These three arrays are used to store arguments of different types for
# type-specific processing. In order to shuffle the arguments back together
# after processing, all of these arrays hold tuples (original_index, value).
# Note that the index part of the tuple can have a fractional part for input
# arguments that expand into multiple processed arguments, as in -Wl,-f1,-f2.
input_files = []
libs = []
link_flags = []
has_header_inputs = False
lib_dirs = []
has_dash_c = '-c' in newargs
has_dash_S = '-S' in newargs
has_dash_E = '-E' in newargs
compile_only = has_dash_c or has_dash_S or has_dash_E
def add_link_flag(i, f):
if f.startswith('-l'):
libs.append((i, f[2:]))
if f.startswith('-L'):
lib_dirs.append(f[2:])
link_flags.append((i, f))
# find input files with a simple heuristic. we should really analyze
# based on a full understanding of gcc params, right now we just assume that
# what is left contains no more |-x OPT| things
skip = False
for i in range(len(newargs)):
if skip:
skip = False
continue
arg = newargs[i]
if arg in ('-MT', '-MF', '-MJ', '-MQ', '-D', '-U', '-o', '-x',
'-Xpreprocessor', '-include', '-imacros', '-idirafter',
'-iprefix', '-iwithprefix', '-iwithprefixbefore',
'-isysroot', '-imultilib', '-A', '-isystem', '-iquote',
'-install_name', '-compatibility_version',
'-current_version', '-I', '-L', '-include-pch',
'-Xlinker', '-Xclang'):
skip = True
if not arg.startswith('-'):
        # os.devnull should always be reported as existing but there is a bug in windows
# python before 3.8:
# https://bugs.python.org/issue1311
if not os.path.exists(arg) and arg != os.devnull:
exit_with_error('%s: No such file or directory ("%s" was expected to be an input file, based on the commandline arguments provided)', arg, arg)
file_suffix = get_file_suffix(arg)
if file_suffix in SOURCE_ENDINGS + DYNAMICLIB_ENDINGS + ASSEMBLY_ENDINGS + HEADER_ENDINGS or building.is_ar(arg):
# we already removed -o <target>, so all these should be inputs
newargs[i] = ''
if file_suffix in SOURCE_ENDINGS or (has_dash_c and file_suffix == '.bc'):
input_files.append((i, arg))
elif file_suffix in HEADER_ENDINGS:
input_files.append((i, arg))
has_header_inputs = True
elif file_suffix in ASSEMBLY_ENDINGS or building.is_bitcode(arg) or building.is_ar(arg):
input_files.append((i, arg))
elif building.is_wasm(arg):
input_files.append((i, arg))
elif file_suffix in (STATICLIB_ENDINGS + DYNAMICLIB_ENDINGS):
# if it's not, and it's a library, just add it to libs to find later
libname = unsuffixed_basename(arg)
for prefix in LIB_PREFIXES:
if not prefix:
continue
if libname.startswith(prefix):
libname = libname[len(prefix):]
break
libs.append((i, libname))
elif file_suffix in STATICLIB_ENDINGS:
if not building.is_ar(arg):
if building.is_bitcode(arg):
message = arg + ': File has a suffix of a static library ' + str(STATICLIB_ENDINGS) + ', but instead is an LLVM bitcode file! When linking LLVM bitcode files use .bc or .o.'
else:
message = arg + ': Unknown format, not a static library!'
exit_with_error(message)
else:
newargs[i] = ''
input_files.append((i, arg))
elif arg.startswith('-L'):
add_link_flag(i, arg)
newargs[i] = ''
elif arg.startswith('-l'):
add_link_flag(i, arg)
newargs[i] = ''
elif arg.startswith('-Wl,'):
# Multiple comma separated link flags can be specified. Create fake
# fractional indices for these: -Wl,a,b,c,d at index 4 becomes:
# (4, a), (4.25, b), (4.5, c), (4.75, d)
link_flags_to_add = arg.split(',')[1:]
for flag_index, flag in enumerate(link_flags_to_add):
add_link_flag(i + float(flag_index) / len(link_flags_to_add), flag)
newargs[i] = ''
elif arg == '-Xlinker':
add_link_flag(i + 1, newargs[i + 1])
newargs[i] = ''
newargs[i + 1] = ''
elif arg == '-s':
# -s and some other compiler flags are normally passed onto the linker
# TODO(sbc): Pass this and other flags through when using lld
# link_flags.append((i, arg))
newargs[i] = ''
elif arg == '-':
input_files.append((i, arg))
newargs[i] = ''
newargs = [a for a in newargs if a]
# Libraries are searched before settings_changes are applied, so apply the
# value for STRICT from command line already now.
strict_cmdline = settings_key_changes.get('STRICT')
if strict_cmdline:
shared.Settings.STRICT = int(strict_cmdline)
# Apply optimization level settings
shared.Settings.apply_opt_level(opt_level=shared.Settings.OPT_LEVEL, shrink_level=shared.Settings.SHRINK_LEVEL, noisy=True)
# For users that opt out of WARN_ON_UNDEFINED_SYMBOLS we assume they also
# want to opt out of ERROR_ON_UNDEFINED_SYMBOLS.
if 'WARN_ON_UNDEFINED_SYMBOLS=0' in settings_changes:
shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS = 0
if shared.Settings.MINIMAL_RUNTIME or 'MINIMAL_RUNTIME=1' in settings_changes or 'MINIMAL_RUNTIME=2' in settings_changes:
      # Remove the default exported functions 'malloc', 'free', etc.; those should only be linked in if used
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = []
# Apply -s settings in newargs here (after optimization levels, so they can override them)
apply_settings(settings_changes)
specified_target = options.output_file
if os.environ.get('EMMAKEN_JUST_CONFIGURE') or 'conftest.c' in args:
# configure tests want a more shell-like style, where we emit return codes on exit()
shared.Settings.EXIT_RUNTIME = 1
# use node.js raw filesystem access, to behave just like a native executable
shared.Settings.NODERAWFS = 1
# Add `#!` line to output JS and make it executable.
options.executable = True
# Autoconf expects the executable output file to be called `a.out`
default_target_name = 'a.out'
elif shared.Settings.SIDE_MODULE:
default_target_name = 'a.out.wasm'
else:
default_target_name = 'a.out.js'
# specified_target is the user-specified one, target is what we will generate
if specified_target:
target = specified_target
# check for the existence of the output directory now, to avoid having
# to do so repeatedly when each of the various output files (.mem, .wasm,
# etc) are written. This gives a more useful error message than the
# IOError and python backtrace that users would otherwise see.
dirname = os.path.dirname(target)
if dirname and not os.path.isdir(dirname):
exit_with_error("specified output file (%s) is in a directory that does not exist" % target)
else:
target = default_target_name
shared.Settings.TARGET_BASENAME = target_basename = unsuffixed_basename(target)
final_suffix = get_file_suffix(target)
if has_dash_c or has_dash_S or has_dash_E or '-M' in newargs or '-MM' in newargs:
if has_dash_c:
if '-emit-llvm' in newargs:
options.default_object_extension = '.bc'
elif has_dash_S:
if '-emit-llvm' in newargs:
options.default_object_extension = '.ll'
else:
options.default_object_extension = '.s'
elif '-M' in newargs or '-MM' in newargs:
        options.default_object_extension = '.mout' # not bitcode, not js; just the dependency rules of the input file
if specified_target:
if len(input_files) > 1:
exit_with_error('cannot specify -o with -c/-S/-E/-M and multiple source files')
else:
target = target_basename + options.default_object_extension
    # If no output format was specified we try to infer the format based on
    # the output filename extension.
if not options.oformat:
if shared.Settings.SIDE_MODULE or final_suffix == '.wasm':
options.oformat = OFormat.WASM
elif final_suffix == '.mjs':
options.oformat = OFormat.MJS
elif final_suffix == '.html':
options.oformat = OFormat.HTML
else:
options.oformat = OFormat.JS
if options.oformat == OFormat.MJS:
shared.Settings.EXPORT_ES6 = 1
shared.Settings.MODULARIZE = 1
if options.oformat in (OFormat.WASM, OFormat.BARE):
# If the user asks directly for a wasm file then this *is* the target
wasm_target = target
else:
# Otherwise the wasm file is produced alongside the final target.
wasm_target = unsuffixed(target) + '.wasm'
# Apply user -jsD settings
for s in user_js_defines:
shared.Settings.attrs[s[0]] = s[1]
shared.verify_settings()
if (options.oformat == OFormat.WASM or shared.Settings.PURE_WASI) and not shared.Settings.SIDE_MODULE:
# if the output is just a wasm file, it will normally be a standalone one,
# as there is no JS. an exception are side modules, as we can't tell at
# compile time whether JS will be involved or not - the main module may
# have JS, and the side module is expected to link against that.
# we also do not support standalone mode in fastcomp.
shared.Settings.STANDALONE_WASM = 1
if shared.Settings.LZ4:
shared.Settings.EXPORTED_RUNTIME_METHODS += ['LZ4']
if shared.Settings.WASM2C:
# wasm2c only makes sense with standalone wasm - there will be no JS,
# just wasm and then C
shared.Settings.STANDALONE_WASM = 1
# wasm2c doesn't need any special handling of i64, we have proper i64
# handling on the FFI boundary, which is exactly like the case of JS with
# BigInt support
shared.Settings.WASM_BIGINT = 1
if options.no_entry:
shared.Settings.EXPECT_MAIN = 0
elif shared.Settings.STANDALONE_WASM:
if '_main' in shared.Settings.EXPORTED_FUNCTIONS:
# TODO(sbc): Make this into a warning?
logger.debug('including `_main` in EXPORTED_FUNCTIONS is not necessary in standalone mode')
else:
# In normal non-standalone mode we have special handling of `_main` in EXPORTED_FUNCTIONS.
# 1. If the user specifies exports, but doesn't include `_main` we assume they want to build a
# reactor.
# 2. If the user doesn't export anything we default to exporting `_main` (unless `--no-entry`
# is specified (see above).
if 'EXPORTED_FUNCTIONS' in settings_key_changes:
if '_main' not in shared.Settings.USER_EXPORTED_FUNCTIONS:
shared.Settings.EXPECT_MAIN = 0
else:
assert not shared.Settings.EXPORTED_FUNCTIONS
shared.Settings.EXPORTED_FUNCTIONS = ['_main']
if shared.Settings.STANDALONE_WASM:
# In STANDALONE_WASM mode we either build a command or a reactor.
# See https://github.com/WebAssembly/WASI/blob/master/design/application-abi.md
# For a command we always want EXIT_RUNTIME=1
# For a reactor we always want EXIT_RUNTIME=0
if 'EXIT_RUNTIME' in settings_key_changes:
        exit_with_error('Explicitly setting EXIT_RUNTIME is not compatible with STANDALONE_WASM. EXIT_RUNTIME will always be True for programs (with a main function) and False for reactors (no main function).')
shared.Settings.EXIT_RUNTIME = shared.Settings.EXPECT_MAIN
def filter_out_dynamic_libs(inputs):
      # If not compiling to JS, then we are compiling to intermediate bitcode
      # objects or a library, so ignore dynamic linking, since multiple dynamic
# linkings can interfere with each other
if final_suffix not in EXECUTABLE_ENDINGS or options.ignore_dynamic_linking:
def check(input_file):
if get_file_suffix(input_file) in DYNAMICLIB_ENDINGS:
if not options.ignore_dynamic_linking:
diagnostics.warning('emcc', 'ignoring dynamic library %s because not compiling to JS or HTML, remember to link it when compiling to JS or HTML at the end', os.path.basename(input_file))
return False
else:
return True
return [f for f in inputs if check(f[1])]
return inputs
def filter_out_duplicate_dynamic_libs(inputs):
# Filter out duplicate shared libraries.
# See test_core.py:test_redundant_link
seen = set()
rtn = []
for i in inputs:
if get_file_suffix(i[1]) in DYNAMICLIB_ENDINGS and os.path.exists(i[1]):
abspath = os.path.abspath(i[1])
if abspath in seen:
continue
seen.add(abspath)
rtn.append(i)
return rtn
input_files = filter_out_dynamic_libs(input_files)
input_files = filter_out_duplicate_dynamic_libs(input_files)
if not input_files and not link_flags:
exit_with_error('no input files')
# Note the exports the user requested
building.user_requested_exports = shared.Settings.EXPORTED_FUNCTIONS[:]
def default_setting(name, new_default):
if name not in settings_key_changes:
setattr(shared.Settings, name, new_default)
# -s ASSERTIONS=1 implies basic stack overflow checks, and ASSERTIONS=2
# implies full stack overflow checks (unless the user specifically set
# something else)
if shared.Settings.ASSERTIONS:
default_setting('STACK_OVERFLOW_CHECK', max(shared.Settings.ASSERTIONS, shared.Settings.STACK_OVERFLOW_CHECK))
if shared.Settings.LLD_REPORT_UNDEFINED or shared.Settings.STANDALONE_WASM:
# Reporting undefined symbols at wasm-ld time requires us to know if we have a `main` function
# or not, as does standalone wasm mode.
# TODO(sbc): Remove this once this becomes the default
shared.Settings.IGNORE_MISSING_MAIN = 0
if shared.Settings.STRICT:
default_setting('STRICT_JS', 1)
default_setting('AUTO_JS_LIBRARIES', 0)
default_setting('AUTO_NATIVE_LIBRARIES', 0)
default_setting('AUTO_ARCHIVE_INDEXES', 0)
default_setting('IGNORE_MISSING_MAIN', 0)
default_setting('DEFAULT_TO_CXX', 0)
# If set to 1, we will run the autodebugger (the automatic debugging tool, see
# tools/autodebugger). Note that this will disable inclusion of libraries. This
# is useful because including dlmalloc makes it hard to compare native and js
# builds
if os.environ.get('EMCC_AUTODEBUG'):
shared.Settings.AUTODEBUG = 1
# Use settings
if shared.Settings.DEBUG_LEVEL > 1 and options.use_closure_compiler:
diagnostics.warning('emcc', 'disabling closure because debug info was requested')
options.use_closure_compiler = False
if shared.Settings.WASM == 2 and shared.Settings.SINGLE_FILE:
exit_with_error('cannot have both WASM=2 and SINGLE_FILE enabled at the same time')
if shared.Settings.SEPARATE_DWARF and shared.Settings.WASM2JS:
exit_with_error('cannot have both SEPARATE_DWARF and WASM2JS at the same time (as there is no wasm file)')
if shared.Settings.MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION and shared.Settings.MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION:
exit_with_error('MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION and MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION are mutually exclusive!')
if options.emrun:
if shared.Settings.MINIMAL_RUNTIME:
exit_with_error('--emrun is not compatible with -s MINIMAL_RUNTIME=1')
shared.Settings.EXPORTED_RUNTIME_METHODS.append('addOnExit')
if options.use_closure_compiler:
shared.Settings.USE_CLOSURE_COMPILER = options.use_closure_compiler
if shared.Settings.CLOSURE_WARNINGS not in ['quiet', 'warn', 'error']:
exit_with_error('Invalid option -s CLOSURE_WARNINGS=%s specified! Allowed values are "quiet", "warn" or "error".' % shared.Settings.CLOSURE_WARNINGS)
# Include dynCall() function by default in DYNCALLS builds in classic runtime; in MINIMAL_RUNTIME, must add this explicitly.
if shared.Settings.DYNCALLS and not shared.Settings.MINIMAL_RUNTIME:
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$dynCall']
if shared.Settings.MAIN_MODULE:
assert not shared.Settings.SIDE_MODULE
if shared.Settings.MAIN_MODULE == 1:
shared.Settings.INCLUDE_FULL_LIBRARY = 1
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$preloadDylibs']
elif shared.Settings.SIDE_MODULE:
assert not shared.Settings.MAIN_MODULE
# memory init file is not supported with side modules, must be executable synchronously (for dlopen)
options.memory_init_file = False
if shared.Settings.MAIN_MODULE or shared.Settings.SIDE_MODULE:
if shared.Settings.MAIN_MODULE == 1 or shared.Settings.SIDE_MODULE == 1:
shared.Settings.LINKABLE = 1
shared.Settings.EXPORT_ALL = 1
shared.Settings.RELOCATABLE = 1
if shared.Settings.RELOCATABLE:
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$reportUndefinedSymbols', '$relocateExports', '$GOTHandler']
if options.use_closure_compiler:
exit_with_error('cannot use closure compiler on shared modules')
if shared.Settings.MINIMAL_RUNTIME:
exit_with_error('MINIMAL_RUNTIME is not compatible with relocatable output')
if shared.Settings.WASM2JS:
exit_with_error('WASM2JS is not compatible with relocatable output')
# shared modules need memory utilities to allocate their memory
shared.Settings.EXPORTED_RUNTIME_METHODS += ['allocate']
shared.Settings.ALLOW_TABLE_GROWTH = 1
# various settings require sbrk() access
if shared.Settings.DETERMINISTIC or \
shared.Settings.EMSCRIPTEN_TRACING or \
shared.Settings.MALLOC == 'emmalloc' or \
shared.Settings.SAFE_HEAP or \
shared.Settings.MEMORYPROFILER:
shared.Settings.EXPORTED_FUNCTIONS += ['_sbrk']
if shared.Settings.MEMORYPROFILER:
shared.Settings.EXPORTED_FUNCTIONS += ['___heap_base',
'_emscripten_stack_get_base',
'_emscripten_stack_get_end',
'_emscripten_stack_get_current']
if shared.Settings.ASYNCIFY_LAZY_LOAD_CODE:
shared.Settings.ASYNCIFY = 1
if shared.Settings.ASYNCIFY:
# See: https://github.com/emscripten-core/emscripten/issues/12065
# See: https://github.com/emscripten-core/emscripten/issues/12066
shared.Settings.DYNCALLS = 1
shared.Settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_base',
'_emscripten_stack_get_end',
'_emscripten_stack_set_limits']
# SSEx is implemented on top of SIMD128 instruction set, but do not pass SSE flags to LLVM
# so it won't think about generating native x86 SSE code.
newargs = [x for x in newargs if x not in shared.SIMD_INTEL_FEATURE_TOWER and x not in shared.SIMD_NEON_FLAGS]
link_to_object = False
if options.shared or options.relocatable:
# Until we have a better story for actually producing runtime shared libraries
# we support a compatibility mode where shared libraries are actually just
# object files linked with `wasm-ld --relocatable` or `llvm-link` in the case
# of LTO.
if final_suffix in EXECUTABLE_ENDINGS:
diagnostics.warning('emcc', '-shared/-r used with executable output suffix. This behaviour is deprecated. Please remove -shared/-r to build an executable or avoid the executable suffix (%s) when building object files.' % final_suffix)
else:
if options.shared:
diagnostics.warning('emcc', 'linking a library with `-shared` will emit a static object file. This is a form of emulation to support existing build systems. If you want to build a runtime shared library use the SIDE_MODULE setting.')
link_to_object = True
if shared.Settings.STACK_OVERFLOW_CHECK:
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$abortStackOverflow']
shared.Settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_end', '_emscripten_stack_get_free']
if shared.Settings.RELOCATABLE:
shared.Settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_set_limits']
else:
shared.Settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_init']
if shared.Settings.STACK_OVERFLOW_CHECK == 2:
shared.Settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_base']
if shared.Settings.MODULARIZE:
assert not options.proxy_to_worker, '-s MODULARIZE=1 is not compatible with --proxy-to-worker (if you want to run in a worker with -s MODULARIZE=1, you likely want to do the worker side setup manually)'
# in MINIMAL_RUNTIME we may not need to emit the Promise code, as the
# HTML output creates a singleton instance, and it does so without the
# Promise. However, in Pthreads mode the Promise is used for worker
# creation.
if shared.Settings.MINIMAL_RUNTIME and options.oformat == OFormat.HTML and not shared.Settings.USE_PTHREADS:
shared.Settings.EXPORT_READY_PROMISE = 0
if shared.Settings.LEGACY_VM_SUPPORT:
if shared.Settings.WASM2JS:
shared.Settings.POLYFILL_OLD_MATH_FUNCTIONS = 1
# Support all old browser versions
shared.Settings.MIN_FIREFOX_VERSION = 0
shared.Settings.MIN_SAFARI_VERSION = 0
shared.Settings.MIN_IE_VERSION = 0
shared.Settings.MIN_EDGE_VERSION = 0
shared.Settings.MIN_CHROME_VERSION = 0
if shared.Settings.MIN_SAFARI_VERSION <= 9 and shared.Settings.WASM2JS:
shared.Settings.WORKAROUND_IOS_9_RIGHT_SHIFT_BUG = 1
if shared.Settings.MIN_CHROME_VERSION <= 37:
shared.Settings.WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG = 1
setup_environment_settings()
# Silently drop any individual backwards compatibility emulation flags that are known never to occur on browsers that support WebAssembly.
if not shared.Settings.WASM2JS:
shared.Settings.POLYFILL_OLD_MATH_FUNCTIONS = 0
shared.Settings.WORKAROUND_IOS_9_RIGHT_SHIFT_BUG = 0
shared.Settings.WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG = 0
if shared.Settings.STB_IMAGE and final_suffix in EXECUTABLE_ENDINGS:
input_files.append((len(newargs), shared.path_from_root('third_party', 'stb_image.c')))
shared.Settings.EXPORTED_FUNCTIONS += ['_stbi_load', '_stbi_load_from_memory', '_stbi_image_free']
      # stb_image 2.x needs to have STB_IMAGE_IMPLEMENTATION defined to include the implementation
# when compiling
newargs.append('-DSTB_IMAGE_IMPLEMENTATION')
if shared.Settings.USE_WEBGL2:
shared.Settings.MAX_WEBGL_VERSION = 2
if not shared.Settings.GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS and shared.Settings.GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS:
exit_with_error('-s GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=0 only makes sense with -s GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0!')
forced_stdlibs = []
if shared.Settings.ASMFS and final_suffix in EXECUTABLE_ENDINGS:
forced_stdlibs.append('libasmfs')
shared.Settings.FILESYSTEM = 0
shared.Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
shared.Settings.FETCH = 1
shared.Settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_asmfs.js')))
# Explicitly drop linking in a malloc implementation if program is not using any dynamic allocation calls.
if not shared.Settings.USES_DYNAMIC_ALLOC:
shared.Settings.MALLOC = 'none'
if shared.Settings.MALLOC == 'emmalloc':
shared.Settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_emmalloc.js')))
if shared.Settings.FETCH and final_suffix in EXECUTABLE_ENDINGS:
forced_stdlibs.append('libfetch')
shared.Settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_fetch.js')))
if shared.Settings.USE_PTHREADS:
shared.Settings.FETCH_WORKER_FILE = unsuffixed(os.path.basename(target)) + '.fetch.js'
if shared.Settings.DEMANGLE_SUPPORT:
shared.Settings.EXPORTED_FUNCTIONS += ['___cxa_demangle']
if shared.Settings.FULL_ES3:
shared.Settings.FULL_ES2 = 1
shared.Settings.MAX_WEBGL_VERSION = max(2, shared.Settings.MAX_WEBGL_VERSION)
if shared.Settings.EMBIND:
forced_stdlibs.append('libembind')
shared.Settings.EXPORTED_FUNCTIONS += ['_stackSave', '_stackRestore', '_stackAlloc']
if not shared.Settings.STANDALONE_WASM:
# in standalone mode, crt1 will call the constructors from inside the wasm
shared.Settings.EXPORTED_FUNCTIONS.append('___wasm_call_ctors')
if shared.Settings.RELOCATABLE and not shared.Settings.DYNAMIC_EXECUTION:
exit_with_error('cannot have both DYNAMIC_EXECUTION=0 and RELOCATABLE enabled at the same time, since RELOCATABLE needs to eval()')
if shared.Settings.SIDE_MODULE and shared.Settings.GLOBAL_BASE != -1:
exit_with_error('Cannot set GLOBAL_BASE when building SIDE_MODULE')
if shared.Settings.RELOCATABLE:
default_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
default_setting('WARN_ON_UNDEFINED_SYMBOLS', 0)
if shared.Settings.DISABLE_EXCEPTION_THROWING and not shared.Settings.DISABLE_EXCEPTION_CATCHING:
exit_with_error("DISABLE_EXCEPTION_THROWING was set (probably from -fno-exceptions) but is not compatible with enabling exception catching (DISABLE_EXCEPTION_CATCHING=0). If you don't want exceptions, set DISABLE_EXCEPTION_CATCHING to 1; if you do want exceptions, don't link with -fno-exceptions")
if options.proxy_to_worker:
shared.Settings.PROXY_TO_WORKER = 1
if options.use_preload_plugins or len(options.preload_files) or len(options.embed_files):
if shared.Settings.NODERAWFS:
exit_with_error('--preload-file and --embed-file cannot be used with NODERAWFS which disables virtual filesystem')
# if we include any files, or intend to use preload plugins, then we definitely need filesystem support
shared.Settings.FORCE_FILESYSTEM = 1
if options.proxy_to_worker or options.use_preload_plugins:
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$Browser']
if not shared.Settings.MINIMAL_RUNTIME:
# In non-MINIMAL_RUNTIME, the core runtime depends on these functions to be present. (In MINIMAL_RUNTIME, they are
# no longer always bundled in)
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$demangle', '$demangleAll', '$jsStackTrace', '$stackTrace']
if shared.Settings.FILESYSTEM:
# to flush streams on FS exit, we need to be able to call fflush
# we only include it if the runtime is exitable, or when ASSERTIONS
# (ASSERTIONS will check that streams do not need to be flushed,
# helping people see when they should have enabled EXIT_RUNTIME)
if shared.Settings.EXIT_RUNTIME or shared.Settings.ASSERTIONS:
shared.Settings.EXPORTED_FUNCTIONS += ['_fflush']
if shared.Settings.SUPPORT_ERRNO:
# so setErrNo JS library function can report errno back to C
shared.Settings.EXPORTED_FUNCTIONS += ['___errno_location']
if shared.Settings.SAFE_HEAP:
# SAFE_HEAP check includes calling emscripten_get_sbrk_ptr() from wasm
shared.Settings.EXPORTED_FUNCTIONS += ['_emscripten_get_sbrk_ptr', '_emscripten_stack_get_base']
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$unSign']
if not shared.Settings.DECLARE_ASM_MODULE_EXPORTS:
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$exportAsmFunctions']
if shared.Settings.ALLOW_MEMORY_GROWTH:
# Setting ALLOW_MEMORY_GROWTH turns off ABORTING_MALLOC, as in that mode we default to
# the behavior of trying to grow and returning 0 from malloc on failure, like
# a standard system would. However, if the user sets the flag it
# overrides that.
default_setting('ABORTING_MALLOC', 0)
if shared.Settings.USE_PTHREADS:
if shared.Settings.USE_PTHREADS == 2:
        exit_with_error('USE_PTHREADS=2 is no longer supported')
if shared.Settings.ALLOW_MEMORY_GROWTH:
diagnostics.warning('pthreads-mem-growth', 'USE_PTHREADS + ALLOW_MEMORY_GROWTH may run non-wasm code slowly, see https://github.com/WebAssembly/design/issues/1271')
# UTF8Decoder.decode doesn't work with a view of a SharedArrayBuffer
shared.Settings.TEXTDECODER = 0
shared.Settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_pthread.js')))
newargs += ['-pthread']
# some pthreads code is in asm.js library functions, which are auto-exported; for the wasm backend, we must
# manually export them
shared.Settings.EXPORTED_FUNCTIONS += [
'___emscripten_pthread_data_constructor',
'___pthread_tsd_run_dtors',
'__emscripten_call_on_thread',
'__emscripten_do_dispatch_to_thread',
'__emscripten_main_thread_futex',
'__emscripten_thread_init',
'_emscripten_current_thread_process_queued_calls',
'__emscripten_allow_main_runtime_queued_calls',
'_emscripten_futex_wake',
'_emscripten_get_global_libc',
'_emscripten_main_browser_thread_id',
'_emscripten_main_thread_process_queued_calls',
'_emscripten_register_main_browser_thread_id',
'_emscripten_run_in_main_runtime_thread_js',
'_emscripten_stack_set_limits',
'_emscripten_sync_run_in_main_thread_2',
'_emscripten_sync_run_in_main_thread_4',
'_emscripten_tls_init',
'_pthread_self',
]
      # Some of these symbols are used by worker.js but otherwise unreferenced.
      # Because emitDCEGraph only considers the main js file, and not worker.js,
      # we have to explicitly mark these symbols as user-exported so that they will
      # be kept alive through DCE.
# TODO: Find a less hacky way to do this, perhaps by also scanning worker.js
# for roots.
building.user_requested_exports.append('_emscripten_tls_init')
building.user_requested_exports.append('_emscripten_current_thread_process_queued_calls')
# set location of worker.js
shared.Settings.PTHREAD_WORKER_FILE = unsuffixed(os.path.basename(target)) + '.worker.js'
else:
shared.Settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_pthread_stub.js')))
if shared.Settings.FORCE_FILESYSTEM and not shared.Settings.MINIMAL_RUNTIME:
# when the filesystem is forced, we export by default methods that filesystem usage
# may need, including filesystem usage from standalone file packager output (i.e.
# file packages not built together with emcc, but that are loaded at runtime
# separately, and they need emcc's output to contain the support they need)
if not shared.Settings.ASMFS:
shared.Settings.EXPORTED_RUNTIME_METHODS += [
'FS_createPath',
'FS_createDataFile',
'FS_createPreloadedFile',
'FS_createLazyFile',
'FS_createDevice',
'FS_unlink'
]
shared.Settings.EXPORTED_RUNTIME_METHODS += [
'addRunDependency',
'removeRunDependency',
]
if not shared.Settings.MINIMAL_RUNTIME or shared.Settings.EXIT_RUNTIME:
# MINIMAL_RUNTIME only needs callRuntimeCallbacks in certain cases, but the normal runtime
# always does.
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$callRuntimeCallbacks']
if shared.Settings.USE_PTHREADS:
# memalign is used to ensure allocated thread stacks are aligned.
shared.Settings.EXPORTED_FUNCTIONS += ['_memalign']
if shared.Settings.MINIMAL_RUNTIME:
building.user_requested_exports += ['exit']
if shared.Settings.PROXY_TO_PTHREAD:
shared.Settings.EXPORTED_FUNCTIONS += ['_emscripten_proxy_main']
# pthread stack setup and other necessary utilities
def include_and_export(name):
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$' + name]
shared.Settings.EXPORTED_FUNCTIONS += [name]
include_and_export('establishStackSpace')
include_and_export('invokeEntryPoint')
if not shared.Settings.MINIMAL_RUNTIME:
# noExitRuntime does not apply to MINIMAL_RUNTIME.
include_and_export('getNoExitRuntime')
if shared.Settings.MODULARIZE:
if shared.Settings.EXPORT_NAME == 'Module':
exit_with_error('pthreads + MODULARIZE currently require you to set -s EXPORT_NAME=Something (see settings.js) to Something != Module, so that the .worker.js file can work')
# MODULARIZE+USE_PTHREADS mode requires extra exports out to Module so that worker.js
# can access them:
# general threading variables:
shared.Settings.EXPORTED_RUNTIME_METHODS += ['PThread']
        # To keep code size to a minimum, MINIMAL_RUNTIME does not utilize the global ExitStatus
        # object; only the regular runtime has it.
if not shared.Settings.MINIMAL_RUNTIME:
shared.Settings.EXPORTED_RUNTIME_METHODS += ['ExitStatus']
if shared.Settings.SIDE_MODULE:
diagnostics.warning('experimental', '-s SIDE_MODULE + pthreads is experimental')
elif shared.Settings.MAIN_MODULE:
diagnostics.warning('experimental', '-s MAIN_MODULE + pthreads is experimental')
elif shared.Settings.LINKABLE:
diagnostics.warning('experimental', '-s LINKABLE + pthreads is experimental')
if shared.Settings.PROXY_TO_WORKER:
exit_with_error('--proxy-to-worker is not supported with -s USE_PTHREADS>0! Use the option -s PROXY_TO_PTHREAD=1 if you want to run the main thread of a multithreaded application in a web worker.')
else:
if shared.Settings.PROXY_TO_PTHREAD:
exit_with_error('-s PROXY_TO_PTHREAD=1 requires -s USE_PTHREADS to work!')
def check_memory_setting(setting):
if shared.Settings[setting] % webassembly.WASM_PAGE_SIZE != 0:
exit_with_error(f'{setting} must be a multiple of WebAssembly page size (64KiB), was {shared.Settings[setting]}')
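    # Illustrative check (assumed values): INITIAL_MEMORY=16777216 (exactly 256 64KiB pages)
    # passes, whereas INITIAL_MEMORY=16777000 is rejected as not page-aligned.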
check_memory_setting('INITIAL_MEMORY')
if shared.Settings.INITIAL_MEMORY >= 2 * 1024 * 1024 * 1024:
exit_with_error('INITIAL_MEMORY must be less than 2GB due to current spec limitations')
if shared.Settings.INITIAL_MEMORY < shared.Settings.TOTAL_STACK:
exit_with_error(f'INITIAL_MEMORY must be larger than TOTAL_STACK, was {shared.Settings.INITIAL_MEMORY} (TOTAL_STACK={shared.Settings.TOTAL_STACK})')
if shared.Settings.MAXIMUM_MEMORY != -1:
check_memory_setting('MAXIMUM_MEMORY')
if shared.Settings.MEMORY_GROWTH_LINEAR_STEP != -1:
check_memory_setting('MEMORY_GROWTH_LINEAR_STEP')
if shared.Settings.USE_PTHREADS and shared.Settings.ALLOW_MEMORY_GROWTH and shared.Settings.MAXIMUM_MEMORY == -1:
exit_with_error('If pthreads and memory growth are enabled, MAXIMUM_MEMORY must be set')
if shared.Settings.EXPORT_ES6 and not shared.Settings.MODULARIZE:
# EXPORT_ES6 requires output to be a module
if 'MODULARIZE' in settings_key_changes:
exit_with_error('EXPORT_ES6 requires MODULARIZE to be set')
shared.Settings.MODULARIZE = 1
if shared.Settings.MODULARIZE and not shared.Settings.DECLARE_ASM_MODULE_EXPORTS:
# When MODULARIZE option is used, currently requires declaring all module exports
# individually - TODO: this could be optimized
exit_with_error('DECLARE_ASM_MODULE_EXPORTS=0 is not compatible with MODULARIZE')
# When not declaring asm module exports in outer scope one by one, disable minifying
# asm.js/wasm module export names so that the names can be passed directly to the outer scope.
# Also, if using library_exports.js API, disable minification so that the feature can work.
if not shared.Settings.DECLARE_ASM_MODULE_EXPORTS or 'exports.js' in [x for _, x in libs]:
shared.Settings.MINIFY_ASMJS_EXPORT_NAMES = 0
# Enable minification of wasm imports and exports when appropriate, if we
# are emitting an optimized JS+wasm combo (then the JS knows how to load the minified names).
# Things that process the JS after this operation would be done must disable this.
# For example, ASYNCIFY_LAZY_LOAD_CODE needs to identify import names.
if will_metadce() and \
shared.Settings.OPT_LEVEL >= 2 and \
shared.Settings.DEBUG_LEVEL <= 2 and \
options.oformat not in (OFormat.WASM, OFormat.BARE) and \
not shared.Settings.LINKABLE and \
not shared.Settings.STANDALONE_WASM and \
not shared.Settings.AUTODEBUG and \
not shared.Settings.ASSERTIONS and \
not shared.Settings.RELOCATABLE and \
not shared.Settings.ASYNCIFY_LAZY_LOAD_CODE and \
shared.Settings.MINIFY_ASMJS_EXPORT_NAMES:
shared.Settings.MINIFY_WASM_IMPORTS_AND_EXPORTS = 1
shared.Settings.MINIFY_WASM_IMPORTED_MODULES = 1
if shared.Settings.MINIMAL_RUNTIME:
# Minimal runtime uses a different default shell file
if options.shell_path == shared.path_from_root('src', 'shell.html'):
options.shell_path = shared.path_from_root('src', 'shell_minimal_runtime.html')
if shared.Settings.ASSERTIONS and shared.Settings.MINIMAL_RUNTIME:
# In ASSERTIONS builds, the functions UTF8ArrayToString() and stringToUTF8Array() (which are not JS library functions)
# both use warnOnce(), which in MINIMAL_RUNTIME is a JS library function, so we explicitly have to mark the dependency
# on warnOnce() in that case. If the string functions are turned into library functions in the future, JS dependency
# tracking can be used and this special directive can be dropped.
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$warnOnce']
# Require explicit -lfoo.js flags to link with JS libraries.
shared.Settings.AUTO_JS_LIBRARIES = 0
if shared.Settings.MODULARIZE and shared.Settings.EXPORT_NAME == 'Module' and options.oformat == OFormat.HTML and \
(options.shell_path == shared.path_from_root('src', 'shell.html') or options.shell_path == shared.path_from_root('src', 'shell_minimal.html')):
exit_with_error('Due to collision in variable name "Module", the shell file "' + options.shell_path + '" is not compatible with build options "-s MODULARIZE=1 -s EXPORT_NAME=Module". Either provide your own shell file, or change the name of the export to something else to avoid the name collision. (see https://github.com/emscripten-core/emscripten/issues/7950 for details)')
if shared.Settings.STANDALONE_WASM:
if shared.Settings.USE_PTHREADS:
exit_with_error('STANDALONE_WASM does not support pthreads yet')
if shared.Settings.MINIMAL_RUNTIME:
exit_with_error('MINIMAL_RUNTIME reduces JS size, and is incompatible with STANDALONE_WASM which focuses on ignoring JS anyhow and being 100% wasm')
# the wasm must be runnable without the JS, so there cannot be anything that
# requires JS legalization
shared.Settings.LEGALIZE_JS_FFI = 0
# TODO(sbc): Remove WASM2JS here once the size regression it would introduce has been fixed.
if shared.Settings.USE_PTHREADS or shared.Settings.RELOCATABLE or shared.Settings.ASYNCIFY_LAZY_LOAD_CODE or shared.Settings.WASM2JS:
shared.Settings.IMPORTED_MEMORY = 1
if shared.Settings.WASM_BIGINT:
shared.Settings.LEGALIZE_JS_FFI = 0
shared.Settings.GENERATE_SOURCE_MAP = shared.Settings.DEBUG_LEVEL >= 4 and not shared.Settings.SINGLE_FILE
if options.use_closure_compiler == 2 and not shared.Settings.WASM2JS:
exit_with_error('closure compiler mode 2 assumes the code is asm.js, so not meaningful for wasm')
if any(s.startswith('MEM_INIT_METHOD=') for s in settings_changes):
exit_with_error('MEM_INIT_METHOD is not supported in wasm. Memory will be embedded in the wasm binary if threads are not used, and included in a separate file if threads are used.')
if shared.Settings.WASM2JS:
shared.Settings.MAYBE_WASM2JS = 1
# when using wasm2js, if the memory segments are in the wasm then they
# end up converted by wasm2js into base64 encoded JS. alternatively, we
# can use a .mem file like asm.js used to.
# generally we follow what the options tell us to do (which is to use
# a .mem file in most cases, since it is binary & compact). however, for
# pthreads we must keep the memory segments in the wasm as they will be
# passive segments which the .mem format cannot handle.
shared.Settings.MEM_INIT_IN_WASM = not options.memory_init_file or shared.Settings.SINGLE_FILE or shared.Settings.USE_PTHREADS
else:
# wasm includes the mem init in the wasm binary. The exception is
# wasm2js, which behaves more like js.
options.memory_init_file = True
shared.Settings.MEM_INIT_IN_WASM = True
# wasm side modules have suffix .wasm
if shared.Settings.SIDE_MODULE and target.endswith('.js'):
diagnostics.warning('emcc', 'output suffix .js requested, but wasm side modules are just wasm files; emitting only a .wasm, no .js')
sanitize = set()
for arg in newargs:
if arg.startswith('-fsanitize='):
sanitize.update(arg.split('=', 1)[1].split(','))
elif arg.startswith('-fno-sanitize='):
sanitize.difference_update(arg.split('=', 1)[1].split(','))
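# For illustration: the flags `-fsanitize=address,undefined` followed by
# `-fno-sanitize=undefined` leave {'address'} in the set, so a later
# -fno-sanitize flag can cancel an earlier selection.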
if sanitize:
shared.Settings.USE_OFFSET_CONVERTER = 1
shared.Settings.EXPORTED_FUNCTIONS += [
'_memalign',
'_emscripten_builtin_memalign',
'_emscripten_builtin_malloc',
'_emscripten_builtin_free',
'___heap_base',
'___global_base'
]
if shared.Settings.USE_OFFSET_CONVERTER and shared.Settings.USE_PTHREADS:
shared.Settings.EXPORTED_RUNTIME_METHODS += ['WasmOffsetConverter']
if sanitize & UBSAN_SANITIZERS:
if '-fsanitize-minimal-runtime' in newargs:
shared.Settings.UBSAN_RUNTIME = 1
else:
shared.Settings.UBSAN_RUNTIME = 2
if 'leak' in sanitize:
shared.Settings.USE_LSAN = 1
shared.Settings.EXIT_RUNTIME = 1
if shared.Settings.LINKABLE:
exit_with_error('LSan does not support dynamic linking')
if 'address' in sanitize:
shared.Settings.USE_ASAN = 1
if not shared.Settings.UBSAN_RUNTIME:
shared.Settings.UBSAN_RUNTIME = 2
shared.Settings.EXPORTED_FUNCTIONS += [
'_emscripten_builtin_memset',
'_asan_c_load_1', '_asan_c_load_1u',
'_asan_c_load_2', '_asan_c_load_2u',
'_asan_c_load_4', '_asan_c_load_4u',
'_asan_c_load_f', '_asan_c_load_d',
'_asan_c_store_1', '_asan_c_store_1u',
'_asan_c_store_2', '_asan_c_store_2u',
'_asan_c_store_4', '_asan_c_store_4u',
'_asan_c_store_f', '_asan_c_store_d',
]
if shared.Settings.ASAN_SHADOW_SIZE != -1:
diagnostics.warning('emcc', 'ASAN_SHADOW_SIZE is ignored and will be removed in a future release')
if shared.Settings.GLOBAL_BASE != -1:
exit_with_error("ASan does not support custom GLOBAL_BASE")
max_mem = shared.Settings.INITIAL_MEMORY
if shared.Settings.ALLOW_MEMORY_GROWTH:
max_mem = shared.Settings.MAXIMUM_MEMORY
if max_mem == -1:
exit_with_error('ASan requires a finite MAXIMUM_MEMORY')
shadow_size = max_mem // 8
shared.Settings.GLOBAL_BASE = shadow_size
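# Worked example (illustrative numbers): ASan maps every 8 bytes of
# application memory to 1 shadow byte, so with MAXIMUM_MEMORY = 256 MiB the
# shadow region is 256 MiB // 8 = 32 MiB. Placing GLOBAL_BASE at that offset
# keeps static data (and everything laid out after it) above the shadow
# memory, which starts at address 0.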
if shared.Settings.SAFE_HEAP:
# SAFE_HEAP instruments ASan's shadow memory accesses.
# Since the shadow memory starts at 0, the act of accessing the shadow memory is detected
# by SAFE_HEAP as a null pointer dereference.
exit_with_error('ASan does not work with SAFE_HEAP')
if shared.Settings.LINKABLE:
exit_with_error('ASan does not support dynamic linking')
if sanitize and '-g4' in args:
shared.Settings.LOAD_SOURCE_MAP = 1
if shared.Settings.LOAD_SOURCE_MAP and shared.Settings.USE_PTHREADS:
shared.Settings.EXPORTED_RUNTIME_METHODS += ['WasmSourceMap']
if shared.Settings.GLOBAL_BASE == -1:
# default if nothing else sets it
# a higher global base is useful for optimizing load/store offsets, as it
# enables the --post-emscripten pass
shared.Settings.GLOBAL_BASE = 1024
# various settings require malloc/free support from JS
if shared.Settings.RELOCATABLE or \
shared.Settings.BUILD_AS_WORKER or \
shared.Settings.USE_WEBGPU or \
shared.Settings.USE_PTHREADS or \
shared.Settings.OFFSCREENCANVAS_SUPPORT or \
shared.Settings.LEGACY_GL_EMULATION or \
shared.Settings.DISABLE_EXCEPTION_CATCHING != 1 or \
shared.Settings.ASYNCIFY or \
shared.Settings.ASMFS or \
shared.Settings.DEMANGLE_SUPPORT or \
shared.Settings.FORCE_FILESYSTEM or \
shared.Settings.STB_IMAGE or \
shared.Settings.EMBIND or \
shared.Settings.FETCH or \
shared.Settings.PROXY_POSIX_SOCKETS or \
options.memory_profiler or \
sanitize:
shared.Settings.EXPORTED_FUNCTIONS += ['_malloc', '_free']
if shared.Settings.ASYNCIFY:
if not shared.Settings.ASYNCIFY_IGNORE_INDIRECT:
# if we are not ignoring indirect calls, then we must treat invoke_* as if
# they are indirect calls, since that is what they do - we can't see their
# targets statically.
shared.Settings.ASYNCIFY_IMPORTS += ['invoke_*']
# with pthreads we may call main through the __call_main mechanism, which can
# therefore reach anything in the program, so mark it as possibly causing a
# sleep (the asyncify analysis doesn't look through JS, just wasm, so it can't
# see what it itself calls)
if shared.Settings.USE_PTHREADS:
shared.Settings.ASYNCIFY_IMPORTS += ['__call_main']
# add the default imports
shared.Settings.ASYNCIFY_IMPORTS += DEFAULT_ASYNCIFY_IMPORTS
# return the full import name, including module. The name may
# already have a module prefix; if not, we assume it is "env".
def get_full_import_name(name):
if '.' in name:
return name
return 'env.' + name
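# For illustration: an unqualified import such as 'emscripten_sleep' becomes
# 'env.emscripten_sleep', while a name that already carries a module prefix,
# e.g. 'wasi_snapshot_preview1.fd_write', is left untouched.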
shared.Settings.ASYNCIFY_IMPORTS = [get_full_import_name(i) for i in shared.Settings.ASYNCIFY_IMPORTS]
if shared.Settings.WASM2JS and shared.Settings.GENERATE_SOURCE_MAP:
exit_with_error('wasm2js does not support source maps yet (debug in wasm for now)')
if shared.Settings.NODE_CODE_CACHING:
if shared.Settings.WASM_ASYNC_COMPILATION:
exit_with_error('NODE_CODE_CACHING requires sync compilation (WASM_ASYNC_COMPILATION=0)')
if not shared.Settings.target_environment_may_be('node'):
exit_with_error('NODE_CODE_CACHING only works in node, but target environments do not include it')
if shared.Settings.SINGLE_FILE:
exit_with_error('NODE_CODE_CACHING saves a file on the side and is not compatible with SINGLE_FILE')
if options.tracing and shared.Settings.ALLOW_MEMORY_GROWTH:
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['emscripten_trace_report_memory_layout']
shared.Settings.EXPORTED_FUNCTIONS += ['_emscripten_stack_get_current',
'_emscripten_stack_get_base',
'_emscripten_stack_get_end']
# Any "pointers" passed to JS will now be i64's, in both modes.
if shared.Settings.MEMORY64:
if settings_key_changes.get('WASM_BIGINT') == '0':
exit_with_error('MEMORY64 is not compatible with WASM_BIGINT=0')
shared.Settings.WASM_BIGINT = 1
# check if we can address the 2GB mark and higher: either if we start at
# 2GB, or if we allow growth to either any amount or to 2GB or more.
if shared.Settings.INITIAL_MEMORY > 2 * 1024 * 1024 * 1024 or \
(shared.Settings.ALLOW_MEMORY_GROWTH and
(shared.Settings.MAXIMUM_MEMORY < 0 or
shared.Settings.MAXIMUM_MEMORY > 2 * 1024 * 1024 * 1024)):
shared.Settings.CAN_ADDRESS_2GB = 1
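# Illustrative cases: a build with ALLOW_MEMORY_GROWTH and MAXIMUM_MEMORY of
# 4 GiB (or left unlimited) may hand out pointers at or above the 2 GiB mark,
# so CAN_ADDRESS_2GB is set; a fixed 512 MiB heap without growth never
# reaches that range and leaves the flag unset.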
shared.Settings.EMSCRIPTEN_VERSION = shared.EMSCRIPTEN_VERSION
shared.Settings.PROFILING_FUNCS = options.profiling_funcs
shared.Settings.SOURCE_MAP_BASE = options.source_map_base or ''
## Compile source code to bitcode
logger.debug('compiling to bitcode')
temp_files = []
# exit block 'parse arguments and setup'
log_time('parse arguments and setup')
if options.post_link:
process_libraries(libs, lib_dirs, temp_files)
if len(input_files) != 1:
exit_with_error('--post-link requires a single input file')
post_link(options, input_files[0][1], wasm_target, target)
return 0
with ToolchainProfiler.profile_block('compile inputs'):
def is_link_flag(flag):
if flag.startswith('-nostdlib'):
return True
return flag.startswith(('-l', '-L', '-Wl,'))
CXX = [shared.CLANG_CXX]
CC = [shared.CLANG_CC]
if config.COMPILER_WRAPPER:
logger.debug('using compiler wrapper: %s', config.COMPILER_WRAPPER)
CXX.insert(0, config.COMPILER_WRAPPER)
CC.insert(0, config.COMPILER_WRAPPER)
if 'EMMAKEN_COMPILER' in os.environ:
diagnostics.warning('deprecated', '`EMMAKEN_COMPILER` is deprecated.\n'
'To use an alternative LLVM build set `LLVM_ROOT` in the config file (or `EM_LLVM_ROOT` env var).\n'
'To wrap invocations of clang use the `COMPILER_WRAPPER` setting (or `EM_COMPILER_WRAPPER` env var).\n')
CXX = [os.environ['EMMAKEN_COMPILER']]
CC = [cxx_to_c_compiler(os.environ['EMMAKEN_COMPILER'])]
compile_args = [a for a in newargs if a and not is_link_flag(a)]
cflags = calc_cflags(options)
system_libs.ensure_sysroot()
system_libs.add_ports_cflags(cflags, shared.Settings)
def use_cxx(src):
if 'c++' in language_mode or run_via_emxx:
return True
# Next consider the filename
if src.endswith(C_ENDINGS + OBJC_ENDINGS):
return False
if src.endswith(CXX_ENDINGS):
return True
# Finally fall back to the default
if shared.Settings.DEFAULT_TO_CXX:
# Default to using C++ even when run as `emcc`.
# This means that emcc will act as a C++ linker when no source files are
# specified.
# This differs from clang and gcc where the default is always C unless run as
# clang++/g++.
return True
return False
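# For illustration: 'foo.c' selects the C front end, 'foo.cpp' selects C++,
# and a file whose suffix matches neither list falls back to DEFAULT_TO_CXX,
# which (unless disabled) treats the input as C++ even under plain `emcc`.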
def get_compiler(cxx):
if cxx:
return CXX
return CC
def get_clang_command(src_file):
per_file_cflags = shared.get_cflags(args)
return get_compiler(use_cxx(src_file)) + cflags + per_file_cflags + compile_args + [src_file]
def get_clang_command_asm(src_file):
asflags = shared.get_clang_flags()
return get_compiler(use_cxx(src_file)) + asflags + compile_args + [src_file]
# preprocessor-only (-E) support
if has_dash_E or '-M' in newargs or '-MM' in newargs or '-fsyntax-only' in newargs:
for input_file in [x[1] for x in input_files]:
cmd = get_clang_command(input_file)
if specified_target:
cmd += ['-o', specified_target]
# Do not compile, but just output the result from preprocessing stage or
# output the dependency rule. Warning: clang and gcc behave differently
# with -MF! (clang seems to not recognize it)
logger.debug(('just preprocessor ' if has_dash_E else 'just dependencies: ') + ' '.join(cmd))
shared.check_call(cmd)
return 0
# Precompiled headers support
if has_header_inputs:
headers = [header for _, header in input_files]
for header in headers:
if not header.endswith(HEADER_ENDINGS):
exit_with_error('cannot mix precompile headers with non-header inputs: ' + str(headers) + ' : ' + header)
cmd = get_clang_command(header)
if specified_target:
cmd += ['-o', specified_target]
logger.debug("running (for precompiled headers): " + cmd[0] + ' ' + ' '.join(cmd[1:]))
shared.check_call(cmd)
return 0
def get_object_filename(input_file):
if compile_only:
# In compile-only mode we don't use any temp file. The object files
# are written directly to their final output locations.
if specified_target:
assert len(input_files) == 1
return specified_target
else:
return unsuffixed_basename(input_file) + options.default_object_extension
else:
return in_temp(unsuffixed(uniquename(input_file)) + options.default_object_extension)
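# Roughly: `emcc -c foo.c -o build/foo.o` writes the object directly to
# build/foo.o, whereas `emcc foo.c bar.c -o app.js` compiles each source to a
# uniquely named object file in the temporary directory and only the final
# link output lands next to app.js.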
def compile_source_file(i, input_file):
logger.debug('compiling source file: ' + input_file)
output_file = get_object_filename(input_file)
temp_files.append((i, output_file))
if get_file_suffix(input_file) in ASSEMBLY_ENDINGS:
cmd = get_clang_command_asm(input_file)
else:
cmd = get_clang_command(input_file)
if not has_dash_c:
cmd += ['-c']
cmd += ['-o', output_file]
shared.check_call(cmd)
if output_file not in ('-', os.devnull):
assert os.path.exists(output_file)
# First, generate LLVM bitcode. For each input file, we get base.o with bitcode
for i, input_file in input_files:
file_suffix = get_file_suffix(input_file)
if file_suffix in SOURCE_ENDINGS + ASSEMBLY_ENDINGS or (has_dash_c and file_suffix == '.bc'):
compile_source_file(i, input_file)
elif file_suffix in DYNAMICLIB_ENDINGS:
logger.debug('using shared library: ' + input_file)
temp_files.append((i, input_file))
elif building.is_ar(input_file):
logger.debug('using static library: ' + input_file)
ensure_archive_index(input_file)
temp_files.append((i, input_file))
elif language_mode:
compile_source_file(i, input_file)
elif input_file == '-':
exit_with_error('-E or -x required when input is from standard input')
else:
# Default to assuming the inputs are object files and pass them to the linker
logger.debug('using object file: ' + input_file)
temp_files.append((i, input_file))
# exit block 'compile inputs'
log_time('compile inputs')
if compile_only:
logger.debug('stopping after compile phase')
for flag in link_flags:
diagnostics.warning('unused-command-line-argument', "argument unused during compilation: '%s'" % flag[1])
return 0
if specified_target and specified_target.startswith('-'):
exit_with_error('invalid output filename: `%s`' % specified_target)
ldflags = shared.emsdk_ldflags(newargs)
for f in ldflags:
add_link_flag(sys.maxsize, f)
using_lld = not (link_to_object and shared.Settings.LTO)
link_flags = filter_link_flags(link_flags, using_lld)
# Decide what we will link
consumed = process_libraries(libs, lib_dirs, temp_files)
# Filter out libraries that are actually JS libs
link_flags = [l for l in link_flags if l[0] not in consumed]
temp_files = filter_out_dynamic_libs(temp_files)
linker_inputs = [val for _, val in sorted(temp_files + link_flags)]
if link_to_object:
with ToolchainProfiler.profile_block('linking to object file'):
logger.debug('link_to_object: ' + str(linker_inputs) + ' -> ' + target)
building.link_to_object(linker_inputs, target)
logger.debug('stopping after linking to object file')
return 0
if final_suffix in ('.o', '.bc', '.so', '.dylib') and not shared.Settings.SIDE_MODULE:
diagnostics.warning('emcc', 'generating an executable with an object extension (%s). If you meant to build an object file please use `-c`, `-r`, or `-shared`' % final_suffix)
## Continue on to create JavaScript
with ToolchainProfiler.profile_block('calculate system libraries'):
# link in ports and system libraries, if necessary
if not shared.Settings.SIDE_MODULE: # shared libraries/side modules link no C libraries, need them in parent
extra_files_to_link = system_libs.get_ports(shared.Settings)
if '-nostdlib' not in newargs and '-nodefaultlibs' not in newargs:
link_as_cxx = run_via_emxx
# Traditionally we always link as C++. For compatibility we continue to do that,
# unless running in strict mode.
if not shared.Settings.STRICT and '-nostdlib++' not in newargs:
link_as_cxx = True
extra_files_to_link += system_libs.calculate([f for _, f in sorted(temp_files)] + extra_files_to_link, link_as_cxx, forced=forced_stdlibs)
linker_inputs += extra_files_to_link
# exit block 'calculate system libraries'
log_time('calculate system libraries')
def dedup_list(lst):
rtn = []
for item in lst:
if item not in rtn:
rtn.append(item)
return rtn
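# For illustration (hypothetical symbol names): dedup_list keeps the first
# occurrence of each entry, so ['_main', '_malloc', '_main', '_free']
# becomes ['_main', '_malloc', '_free'].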
# Make a final pass over shared.Settings.EXPORTED_FUNCTIONS to remove any
# duplication between functions added by the driver/libraries and function
# specified by the user
shared.Settings.EXPORTED_FUNCTIONS = dedup_list(shared.Settings.EXPORTED_FUNCTIONS)
with ToolchainProfiler.profile_block('link'):
logger.debug('linking: ' + str(linker_inputs))
# if EMCC_DEBUG=2 then we must link now, so the temp files are complete.
# if using the wasm backend, we might be using vanilla LLVM, which does not allow our
# fastcomp deferred linking opts.
# TODO: we could check if this is a fastcomp build, and still speed things up here
js_funcs = None
if shared.Settings.LLD_REPORT_UNDEFINED and shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS:
js_funcs = get_all_js_syms()
log_time('JS symbol generation')
building.link_lld(linker_inputs, wasm_target, external_symbol_list=js_funcs)
# Special handling for when the user passed '-Wl,--version'. In this case the linker
# does not create the output file, but just prints its version and exits with 0.
if '--version' in linker_inputs:
return 0
# exit block 'link'
log_time('link')
if target == os.devnull:
# TODO(sbc): In theory we should really run the whole pipeline even if the output is
# /dev/null, but that will take some refactoring
return 0
# Perform post-link steps (unless we are running bare mode)
if options.oformat != OFormat.BARE:
post_link(options, wasm_target, wasm_target, target)
return 0
def post_link(options, in_wasm, wasm_target, target):
global final_js
target_basename = unsuffixed_basename(target)
if options.oformat != OFormat.WASM:
final_js = in_temp(target_basename + '.js')
if shared.Settings.MEM_INIT_IN_WASM:
memfile = None
else:
memfile = shared.replace_or_append_suffix(target, '.mem')
with ToolchainProfiler.profile_block('emscript'):
# Emscripten
logger.debug('emscript')
if options.memory_init_file:
shared.Settings.MEM_INIT_METHOD = 1
else:
assert shared.Settings.MEM_INIT_METHOD != 1
if embed_memfile():
shared.Settings.SUPPORT_BASE64_EMBEDDING = 1
emscripten.run(in_wasm, wasm_target, final_js, memfile)
save_intermediate('original')
# exit block 'emscript'
log_time('emscript')
with ToolchainProfiler.profile_block('source transforms'):
# Embed and preload files
if len(options.preload_files) or len(options.embed_files):
logger.debug('setting up files')
file_args = ['--from-emcc', '--export-name=' + shared.Settings.EXPORT_NAME]
if len(options.preload_files):
file_args.append('--preload')
file_args += options.preload_files
if len(options.embed_files):
file_args.append('--embed')
file_args += options.embed_files
if len(options.exclude_files):
file_args.append('--exclude')
file_args += options.exclude_files
if options.use_preload_cache:
file_args.append('--use-preload-cache')
if shared.Settings.LZ4:
file_args.append('--lz4')
if options.use_preload_plugins:
file_args.append('--use-preload-plugins')
file_code = shared.check_call([shared.FILE_PACKAGER, unsuffixed(target) + '.data'] + file_args, stdout=PIPE).stdout
options.pre_js = js_manipulation.add_files_pre_js(options.pre_js, file_code)
# Apply pre and postjs files
if final_js and (options.pre_js or options.post_js):
logger.debug('applying pre/postjses')
src = open(final_js).read()
final_js += '.pp.js'
with open(final_js, 'w') as f:
# pre-js code goes right after the Module integration code (so it
# can use Module), we have a marker for it
f.write(do_replace(src, '// {{PRE_JSES}}', fix_windows_newlines(options.pre_js)))
f.write(fix_windows_newlines(options.post_js))
options.pre_js = src = options.post_js = None
save_intermediate('pre-post')
# Apply a source code transformation, if requested
if options.js_transform:
safe_copy(final_js, final_js + '.tr.js')
final_js += '.tr.js'
posix = not shared.WINDOWS
logger.debug('applying transform: %s', options.js_transform)
shared.check_call(building.remove_quotes(shlex.split(options.js_transform, posix=posix) + [os.path.abspath(final_js)]))
save_intermediate('transformed')
# exit block 'source transforms'
log_time('source transforms')
if memfile and not shared.Settings.MINIMAL_RUNTIME:
# MINIMAL_RUNTIME doesn't use `var memoryInitializer` but instead expects Module['mem'] to
# be loaded before the module. See src/postamble_minimal.js.
with ToolchainProfiler.profile_block('memory initializer'):
# For the wasm backend, we don't have any memory info in JS. All we need to do
# is set the memory initializer url.
src = open(final_js).read()
src = do_replace(src, '// {{MEM_INITIALIZER}}', 'var memoryInitializer = "%s";' % os.path.basename(memfile))
open(final_js + '.mem.js', 'w').write(src)
final_js += '.mem.js'
log_time('memory initializer')
with ToolchainProfiler.profile_block('binaryen'):
do_binaryen(target, options, wasm_target)
log_time('binaryen')
# If we are not emitting any JS then we are all done now
if options.oformat == OFormat.WASM:
return
with ToolchainProfiler.profile_block('final emitting'):
# Remove some trivial whitespace
# TODO: do not run when compress has already been done on all parts of the code
# src = open(final_js).read()
# src = re.sub(r'\n+[ \n]*\n+', '\n', src)
# open(final_js, 'w').write(src)
if shared.Settings.USE_PTHREADS:
target_dir = os.path.dirname(os.path.abspath(target))
worker_output = os.path.join(target_dir, shared.Settings.PTHREAD_WORKER_FILE)
with open(worker_output, 'w') as f:
f.write(shared.read_and_preprocess(shared.path_from_root('src', 'worker.js'), expand_macros=True))
# Minify the worker.js file in optimized builds
if (shared.Settings.OPT_LEVEL >= 1 or shared.Settings.SHRINK_LEVEL >= 1) and not shared.Settings.DEBUG_LEVEL:
minified_worker = building.acorn_optimizer(worker_output, ['minifyWhitespace'], return_output=True)
open(worker_output, 'w').write(minified_worker)
# track files that will need native eols
generated_text_files_with_native_eols = []
if shared.Settings.MODULARIZE:
modularize()
module_export_name_substitution()
# Run a final regex pass to clean up items that Closure could not optimize, or suboptimal output left behind
# by processing steps that ran after Closure.
if shared.Settings.MINIMAL_RUNTIME == 2 and shared.Settings.USE_CLOSURE_COMPILER and shared.Settings.DEBUG_LEVEL == 0 and not shared.Settings.SINGLE_FILE:
# Process .js runtime file. Note that we need to handle the license text
# here, so that it will not confuse the hacky script.
shared.JS.handle_license(final_js)
shared.run_process([shared.PYTHON, shared.path_from_root('tools', 'hacky_postprocess_around_closure_limitations.py'), final_js])
# Apply pre and postjs files
if options.extern_pre_js or options.extern_post_js:
logger.debug('applying extern pre/postjses')
src = open(final_js).read()
final_js += '.epp.js'
with open(final_js, 'w') as f:
f.write(fix_windows_newlines(options.extern_pre_js))
f.write(src)
f.write(fix_windows_newlines(options.extern_post_js))
save_intermediate('extern-pre-post')
shared.JS.handle_license(final_js)
if options.oformat in (OFormat.JS, OFormat.MJS):
js_target = target
else:
js_target = unsuffixed(target) + '.js'
# The JS is now final. Move it to its final location
safe_move(final_js, js_target)
if not shared.Settings.SINGLE_FILE:
generated_text_files_with_native_eols += [js_target]
# If we were asked to also generate HTML, do that
if options.oformat == OFormat.HTML:
generate_html(target, options, js_target, target_basename,
wasm_target, memfile)
elif options.proxy_to_worker:
generate_worker_js(target, js_target, target_basename)
if embed_memfile() and memfile:
shared.try_delete(memfile)
if shared.Settings.SPLIT_MODULE:
diagnostics.warning('experimental', 'The SPLIT_MODULE setting is experimental and subject to change')
do_split_module(wasm_target)
for f in generated_text_files_with_native_eols:
tools.line_endings.convert_line_endings_in_file(f, os.linesep, options.output_eol)
if options.executable:
make_js_executable(js_target)
log_time('final emitting')
# exit block 'final emitting'
return 0
def version_string():
return 'emcc (Emscripten gcc/clang-like replacement + linker emulating GNU ld) %s' % shared.EMSCRIPTEN_VERSION
def parse_args(newargs):
options = EmccOptions()
settings_changes = []
user_js_defines = []
should_exit = False
eh_enabled = False
wasm_eh_enabled = False
skip = False
for i in range(len(newargs)):
if skip:
skip = False
continue
# On Windows Vista (and possibly others), excessive spaces in the command line
# leak into the items in this array, so trim e.g. 'foo.cpp ' -> 'foo.cpp'
newargs[i] = newargs[i].strip()
arg = newargs[i]
arg_value = None
def check_flag(value):
# Check for and consume a flag
if arg == value:
newargs[i] = ''
return True
return False
def check_arg(name):
nonlocal arg_value
if arg.startswith(name) and '=' in arg:
arg_value = arg.split('=', 1)[1]
newargs[i] = ''
return True
if arg == name:
if len(newargs) <= i + 1:
exit_with_error("option '%s' requires an argument" % arg)
arg_value = newargs[i + 1]
newargs[i] = ''
newargs[i + 1] = ''
return True
return False
def consume_arg():
nonlocal arg_value
assert arg_value is not None
rtn = arg_value
arg_value = None
return rtn
def consume_arg_file():
name = consume_arg()
if not os.path.isfile(name):
exit_with_error("'%s': file not found: '%s'" % (arg, name))
return name
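# For illustration: check_arg() accepts both spellings of an option, so
# `--shell-file=custom.html` and `--shell-file custom.html` each leave
# 'custom.html' in arg_value; consume_arg_file() additionally verifies that
# the named file exists.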
if arg.startswith('-O'):
# Let -O default to -O2, which is what gcc does.
options.requested_level = arg[2:] or '2'
if options.requested_level == 's':
options.llvm_opts = ['-Os']
options.requested_level = 2
shared.Settings.SHRINK_LEVEL = 1
settings_changes.append('INLINING_LIMIT=50')
elif options.requested_level == 'z':
options.llvm_opts = ['-Oz']
options.requested_level = 2
shared.Settings.SHRINK_LEVEL = 2
settings_changes.append('INLINING_LIMIT=25')
shared.Settings.OPT_LEVEL = validate_arg_level(options.requested_level, 3, 'Invalid optimization level: ' + arg, clamp=True)
elif check_arg('--js-opts'):
logger.warning('--js-opts ignored when using llvm backend')
consume_arg()
elif check_arg('--llvm-opts'):
options.llvm_opts = parse_value(consume_arg())
elif arg.startswith('-flto'):
if '=' in arg:
shared.Settings.LTO = arg.split('=')[1]
else:
shared.Settings.LTO = "full"
elif check_arg('--llvm-lto'):
logger.warning('--llvm-lto ignored when using llvm backend')
consume_arg()
elif check_arg('--closure-args'):
args = consume_arg()
options.closure_args += shlex.split(args)
elif check_arg('--closure'):
options.use_closure_compiler = int(consume_arg())
elif check_arg('--js-transform'):
options.js_transform = consume_arg()
elif check_arg('--pre-js'):
options.pre_js += open(consume_arg_file()).read() + '\n'
elif check_arg('--post-js'):
options.post_js += open(consume_arg_file()).read() + '\n'
elif check_arg('--extern-pre-js'):
options.extern_pre_js += open(consume_arg_file()).read() + '\n'
elif check_arg('--extern-post-js'):
options.extern_post_js += open(consume_arg_file()).read() + '\n'
elif check_arg('--compiler-wrapper'):
config.COMPILER_WRAPPER = consume_arg()
elif check_flag('--post-link'):
options.post_link = True
elif check_arg('--oformat'):
formats = [f.lower() for f in OFormat.__members__]
fmt = consume_arg()
if fmt not in formats:
exit_with_error('invalid output format: `%s` (must be one of %s)' % (fmt, formats))
options.oformat = getattr(OFormat, fmt.upper())
elif check_arg('--minify'):
arg = consume_arg()
if arg != '0':
exit_with_error('0 is the only supported option for --minify; 1 has been deprecated')
shared.Settings.DEBUG_LEVEL = max(1, shared.Settings.DEBUG_LEVEL)
elif arg.startswith('-g'):
options.requested_debug = arg
requested_level = arg[2:] or '3'
if is_int(requested_level):
# the -gX value is the debug level (-g1, -g2, etc.)
shared.Settings.DEBUG_LEVEL = validate_arg_level(requested_level, 4, 'Invalid debug level: ' + arg)
# if we don't need to preserve LLVM debug info, do not keep this flag
# for clang
if shared.Settings.DEBUG_LEVEL < 3:
newargs[i] = ''
else:
# for 3+, report -g to clang as -g4 is not accepted
newargs[i] = '-g'
else:
if requested_level.startswith('force_dwarf'):
exit_with_error('gforce_dwarf was a temporary option and is no longer necessary (use -g)')
elif requested_level.startswith('separate-dwarf'):
# emit full DWARF but also emit it in a file on the side
newargs[i] = '-g'
# if a file is provided, use that; otherwise use the default location
# (note that we do not know the default location until all args have
# been parsed, so just note True for now).
if requested_level != 'separate-dwarf':
if not requested_level.startswith('separate-dwarf=') or requested_level.count('=') != 1:
exit_with_error('invalid -gseparate-dwarf=FILENAME notation')
shared.Settings.SEPARATE_DWARF = requested_level.split('=')[1]
else:
shared.Settings.SEPARATE_DWARF = True
# a non-integer level can be something like -gline-tables-only. keep
# the flag for the clang frontend to emit the appropriate DWARF info.
# set the emscripten debug level to 3 so that we do not remove that
# debug info during link (during compile, this does not make a
# difference).
shared.Settings.DEBUG_LEVEL = 3
elif check_flag('-profiling') or check_flag('--profiling'):
shared.Settings.DEBUG_LEVEL = max(shared.Settings.DEBUG_LEVEL, 2)
options.profiling = True
elif check_flag('-profiling-funcs') or check_flag('--profiling-funcs'):
options.profiling_funcs = True
elif newargs[i] == '--tracing' or newargs[i] == '--memoryprofiler':
if newargs[i] == '--memoryprofiler':
options.memory_profiler = True
options.tracing = True
newargs[i] = ''
settings_changes.append("EMSCRIPTEN_TRACING=1")
shared.Settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'library_trace.js')))
elif check_flag('--emit-symbol-map'):
options.emit_symbol_map = True
shared.Settings.EMIT_SYMBOL_MAP = 1
elif check_flag('--bind'):
shared.Settings.EMBIND = 1
shared.Settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'embind', 'emval.js')))
shared.Settings.SYSTEM_JS_LIBRARIES.append((0, shared.path_from_root('src', 'embind', 'embind.js')))
elif check_arg('--embed-file'):
options.embed_files.append(consume_arg())
elif check_arg('--preload-file'):
options.preload_files.append(consume_arg())
elif check_arg('--exclude-file'):
options.exclude_files.append(consume_arg())
elif check_flag('--use-preload-cache'):
options.use_preload_cache = True
elif check_flag('--no-heap-copy'):
diagnostics.warning('legacy-settings', 'ignoring legacy flag --no-heap-copy (that is the only mode supported now)')
elif check_flag('--use-preload-plugins'):
options.use_preload_plugins = True
elif check_flag('--ignore-dynamic-linking'):
options.ignore_dynamic_linking = True
elif arg == '-v':
shared.PRINT_STAGES = True
elif check_arg('--shell-file'):
options.shell_path = consume_arg_file()
elif check_arg('--source-map-base'):
options.source_map_base = consume_arg()
elif check_flag('--no-entry'):
options.no_entry = True
elif check_arg('--js-library'):
shared.Settings.SYSTEM_JS_LIBRARIES.append((i + 1, os.path.abspath(consume_arg_file())))
elif check_flag('--remove-duplicates'):
diagnostics.warning('legacy-settings', '--remove-duplicates is deprecated as it is no longer needed. If you cannot link without it, file a bug with a testcase')
elif check_flag('--jcache'):
logger.error('jcache is no longer supported')
elif check_flag('--clear-cache'):
logger.info('clearing cache as requested by --clear-cache')
shared.Cache.erase()
shared.check_sanity(force=True) # this is a good time for a sanity check
should_exit = True
elif check_flag('--clear-ports'):
logger.info('clearing ports and cache as requested by --clear-ports')
system_libs.Ports.erase()
shared.Cache.erase()
shared.check_sanity(force=True) # this is a good time for a sanity check
should_exit = True
elif check_flag('--check'):
print(version_string(), file=sys.stderr)
shared.check_sanity(force=True)
should_exit = True
elif check_flag('--show-ports'):
system_libs.show_ports()
should_exit = True
elif check_arg('--memory-init-file'):
options.memory_init_file = int(consume_arg())
elif check_flag('--proxy-to-worker'):
options.proxy_to_worker = True
elif check_arg('--valid-abspath'):
options.valid_abspaths.append(consume_arg())
elif check_flag('--separate-asm'):
exit_with_error('cannot --separate-asm with the wasm backend, since not emitting asm.js')
elif arg.startswith(('-I', '-L')):
path_name = arg[2:]
if os.path.isabs(path_name) and not is_valid_abspath(options, path_name):
# Of course an absolute path to a non-system-specific library or header
# is fine, and you can ignore this warning. The danger is system headers
# that are e.g. x86-specific and nonportable. The emscripten-bundled
# headers are modified to be portable; local system ones generally are not.
diagnostics.warning(
'absolute-paths', '-I or -L of an absolute path "' + arg +
'" encountered. If this is to a local system header/library, it may '
'cause problems (local system files make sense for compiling natively '
'on your system, but not necessarily to JavaScript).')
elif check_flag('--emrun'):
options.emrun = True
elif check_flag('--cpuprofiler'):
options.cpu_profiler = True
elif check_flag('--threadprofiler'):
options.thread_profiler = True
settings_changes.append('PTHREADS_PROFILING=1')
elif arg == '-fno-exceptions':
shared.Settings.DISABLE_EXCEPTION_CATCHING = 1
shared.Settings.DISABLE_EXCEPTION_THROWING = 1
shared.Settings.EXCEPTION_HANDLING = 0
elif arg == '-fexceptions':
eh_enabled = True
elif arg == '-fwasm-exceptions':
wasm_eh_enabled = True
elif arg == '-fignore-exceptions':
shared.Settings.DISABLE_EXCEPTION_CATCHING = 1
elif check_arg('--default-obj-ext'):
options.default_object_extension = consume_arg()
if not options.default_object_extension.startswith('.'):
options.default_object_extension = '.' + options.default_object_extension
elif arg == '-fsanitize=cfi':
options.cfi = True
elif check_arg('--output_eol'):
style = consume_arg()
if style.lower() == 'windows':
options.output_eol = '\r\n'
elif style.lower() == 'linux':
options.output_eol = '\n'
else:
exit_with_error('Invalid value "' + style + '" to --output_eol!')
elif check_arg('--generate-config'):
optarg = consume_arg()
path = os.path.expanduser(optarg)
if os.path.exists(path):
exit_with_error('File ' + optarg + ' passed to --generate-config already exists!')
else:
config.generate_config(optarg)
should_exit = True
# Record USE_PTHREADS setting because it controls whether --shared-memory is passed to lld
elif arg == '-pthread':
settings_changes.append('USE_PTHREADS=1')
elif arg in ('-fno-diagnostics-color', '-fdiagnostics-color=never'):
colored_logger.disable()
diagnostics.color_enabled = False
elif arg == '-fno-rtti':
shared.Settings.USE_RTTI = 0
elif arg == '-frtti':
shared.Settings.USE_RTTI = 1
elif arg.startswith('-jsD'):
key = arg[4:]
if '=' in key:
key, value = key.split('=')
else:
value = '1'
if key in shared.Settings.attrs:
exit_with_error(arg + ': cannot change built-in settings values with a -jsD directive. Pass -s ' + key + '=' + value + ' instead!')
user_js_defines += [(key, value)]
newargs[i] = ''
elif check_flag('-shared'):
options.shared = True
elif check_flag('-r'):
options.relocatable = True
elif check_arg('-o'):
options.output_file = consume_arg()
elif arg.startswith('-o'):
options.output_file = arg[2:]
newargs[i] = ''
elif arg == '-mllvm':
# Ignore the next argument rather than trying to parse it. This is needed
# because llvm args could, for example, start with `-o` and we don't want
# to confuse that with a normal `-o` flag.
skip = True
if should_exit:
sys.exit(0)
# TODO Currently -fexceptions only means Emscripten EH. Switch -fexceptions to
# mean wasm exception handling by default once wasm exception handling becomes
# stable.
if wasm_eh_enabled:
shared.Settings.EXCEPTION_HANDLING = 1
shared.Settings.DISABLE_EXCEPTION_THROWING = 1
shared.Settings.DISABLE_EXCEPTION_CATCHING = 1
elif eh_enabled:
shared.Settings.EXCEPTION_HANDLING = 0
shared.Settings.DISABLE_EXCEPTION_THROWING = 0
shared.Settings.DISABLE_EXCEPTION_CATCHING = 0
newargs = [a for a in newargs if a]
return options, settings_changes, user_js_defines, newargs
def emit_js_source_maps(target, js_transform_tempfiles):
logger.debug('generating source maps')
shared.run_js_tool(shared.path_from_root('tools', 'source-maps', 'sourcemapper.js'),
js_transform_tempfiles +
['--sourceRoot', os.getcwd(),
'--mapFileBaseName', target,
'--offset', '0'])
def do_binaryen(target, options, wasm_target):
global final_js
logger.debug('using binaryen')
if shared.Settings.GENERATE_SOURCE_MAP and not shared.Settings.SOURCE_MAP_BASE:
logger.warning("Wasm source map won't be usable in a browser without --source-map-base")
# whether we need to emit -g (function name debug info) in the final wasm
debug_info = shared.Settings.DEBUG_LEVEL >= 2 or options.profiling_funcs
# whether we need to emit -g in the intermediate binaryen invocations (but not necessarily at the very end).
# this is necessary for emitting a symbol map at the end.
intermediate_debug_info = bool(debug_info or options.emit_symbol_map or shared.Settings.ASYNCIFY_ONLY or shared.Settings.ASYNCIFY_REMOVE or shared.Settings.ASYNCIFY_ADD)
# note that wasm-ld can strip DWARF info for us too (--strip-debug), but it
# also strips the Names section. so to emit just the Names section we don't
# tell wasm-ld to strip anything, and we do it here.
strip_debug = shared.Settings.DEBUG_LEVEL < 3
strip_producers = not shared.Settings.EMIT_PRODUCERS_SECTION
# run wasm-opt if we have work for it: either passes, or if we are using
# source maps (which requires some extra processing to keep the source map
# but remove DWARF)
passes = get_binaryen_passes()
if passes or shared.Settings.GENERATE_SOURCE_MAP:
# if we need to strip certain sections, and we have wasm-opt passes
# to run anyhow, do it with them.
if strip_debug:
passes += ['--strip-debug']
if strip_producers:
passes += ['--strip-producers']
building.save_intermediate(wasm_target, 'pre-byn.wasm')
building.run_wasm_opt(wasm_target,
wasm_target,
args=passes,
debug=intermediate_debug_info)
else:
# we are not running wasm-opt. if we need to strip certain sections
# then do so using llvm-objcopy which is fast and does not rewrite the
# code (which is better for debug info)
if strip_debug or strip_producers:
building.save_intermediate(wasm_target, 'pre-strip.wasm')
building.strip(wasm_target, wasm_target, debug=strip_debug, producers=strip_producers)
if shared.Settings.EVAL_CTORS:
building.save_intermediate(wasm_target, 'pre-ctors.wasm')
building.eval_ctors(final_js, wasm_target, debug_info=intermediate_debug_info)
# after generating the wasm, do some final operations
if shared.Settings.SIDE_MODULE:
webassembly.add_dylink_section(wasm_target, shared.Settings.RUNTIME_LINKED_LIBS)
if shared.Settings.EMIT_EMSCRIPTEN_METADATA:
diagnostics.warning('deprecated', 'We hope to remove support for EMIT_EMSCRIPTEN_METADATA. See https://github.com/emscripten-core/emscripten/issues/12231')
webassembly.add_emscripten_metadata(wasm_target)
if final_js:
# pthreads memory growth requires some additional JS fixups
if shared.Settings.USE_PTHREADS and shared.Settings.ALLOW_MEMORY_GROWTH:
final_js = building.apply_wasm_memory_growth(final_js)
# >=2GB heap support requires pointers in JS to be unsigned. rather than
# require all pointers to be unsigned by default, which increases code size
# a little, keep them signed, and just unsign them here if we need that.
if shared.Settings.CAN_ADDRESS_2GB:
final_js = building.use_unsigned_pointers_in_js(final_js)
if shared.Settings.USE_ASAN:
final_js = building.instrument_js_for_asan(final_js)
if shared.Settings.SAFE_HEAP:
final_js = building.instrument_js_for_safe_heap(final_js)
if shared.Settings.OPT_LEVEL >= 2 and shared.Settings.DEBUG_LEVEL <= 2:
# minify the JS. Do not minify whitespace if Closure is used, so that
# Closure can print out readable error messages (Closure will then
# minify whitespace afterwards)
save_intermediate_with_wasm('preclean', wasm_target)
final_js = building.minify_wasm_js(js_file=final_js,
wasm_file=wasm_target,
expensive_optimizations=will_metadce(),
minify_whitespace=minify_whitespace() and not options.use_closure_compiler,
debug_info=intermediate_debug_info)
save_intermediate_with_wasm('postclean', wasm_target)
if shared.Settings.ASYNCIFY_LAZY_LOAD_CODE:
building.asyncify_lazy_load_code(wasm_target, debug=intermediate_debug_info)
def preprocess_wasm2js_script():
return read_and_preprocess(shared.path_from_root('src', 'wasm2js.js'), expand_macros=True)
def run_closure_compiler():
global final_js
final_js = building.closure_compiler(final_js, pretty=not minify_whitespace(),
extra_closure_args=options.closure_args)
save_intermediate_with_wasm('closure', wasm_target)
if final_js and options.use_closure_compiler:
run_closure_compiler()
symbols_file = shared.replace_or_append_suffix(target, '.symbols') if options.emit_symbol_map else None
if shared.Settings.WASM2JS:
if shared.Settings.WASM == 2:
wasm2js_template = wasm_target + '.js'
open(wasm2js_template, 'w').write(preprocess_wasm2js_script())
else:
wasm2js_template = final_js
wasm2js = building.wasm2js(wasm2js_template,
wasm_target,
opt_level=shared.Settings.OPT_LEVEL,
minify_whitespace=minify_whitespace(),
use_closure_compiler=options.use_closure_compiler,
debug_info=debug_info,
symbols_file=symbols_file)
if shared.Settings.WASM == 2:
safe_copy(wasm2js, wasm2js_template)
shared.try_delete(wasm2js)
if shared.Settings.WASM != 2:
final_js = wasm2js
# if we only target JS, we don't need the wasm any more
shared.try_delete(wasm_target)
save_intermediate('wasm2js')
# emit the final symbols, either in the binary or in a symbol map.
# this will also remove debug info if we only kept it around in the intermediate invocations.
# note that if we aren't emitting a binary (like in wasm2js) then we don't
# have anything to do here.
if options.emit_symbol_map and os.path.exists(wasm_target):
building.handle_final_wasm_symbols(wasm_file=wasm_target, symbols_file=symbols_file, debug_info=debug_info)
save_intermediate_with_wasm('symbolmap', wasm_target)
if shared.Settings.DEBUG_LEVEL >= 3 and shared.Settings.SEPARATE_DWARF and os.path.exists(wasm_target):
building.emit_debug_on_side(wasm_target, shared.Settings.SEPARATE_DWARF)
if shared.Settings.WASM2C:
wasm2c.do_wasm2c(wasm_target)
# replace placeholder strings with correct subresource locations
if final_js and shared.Settings.SINGLE_FILE and not shared.Settings.WASM2JS:
js = open(final_js).read()
if shared.Settings.MINIMAL_RUNTIME:
js = do_replace(js, '<<< WASM_BINARY_DATA >>>', base64_encode(open(wasm_target, 'rb').read()))
else:
js = do_replace(js, '<<< WASM_BINARY_FILE >>>', shared.JS.get_subresource_location(wasm_target))
shared.try_delete(wasm_target)
with open(final_js, 'w') as f:
f.write(js)
def modularize():
global final_js
logger.debug('Modularizing, assigning to var ' + shared.Settings.EXPORT_NAME)
src = open(final_js).read()
return_value = shared.Settings.EXPORT_NAME
if shared.Settings.WASM_ASYNC_COMPILATION:
return_value += '.ready'
if not shared.Settings.EXPORT_READY_PROMISE:
return_value = '{}'
src = '''
function(%(EXPORT_NAME)s) {
%(EXPORT_NAME)s = %(EXPORT_NAME)s || {};
%(src)s
return %(return_value)s
}
''' % {
'EXPORT_NAME': shared.Settings.EXPORT_NAME,
'src': src,
'return_value': return_value
}
if shared.Settings.MINIMAL_RUNTIME and not shared.Settings.USE_PTHREADS:
# Single threaded MINIMAL_RUNTIME programs do not need access to
# document.currentScript, so a simple export declaration is enough.
src = 'var %s=%s' % (shared.Settings.EXPORT_NAME, src)
else:
script_url_node = ""
# When MODULARIZE is used this JS may be executed later,
# after document.currentScript is gone, so we save it.
# In EXPORT_ES6 + USE_PTHREADS the 'thread' is actually an ES6 module web worker running in strict mode,
# so it doesn't have access to 'document'. In that case use 'import.meta' instead.
if shared.Settings.EXPORT_ES6 and shared.Settings.USE_ES6_IMPORT_META:
script_url = "import.meta.url"
else:
script_url = "typeof document !== 'undefined' && document.currentScript ? document.currentScript.src : undefined"
if shared.Settings.target_environment_may_be('node'):
script_url_node = "if (typeof __filename !== 'undefined') _scriptDir = _scriptDir || __filename;"
src = '''
var %(EXPORT_NAME)s = (function() {
var _scriptDir = %(script_url)s;
%(script_url_node)s
return (%(src)s);
})();
''' % {
'EXPORT_NAME': shared.Settings.EXPORT_NAME,
'script_url': script_url,
'script_url_node': script_url_node,
'src': src
}
final_js += '.modular.js'
with open(final_js, 'w') as f:
f.write(src)
# Export using a UMD style export, or ES6 exports if selected
if shared.Settings.EXPORT_ES6:
f.write('export default %s;' % shared.Settings.EXPORT_NAME)
elif not shared.Settings.MINIMAL_RUNTIME:
f.write('''\
if (typeof exports === 'object' && typeof module === 'object')
module.exports = %(EXPORT_NAME)s;
else if (typeof define === 'function' && define['amd'])
define([], function() { return %(EXPORT_NAME)s; });
else if (typeof exports === 'object')
exports["%(EXPORT_NAME)s"] = %(EXPORT_NAME)s;
''' % {'EXPORT_NAME': shared.Settings.EXPORT_NAME})
save_intermediate('modularized')
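# Illustrative usage of the MODULARIZE output (names here are examples, not
# emitted by this code): with `-s MODULARIZE=1 -s EXPORT_NAME=createModule`
# the generated JS assigns a factory function to createModule, and with async
# wasm compilation calling it returns a promise, so a consumer typically does
# something like:
#   createModule().then(function(instance) { /* instance is the ready Module */ });
# The UMD footer written above additionally exposes the same factory via
# module.exports for Node.js-style require().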
def module_export_name_substitution():
global final_js
logger.debug('Private module export name substitution with ' + shared.Settings.EXPORT_NAME)
src = open(final_js).read()
final_js += '.module_export_name_substitution.js'
if shared.Settings.MINIMAL_RUNTIME:
# In MINIMAL_RUNTIME the Module object is always present to provide the .asm.js/.wasm content
replacement = shared.Settings.EXPORT_NAME
else:
replacement = "typeof %(EXPORT_NAME)s !== 'undefined' ? %(EXPORT_NAME)s : {}" % {"EXPORT_NAME": shared.Settings.EXPORT_NAME}
with open(final_js, 'w') as f:
src = re.sub(r'{\s*[\'"]?__EMSCRIPTEN_PRIVATE_MODULE_EXPORT_NAME_SUBSTITUTION__[\'"]?:\s*1\s*}', replacement, src)
# For Node.js and other shell environments, create an unminified Module object so that
# loading an external .asm.js file that assigns to Module['asm'] works even when Closure is used.
if shared.Settings.MINIMAL_RUNTIME and (shared.Settings.target_environment_may_be('node') or shared.Settings.target_environment_may_be('shell')):
src = 'if(typeof Module==="undefined"){var Module={};}\n' + src
f.write(src)
save_intermediate('module_export_name_substitution')
def generate_traditional_runtime_html(target, options, js_target, target_basename,
wasm_target, memfile):
script = ScriptSource()
shell = read_and_preprocess(options.shell_path)
assert '{{{ SCRIPT }}}' in shell, 'HTML shell must contain {{{ SCRIPT }}} , see src/shell.html for an example'
base_js_target = os.path.basename(js_target)
if options.proxy_to_worker:
proxy_worker_filename = (shared.Settings.PROXY_TO_WORKER_FILENAME or target_basename) + '.js'
worker_js = worker_js_script(proxy_worker_filename)
script.inline = ('''
var filename = '%s';
if ((',' + window.location.search.substr(1) + ',').indexOf(',noProxy,') < 0) {
console.log('running code in a web worker');
''' % shared.JS.get_subresource_location(proxy_worker_filename)) + worker_js + '''
} else {
console.log('running code on the main thread');
var fileBytes = tryParseAsDataURI(filename);
var script = document.createElement('script');
if (fileBytes) {
script.innerHTML = intArrayToString(fileBytes);
} else {
script.src = filename;
}
document.body.appendChild(script);
}
'''
else:
# Normal code generation path
script.src = base_js_target
if not shared.Settings.SINGLE_FILE:
if memfile and not shared.Settings.MINIMAL_RUNTIME:
# start to load the memory init file in the HTML, in parallel with the JS
script.un_src()
script.inline = ('''
var memoryInitializer = '%s';
memoryInitializer = Module['locateFile'] ? Module['locateFile'](memoryInitializer, '') : memoryInitializer;
Module['memoryInitializerRequestURL'] = memoryInitializer;
var meminitXHR = Module['memoryInitializerRequest'] = new XMLHttpRequest();
meminitXHR.open('GET', memoryInitializer, true);
meminitXHR.responseType = 'arraybuffer';
meminitXHR.send(null);
''' % shared.JS.get_subresource_location(memfile)) + script.inline
if not shared.Settings.WASM_ASYNC_COMPILATION:
# We need to load the wasm file before anything else, it has to be synchronously ready TODO: optimize
script.un_src()
script.inline = '''
var wasmURL = '%s';
var wasmXHR = new XMLHttpRequest();
wasmXHR.open('GET', wasmURL, true);
wasmXHR.responseType = 'arraybuffer';
wasmXHR.onload = function() {
if (wasmXHR.status === 200 || wasmXHR.status === 0) {
Module.wasmBinary = wasmXHR.response;
} else {
var wasmURLBytes = tryParseAsDataURI(wasmURL);
if (wasmURLBytes) {
Module.wasmBinary = wasmURLBytes.buffer;
}
}
%s
};
wasmXHR.send(null);
''' % (shared.JS.get_subresource_location(wasm_target), script.inline)
if shared.Settings.WASM == 2:
# If target browser does not support WebAssembly, we need to load the .wasm.js file before the main .js file.
script.un_src()
script.inline = '''
function loadMainJs() {
%s
}
if (!window.WebAssembly || location.search.indexOf('_rwasm=0') > 0) {
// Current browser does not support WebAssembly, load the .wasm.js JavaScript fallback
// before the main JS runtime.
var wasm2js = document.createElement('script');
wasm2js.src = '%s';
wasm2js.onload = loadMainJs;
document.body.appendChild(wasm2js);
} else {
// Current browser supports Wasm, proceed with loading the main JS runtime.
loadMainJs();
}
''' % (script.inline, shared.JS.get_subresource_location(wasm_target) + '.js')
# when script.inline isn't empty, add required helper functions such as tryParseAsDataURI
if script.inline:
for filename in ('arrayUtils.js', 'base64Utils.js', 'URIUtils.js'):
content = read_and_preprocess(shared.path_from_root('src', filename))
script.inline = content + script.inline
script.inline = 'var ASSERTIONS = %s;\n%s' % (shared.Settings.ASSERTIONS, script.inline)
# inline script for SINGLE_FILE output
if shared.Settings.SINGLE_FILE:
js_contents = script.inline or ''
if script.src:
js_contents += open(js_target).read()
shared.try_delete(js_target)
script.src = None
script.inline = js_contents
html_contents = do_replace(shell, '{{{ SCRIPT }}}', script.replacement())
html_contents = tools.line_endings.convert_line_endings(html_contents, '\n', options.output_eol)
with open(target, 'wb') as f:
f.write(asbytes(html_contents))
def minify_html(filename):
if shared.Settings.DEBUG_LEVEL >= 2:
return
opts = []
# -g1 and greater retain whitespace and comments in source
if shared.Settings.DEBUG_LEVEL == 0:
opts += ['--collapse-whitespace',
'--collapse-inline-tag-whitespace',
'--remove-comments',
'--remove-tag-whitespace',
'--sort-attributes',
'--sort-class-name']
# -g2 and greater do not minify HTML at all
if shared.Settings.DEBUG_LEVEL <= 1:
opts += ['--decode-entities',
'--collapse-boolean-attributes',
'--remove-attribute-quotes',
'--remove-redundant-attributes',
'--remove-script-type-attributes',
'--remove-style-link-type-attributes',
'--use-short-doctype',
'--minify-css', 'true',
'--minify-js', 'true']
# html-minifier also has the following options, but they look unsafe for use:
# '--remove-optional-tags': removes e.g. <head></head> and <body></body> tags from the page.
# (Breaks at least browser.test_sdl2glshader)
# '--remove-empty-attributes': removes all attributes with whitespace-only values.
# (Breaks at least browser.test_asmfs_hello_file)
# '--remove-empty-elements': removes all elements with empty contents.
# (Breaks at least browser.test_asm_swapping)
logger.debug('minifying HTML file ' + filename)
size_before = os.path.getsize(filename)
start_time = time.time()
shared.check_call(shared.get_npm_cmd('html-minifier-terser') + [filename, '-o', filename] + opts, env=shared.env_with_node_in_path())
elapsed_time = time.time() - start_time
size_after = os.path.getsize(filename)
delta = size_after - size_before
logger.debug('HTML minification took {:.2f}'.format(elapsed_time) + ' seconds, and shrank the size of ' + filename + ' from ' + str(size_before) + ' to ' + str(size_after) + ' bytes, delta=' + str(delta) + ' ({:+.2f}%)'.format(delta * 100.0 / size_before))
def generate_html(target, options, js_target, target_basename,
wasm_target, memfile):
logger.debug('generating HTML')
if shared.Settings.EXPORT_NAME != 'Module' and \
not shared.Settings.MINIMAL_RUNTIME and \
options.shell_path == shared.path_from_root('src', 'shell.html'):
# the minimal runtime shell HTML is designed to support changing the export
# name, but the normal one does not support that currently
exit_with_error('Customizing EXPORT_NAME requires that the HTML be customized to use that name (see https://github.com/emscripten-core/emscripten/issues/10086)')
if shared.Settings.MINIMAL_RUNTIME:
generate_minimal_runtime_html(target, options, js_target, target_basename)
else:
generate_traditional_runtime_html(target, options, js_target, target_basename,
wasm_target, memfile)
if shared.Settings.MINIFY_HTML and (shared.Settings.OPT_LEVEL >= 1 or shared.Settings.SHRINK_LEVEL >= 1):
minify_html(target)
def generate_worker_js(target, js_target, target_basename):
# compiler output is embedded as base64
if shared.Settings.SINGLE_FILE:
proxy_worker_filename = shared.JS.get_subresource_location(js_target)
# compiler output goes in .worker.js file
else:
safe_move(js_target, unsuffixed(js_target) + '.worker.js')
worker_target_basename = target_basename + '.worker'
proxy_worker_filename = (shared.Settings.PROXY_TO_WORKER_FILENAME or worker_target_basename) + '.js'
target_contents = worker_js_script(proxy_worker_filename)
open(target, 'w').write(target_contents)
def worker_js_script(proxy_worker_filename):
web_gl_client_src = open(shared.path_from_root('src', 'webGLClient.js')).read()
idb_store_src = open(shared.path_from_root('src', 'IDBStore.js')).read()
proxy_client_src = open(shared.path_from_root('src', 'proxyClient.js')).read()
proxy_client_src = do_replace(proxy_client_src, '{{{ filename }}}', proxy_worker_filename)
proxy_client_src = do_replace(proxy_client_src, '{{{ IDBStore.js }}}', idb_store_src)
return web_gl_client_src + '\n' + proxy_client_src
def process_libraries(libs, lib_dirs, temp_files):
libraries = []
consumed = []
suffixes = list(STATICLIB_ENDINGS + DYNAMICLIB_ENDINGS)
# Find library files
for i, lib in libs:
logger.debug('looking for library "%s"', lib)
found = False
for prefix in LIB_PREFIXES:
for suff in suffixes:
name = prefix + lib + suff
for lib_dir in lib_dirs:
path = os.path.join(lib_dir, name)
if os.path.exists(path):
logger.debug('found library "%s" at %s', lib, path)
temp_files.append((i, path))
consumed.append(i)
found = True
break
if found:
break
if found:
break
if not found:
jslibs = building.map_to_js_libs(lib)
if jslibs is not None:
libraries += [(i, jslib) for jslib in jslibs]
consumed.append(i)
elif building.map_and_apply_to_settings(lib):
consumed.append(i)
shared.Settings.SYSTEM_JS_LIBRARIES += libraries
# At this point processing SYSTEM_JS_LIBRARIES is finished, no more items will be added to it.
# Sort the input list from (order, lib_name) pairs to a flat array in the right order.
shared.Settings.SYSTEM_JS_LIBRARIES.sort(key=lambda lib: lib[0])
shared.Settings.SYSTEM_JS_LIBRARIES = [lib[1] for lib in shared.Settings.SYSTEM_JS_LIBRARIES]
return consumed
class ScriptSource(object):
def __init__(self):
self.src = None # if set, we have a script to load with a src attribute
self.inline = None # if set, we have the contents of a script to write inline in a script
def un_src(self):
"""Use this if you want to modify the script and need it to be inline."""
if self.src is None:
return
self.inline = '''
var script = document.createElement('script');
script.src = "%s";
document.body.appendChild(script);
''' % self.src
self.src = None
def replacement(self):
"""Returns the script tag to replace the {{{ SCRIPT }}} tag in the target"""
assert (self.src or self.inline) and not (self.src and self.inline)
if self.src:
return '<script async type="text/javascript" src="%s"></script>' % quote(self.src)
else:
return '<script>\n%s\n</script>' % self.inline
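# A minimal illustration (hypothetical values, not part of emcc itself) of how
# ScriptSource is used: set either .src or .inline, call un_src() when the script
# must be written inline, and splice replacement() into the shell HTML in place of
# the {{{ SCRIPT }}} tag.
#   script = ScriptSource()
#   script.src = 'out.js'
#   script.replacement()  # -> '<script async type="text/javascript" src="out.js"></script>'
#   script.un_src()       # moves the loader into script.inline; script.src becomes None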
def is_valid_abspath(options, path_name):
# Any path that is underneath the emscripten repository root must be ok.
if shared.path_from_root().replace('\\', '/') in path_name.replace('\\', '/'):
return True
def in_directory(root, child):
# make both path absolute
root = os.path.realpath(root)
child = os.path.realpath(child)
# return true, if the common prefix of both is equal to directory
# e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b
return os.path.commonprefix([root, child]) == root
for valid_abspath in options.valid_abspaths:
if in_directory(valid_abspath, path_name):
return True
return False
def parse_value(text):
if not text:
return text
# Note that using response files can introduce whitespace, if the file
# has a newline at the end. For that reason, we rstrip() in relevant
# places here.
def parse_string_value(text):
first = text[0]
if first == "'" or first == '"':
text = text.rstrip()
assert text[-1] == text[0] and len(text) > 1, 'unclosed opened quoted string. expected final character to be "%s" and length to be greater than 1 in "%s"' % (text[0], text)
return text[1:-1]
return text
def parse_string_list_members(text):
sep = ','
values = text.split(sep)
result = []
index = 0
while True:
current = values[index].lstrip() # Cannot safely rstrip for cases like: "HERE-> ,"
if not len(current):
exit_with_error('string array should not contain an empty value')
first = current[0]
if not(first == "'" or first == '"'):
result.append(current.rstrip())
else:
start = index
while True: # Continue until closing quote found
if index >= len(values):
exit_with_error("unclosed quoted string. expected final character to be '%s' in '%s'" % (first, values[start]))
new = values[index].rstrip()
if new and new[-1] == first:
if start == index:
result.append(current.rstrip()[1:-1])
else:
result.append((current + sep + new)[1:-1])
break
else:
current += sep + values[index]
index += 1
index += 1
if index >= len(values):
break
return result
def parse_string_list(text):
text = text.rstrip()
if text[-1] != ']':
exit_with_error('unclosed opened string list. expected final character to be "]" in "%s"' % (text))
inner = text[1:-1]
if inner.strip() == "":
return []
return parse_string_list_members(inner)
if text[0] == '[':
# if json parsing fails, we fall back to our own parser, which can handle a few
# simpler syntaxes
try:
return json.loads(text)
except ValueError:
return parse_string_list(text)
try:
return int(text)
except ValueError:
return parse_string_value(text)
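# A few illustrative inputs (hypothetical, for documentation only) showing what
# parse_value produces for -s style settings:
#   parse_value('3')            -> 3
#   parse_value('"hello"')      -> 'hello'
#   parse_value('["a", "b"]')   -> ['a', 'b']  (tried as JSON first, falling back to
#                                               the simpler parser above on ValueError)
#   parse_value('MAIN_MODULE')  -> 'MAIN_MODULE'  (left as a plain string)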
def validate_arg_level(level_string, max_level, err_msg, clamp=False):
try:
level = int(level_string)
except ValueError:
raise Exception(err_msg)
if clamp:
if level > max_level:
logger.warning("optimization level '-O" + level_string + "' is not supported; using '-O" + str(max_level) + "' instead")
level = max_level
if not 0 <= level <= max_level:
raise Exception(err_msg)
return level
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def main(args):
start_time = time.time()
ret = run(args)
logger.debug('total time: %.2f seconds', (time.time() - start_time))
return ret
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
| 42.21571
| 379
| 0.689131
|
77bc5a300cbe4ae8a95e8478cac759ce85b445bf
| 11,501
|
py
|
Python
|
python/tvm/relay/backend/contrib/ethosu/te/convolution.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 1
|
2022-01-29T21:05:36.000Z
|
2022-01-29T21:05:36.000Z
|
python/tvm/relay/backend/contrib/ethosu/te/convolution.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/backend/contrib/ethosu/te/convolution.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 1
|
2022-03-02T16:24:54.000Z
|
2022-03-02T16:24:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expressions for convolutions for the NPU"""
from typing import Tuple, Union, List
import numpy as np # type: ignore
from tvm import te # type: ignore
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import dma_ofm_compute, dma_ifm_compute
def conv2d_compute(
ifm: te.Tensor,
weight: te.Tensor,
scale_bias: te.Tensor,
lut: te.Tensor,
ifm_scale: float,
ifm_zero_point: int,
weight_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
strides: Tuple[int, int],
padding: Tuple[int, int, int, int],
dilation: Union[Tuple[int, int], List[int]],
activation: str,
clip_min: int,
clip_max: int,
rounding_mode: str,
upscale: str,
ifm_layout: str,
ofm_layout: str,
) -> te.Tensor:
"""A compute operator representing the capabilities of a 2D convolution for the NPU.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
weight : te.Tensor
The weight tensor.
scale_bias : te.Tensor
The packed per-channel weight scale and bias tensor.
lut : te.Tensor
The look-up table of values to use if activation = "LUT".
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
weight_zero_point : int
The quantization zero point for the weight tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
strides : tuple
The 2 dimensional strides as (stride_height, stride_width).
padding : tuple
The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
dilation : Union[Tuple[int, int], List[int]]
The 2 dimensional dilation as (dilation_height, dilation_width).
activation : str
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
clip_min : int
The minimum clipping value if activation = "CLIP".
clip_max : int
The maximum clipping value if activation = "CLIP".
rounding_mode : str
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
upscale : str
The 2x2 upscaling mode to apply to the Input Feature Map tensor.
"NONE" - no upscaling.
"NEAREST" - upscale using nearest neighbour.
"ZEROS" - upscale using zeros.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
ifm_layout : str
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_layout : str
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
Returns
-------
te.Tensor
The OFM tensor.
"""
assert ifm.shape[0] == 1
assert ifm_layout in {"NHWC", "NHCWB16"}
assert ofm_layout in {"NHWC", "NHCWB16"}
padding = [int(v) for v in padding]
stride_h, stride_w = [int(v) for v in strides]
dilation_h, dilation_w = [int(v) for v in dilation]
ofm_channels, kernel_h, kernel_w, ifm_channels = [int(v) for v in weight.shape]
upscale_factor = 2 if upscale != "NONE" else 1
# Compute operation for the IFM DMA pipeline
dmaed_ifm = dma_ifm_compute(
ifm,
ifm_layout,
ifm_zero_point,
ifm_scale,
weight.shape[3],
padding,
upscale_factor,
)
# 2D Convolution compute operation
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
ofm_height = (dmaed_ifm.shape[1] - dilated_kernel_h) // stride_h + 1
ofm_width = (dmaed_ifm.shape[2] - dilated_kernel_w) // stride_w + 1
rc = te.reduce_axis((0, ifm_channels), name="rc")
rh = te.reduce_axis((0, kernel_h), name="ry")
rw = te.reduce_axis((0, kernel_w), name="rx")
conv2d_attrs = {
"op": "ethosu_conv2d",
"weight_zero_point": weight_zero_point,
"activation": activation,
"upscale": upscale,
"clip_min": clip_min,
"clip_max": clip_max,
"rounding_mode": rounding_mode,
"stride_h": stride_h,
"stride_w": stride_w,
"dilation_h": dilation_h,
"dilation_w": dilation_w,
}
has_lut = activation in ("TANH", "LUT", "SIGMOID")
# This is a trick to insert the LUT tensor into the TE graph if LUT is present
lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if has_lut else 0
# Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT
if has_lut:
conv2d_attrs["lut"] = lut
conv = te.compute(
(1, ofm_height, ofm_width, ofm_channels),
lambda nn, hh, ww, cc: te.sum(
dmaed_ifm(
nn, hh * stride_h + rh * dilation_h, ww * stride_w + rw * dilation_w, rc
).astype(ifm.dtype)
* weight[cc, rh, rw, rc].astype(ifm.dtype)
# This is a trick to load 10 elements of the scale_bias at once, not accurate maths
+ (scale_bias[cc, 0] * scale_bias[cc, 9] + lut_expr).astype(ifm.dtype),
axis=[rh, rw, rc],
),
name="ethosu_conv2d",
attrs=conv2d_attrs,
)
nhwc_to_nhcwb16 = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1 / 16, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 16],
[0, 0, 0, 0, 1],
]
nhcwb16_to_nhwc = [
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 16, 0, 1, -16],
[0, 0, 0, 0, 0, 1],
]
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],
[0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],
[0, 0, 0, 0, ifm_channels],
[0, 0, 0, 0, 1],
]
weights_matrix = [
[0, 0, 0, 1, 0],
[0, 0, 0, 0, kernel_h],
[0, 0, 0, 0, kernel_w],
[0, 0, 0, 0, ifm_channels],
[0, 0, 0, 0, 1],
]
bias_matrix = [
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 10],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
weights_matrix = np.matmul(weights_matrix, nhcwb16_to_nhwc).tolist()
bias_matrix = np.matmul(bias_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
ifm_propagator = Propagator(
ifm_matrix,
[0, -padding[0], -padding[1], 0]
if ifm_layout == "NHWC"
else [0, -padding[0], 0, -padding[1], 0],
)
weights_propagator = Propagator(
weights_matrix,
[0, 0, 0, 0],
)
bias_propagator = Propagator(
bias_matrix,
[0, 0],
)
propagator_attrs = {
"ifm_propagator": ifm_propagator,
"weights_propagator": weights_propagator,
"bias_propagator": bias_propagator,
}
# Compute operation for the OFM DMA pipeline
dma_ofm = dma_ofm_compute(
conv, ofm_layout, ofm_zero_point, ofm_scale, ofm_channels, attrs=propagator_attrs
)
return dma_ofm
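# A minimal usage sketch (shapes, dtypes and quantization parameters below are
# hypothetical, not taken from this module; the compute is normally wired up by the
# NPU lowering passes rather than called directly):
#   ifm = te.placeholder((1, 8, 8, 3), dtype="int8", name="ifm")
#   weight = te.placeholder((16, 3, 3, 3), dtype="int8", name="weight")
#   scale_bias = te.placeholder((16, 10), dtype="uint8", name="scale_bias")
#   lut = te.placeholder((256,), dtype="int8", name="lut")
#   ofm = conv2d_compute(ifm, weight, scale_bias, lut, 1.0, 0, 0, 1.0, 0,
#                        strides=(1, 1), padding=(0, 0, 0, 0), dilation=(1, 1),
#                        activation="NONE", clip_min=0, clip_max=0,
#                        rounding_mode="TFL", upscale="NONE",
#                        ifm_layout="NHWC", ofm_layout="NHWC")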
@register_matcher
def match_ethosu_conv2d(output_tensor, device_config):
"""Match a Tensor Expression corresponding to an NPU Conv2D.
If the Tensor Expression matches, an EthosuPart will be created that models the
matched Tensor Expression. Otherwise, None will be returned.
Parameters
----------
output_tensor : tvm.te.Tensor
The tensor to attempt to match with.
device_config : EthosuDeviceConfig
Target device configuration
Returns
-------
Union[None, EthosuPart]
The created EthosuPart if there was a match, otherwise None.
"""
write = output_tensor
if write.op.name != "ethosu_write":
return None
convert_to_nhcwb16 = write.op.input_tensors[0]
if convert_to_nhcwb16.op.name != "ethosu_convert_to_nhcwb16":
return None
conv2d = convert_to_nhcwb16.op.input_tensors[0]
if conv2d.op.name != "ethosu_conv2d":
return None
pad = conv2d.op.input_tensors[0]
if pad.op.name != "ethosu_pad":
return None
upscale = pad.op.input_tensors[0]
if upscale.op.name != "ethosu_upscale":
return None
convert_to_nhwc = upscale.op.input_tensors[0]
if convert_to_nhwc.op.name != "ethosu_convert_to_nhwc":
return None
read = convert_to_nhwc.op.input_tensors[0]
if read.op.name != "ethosu_read":
return None
input_tensors = [
read.op.input_tensors[0],
conv2d.op.input_tensors[1],
conv2d.op.input_tensors[2],
]
subgraph = TESubgraph(input_tensors, output_tensor)
propagators = [
write.op.attrs["ifm_propagator"],
write.op.attrs["weights_propagator"],
write.op.attrs["bias_propagator"],
]
ifm_dtype = input_tensors[0].dtype
ofm_dtype = output_tensor.dtype
ifm_channels = int(input_tensors[0].shape[3])
ofm_channels, kernel_height, kernel_width = (int(axis) for axis in input_tensors[1].shape[0:3])
kernel_elements = kernel_height * kernel_width
is_part_kernel = device_config.is_partkernel(
conv2d.op.name, ifm_channels, ifm_dtype, kernel_elements
)
subkernels = len(
device_config.get_kernel_steps(
conv2d.op.name, kernel_height, kernel_width, ifm_dtype, is_part_kernel
)
)
output_layout = convert_to_nhcwb16.op.attrs["layout"]
input_layout = convert_to_nhwc.op.attrs["layout"]
output_quantum = device_config.get_output_quantum(output_layout)
valid_block_configs = device_config.get_valid_block_configs(
propagators[0],
conv2d.op.attrs,
output_tensor.shape,
ofm_channels,
ifm_channels,
output_layout,
input_layout,
ifm_dtype,
ofm_dtype,
kernel_height,
kernel_width,
)
return EthosuPart(
subgraph,
propagators,
output_quantum,
subkernels,
valid_block_configs,
1,
)
| 33.727273
| 99
| 0.628554
|
3e43d40eff0e879910697be7edf94938f33f1711
| 15,919
|
py
|
Python
|
poem/core/data_utils_test.py
|
wondercha/google-research
|
1c3d958e8f99aad52d48a0665bc5e8446ad87d8d
|
[
"Apache-2.0"
] | 3
|
2021-01-18T04:46:49.000Z
|
2021-03-05T09:21:40.000Z
|
poem/core/data_utils_test.py
|
Alfaxad/google-research
|
2c0043ecd507e75e2df9973a3015daf9253e1467
|
[
"Apache-2.0"
] | 7
|
2021-11-10T19:44:38.000Z
|
2022-02-10T06:48:39.000Z
|
poem/core/data_utils_test.py
|
Alfaxad/google-research
|
2c0043ecd507e75e2df9973a3015daf9253e1467
|
[
"Apache-2.0"
] | 4
|
2021-02-08T10:25:45.000Z
|
2021-04-17T14:46:26.000Z
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for data utility functions."""
import numpy as np
import tensorflow as tf
from poem.core import data_utils
class DataUtilsTest(tf.test.TestCase):
def test_flatten_last_dims(self):
# Shape = [2, 3, 4].
x = tf.constant([[[1, 2, 3, 4], [11, 12, 13, 14], [21, 22, 23, 24]],
[[31, 32, 33, 34], [41, 42, 43, 44], [51, 52, 53, 54]]])
flattened_x = data_utils.flatten_last_dims(x, num_last_dims=2)
self.assertAllEqual(flattened_x,
[[1, 2, 3, 4, 11, 12, 13, 14, 21, 22, 23, 24],
[31, 32, 33, 34, 41, 42, 43, 44, 51, 52, 53, 54]])
def test_flatten_first_dims(self):
# Shape = [1, 2, 3, 4, 1].
x = tf.constant([[[[[1], [2], [3], [4]], [[11], [12], [13], [14]],
[[21], [22], [23], [24]]],
[[[31], [32], [33], [34]], [[41], [42], [43], [44]],
[[51], [52], [53], [54]]]]])
flattened_x = data_utils.flatten_first_dims(x, num_last_dims_to_keep=2)
self.assertAllEqual(flattened_x,
[[[1], [2], [3], [4]], [[11], [12], [13], [14]],
[[21], [22], [23], [24]], [[31], [32], [33], [34]],
[[41], [42], [43], [44]], [[51], [52], [53], [54]]])
def test_tile_first_dims(self):
# Shape = [1, 2, 1].
x = tf.constant([[[1], [2]]])
tiled_x = data_utils.tile_first_dims(x, first_dim_multiples=[2, 2])
self.assertAllEqual(tiled_x, [[[1], [2], [1], [2]], [[1], [2], [1], [2]]])
def test_tile_last_dims(self):
# Shape = [2, 1, 2, 1].
x = tf.constant([[[[1], [2]]], [[[3], [4]]]])
tiled_x = data_utils.tile_last_dims(x, last_dim_multiples=[2, 2])
self.assertAllEqual(tiled_x, [[[[1, 1], [2, 2], [1, 1], [2, 2]]],
[[[3, 3], [4, 4], [3, 3], [4, 4]]]])
def test_recursively_expand_dims(self):
# Shape = [2, 3].
x = tf.constant([[1, 2, 3], [4, 5, 6]])
# Shape = [2, 1, 3, 1]
expanded_x = data_utils.recursively_expand_dims(x, axes=[-1, 1])
self.assertAllEqual(expanded_x, [[[[1], [2], [3]]], [[[4], [5], [6]]]])
def test_reshape_by_last_dims(self):
# Shape = [2, 4, 1].
x = tf.constant([[[1], [2], [3], [4]], [[5], [6], [7], [8]]])
# Shape = [2, 2, 2]
reshaped_x = data_utils.reshape_by_last_dims(x, last_dim_shape=[2, 2])
self.assertAllEqual(reshaped_x, [[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
def test_reduce_mean(self):
# Shape = [2, 3, 2].
tensor = tf.constant([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[7.0, 8.0], [9.0, 10.0], [11.0, 12.0]]])
# Shape = [2, 3, 1].
weights = tf.constant([[[1.0], [0.0], [1.0]], [[0.0], [1.0], [0.0]]])
# Shape = [2, 1, 2].
means = data_utils.reduce_weighted_mean(
tensor, weights, axis=-2, keepdims=True)
self.assertAllClose(means, [[[3.0, 4.0]], [[9.0, 10.0]]])
def test_sample_gaussians(self):
means = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
stddevs = tf.constant([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
samples = data_utils.sample_gaussians(
means, stddevs, num_samples=10, seed=1)
self.assertAllClose(
samples,
[[[0.9188682, 2.2969198, 3.0195987], [0.75572956, 2.0198498, 3.1773672],
[1.0592823, 1.5754141, 2.783131], [0.99437296, 2.1287088, 2.9207027],
[1.1856633, 2.1135683, 2.8851492], [0.85146564, 2.2523541, 2.9924083],
[0.973537, 2.3065627, 2.4771068], [0.95621073, 1.886798, 3.0962007],
[1.1132832, 1.5443486, 3.1448436], [0.8687291, 2.0713701, 2.480915]],
[[3.983933, 5.449831, 5.1716466], [4.592585, 4.8772526, 5.5604115],
[3.9216413, 5.035854, 6.3797884], [3.3715236, 5.6646905, 5.2959795],
[4.012618, 5.2385263, 6.262165], [3.8732765, 4.774625, 4.9163604],
[4.0499597, 4.6146727, 5.552255], [3.8872187, 4.020592, 5.7974334],
[4.4120793, 5.756701, 6.1350946], [3.8857353, 5.134413, 7.0477266]]])
def test_compute_lower_percentile_means(self):
# Shape = [2, 3, 3].
x = tf.constant([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
[[11.0, 12.0, 13.0], [14.0, 15.0, 16.0],
[17.0, 18.0, 19.0]]])
lower_half = data_utils.compute_lower_percentile_means(x, axis=[-2, -1])
self.assertAllClose(lower_half, [3.0, 13.0])
def test_mix_batch_singletons(self):
# Shape = [8, 1, 2].
lhs_batch = tf.constant([
[[[1.0, 1.01]]],
[[[1.1, 1.11]]],
[[[3.0, 3.01]]],
[[[3.1, 3.11]]],
[[[5.0, 5.01]]],
[[[5.1, 5.11]]],
[[[7.0, 7.01]]],
[[[7.1, 7.11]]],
])
rhs_batch = tf.constant([
[[[11.0, 11.01]]],
[[[11.1, 11.11]]],
[[[13.0, 13.01]]],
[[[13.1, 13.11]]],
[[[15.0, 15.01]]],
[[[15.1, 15.11]]],
[[[17.0, 17.01]]],
[[[17.1, 17.11]]],
])
# Shape = [8, 1].
assignment = tf.constant([[True], [True], [False], [False], [True], [True],
[False], [False]])
mixed_batch = data_utils.mix_batch([lhs_batch], [rhs_batch],
axis=1,
assignment=assignment)[0]
self.assertAllEqual(
mixed_batch,
np.array([
[[[1.0, 1.01]]],
[[[1.1, 1.11]]],
[[[13.0, 13.01]]],
[[[13.1, 13.11]]],
[[[5.0, 5.01]]],
[[[5.1, 5.11]]],
[[[17.0, 17.01]]],
[[[17.1, 17.11]]],
],
dtype=np.float32))
def test_mix_batch_pairs(self):
# Shape = [8, 2, 2].
lhs_batch = tf.constant([
[[[1.0, 1.01]], [[2.0, 2.01]]],
[[[1.1, 1.11]], [[2.1, 2.11]]],
[[[3.0, 3.01]], [[4.0, 4.01]]],
[[[3.1, 3.11]], [[4.1, 4.11]]],
[[[5.0, 5.01]], [[6.0, 6.01]]],
[[[5.1, 5.11]], [[6.1, 6.11]]],
[[[7.0, 7.01]], [[8.0, 8.01]]],
[[[7.1, 7.11]], [[8.1, 8.11]]],
])
rhs_batch = tf.constant([
[[[11.0, 11.01]], [[12.0, 12.01]]],
[[[11.1, 11.11]], [[12.1, 12.11]]],
[[[13.0, 13.01]], [[14.0, 14.01]]],
[[[13.1, 13.11]], [[14.1, 14.11]]],
[[[15.0, 15.01]], [[16.0, 16.01]]],
[[[15.1, 15.11]], [[16.1, 16.11]]],
[[[17.0, 17.01]], [[18.0, 18.01]]],
[[[17.1, 17.11]], [[18.1, 18.11]]],
])
# Shape = [8, 2].
assignment = tf.constant([[True, True], [True, True], [False, False],
[False, False], [True, False], [True, False],
[False, True], [False, True]])
mixed_batch = data_utils.mix_batch([lhs_batch], [rhs_batch],
axis=1,
assignment=assignment)[0]
self.assertAllEqual(
mixed_batch,
np.array([
[[[1.0, 1.01]], [[2.0, 2.01]]],
[[[1.1, 1.11]], [[2.1, 2.11]]],
[[[13.0, 13.01]], [[14.0, 14.01]]],
[[[13.1, 13.11]], [[14.1, 14.11]]],
[[[5.0, 5.01]], [[16.0, 16.01]]],
[[[5.1, 5.11]], [[16.1, 16.11]]],
[[[17.0, 17.01]], [[8.0, 8.01]]],
[[[17.1, 17.11]], [[8.1, 8.11]]],
],
dtype=np.float32))
def test_mix_batch_pair_lists(self):
lhs_batches, rhs_batches = [None, None], [None, None]
# Shape = [4, 3, 2, 1].
lhs_batches[0] = tf.constant([
[[[1.0], [1.1]], [[1.2], [1.3]], [[1.4], [1.5]]],
[[[2.0], [2.1]], [[2.2], [2.3]], [[2.4], [2.5]]],
[[[3.0], [3.1]], [[3.2], [3.3]], [[3.4], [3.5]]],
[[[4.0], [4.1]], [[4.2], [4.3]], [[4.4], [4.5]]],
])
rhs_batches[0] = tf.constant([
[[[11.0], [11.1]], [[11.2], [11.3]], [[11.4], [11.5]]],
[[[12.0], [12.1]], [[12.2], [12.3]], [[12.4], [12.5]]],
[[[13.0], [13.1]], [[13.2], [13.3]], [[13.4], [13.5]]],
[[[14.0], [14.1]], [[14.2], [14.3]], [[14.4], [14.5]]],
])
# Shape = [4, 3, 2, 2, 1].
lhs_batches[1] = tf.constant([[[[[1.0], [10.0]], [[1.1], [10.1]]],
[[[1.2], [10.2]], [[1.3], [10.3]]],
[[[1.4], [10.4]], [[1.5], [10.5]]]],
[[[[2.0], [20.0]], [[2.1], [20.1]]],
[[[2.2], [20.2]], [[2.3], [20.3]]],
[[[2.4], [20.4]], [[2.5], [20.5]]]],
[[[[3.0], [30.0]], [[3.1], [30.1]]],
[[[3.2], [30.2]], [[3.3], [30.3]]],
[[[3.4], [30.4]], [[3.5], [30.5]]]],
[[[[4.0], [40.0]], [[4.1], [40.1]]],
[[[4.2], [40.2]], [[4.3], [40.3]]],
[[[4.4], [40.4]], [[4.5], [40.5]]]]])
rhs_batches[1] = tf.constant([[[[[11.0], [110.0]], [[11.1], [110.1]]],
[[[11.2], [110.2]], [[11.3], [110.3]]],
[[[11.4], [110.4]], [[11.5], [110.5]]]],
[[[[12.0], [120.0]], [[12.1], [120.1]]],
[[[12.2], [120.2]], [[12.3], [120.3]]],
[[[12.4], [120.4]], [[12.5], [120.5]]]],
[[[[13.0], [130.0]], [[13.1], [130.1]]],
[[[13.2], [130.2]], [[13.3], [130.3]]],
[[[13.4], [130.4]], [[13.5], [130.5]]]],
[[[[14.0], [140.0]], [[14.1], [140.1]]],
[[[14.2], [140.2]], [[14.3], [140.3]]],
[[[14.4], [140.4]], [[14.5], [140.5]]]]])
# Shape = [4, 1, 2].
assignment = tf.constant([[[True, True]], [[True, False]], [[False, True]],
[[False, False]]])
mixed_batches = data_utils.mix_batch(
lhs_batches, rhs_batches, axis=2, assignment=assignment)
self.assertLen(mixed_batches, 2)
self.assertAllEqual(
mixed_batches[0],
# Shape = [4, 3, 2, 1].
np.array([[[[1.0], [1.1]], [[1.2], [1.3]], [[1.4], [1.5]]],
[[[2.0], [12.1]], [[2.2], [12.3]], [[2.4], [12.5]]],
[[[13.0], [3.1]], [[13.2], [3.3]], [[13.4], [3.5]]],
[[[14.0], [14.1]], [[14.2], [14.3]], [[14.4], [14.5]]]],
dtype=np.float32))
self.assertAllEqual(
mixed_batches[1],
# Shape = [4, 3, 2, 2, 1].
np.array([[[[[1.0], [10.0]], [[1.1], [10.1]]],
[[[1.2], [10.2]], [[1.3], [10.3]]],
[[[1.4], [10.4]], [[1.5], [10.5]]]],
[[[[2.0], [20.0]], [[12.1], [120.1]]],
[[[2.2], [20.2]], [[12.3], [120.3]]],
[[[2.4], [20.4]], [[12.5], [120.5]]]],
[[[[13.0], [130.0]], [[3.1], [30.1]]],
[[[13.2], [130.2]], [[3.3], [30.3]]],
[[[13.4], [130.4]], [[3.5], [30.5]]]],
[[[[14.0], [140.0]], [[14.1], [140.1]]],
[[[14.2], [140.2]], [[14.3], [140.3]]],
[[[14.4], [140.4]], [[14.5], [140.5]]]]],
dtype=np.float32))
def test_mix_batch_pairs_with_idle_dim(self):
# Shape = [4, 3, 2, 1].
lhs_batch = tf.constant([[[[1.0], [1.1]], [[1.2], [1.3]], [[1.4], [1.5]]],
[[[2.0], [2.1]], [[2.2], [2.3]], [[2.4], [2.5]]],
[[[3.0], [3.1]], [[3.2], [3.3]], [[3.4], [3.5]]],
[[[4.0], [4.1]], [[4.2], [4.3]], [[4.4], [4.5]]]])
rhs_batch = tf.constant([[[[11.0], [11.1]], [[11.2], [11.3]],
[[11.4], [11.5]]],
[[[12.0], [12.1]], [[12.2], [12.3]],
[[12.4], [12.5]]],
[[[13.0], [13.1]], [[13.2], [13.3]],
[[13.4], [13.5]]],
[[[14.0], [14.1]], [[14.2], [14.3]],
[[14.4], [14.5]]]])
# Shape = [4, 1, 2].
assignment = tf.constant([[[True, True]], [[True, False]], [[False, True]],
[[False, False]]])
mixed_batch = data_utils.mix_batch([lhs_batch], [rhs_batch],
axis=2,
assignment=assignment)[0]
self.assertAllEqual(
mixed_batch,
np.array([[[[1.0], [1.1]], [[1.2], [1.3]], [[1.4], [1.5]]],
[[[2.0], [12.1]], [[2.2], [12.3]], [[2.4], [12.5]]],
[[[13.0], [3.1]], [[13.2], [3.3]], [[13.4], [3.5]]],
[[[14.0], [14.1]], [[14.2], [14.3]], [[14.4], [14.5]]]],
dtype=np.float32))
def test_mix_batch_random(self):
# Shape = [8, 7, 6, 5, 4, 3, 2].
lhs_batch = tf.ones([8, 7, 6, 5, 4, 3, 2])
rhs_batch = tf.zeros([8, 7, 6, 5, 4, 3, 2])
mixed_batch = data_utils.mix_batch([lhs_batch], [rhs_batch], axis=3)[0]
# We only trivially test the shape to make sure the code runs.
self.assertAllEqual(mixed_batch.shape.as_list(), [8, 7, 6, 5, 4, 3, 2])
def test_shuffle_batches(self):
# Shape = [3, 2].
tensor_1 = tf.constant([[1, 2], [3, 4], [5, 6]])
tensor_2 = tf.constant([[11, 12], [13, 14], [15, 16]])
tensor_3 = tf.constant([[21, 22], [23, 24], [25, 26]])
shuffled_tensor_1, shuffled_tensor_2, shuffled_tensor_3 = (
data_utils.shuffle_batches([tensor_1, tensor_2, tensor_3]))
tensor_diff_21 = shuffled_tensor_2 - shuffled_tensor_1
tensor_diff_31 = shuffled_tensor_3 - shuffled_tensor_1
self.assertAllEqual(tensor_diff_21, [[10, 10], [10, 10], [10, 10]])
self.assertAllEqual(tensor_diff_31, [[20, 20], [20, 20], [20, 20]])
def test_update_sub_tensor(self):
# Shape = [3, 5, 2].
x = tf.constant([
[[0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]],
[[10.0, 11.0], [12.0, 13.0], [14.0, 15.0], [16.0, 17.0], [18.0, 19.0]],
[[20.0, 21.0], [22.0, 23.0], [24.0, 25.0], [26.0, 27.0], [28.0, 29.0]],
])
def update_func(sub_tensor):
# Shape = [3, 3, 2].
delta = tf.constant([[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]],
[[0.7, 0.8], [0.9, 1.0], [1.1, 1.2]],
[[1.3, 1.4], [1.5, 1.6], [1.7, 1.8]]])
return sub_tensor + delta
updated_x = data_utils.update_sub_tensor(
x, indices=[0, 2, 4], axis=-2, update_func=update_func)
self.assertAllClose(updated_x, [
[[0.1, 1.2], [2.0, 3.0], [4.3, 5.4], [6.0, 7.0], [8.5, 9.6]],
[[10.7, 11.8], [12.0, 13.0], [14.9, 16.0], [16.0, 17.0], [19.1, 20.2]],
[[21.3, 22.4], [22.0, 23.0], [25.5, 26.6], [26.0, 27.0], [29.7, 30.8]],
])
def test_merge_dict(self):
target_dict = {'a': 1, 'b': 2}
source_dict = {'c': 4}
data_utils.merge_dict(source_dict, target_dict)
self.assertDictEqual(target_dict, {'a': 1, 'b': 2, 'c': 4})
target_dict = {'a': 1, 'b': 2}
source_dict = {'b': 3, 'c': 4}
with self.assertRaisesRegexp(ValueError, 'Key conflict: `b`.'):
data_utils.merge_dict(source_dict, target_dict)
if __name__ == '__main__':
tf.test.main()
| 44.46648
| 80
| 0.410579
|
98ae178c95ca0c677f003d6f8bea1e64063a7edb
| 1,611
|
py
|
Python
|
zappa_sentry/__init__.py
|
jneves/zappa-sentry
|
d521c95de77c5be9a492c2c4e3df2a4be030195f
|
[
"MIT"
] | 27
|
2018-02-25T13:43:14.000Z
|
2022-03-09T23:39:12.000Z
|
zappa_sentry/__init__.py
|
jneves/zappa-sentry
|
d521c95de77c5be9a492c2c4e3df2a4be030195f
|
[
"MIT"
] | 8
|
2018-03-15T12:05:52.000Z
|
2021-02-02T22:16:43.000Z
|
zappa_sentry/__init__.py
|
jneves/zappa-sentry
|
d521c95de77c5be9a492c2c4e3df2a4be030195f
|
[
"MIT"
] | 3
|
2018-09-25T08:51:21.000Z
|
2020-05-19T14:33:03.000Z
|
# -*- coding: utf-8 -*-
from configparser import ConfigParser
import json
import os
import sentry_sdk
from sentry_sdk import capture_exception, configure_scope
def unhandled_exceptions(e, event, context):
"Exception handler reports exceptions to sentry but does not capture them."
sentry_config = ConfigParser(os.environ)
sentry_sdk.init(sentry_config.get('DEFAULT', 'SENTRY_DSN'))
with configure_scope() as scope:
try:
package_info_file = open('package_info.json', 'r')
package_info = json.load(package_info_file)
package_info_file.close()
for key, value in package_info.items():
scope.set_tag(key, value)
except OSError:
# not deployed, probably a test
pass
if 'httpMethod' in event:
scope.set_tag('http_method', event['httpMethod'])
scope.set_tag('path', event['path'])
if 'headers' in event:
if 'Host' in event['headers']:
scope.set_tag('host', event['headers']['Host'])
if 'User-Agent' in event['headers']:
scope.set_tag('user_agent', event['headers']['User-Agent'])
if 'requestContext' in event and 'stage' in event['requestContext']:
scope.set_tag('stage', event['requestContext']['stage'])
scope.set_extra('event', event)
capture_exception(e)
return False
def capture_exceptions(e, event, context):
"Exception handler that makes exceptions disappear after processing them."
unhandled_exceptions(e, event, context)
return True
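# A hypothetical zappa_settings.json snippet (stage name and DSN are placeholders)
# showing how one of these handlers is typically wired up; Zappa invokes the
# configured exception_handler with (exception, event, context):
# {
#   "dev": {
#     "exception_handler": "zappa_sentry.unhandled_exceptions",
#     "environment_variables": {"SENTRY_DSN": "https://<key>@sentry.io/<project>"}
#   }
# }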
| 31.588235
| 79
| 0.639354
|
95fceb09137691d3e856dabc97931276bde12f99
| 1,956
|
py
|
Python
|
blog/views.py
|
tawhidularefindcc/test-profilewebapp
|
834c3f76bccf46ad98db36cca09657c4fa25178f
|
[
"MIT"
] | null | null | null |
blog/views.py
|
tawhidularefindcc/test-profilewebapp
|
834c3f76bccf46ad98db36cca09657c4fa25178f
|
[
"MIT"
] | null | null | null |
blog/views.py
|
tawhidularefindcc/test-profilewebapp
|
834c3f76bccf46ad98db36cca09657c4fa25178f
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.models import User
from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import Post
def home(request):
context = {
'posts': Post.objects.all()
}
return render(request, 'blog/home.html', context)
class PostListView(ListView):
model = Post
template_name = 'blog/home.html'
context_object_name = 'posts'
ordering = ['-date_posted']
paginate_by = 5
class UserPostListView(ListView):
model = Post
template_name = 'blog/user_posts.html'
context_object_name = 'posts'
paginate_by = 5
def get_queryset(self):
user = get_object_or_404(User, username=self.kwargs.get('username'))
return Post.objects.filter(author=user).order_by('-date_posted')
class PostDetailView(DetailView):
model = Post
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['title', 'content']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Post
fields = ['title', 'content']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
def test_func(self):
post = self.get_object()
return self.request.user == post.author
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Post
success_url = '/'
def test_func(self):
post = self.get_object()
return self.request.user == post.author
def about(request):
return render(request, 'blog/about.html', {'title': 'About'})
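# A hypothetical urls.py wiring for these views (paths and route names are
# assumptions, not part of this file):
#   from django.urls import path
#   from .views import (PostListView, UserPostListView, PostDetailView,
#                       PostCreateView, PostUpdateView, PostDeleteView)
#   from . import views
#   urlpatterns = [
#       path('', PostListView.as_view(), name='blog-home'),
#       path('user/<str:username>/', UserPostListView.as_view(), name='user-posts'),
#       path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
#       path('post/new/', PostCreateView.as_view(), name='post-create'),
#       path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),
#       path('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),
#       path('about/', views.about, name='blog-about'),
#   ]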
| 27.942857
| 89
| 0.686605
|
1ca2f45739d13a0f8ac3388566800c18d28990c6
| 18,061
|
py
|
Python
|
tests/providers/google/cloud/operators/test_vision.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | 5
|
2020-07-17T07:33:58.000Z
|
2022-03-02T06:23:47.000Z
|
tests/providers/google/cloud/operators/test_vision.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | 7
|
2020-06-03T14:55:17.000Z
|
2021-12-30T00:01:50.000Z
|
tests/providers/google/cloud/operators/test_vision.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | 12
|
2020-01-09T14:02:39.000Z
|
2022-01-24T07:18:51.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mock
from google.api_core.exceptions import AlreadyExists
from google.cloud.vision_v1.types import Product, ProductSet, ReferenceImage
from airflow.providers.google.cloud.operators.vision import (
CloudVisionAddProductToProductSetOperator, CloudVisionCreateProductOperator,
CloudVisionCreateProductSetOperator, CloudVisionCreateReferenceImageOperator,
CloudVisionDeleteProductOperator, CloudVisionDeleteProductSetOperator,
CloudVisionDetectImageLabelsOperator, CloudVisionDetectImageSafeSearchOperator,
CloudVisionDetectTextOperator, CloudVisionGetProductOperator, CloudVisionGetProductSetOperator,
CloudVisionImageAnnotateOperator, CloudVisionRemoveProductFromProductSetOperator,
CloudVisionTextDetectOperator, CloudVisionUpdateProductOperator, CloudVisionUpdateProductSetOperator,
)
PRODUCTSET_TEST = ProductSet(display_name='Test Product Set')
PRODUCTSET_ID_TEST = 'my-productset'
PRODUCT_TEST = Product(display_name='My Product 1', product_category='toys')
PRODUCT_ID_TEST = 'my-product'
REFERENCE_IMAGE_TEST = ReferenceImage(uri='gs://bucket_name/file.txt')
REFERENCE_IMAGE_ID_TEST = 'my-reference-image'
ANNOTATE_REQUEST_TEST = {'image': {'source': {'image_uri': 'https://foo.com/image.jpg'}}}
ANNOTATE_REQUEST_BATCH_TEST = [
{'image': {'source': {'image_uri': 'https://foo.com/image1.jpg'}}},
{'image': {'source': {'image_uri': 'https://foo.com/image2.jpg'}}}
]
LOCATION_TEST = 'europe-west1'
GCP_CONN_ID = 'google_cloud_default'
DETECT_TEST_IMAGE = {"source": {"image_uri": "test_uri"}}
class TestCloudVisionProductSetCreate(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.create_product_set.return_value = {}
op = CloudVisionCreateProductSetOperator(
location=LOCATION_TEST, product_set=PRODUCTSET_TEST, task_id='id'
)
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_product_set.assert_called_once_with(
location=LOCATION_TEST,
product_set=PRODUCTSET_TEST,
product_set_id=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_already_exists(self, mock_hook):
mock_hook.return_value.create_product_set.side_effect = AlreadyExists(message='')
# Exception AlreadyExists not raised, caught in the operator's execute() - idempotence
op = CloudVisionCreateProductSetOperator(
location=LOCATION_TEST,
product_set=PRODUCTSET_TEST,
product_set_id=PRODUCTSET_ID_TEST,
project_id='mock-project-id',
task_id='id',
)
result = op.execute(None)
self.assertEqual(PRODUCTSET_ID_TEST, result)
class TestCloudVisionProductSetUpdate(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.update_product_set.return_value = {}
op = CloudVisionUpdateProductSetOperator(
location=LOCATION_TEST, product_set=PRODUCTSET_TEST, task_id='id'
)
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.update_product_set.assert_called_once_with(
location=LOCATION_TEST,
product_set=PRODUCTSET_TEST,
product_set_id=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
update_mask=None,
)
class TestCloudVisionProductSetGet(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.get_product_set.return_value = {}
op = CloudVisionGetProductSetOperator(
location=LOCATION_TEST, product_set_id=PRODUCTSET_ID_TEST, task_id='id'
)
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.get_product_set.assert_called_once_with(
location=LOCATION_TEST,
product_set_id=PRODUCTSET_ID_TEST,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudVisionProductSetDelete(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.delete_product_set.return_value = {}
op = CloudVisionDeleteProductSetOperator(
location=LOCATION_TEST, product_set_id=PRODUCTSET_ID_TEST, task_id='id'
)
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_product_set.assert_called_once_with(
location=LOCATION_TEST,
product_set_id=PRODUCTSET_ID_TEST,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudVisionProductCreate(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.create_product.return_value = {}
op = CloudVisionCreateProductOperator(location=LOCATION_TEST, product=PRODUCT_TEST, task_id='id')
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_product.assert_called_once_with(
location=LOCATION_TEST,
product=PRODUCT_TEST,
product_id=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_already_exists(self, mock_hook):
mock_hook.return_value.create_product.side_effect = AlreadyExists(message='')
# Exception AlreadyExists not raised, caught in the operator's execute() - idempotence
op = CloudVisionCreateProductOperator(
location=LOCATION_TEST,
product=PRODUCT_TEST,
product_id=PRODUCT_ID_TEST,
project_id='mock-project-id',
task_id='id',
)
result = op.execute(None)
self.assertEqual(PRODUCT_ID_TEST, result)
class TestCloudVisionProductGet(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.get_product.return_value = {}
op = CloudVisionGetProductOperator(location=LOCATION_TEST, product_id=PRODUCT_ID_TEST, task_id='id')
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.get_product.assert_called_once_with(
location=LOCATION_TEST,
product_id=PRODUCT_ID_TEST,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudVisionProductUpdate(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.update_product.return_value = {}
op = CloudVisionUpdateProductOperator(location=LOCATION_TEST, product=PRODUCT_TEST, task_id='id')
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.update_product.assert_called_once_with(
location=LOCATION_TEST,
product=PRODUCT_TEST,
product_id=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
update_mask=None,
)
class TestCloudVisionProductDelete(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.delete_product.return_value = {}
op = CloudVisionDeleteProductOperator(
location=LOCATION_TEST, product_id=PRODUCT_ID_TEST, task_id='id'
)
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.delete_product.assert_called_once_with(
location=LOCATION_TEST,
product_id=PRODUCT_ID_TEST,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudVisionReferenceImageCreate(unittest.TestCase):
@mock.patch(
'airflow.providers.google.cloud.operators.vision.CloudVisionHook',
)
def test_minimal_green_path(self, mock_hook):
mock_hook.return_value.create_reference_image.return_value = {}
op = CloudVisionCreateReferenceImageOperator(
location=LOCATION_TEST,
product_id=PRODUCT_ID_TEST,
reference_image=REFERENCE_IMAGE_TEST,
task_id='id',
)
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_reference_image.assert_called_once_with(
location=LOCATION_TEST,
product_id=PRODUCT_ID_TEST,
reference_image=REFERENCE_IMAGE_TEST,
reference_image_id=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.operators.vision.CloudVisionHook',
**{'return_value.create_reference_image.side_effect': AlreadyExists("MESSAGe")}
)
def test_already_exists(self, mock_hook):
# Exception AlreadyExists not raised, caught in the operator's execute() - idempotence
op = CloudVisionCreateReferenceImageOperator(
location=LOCATION_TEST,
product_id=PRODUCT_ID_TEST,
reference_image=REFERENCE_IMAGE_TEST,
task_id='id',
)
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.create_reference_image.assert_called_once_with(
location=LOCATION_TEST,
product_id=PRODUCT_ID_TEST,
reference_image=REFERENCE_IMAGE_TEST,
reference_image_id=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudVisionAddProductToProductSetOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path(self, mock_hook):
op = CloudVisionAddProductToProductSetOperator(
location=LOCATION_TEST,
product_set_id=PRODUCTSET_ID_TEST,
product_id=PRODUCT_ID_TEST,
task_id='id',
)
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.add_product_to_product_set.assert_called_once_with(
product_set_id=PRODUCTSET_ID_TEST,
product_id=PRODUCT_ID_TEST,
location=LOCATION_TEST,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudVisionRemoveProductFromProductSetOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path(self, mock_hook):
op = CloudVisionRemoveProductFromProductSetOperator(
location=LOCATION_TEST,
product_set_id=PRODUCTSET_ID_TEST,
product_id=PRODUCT_ID_TEST,
task_id='id',
)
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.remove_product_from_product_set.assert_called_once_with(
product_set_id=PRODUCTSET_ID_TEST,
product_id=PRODUCT_ID_TEST,
location=LOCATION_TEST,
project_id=None,
retry=None,
timeout=None,
metadata=None,
)
class TestCloudVisionAnnotateImageOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path_for_one_image(self, mock_hook):
op = CloudVisionImageAnnotateOperator(request=ANNOTATE_REQUEST_TEST, task_id='id')
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.annotate_image.assert_called_once_with(
request=ANNOTATE_REQUEST_TEST, retry=None, timeout=None
)
@mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
def test_minimal_green_path_for_batch(self, mock_hook):
op = CloudVisionImageAnnotateOperator(request=ANNOTATE_REQUEST_BATCH_TEST, task_id='id')
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.batch_annotate_images.assert_called_once_with(
requests=ANNOTATE_REQUEST_BATCH_TEST, retry=None, timeout=None
)
class TestCloudVisionDetectTextOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_minimal_green_path(self, mock_hook):
op = CloudVisionDetectTextOperator(image=DETECT_TEST_IMAGE, task_id="id")
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.text_detection.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, additional_properties=None
)
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_additional_params(self, mock_hook):
op = CloudVisionDetectTextOperator(
image=DETECT_TEST_IMAGE,
task_id="id",
language_hints="pl",
web_detection_params={'param': 'test'},
additional_properties={
'image_context': {
'additional_property_1': 'add_1'
},
'additional_property_2': 'add_2'
}
)
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.text_detection.assert_called_once_with(
image=DETECT_TEST_IMAGE,
max_results=None,
retry=None,
timeout=None,
additional_properties={
'additional_property_2': 'add_2',
'image_context': {
'language_hints': 'pl',
'additional_property_1': 'add_1',
'web_detection_params': {
'param': 'test'
}
}
}
)
class TestCloudVisionDetectDocumentTextOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_minimal_green_path(self, mock_hook):
op = CloudVisionTextDetectOperator(image=DETECT_TEST_IMAGE, task_id="id")
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.document_text_detection.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, additional_properties=None
)
class TestCloudVisionDetectImageLabelsOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_minimal_green_path(self, mock_hook):
op = CloudVisionDetectImageLabelsOperator(image=DETECT_TEST_IMAGE, task_id="id")
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.label_detection.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, additional_properties=None
)
class TestCloudVisionDetectImageSafeSearchOperator(unittest.TestCase):
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_minimal_green_path(self, mock_hook):
op = CloudVisionDetectImageSafeSearchOperator(image=DETECT_TEST_IMAGE, task_id="id")
op.execute(context=None)
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID)
mock_hook.return_value.safe_search_detection.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, additional_properties=None
)
| 42.798578
| 108
| 0.701678
|
7f748ede3247db2ff3481069e8c83c66c5fee907
| 699
|
py
|
Python
|
AoC20/day_18/a.py
|
a-recknagel/AoC20
|
7aa0013dc745bdc0ad357e1168b212bd065fd092
|
[
"MIT"
] | null | null | null |
AoC20/day_18/a.py
|
a-recknagel/AoC20
|
7aa0013dc745bdc0ad357e1168b212bd065fd092
|
[
"MIT"
] | null | null | null |
AoC20/day_18/a.py
|
a-recknagel/AoC20
|
7aa0013dc745bdc0ad357e1168b212bd065fd092
|
[
"MIT"
] | null | null | null |
import re
from AoC20.day_18 import data
class Int:
def __init__(self, val):
self.val = val
def __add__(self, other):
return Int(self.val + other.val)
def __sub__(self, other):
return Int(self.val * other.val)
def __repr__(self):
return str(self.val)
def solve(inp):
solutions = []
for line in inp:
# replace * with -, so that the precedence is now equal
line = line.replace("*", "-")
# turn all ints (e.g. 3, 5, 8) into fake ints (e.g. Int(3), Int(5), Int(8))
line = re.sub(r"(\d)", r"Int(\1)", line)
solutions.append(int(str(eval(line))))
return sum(solutions)
print(solve(data))
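# Worked example of the trick (hypothetical input line, not from the puzzle data):
#   "1 + 2 * 3"  ->  "1 + 2 - 3"  ->  "Int(1) + Int(2) - Int(3)"
# Int.__add__ adds and Int.__sub__ multiplies; because + and - share the same
# precedence and associate left to right, eval computes (1 + 2) * 3 = 9.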
| 21.84375
| 83
| 0.575107
|
5a04b48333b2ff589ea59c281ab212bf202ad72c
| 1,586
|
py
|
Python
|
.mywaflib/waflib/Tools/suncxx.py
|
nkuhlen/log-transform-kernel-density
|
377e9196b95cfdc2d53db50796a030eb5d0f019a
|
[
"BSD-3-Clause"
] | 2
|
2016-05-15T19:20:55.000Z
|
2016-07-04T18:38:20.000Z
|
.mywaflib/waflib/Tools/suncxx.py
|
michaelkilchenmann/Quantitative_Economic_History
|
c64b5ad877eb995629d4b31f8a8500e7565a953a
|
[
"BSD-3-Clause"
] | null | null | null |
.mywaflib/waflib/Tools/suncxx.py
|
michaelkilchenmann/Quantitative_Economic_History
|
c64b5ad877eb995629d4b31f8a8500e7565a953a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
# Ralf Habacker, 2006 (rh)
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_sxx(conf):
"""
Detect the sun C++ compiler
"""
v = conf.env
cc = conf.find_program(['CC', 'c++'], var='CXX')
try:
conf.cmd_and_log(cc + ['-flags'])
except Exception:
conf.fatal('%r is not a Sun compiler' % cc)
v.CXX_NAME = 'sun'
conf.get_suncc_version(cc)
@conf
def sxx_common_flags(conf):
"""
Flags required for executing the sun C++ compiler
"""
v = conf.env
v['CXX_SRC_F'] = []
v['CXX_TGT_F'] = ['-c', '-o']
# linker
if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX']
v['CXXLNK_SRC_F'] = []
v['CXXLNK_TGT_F'] = ['-o']
v['CPPPATH_ST'] = '-I%s'
v['DEFINES_ST'] = '-D%s'
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STLIB_ST'] = '-l%s'
v['STLIBPATH_ST'] = '-L%s'
v['SONAME_ST'] = '-Wl,-h,%s'
v['SHLIB_MARKER'] = '-Bdynamic'
v['STLIB_MARKER'] = '-Bstatic'
# program
v['cxxprogram_PATTERN'] = '%s'
# shared library
v['CXXFLAGS_cxxshlib'] = ['-xcode=pic32', '-DPIC']
v['LINKFLAGS_cxxshlib'] = ['-G']
v['cxxshlib_PATTERN'] = 'lib%s.so'
# static lib
v['LINKFLAGS_cxxstlib'] = ['-Bstatic']
v['cxxstlib_PATTERN'] = 'lib%s.a'
def configure(conf):
conf.find_sxx()
conf.find_ar()
conf.sxx_common_flags()
conf.cxx_load_tools()
conf.cxx_add_flags()
conf.link_add_flags()
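# A minimal wscript sketch (project layout assumed) for using this tool:
#   def configure(conf):
#       conf.load('suncxx')  # runs find_sxx, find_ar and sxx_common_flags above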
| 22.985507
| 65
| 0.579445
|
f255e348e1d0b5cf3085bdb50e3a6d31c933e75a
| 1,995
|
py
|
Python
|
type_utils.py
|
chao-ji/np-auto-diff
|
4b21a8895755072f8e0390ce4cc96b17308f8a90
|
[
"MIT"
] | 2
|
2021-03-07T09:43:38.000Z
|
2021-09-09T21:20:19.000Z
|
type_utils.py
|
chao-ji/reverse-mode-auto-differentiation
|
4b21a8895755072f8e0390ce4cc96b17308f8a90
|
[
"MIT"
] | null | null | null |
type_utils.py
|
chao-ji/reverse-mode-auto-differentiation
|
4b21a8895755072f8e0390ce4cc96b17308f8a90
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017 Chao Ji
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import numpy as np
def isint(i):
"""Returns if input is of integer type."""
return isinstance(i, (int, np.int8, np.int16, np.int32, np.int64))
def is_variable(v):
"""Returns if input is of variable type."""
return type(v).__name__ == 'Variable'
def to_numpy(val):
"""Converts input to a numpy array."""
if not isinstance(val, np.ndarray):
val = np.array(val).astype(np.float32)
if np.isnan(val).any():
raise ValueError('All values passed as input to `Constant` node must be '
'determined at graph construction time')
return val
def is_numeric(val):
"""Returns if input is of numeric type."""
return isinstance(val, (int, float,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64))
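# Example behaviour (for documentation only):
#   isint(3)            -> True
#   is_numeric(2.5)     -> True
#   to_numpy([1, 2, 3]) -> array([1., 2., 3.], dtype=float32)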
| 43.369565
| 80
| 0.690226
|
b279acf4f3f562dc142d1088a83f8f18c31529d9
| 1,874
|
py
|
Python
|
modules/tools/record_parse_save/parse_lidar.py
|
zhouyao4321/apollo
|
fe34840f78dc0e9c92850a805eede0ac2e512295
|
[
"Apache-2.0"
] | 1
|
2020-04-24T06:14:14.000Z
|
2020-04-24T06:14:14.000Z
|
modules/tools/record_parse_save/parse_lidar.py
|
Geonhee-LEE/apollo
|
3ceaec8843ffe0fb8e821089b2e4708eaf1fa455
|
[
"Apache-2.0"
] | 2
|
2018-09-10T03:13:39.000Z
|
2018-10-15T16:31:14.000Z
|
modules/tools/record_parse_save/parse_lidar.py
|
Geonhee-LEE/apollo
|
3ceaec8843ffe0fb8e821089b2e4708eaf1fa455
|
[
"Apache-2.0"
] | 3
|
2018-02-03T04:56:22.000Z
|
2018-09-14T10:50:53.000Z
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
function to parse lidar data from *.record files, created using Apollo-Auto
parsed data is saved to *.txt file, for each scan
current implementation for:
* Velodyne VLS-128 lidar
"""
import os
import sys
from cyber_py import cyber
from cyber_py import record
from modules.drivers.proto.pointcloud_pb2 import PointCloud
def parse_data(channelname, msg, out_folder):
"""
"""
msg_lidar = PointCloud()
msg_lidar.ParseFromString(msg)
nPts = len(msg_lidar.point)
pcd = []
for j in range(nPts):
p = msg_lidar.point[j]
pcd.append([p.x, p.y, p.z, p.intensity])
tstamp = msg_lidar.measurement_time
temp_time = str(tstamp).split('.')
if len(temp_time[1]) == 1:
temp_time1_adj = temp_time[1] + '0'
else:
temp_time1_adj = temp_time[1]
pcd_time = temp_time[0] + '_' + temp_time1_adj
pcd_filename = "pcd_" + pcd_time + ".txt"
with open(out_folder + pcd_filename, 'w') as outfile:
for item in pcd:
data = str(item)[1:-1]
outfile.write("%s\n" % data)
return tstamp
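# A hypothetical driver loop feeding parse_data (the cyber_py RecordReader API and
# the channel name check below are assumptions, not verified against this repo):
#   freader = record.RecordReader('example.record')
#   for channelname, msg, datatype, timestamp in freader.read_messages():
#       if 'PointCloud' in channelname:
#           parse_data(channelname, msg, './parsed_lidar/')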
| 28.393939
| 79
| 0.628068
|
8f99be59a22f5a53ab57c30a9600bd9a2b1fd246
| 1,443
|
py
|
Python
|
htmlclean.py
|
eyepod101/datagen
|
25b13058f641fca1892232b1d968fdd4dbf7d62f
|
[
"MIT"
] | null | null | null |
htmlclean.py
|
eyepod101/datagen
|
25b13058f641fca1892232b1d968fdd4dbf7d62f
|
[
"MIT"
] | null | null | null |
htmlclean.py
|
eyepod101/datagen
|
25b13058f641fca1892232b1d968fdd4dbf7d62f
|
[
"MIT"
] | null | null | null |
import re
import os
import shutil
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-id", "--inputdir", required=True, help="Input existing directory that contains all original html files.")
parser.add_argument("-od", "--outputdir", required=True, help="Output a new directory that will contain all cleaned html files.")
args = vars(parser.parse_args())
source = args["inputdir"]
destination = args["outputdir"]
def find_files(rawhtml, cleanedhtml):
if os.path.exists(cleanedhtml):
shutil.rmtree(cleanedhtml)
shutil.copytree(rawhtml, cleanedhtml)
find_files(source, destination)
def cleanup(filename1):
string = ""
fin = open(filename1, 'r')
for line in fin:
line = re.sub('(<)', '', line)
line = re.sub('(<*.doc.*>)', '', line)
line = re.sub('(".+")', '', line)
line = re.sub('(a\shref=)', '', line)
line = re.sub('(>,)', '>', line)
line = re.sub('(>)', '', line)
line = re.sub('(/a)', '', line)
line = re.sub('(&ndash;)', '', line)
string += line
fin.close()
return string
def write(filename2):
text = cleanup(filename2)
fout = open(filename2, "w")
for line in text:
if line != "":
fout.write(line)
fout.close()
for root, dirs, filenames in os.walk(destination):
for f in filenames:
write(os.path.join(root, f))
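# Example invocation (directory names are placeholders):
#   python htmlclean.py --inputdir raw_html --outputdir cleaned_html
# or with the short flags defined above:
#   python htmlclean.py -id raw_html -od cleaned_html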
| 26.236364
| 129
| 0.595981
|
6768ce035c3447bfad7ce3f4942296779ab33896
| 11,767
|
py
|
Python
|
release/stubs.min/System/Windows/Media/Animation_parts/CharAnimationBase.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Media/Animation_parts/CharAnimationBase.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Media/Animation_parts/CharAnimationBase.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class CharAnimationBase(AnimationTimeline, ISealable, IAnimatable, IResource):
""" Abstract class that,when implemented,animates a System.Char value. """
def AllocateClock(self, *args):
"""
AllocateClock(self: AnimationTimeline) -> Clock
Creates a System.Windows.Media.Animation.Clock for this
System.Windows.Media.Animation.AnimationTimeline.
Returns: A clock for this System.Windows.Media.Animation.AnimationTimeline.
"""
pass
def Clone(self):
"""
Clone(self: CharAnimationBase) -> CharAnimationBase
Creates a modifiable clone of this System.Windows.Media.Animation.CharAnimationBase,making deep
copies of this object's values. When copying dependency properties,this method copies resource
references and data bindings (but they might no longer resolve) but not animations or their
current values.
Returns: A modifiable clone of the current object. The cloned object's System.Windows.Freezable.IsFrozen
property will be false even if the source's System.Windows.Freezable.IsFrozen property was true.
"""
pass
def CloneCore(self, *args):
"""
CloneCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a clone (deep copy) of the specified System.Windows.Freezable using base
(non-animated) property values.
sourceFreezable: The object to clone.
"""
pass
def CloneCurrentValueCore(self, *args):
"""
CloneCurrentValueCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a modifiable clone (deep copy) of the specified System.Windows.Freezable
using current property values.
sourceFreezable: The System.Windows.Freezable to be cloned.
"""
pass
def CreateInstance(self, *args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self, *args):
"""
CreateInstanceCore(self: Freezable) -> Freezable
When implemented in a derived class,creates a new instance of the System.Windows.Freezable
derived class.
Returns: The new instance.
"""
pass
def FreezeCore(self, *args):
"""
FreezeCore(self: Timeline,isChecking: bool) -> bool
Makes this System.Windows.Media.Animation.Timeline unmodifiable or determines whether it can be
made unmodifiable.
isChecking: true to check if this instance can be frozen; false to freeze this instance.
Returns: If isChecking is true,this method returns true if this instance can be made read-only,or false
if it cannot be made read-only. If isChecking is false,this method returns true if this
instance is now read-only,or false if it cannot be made read-only,with the side effect of
having begun to change the frozen status of this object.
"""
pass
def GetAsFrozenCore(self, *args):
"""
GetAsFrozenCore(self: Timeline,sourceFreezable: Freezable)
Makes this instance a clone of the specified System.Windows.Media.Animation.Timeline object.
sourceFreezable: The System.Windows.Media.Animation.Timeline instance to clone.
"""
pass
def GetCurrentValue(
self, defaultOriginValue, defaultDestinationValue, animationClock
):
"""
GetCurrentValue(self: CharAnimationBase,defaultOriginValue: Char,defaultDestinationValue: Char,animationClock: AnimationClock) -> Char
Gets the current value of the animation.
defaultOriginValue: The origin value provided to the animation if the animation does not have its own start value.
defaultDestinationValue: The destination value provided to the animation if the animation does not have its own
destination value.
animationClock: The System.Windows.Media.Animation.AnimationClock which can generate the
System.Windows.Media.Animation.Clock.CurrentTime or
System.Windows.Media.Animation.Clock.CurrentProgress value to be used by the animation to
generate its output value.
Returns: The value this animation believes should be the current value for the property.
GetCurrentValue(self: CharAnimationBase,defaultOriginValue: object,defaultDestinationValue: object,animationClock: AnimationClock) -> object
Gets the current value of the animation.
defaultOriginValue: The origin value provided to the animation if the animation does not have its own start value.
defaultDestinationValue: The destination value provided to the animation if the animation does not have its own
destination value.
animationClock: The System.Windows.Media.Animation.AnimationClock which can generate the
System.Windows.Media.Animation.Clock.CurrentTime or
System.Windows.Media.Animation.Clock.CurrentProgress value to be used by the animation to
generate its output value.
Returns: The value this animation believes should be the current value for the property.
"""
pass
def GetCurrentValueAsFrozenCore(self, *args):
"""
GetCurrentValueAsFrozenCore(self: Timeline,sourceFreezable: Freezable)
Makes this instance a frozen clone of the specified System.Windows.Media.Animation.Timeline.
Resource references,data bindings,and animations are not copied,but their current values are.
sourceFreezable: The System.Windows.Media.Animation.Timeline to copy and freeze.
"""
pass
def GetCurrentValueCore(self, *args):
"""
GetCurrentValueCore(self: CharAnimationBase,defaultOriginValue: Char,defaultDestinationValue: Char,animationClock: AnimationClock) -> Char
Calculates a value that represents the current value of the property being animated,as
determined by the host animation.
defaultOriginValue: The suggested origin value,used if the animation does not have its own explicitly set start
value.
defaultDestinationValue: The suggested destination value,used if the animation does not have its own explicitly set end
value.
animationClock: An System.Windows.Media.Animation.AnimationClock that generates the
System.Windows.Media.Animation.Clock.CurrentTime or
System.Windows.Media.Animation.Clock.CurrentProgress used by the host animation.
Returns: The calculated value of the property,as determined by the current animation.
"""
pass
def GetNaturalDuration(self, *args):
"""
GetNaturalDuration(self: Timeline,clock: Clock) -> Duration
Returns the length of a single iteration of this System.Windows.Media.Animation.Timeline.
clock: The System.Windows.Media.Animation.Clock that was created for this
System.Windows.Media.Animation.Timeline.
Returns: The length of a single iteration of this System.Windows.Media.Animation.Timeline,or
System.Windows.Duration.Automatic if the natural duration is unknown.
"""
pass
def GetNaturalDurationCore(self, *args):
"""
GetNaturalDurationCore(self: AnimationTimeline,clock: Clock) -> Duration
Returns the length of a single iteration of this
System.Windows.Media.Animation.AnimationTimeline.
clock: The clock that was created for this System.Windows.Media.Animation.AnimationTimeline.
Returns: The animation's natural duration. This method always returns a System.Windows.Duration of 1
second.
"""
pass
def OnChanged(self, *args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self, *args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self, *args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
dependency property of type System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old and new values.
"""
pass
def ReadPreamble(self, *args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
System.Windows.Freezable must call this method at the beginning of any API that reads data
members that are not dependency properties.
"""
pass
def ShouldSerializeProperty(self, *args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize the value for
the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
"""
pass
def WritePostscript(self, *args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
should call this method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self, *args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
valid threading context. System.Windows.Freezable inheritors should call this method at the
beginning of any API that writes to data members that are not dependency properties.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
TargetPropertyType = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets the type of value this animation generates.
Get: TargetPropertyType(self: CharAnimationBase) -> Type
"""
| 26.988532
| 221
| 0.700263
|
ace4b74bfd2238fbfea109cd688bd811d3161e19
| 1,197
|
py
|
Python
|
setup.py
|
olxbr/kong-config-builder
|
69dc8040eca449aba4557d0d89e96e2bfdbd4721
|
[
"MIT"
] | 1
|
2020-06-16T03:10:42.000Z
|
2020-06-16T03:10:42.000Z
|
setup.py
|
olxbr/kong-config-builder
|
69dc8040eca449aba4557d0d89e96e2bfdbd4721
|
[
"MIT"
] | 2
|
2020-06-19T18:52:29.000Z
|
2020-08-03T19:48:03.000Z
|
setup.py
|
olxbr/kong-config-builder
|
69dc8040eca449aba4557d0d89e96e2bfdbd4721
|
[
"MIT"
] | 1
|
2021-04-09T20:51:56.000Z
|
2021-04-09T20:51:56.000Z
|
from setuptools import setup, find_packages
libs = ["aws"]
extras = {"all": []}
with open("requirements.txt") as reqs:
requirements = reqs.read().split("\n")
for lib in libs:
with open(f"requirements_{lib}.txt") as reqs:
extras[lib] = reqs.read().split("\n")
extras["all"] = extras["all"] + extras[lib]
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="kong_config_builder",
version="DYNAMIC",
description="Kong declarative configuration builder",
long_description=long_description,
long_description_content_type="text/markdown",
author="Olx",
license='MIT',
include_package_data=True,
url='https://github.com/olxbr/kong-config-builder/',
download_url='https://github.com/olxbr/kong-config-builder/archive/master.zip',
install_requires=requirements,
extras_require=extras,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries :: Application Frameworks"
],
packages=find_packages()
)
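# Installation sketch for the extras declared above (the distribution name is
# taken from setup(); the package index is assumed):
#   pip install kong_config_builder          # core requirements only
#   pip install "kong_config_builder[aws]"   # adds requirements_aws.txt
#   pip install "kong_config_builder[all]"   # every optional extra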
| 31.5
| 83
| 0.668338
|
75aeadb6cd9205bf2e334ca4ed5f71050d81f75e
| 191
|
py
|
Python
|
dblue_stats/version.py
|
dblueai/dblue-stats
|
6830602271f6992fe6e06e6e307e4caed676fc0e
|
[
"Apache-2.0"
] | 2
|
2020-06-07T07:59:38.000Z
|
2020-06-09T14:55:26.000Z
|
dblue_stats/version.py
|
dblueai/dblue-stats
|
6830602271f6992fe6e06e6e307e4caed676fc0e
|
[
"Apache-2.0"
] | 2
|
2020-06-05T12:04:30.000Z
|
2021-06-02T02:02:34.000Z
|
dblue_stats/version.py
|
dblueai/dblue-stats
|
6830602271f6992fe6e06e6e307e4caed676fc0e
|
[
"Apache-2.0"
] | null | null | null |
import os
from dblue_stats.config import PROJECT_ROOT
version_file = os.path.join(PROJECT_ROOT, "configs", "version.txt")
with open(version_file, 'r') as f:
VERSION = f.read().strip()
| 21.222222
| 67
| 0.727749
|
8437467a1f2ecdecd4ce27250ce27f17ab7bf241
| 1,363
|
py
|
Python
|
framework/utils.py
|
sensoraCloud/BanditsFramework
|
d6c0b577f87dd86a7ab4785a52fed4a7ac258c8e
|
[
"BSD-3-Clause"
] | 1
|
2019-12-01T15:26:06.000Z
|
2019-12-01T15:26:06.000Z
|
framework/utils.py
|
sensoraCloud/BanditsFramework
|
d6c0b577f87dd86a7ab4785a52fed4a7ac258c8e
|
[
"BSD-3-Clause"
] | null | null | null |
framework/utils.py
|
sensoraCloud/BanditsFramework
|
d6c0b577f87dd86a7ab4785a52fed4a7ac258c8e
|
[
"BSD-3-Clause"
] | null | null | null |
import hashlib
import random
import string
import numpy as np
from voucher_opt.logger import log
def print_header(heading):
log.info(heading)
log.info('=' * 50)
def print_footer():
log.info('=' * 50)
def random_str(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def string_seed(text):
return np.frombuffer(hashlib.sha1(text.encode()).digest(), dtype='uint32')
def get_feature_columns(df, non_feature_columns):
return list(df.columns.drop(list(set(non_feature_columns) & set(df.columns))))
def load_config(config, country):
try:
return dict(config['DEFAULT_CONFIG'], **config['COUNTRY_CONFIG'][country])
except KeyError:
return config['DEFAULT_CONFIG']
def validate_bounds(value_name, value, lower_bound=None, upper_bound=None):
if lower_bound is not None:
assert lower_bound <= value, f'The value of "{value_name}" is lower than {lower_bound}'
if upper_bound is not None:
assert value <= upper_bound, f'The value of "{value_name}" is higher than {upper_bound}'
def validate_type(variable_name, variable, expected_type):
actual_type = type(variable)
assert actual_type == expected_type, \
f'{variable_name} has the wrong type. Expected type: "{expected_type}". Actual type = "{actual_type}"'
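# Usage sketch for string_seed() above (the seed text is a placeholder): the
# SHA-1 digest is reinterpreted as five uint32 words, which numpy accepts as an
# array seed, so the same text always reproduces the same random stream.
#   rng = np.random.RandomState(string_seed("experiment-se"))
#   rng.random_sample(3)  # identical output on every run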
| 27.816327
| 110
| 0.711665
|
f1f9c173de17d184f359eaf57a0e1216777761e2
| 1,892
|
py
|
Python
|
enrolled/net-intro-CS2105/ass/ass-2/otto/Alice.py
|
plty/nus
|
3d75b6d1c54022a42edbc15317507ddc988a7231
|
[
"MIT"
] | null | null | null |
enrolled/net-intro-CS2105/ass/ass-2/otto/Alice.py
|
plty/nus
|
3d75b6d1c54022a42edbc15317507ddc988a7231
|
[
"MIT"
] | null | null | null |
enrolled/net-intro-CS2105/ass/ass-2/otto/Alice.py
|
plty/nus
|
3d75b6d1c54022a42edbc15317507ddc988a7231
|
[
"MIT"
] | null | null | null |
import time
from functools import partial
from socket import *
from threading import Thread
from zlib import crc32
import sys
MAX_PACKET_SIZE = 64
ANY_HOST = ''
def to_int(b):
return int.from_bytes(b, byteorder='big')
class Server:
def __init__(self, port):
self.start = 0
self.port = port
self.sock = socket(AF_INET, SOCK_DGRAM)
@staticmethod
def _chunk(msg, n):
for i in range(0, len(msg), n):
yield msg[i:i + n]
@staticmethod
def _chunk_str_stream(stream, n):
for msg in stream:
yield from Server._chunk(msg.encode('ascii'), n)
@staticmethod
def _pack(msg, sqn):
sqn = sqn.to_bytes(4, byteorder='big')
checksum = crc32(sqn + msg).to_bytes(4, byteorder='big')
return checksum + sqn + msg
def send(self, packets, window):
while self.start != len(packets):
for i in range(self.start, min(self.start + window, len(packets))):
self.sock.sendto(packets[i], (ANY_HOST, self.port))
@staticmethod
def _unpack_ack(pack):
checksum, sqn = pack[:4], pack[4:8]
return to_int(checksum), to_int(sqn)
def listen(self, n):
while self.start != n:
data, addr = self.sock.recvfrom(MAX_PACKET_SIZE)
checksum, sqn = Server._unpack_ack(data)
if checksum != crc32(data[4:]):
continue
self.start = max(self.start, sqn + 1)
def start_server(port):
packets = [
Server._pack(m, i) for i, m in
enumerate(Server._chunk(sys.stdin.buffer.read(), MAX_PACKET_SIZE - 8))
]
server = Server(port)
    Thread(target=partial(server.listen, len(packets))).start()
    Thread(target=partial(server.send, packets, 64)).start()
if __name__ == "__main__":
start_server(int(sys.argv[1]))
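# Run sketch (port and input file are placeholders): Alice reads the payload
# from stdin, splits it into 56-byte chunks (64-byte packets minus the 8-byte
# CRC32 + sequence-number header) and keeps resending the current window until
# the matching receiver acknowledges every sequence number.
#   python3 Alice.py 9000 < input.txt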
| 27.823529
| 86
| 0.600951
|
cdcc1443708bfe5832dc33e1ef8cbb6defcce54a
| 58,015
|
py
|
Python
|
hs_access_control/tests/test_provenance_function.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 178
|
2015-01-08T23:03:36.000Z
|
2022-03-03T13:56:45.000Z
|
hs_access_control/tests/test_provenance_function.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 4,125
|
2015-01-01T14:26:15.000Z
|
2022-03-31T16:38:55.000Z
|
hs_access_control/tests/test_provenance_function.py
|
tommac7/hydroshare
|
87c4543a55f98103d2614bf4c47f7904c3f9c029
|
[
"BSD-3-Clause"
] | 53
|
2015-03-15T17:56:51.000Z
|
2022-03-17T00:32:16.000Z
|
from django.test import TestCase
from django.contrib.auth.models import Group
from hs_access_control.models import UserResourceProvenance, UserResourcePrivilege, \
GroupResourceProvenance, GroupResourcePrivilege, \
UserGroupProvenance, UserGroupPrivilege, \
PrivilegeCodes
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, \
is_equal_to_as_set, check_provenance_synchronization
__author__ = 'Alva'
class UnitTests(MockIRODSTestCaseMixin, TestCase):
""" test basic behavior of each routine """
def setUp(self):
super(UnitTests, self).setUp()
global_reset()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.alva = hydroshare.create_account(
'alva@gmail.com',
username='alva',
first_name='alva',
last_name='couch',
superuser=False,
groups=[]
)
self.george = hydroshare.create_account(
'george@gmail.com',
username='george',
first_name='george',
last_name='miller',
superuser=False,
groups=[]
)
self.john = hydroshare.create_account(
'john@gmail.com',
username='john',
first_name='john',
last_name='miller',
superuser=False,
groups=[]
)
self.admin = hydroshare.create_account(
'admin@gmail.com',
username='admin',
first_name='first_name_admin',
last_name='last_name_admin',
superuser=True,
groups=[]
)
        # george creates a resource 'bikes'
self.bikes = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.george,
title='Bikes',
metadata=[],
)
        # george creates a group 'bikers'
self.bikers = self.george.uaccess.create_group('Bikers', 'Of the human powered kind')
        # george creates a resource 'harps'
self.harps = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.george,
title='Harps',
metadata=[],
)
        # george creates a group 'harpers'
self.harpers = self.george.uaccess.create_group('Harpers', 'Without any ferries')
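    # The *Provenance models below track the history of grants while the
    # matching *Privilege models expose only the current effective privilege;
    # each test checks that the two stay synchronized after share/undo_share.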
def test_user_resource_provenance_crosstalk(self):
george = self.george
alva = self.alva
bikes = self.bikes
harps = self.harps
john = self.john
# George grants Alva view privilege
UserResourcePrivilege.share(
resource=bikes,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# George grants Alva privilege
UserResourcePrivilege.share(
resource=bikes,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Alva grants John privilege
UserResourcePrivilege.share(
resource=bikes,
user=john,
privilege=PrivilegeCodes.CHANGE,
grantor=alva)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=alva),
[john]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, alva)
# now George overrides Alva on John's privilege
UserResourcePrivilege.share(
resource=bikes,
user=john,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes, grantor=george), [
alva, john]))
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=alva),
[]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Crosstalk test: George grants Alva privilege over harps
UserResourcePrivilege.share(
resource=harps,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
# old privileges didn't change
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes, grantor=george), [
alva, john]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# check new privileges: should be independent.
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=harps,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=harps, user=alva)
self.assertEqual(record.resource, harps)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
check_provenance_synchronization(self)
def test_user_group_provenance_crosstalk(self):
george = self.george
alva = self.alva
bikers = self.bikers
harpers = self.harpers
john = self.john
# George grants Alva view privilege
UserGroupPrivilege.share(
group=bikers,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# George grants Alva privilege
UserGroupPrivilege.share(
group=bikers,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Alva grants John privilege
UserGroupPrivilege.share(
group=bikers,
user=john,
privilege=PrivilegeCodes.CHANGE,
grantor=alva)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=alva),
[john]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, alva)
# now George overrides Alva on John's privilege
UserGroupPrivilege.share(
group=bikers,
user=john,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers, grantor=george), [
alva, john]))
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=alva),
[]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Crosstalk test: George grants Alva privilege over harpers
UserGroupPrivilege.share(
group=harpers,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
# old privileges didn't change
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers, grantor=george), [
alva, john]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# check new privileges: should be independent of old privileges
self.assertEqual(
UserGroupProvenance.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=harpers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=harpers, user=alva)
self.assertEqual(record.group, harpers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
check_provenance_synchronization(self)
def test_group_resource_provenance_crosstalk(self):
george = self.george
bikes = self.bikes
bikers = self.bikers
harps = self.harps
harpers = self.harpers
alva = self.alva
# George grants Bikers view privilege
GroupResourcePrivilege.share(
resource=bikes,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.VIEW)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=george),
[bikers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=bikers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, bikers)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# George grants Harpers change privilege
GroupResourcePrivilege.share(
resource=bikes,
group=harpers,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.CHANGE)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes, grantor=george), [
bikers, harpers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=harpers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, harpers)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Alva downgrades Harpers privilege
GroupResourcePrivilege.share(
resource=bikes,
group=harpers,
privilege=PrivilegeCodes.VIEW,
grantor=alva)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.VIEW)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=george),
[bikers]))
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=alva),
[harpers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=harpers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, harpers)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, alva)
# now George overrides Alva on Harpers privilege
GroupResourcePrivilege.share(
resource=bikes,
group=harpers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.VIEW)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes, grantor=george), [
bikers, harpers]))
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=alva),
[]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=harpers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, harpers)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Crosstalk test: George grants bikers privilege over harps
GroupResourcePrivilege.share(
resource=harps,
group=bikers,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
# old privileges didn't change
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.VIEW)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes, grantor=george), [
bikers, harpers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=bikers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, bikers)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# check new privileges: should be independent.
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=harps,
group=bikers),
PrivilegeCodes.CHANGE)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=harps,
group=bikers),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=harps,
grantor=george),
[bikers]))
record = GroupResourceProvenance.get_current_record(
resource=harps, group=bikers)
self.assertEqual(record.resource, harps)
self.assertEqual(record.group, bikers)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
check_provenance_synchronization(self)
def test_user_resource_provenance_undo_share(self):
george = self.george
alva = self.alva
bikes = self.bikes
harps = self.harps
john = self.john
# initial state: no undo to do.
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva) # no record
self.assertTrue(record is None)
# George grants Alva view privilege
UserResourcePrivilege.share(
resource=bikes,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
# update creates a record
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Roll back alva's privilege
UserResourcePrivilege.undo_share(resource=bikes, user=alva, grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[]))
# there is now a record
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.NONE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, None)
# George grants Alva privilege
UserResourcePrivilege.share(
resource=bikes,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=alva)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Alva grants John privilege
UserResourcePrivilege.share(
resource=bikes,
user=john,
privilege=PrivilegeCodes.CHANGE,
grantor=alva)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=alva),
[john]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, alva)
# now George overrides Alva on John's privilege
UserResourcePrivilege.share(
resource=bikes,
user=john,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes, grantor=george), [
alva, john]))
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=alva),
[]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# George changes mind and rolls back change
UserResourcePrivilege.undo_share(resource=bikes, user=john, grantor=george)
# privilege has been rolled back
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=alva),
[]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, alva)
# Crosstalk test: George grants Alva privilege over harps
UserResourcePrivilege.share(
resource=harps,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
# old privileges didn't change
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=bikes,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=bikes,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=bikes, user=john)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, alva)
# check new privileges: should be independent.
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=harps,
grantor=george),
[alva]))
record = UserResourceProvenance.get_current_record(
resource=harps, user=alva)
self.assertEqual(record.resource, harps)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# now roll back privilege over harps
UserResourcePrivilege.undo_share(resource=harps, user=alva, grantor=george)
self.assertEqual(
UserResourceProvenance.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserResourcePrivilege.get_privilege(
resource=harps,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserResourcePrivilege.get_undo_users(
resource=harps,
grantor=george),
[]))
record = UserResourceProvenance.get_current_record(
resource=harps, user=alva)
self.assertEqual(record.resource, harps)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.NONE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, None)
check_provenance_synchronization(self)
def test_user_group_provenance_undo_share(self):
george = self.george
alva = self.alva
bikers = self.bikers
harpers = self.harpers
john = self.john
# initial state: no undo to do.
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva) # no record
self.assertTrue(record is None)
# George grants Alva view privilege
UserGroupPrivilege.share(
group=bikers,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
# update creates a record
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Roll back alva's privilege
UserGroupPrivilege.undo_share(group=bikers, user=alva, grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[]))
# there is now a record
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.NONE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, None)
# George grants Alva privilege
UserGroupPrivilege.share(
group=bikers,
user=alva,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=alva),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=alva)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Alva grants John privilege
UserGroupPrivilege.share(
group=bikers,
user=john,
privilege=PrivilegeCodes.CHANGE,
grantor=alva)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=alva),
[john]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, alva)
# now George overrides Alva on John's privilege
UserGroupPrivilege.share(
group=bikers,
user=john,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers, grantor=george), [
alva, john]))
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=alva),
[]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# George changes mind and rolls back change
UserGroupPrivilege.undo_share(group=bikers, user=john, grantor=george)
# privilege has been rolled back
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=alva),
[]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, alva)
# Crosstalk test: George grants Alva privilege over harpers
UserGroupPrivilege.share(
group=harpers,
user=alva,
privilege=PrivilegeCodes.VIEW,
grantor=george)
# old privileges didn't change
self.assertEqual(
UserGroupProvenance.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=bikers,
user=john),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=bikers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=bikers, user=john)
self.assertEqual(record.group, bikers)
self.assertEqual(record.user, john)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, alva)
# check new privileges: should be independent.
self.assertEqual(
UserGroupProvenance.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.VIEW)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=harpers,
grantor=george),
[alva]))
record = UserGroupProvenance.get_current_record(
group=harpers, user=alva)
self.assertEqual(record.group, harpers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# now roll back privilege over harpers
UserGroupPrivilege.undo_share(group=harpers, user=alva, grantor=george)
self.assertEqual(
UserGroupProvenance.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.NONE)
self.assertEqual(
UserGroupPrivilege.get_privilege(
group=harpers,
user=alva),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
UserGroupPrivilege.get_undo_users(
group=harpers,
grantor=george),
[]))
record = UserGroupProvenance.get_current_record(
group=harpers, user=alva)
self.assertEqual(record.group, harpers)
self.assertEqual(record.user, alva)
self.assertEqual(record.privilege, PrivilegeCodes.NONE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, None)
check_provenance_synchronization(self)
def test_group_resource_provenance_undo_share(self):
george = self.george
alva = self.alva
bikers = self.bikers
bikes = self.bikes
harps = self.harps
harpers = self.harpers
# initial state: no undo to do.
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.NONE)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=george),
[]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=bikers) # no record
self.assertTrue(record is None)
# George grants bikers view privilege
GroupResourcePrivilege.share(
resource=bikes,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.VIEW)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=george),
[bikers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=bikers) # update creates a record
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, bikers)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Roll back bikers's privilege
GroupResourcePrivilege.undo_share(resource=bikes, group=bikers, grantor=george)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.NONE)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=george),
[]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=bikers) # there is now a record that is initial
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, bikers)
self.assertEqual(record.privilege, PrivilegeCodes.NONE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, None)
# George grants bikers privilege
GroupResourcePrivilege.share(
resource=bikes,
group=bikers,
privilege=PrivilegeCodes.CHANGE,
grantor=george)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.CHANGE)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=bikers),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=george),
[bikers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=bikers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, bikers)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# Alva grants harpers privilege
GroupResourcePrivilege.share(
resource=bikes,
group=harpers,
privilege=PrivilegeCodes.CHANGE,
grantor=alva)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.CHANGE)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=george),
[bikers]))
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=alva),
[harpers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=harpers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, harpers)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, alva)
# now George overrides Alva on harpers' privilege
GroupResourcePrivilege.share(
resource=bikes,
group=harpers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.VIEW)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes, grantor=george), [
bikers, harpers]))
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=alva),
[]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=harpers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, harpers)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# George changes mind and rolls back change
GroupResourcePrivilege.undo_share(resource=bikes, group=harpers, grantor=george)
# privilege has been rolled back
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.CHANGE)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=george),
[bikers]))
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=alva),
[]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=harpers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, harpers)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, alva)
# Crosstalk test: George grants bikers privilege over harps
GroupResourcePrivilege.share(
resource=harps,
group=bikers,
privilege=PrivilegeCodes.VIEW,
grantor=george)
# old privileges didn't change
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.CHANGE)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=bikes,
group=harpers),
PrivilegeCodes.CHANGE)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=bikes,
grantor=george),
[bikers]))
record = GroupResourceProvenance.get_current_record(
resource=bikes, group=harpers)
self.assertEqual(record.resource, bikes)
self.assertEqual(record.group, harpers)
self.assertEqual(record.privilege, PrivilegeCodes.CHANGE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, alva)
# check new privileges: should be independent.
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=harps,
group=bikers),
PrivilegeCodes.VIEW)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=harps,
group=bikers),
PrivilegeCodes.VIEW)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=harps,
grantor=george),
[bikers]))
record = GroupResourceProvenance.get_current_record(
resource=harps, group=bikers)
self.assertEqual(record.resource, harps)
self.assertEqual(record.group, bikers)
self.assertEqual(record.privilege, PrivilegeCodes.VIEW)
self.assertEqual(record.undone, False)
self.assertEqual(record.grantor, george)
# now roll back privilege over harps
GroupResourcePrivilege.undo_share(resource=harps, group=bikers, grantor=george)
self.assertEqual(
GroupResourceProvenance.get_privilege(
resource=harps,
group=bikers),
PrivilegeCodes.NONE)
self.assertEqual(
GroupResourcePrivilege.get_privilege(
resource=harps,
group=bikers),
PrivilegeCodes.NONE)
self.assertTrue(
is_equal_to_as_set(
GroupResourcePrivilege.get_undo_groups(
resource=harps,
grantor=george),
[]))
record = GroupResourceProvenance.get_current_record(
resource=harps, group=bikers)
self.assertEqual(record.resource, harps)
self.assertEqual(record.group, bikers)
self.assertEqual(record.privilege, PrivilegeCodes.NONE)
self.assertEqual(record.undone, True)
self.assertEqual(record.grantor, None)
check_provenance_synchronization(self)
| 35.833848
| 93
| 0.576506
|
046f0712888238deafeb4ffd656e9aeb4bf33d87
| 1,238
|
py
|
Python
|
test/test_persist.py
|
davidhwyllie/findNeighbour4
|
d42e10711e59e93ebf0e798fbb1598929f662c9c
|
[
"MIT"
] | null | null | null |
test/test_persist.py
|
davidhwyllie/findNeighbour4
|
d42e10711e59e93ebf0e798fbb1598929f662c9c
|
[
"MIT"
] | 14
|
2021-11-26T14:43:25.000Z
|
2022-03-22T00:39:17.000Z
|
test/test_persist.py
|
davidhwyllie/findNeighbour4
|
d42e10711e59e93ebf0e798fbb1598929f662c9c
|
[
"MIT"
] | null | null | null |
""" tests Persistence class, which returns either a
mongo or rdbms based fn3persistence object.
"""
import unittest
from findn.persistence import Persistence
from findn.mongoStore import fn3persistence
from findn.rdbmsstore import fn3persistence_r
class Test_Persistence(unittest.TestCase):
"""tests persistence class"""
def setUp(self):
self.engines = {}
self.engines["Sqlite"] = "sqlite://" # in memory sqlite
self.engines["mongo"] = "mongodb://localhost" # in memory sqlite
def runTest(self):
"""yields fn3persistence objects, one for each database server being tested."""
for engine, config in self.engines.items():
print(engine, type(self).__name__)
sf = Persistence()
pdm = sf.get_storage_object(connString=config, debug=2)
if engine == "mongo":
self.assertEqual(pdm.storage_technology, "mongodb")
self.assertIsInstance(pdm, fn3persistence)
else:
self.assertEqual(pdm.storage_technology, "rdbms")
self.assertIsInstance(pdm, fn3persistence_r)
# explicitly close connection (required for unittesting)
pdm.closedown()
| 33.459459
| 87
| 0.648627
|
c0d80d6a9f747552b13241511d90ebd641a00d52
| 24,381
|
py
|
Python
|
python/maya/site-packages/pymel-1.0.3/maintenance/stubs.py
|
CountZer0/PipelineConstructionSet
|
0aa73a8a63c72989b2d1c677efd78dad4388d335
|
[
"BSD-3-Clause"
] | 21
|
2015-04-27T05:01:36.000Z
|
2021-11-22T13:45:14.000Z
|
python/maya/site-packages/pymel-1.0.3/maintenance/stubs.py
|
0xb1dd1e/PipelineConstructionSet
|
621349da1b6d1437e95d0c9e48ee9f36d59f19fd
|
[
"BSD-3-Clause"
] | null | null | null |
python/maya/site-packages/pymel-1.0.3/maintenance/stubs.py
|
0xb1dd1e/PipelineConstructionSet
|
621349da1b6d1437e95d0c9e48ee9f36d59f19fd
|
[
"BSD-3-Clause"
] | 7
|
2015-04-11T11:37:19.000Z
|
2020-05-22T09:49:04.000Z
|
from pydoc import * #@UnusedWildImport
import pydoc, sys, pprint #@Reimport
import __builtin__
import os #@Reimport
import pkgutil #@Reimport
builtins = set(__builtin__.__dict__.values())
# for the sake of stubtest, don't import anything pymel/maya at module level
#import pymel.util as util
class NoUnicodeTextRepr(TextRepr):
'''PyDev barfs when a unicode literal (ie, u'something') is in a pypredef
file; use this repr to make sure they don't show up.
'''
def repr_unicode(self, uStr, level):
return self.repr_string(str(uStr), level)
class StubDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
module_map = {}
_repr_instance = NoUnicodeTextRepr()
# We don't care if it's compact, we just want it to parse right...
_repr_instance.maxlist = _repr_instance.maxtuple = _repr_instance.maxdict\
= _repr_instance.maxstring = _repr_instance.maxother = 100000
repr = _repr_instance.repr
# Mapping of (module, dontImportThese)
MODULE_EXCLUDES = {
'pymel.api':set(['pymel.internal.apicache']),
'pymel' :set(['pymel.all']),
}
debugmodule = 'pymel.core'
def __init__(self, *args, **kwargs):
self.missing_modules = set([])
if hasattr(Doc, '__init__'):
Doc.__init__(self, *args, **kwargs)
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
quotes = "'''" if '"""' in contents else '"""'
return rstrip(self.indent( quotes +'\n' + contents + '\n' + quotes)) + '\n\n'
def docstring(self, contents):
"""Format a section with a given heading."""
quotes = "'''" if '"""' in contents else '"""'
return quotes + '\n' + contents + '\n' + quotes + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
importSubstitutions = {'pymel.util.objectParser':'''
class ProxyUni(object): pass
class Parsed(ProxyUni): pass
''',
'precompmodule':''}
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
desc = splitdoc(getdoc(object))[1]
result = ''
self.module_map = {}
self.missing_modules = set([])
try:
all = object.__all__
except AttributeError:
all = None
# try:
# file = inspect.getabsfile(object)
# except TypeError:
# file = '(built-in)'
# result = result + self.section('FILE', file)
#
# docloc = self.getdocloc(object)
# if docloc is not None:
# result = result + self.section('MODULE DOCS', docloc)
if desc:
            result += self.docstring(desc)
def classModule(classObj):
mod = inspect.getmodule(classObj)
if not mod:
mod = object
elif mod == __builtin__ and classObj not in builtins:
mod = object
return mod
untraversedClasses = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or classModule(value) is object):
if visiblename(key, all):
untraversedClasses.append((key, value))
# A visible class may have a non-visible baseClass from this module,
# which will still need to be included if the module is to import
# correctly - ie,
# class _AbstractClass(object): pass
# class InheritedClass(_AbstractClass): pass
classes = []
while untraversedClasses:
key, childClass = untraversedClasses.pop()
classes.append( (key, childClass) )
try:
[x for x in childClass.__bases__]
except Exception:
print "problem iterating %s.__bases__" % childClass
for parentClass in childClass.__bases__:
if classModule(parentClass) is object:
newTuple = (parentClass.__name__, parentClass)
if newTuple not in classes and newTuple not in untraversedClasses:
untraversedClasses.append( newTuple )
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or inspect.getmodule(value) is object
or inspect.isbuiltin(value)):
if visiblename(key, all):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
modules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
modules.append((key, value))
fromall_modules = set([])
for key, value in inspect.getmembers(object, lambda x: not inspect.ismodule(x) ):
if hasattr(value, '__module__') and value.__module__ not in [None, object.__name__] and not value.__module__.startswith('_'):
if object.__name__ == self.debugmodule and value.__module__ not in fromall_modules:
print "import* %r" % value.__module__
fromall_modules.add( value.__module__ )
if modules:
contents = []
#print "modules", object
for key, value in modules:
realname = value.__name__
if realname == name:
continue
import_text = self.import_mod_text(object, realname, key)
if import_text:
contents.append(import_text)
result = result + join(contents, '\n') + '\n\n'
if fromall_modules:
# special-case handling for pymel.internal.pmcmds, which ends up
# with a bunch of 'from pymel.core.X import *' commands
if name == 'pymel.internal.pmcmds':
fromall_modules = [x for x in fromall_modules if not x.startswith('pymel.core')]
fromall_modules.append('maya.cmds')
contents = []
for modname in fromall_modules:
import_text = self.import_mod_text(object, modname, '*')
if import_text:
contents.append(import_text)
result = result + join(contents, '\n') + '\n\n'
if classes:
# sort in order of resolution
def nonconflicting(classlist):
for cls in classlist:
mro = set(inspect.getmro(cls)[1:])
if not mro.intersection(classlist):
yield cls
inspect.getmro(str)
sorted = []
unsorted = set([x[1] for x in classes])
while unsorted:
for cls in nonconflicting(unsorted):
sorted.append(cls)
unsorted.difference_update(sorted)
# classlist = map(lambda key_value: key_value[1], classes)
# contents = [self.formattree(
# inspect.getclasstree(classlist, 1), name)]
contents = []
classes = dict([ (x[1], x[0]) for x in classes])
for key in sorted:
contents.append(self.document(key, classes[key], name))
classres = join(contents, '\n').split('\n')
for i, line in enumerate(classres):
if u'\xa0' in line:
print "bad char"
for j in range( max(i-10,0), min(i+10,len(classres)) ):
if j == i:
print '-'*80
print classres[j]
if j == i:
print '-'*80
classres[i] = ''.join(line.split( u'\xa0'))
result = result + join(classres, '\n')
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + join(contents, '\n')
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + join(contents, '\n')
# if hasattr(object, '__version__'):
# version = str(object.__version__)
# if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
# version = strip(version[11:-1])
# result = result + self.section('VERSION', version)
# if hasattr(object, '__date__'):
# result = result + self.section('DATE', str(object.__date__))
# if hasattr(object, '__author__'):
# result = result + self.section('AUTHOR', str(object.__author__))
# if hasattr(object, '__credits__'):
# result = result + self.section('CREDITS', str(object.__credits__))
if self.missing_modules:
contents = []
for mod in self.missing_modules:
import_text = self.import_mod_text(object, mod, mod)
if import_text:
contents.append(import_text)
result = join(contents, '\n') + '\n\n' + result
return result
def classname(self, object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ not in [ modname,'__builtin__']:
#print object
try:
newmodname = self.module_map[object.__module__]
#print "from map", object.__module__, repr(modname)
except KeyError:
newmodname = None
for m in self.module_map.keys():
mod = sys.modules[m]
#print '\t', m, mod
if object in mod.__dict__.values():
#print '\tfound'
newmodname = self.module_map[m]
break
if not newmodname:
#print "missing"
self.missing_modules.add(object.__module__)
if newmodname:
name = newmodname + '.' + name
return name
def docclass(self, object, name=None, mod=None):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return self.classname(c, m)
title = 'class ' + name
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
title += ':\n'
doc = getdoc(object)
contents = doc and [self.docstring(doc) + '\n'] or []
push = contents.append
def spill(msg, attrs, predicate):
ok, attrs = pydoc._split_list(attrs, predicate)
if ok:
for name, kind, homecls, value in ok: #@UnusedVariable
push(self.document(getattr(object, name),
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = pydoc._split_list(attrs, predicate)
if ok:
for name, kind, homecls, value in ok: #@UnusedVariable
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = pydoc._split_list(attrs, predicate)
if ok:
for name, kind, homecls, value in ok: #@UnusedVariable
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0]),
classify_class_attrs(object))
thisclass = object
attrs, inherited = pydoc._split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
else:
if attrs:
tag = None
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
else:
contents.append('pass')
contents = '\n'.join(contents)
return title + self.indent(rstrip(contents), ' ') + '\n\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
# check if the object is os.environ...
isEnviron = False
if object == os.environ:
isEnviron = True
elif isinstance(object, dict):
# If over 90% of the keys are in os.environ, assume it's os.environ
if len(set(object) & set(os.environ)) > (len(object) * 0.9):
isEnviron = True
if isEnviron:
objRepr = repr({'PROXY_FOR':'os.environ'})
else:
if isinstance(object, unicode):
# pydev can't handle unicode literals - ie, u'stuff' - so
# convert to normal strings
object = str(object)
objRepr = self.repr(object)
if objRepr[0] == '<' and objRepr[-1] == '>':
objRepr = repr(objRepr)
return '=' + objRepr
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
if 'display' in name:
print "documenting:", name
realname = object.__name__
name = name or realname
skipdocs = 0
if inspect.ismethod(object):
object = object.im_func
title = name
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
else:
argspec = '(*args, **kwargs)'
decl = 'def ' + title + argspec + ':'
if isinstance(object, staticmethod):
decl = '@staticmethod\n' + decl
elif isinstance(object, classmethod):
decl = '@classmethod\n' + decl
if skipdocs:
return decl + 'pass\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(self.docstring(doc))) + '\n\n') + self.indent('pass') + '\n\n'
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(name + ' = None')
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
if name in ['__metaclass__']:
return ''
value = None
if name == '__all__':
value = pprint.pformat(object)
else:
if isinstance(object, (basestring, int, long)):
value = self.repr(object)
else:
value = 'None'
line = (name and name + ' = ' or '') + value + '\n'
return line
def import_mod_text(self, currmodule, importmodule, asname):
ispkg = hasattr(currmodule, '__path__')
currname = currmodule.__name__
if importmodule in self.MODULE_EXCLUDES.get(currname, ()):
print "%s had %s in MODULE_EXCLUDES" % (currname, importmodule)
return ''
elif asname != '*':
realname = importmodule
realparts = realname.split('.')
currparts = currname.split('.')
importname = realname
fromname = ''
if currname == self.debugmodule:
print '\t %-30s %-30s %s' % ( realname, importname, asname )
#test for siblings - needed to avoid circular imports
if len(realparts) == len(currparts):
if realparts[:-1] == currparts[:-1] and not ispkg:
if currname == self.debugmodule:
print "\t\tsibling"
fromname = '.'
importname = realparts[-1]
# test if importing a child - ie, pymel will have a .core attribute,
# simply because at some point we imported pymel.core, but we don't
# need / want an explicit import statement
elif len(realparts) > len(currparts):
if realparts[:len(currparts)] == currparts:
# Check that asname matches realname, so that if we do
# import pymel.core.nt as nt
# from inside pymel.core, we still get the nt showing up
if asname == realparts[-1]:
if currname == self.debugmodule:
print "\t\tparent - no import"
return ''
# if we're doing a renamed parent import, we want to make it
# relative to avoid circular imports
fromname = '.'
importname = '.'.join(realparts[len(currparts):])
self.module_map[realname] = asname if importname != asname else importname
if importname in self.importSubstitutions:
return '%s = None' % asname
else:
result = 'import ' + importname
if importname != asname:
result += ' as ' + asname
if fromname:
result = 'from ' + fromname + ' ' + result
return result
else:
self.module_map[importmodule] = ''
if importmodule in self.importSubstitutions:
return self.importSubstitutions[importmodule]
else:
return 'from ' + importmodule + ' import *'
stubs = StubDoc()
def packagestubs(packagename, outputdir='', extensions=('py', 'pypredef', 'pi'), exclude=None):
import pymel.util as util
packagemod = __import__(packagename, globals(), locals(), [], -1)
for modname, mod, ispkg in util.subpackages(packagemod):
print modname, ":"
contents = stubs.docmodule(mod)
for extension in extensions:
basedir = os.path.join(outputdir, extension)
if extension == 'pypredef':
curfile = os.path.join(basedir, modname)
else:
curfile = os.path.join(basedir, *modname.split('.') )
if ispkg:
curfile = os.path.join(curfile, '__init__' )
curfile = curfile + os.extsep + extension
curdir = os.path.dirname(curfile)
if not os.path.isdir(curdir):
os.makedirs(curdir)
print "\t ...writing %s" % curfile
f = open( curfile, 'w' )
if not exclude or not re.match( exclude, modname ):
f.write( contents )
f.close()
def pymelstubs(extensions=('py', 'pypredef', 'pi'), pymel=True, maya=True):
""" Builds pymel stub files for autocompletion.
Can build Python Interface files (pi) with extension='pi' for IDEs like wing."""
pymeldir = os.path.dirname( os.path.dirname( sys.modules[__name__].__file__) )
outputdir = os.path.join(pymeldir, 'extras', 'completion')
print outputdir
if not os.path.exists(outputdir):
os.makedirs(outputdir)
if pymel:
packagestubs( 'pymel',
outputdir=outputdir,
extensions=extensions,
exclude='pymel\.util\.scanf|pymel\.util\.objectParser|pymel\.tools\.ipymel')
if maya:
packagestubs( 'maya', outputdir=outputdir,extensions=extensions )
return outputdir
# don't start name with test - don't want it automatically run by nose
def stubstest(pystubdir, doprint=True):
'''Test the stubs modules.
Don't call this from 'inside maya', as we've probably already loaded all
the various 'real' modules, which can give problems.
'''
def importError(modname):
print 'error importing %s:' % modname
import traceback
bad.append( (modname, traceback.format_exc()) )
bad = []
print "Testing all modules in: %s" % pystubdir
sys.path.insert(0, pystubdir)
try:
for importer, modname, ispkg in \
pkgutil.walk_packages(path=[pystubdir],onerror=importError):
print 'testing %s' % modname
try:
# Don't use the importer returned by walk_packages, as it
                # doesn't always properly update parent package's dictionary
# with submodule name - ie, you would do:
# import pymel.all
# print pymel.all
# ...and find that pymel had no attribute 'all'
#importer.find_module(modname).load_module(modname)
__import__(modname, globals(), locals(), [])
except Exception, error:
print 'found bad module: %s - %s' % (modname, error)
importError(modname)
finally:
sys.path.pop(0)
print 'done walking modules'
if doprint:
for modname, error in bad:
print '*' * 60
print 'could not import %s:\n%s' % (modname, error)
return bad
| 40.034483
| 137
| 0.532136
|
deaf935c751b80c2e49989bf9bdf1f168d3aa904
| 6,610
|
py
|
Python
|
fractalis/data/controller.py
|
thehyve/Fractalis
|
5591112e5bc994eea5baf3d28caa7e5dfee85a57
|
[
"Apache-2.0"
] | null | null | null |
fractalis/data/controller.py
|
thehyve/Fractalis
|
5591112e5bc994eea5baf3d28caa7e5dfee85a57
|
[
"Apache-2.0"
] | 6
|
2018-11-02T10:00:04.000Z
|
2021-09-13T14:15:36.000Z
|
fractalis/data/controller.py
|
thehyve/Fractalis
|
5591112e5bc994eea5baf3d28caa7e5dfee85a57
|
[
"Apache-2.0"
] | 1
|
2018-10-22T08:12:00.000Z
|
2018-10-22T08:12:00.000Z
|
"""The /data controller. Please refer to doc/api for more information."""
import json
import logging
from typing import Tuple, Union
from uuid import UUID
from flask import Blueprint, session, request, jsonify, Response
from fractalis import celery, redis
from fractalis.authorization import authorized
from fractalis.data.etlhandler import ETLHandler
from fractalis.schema import CreateDataSchema
from fractalis.sync import remove_data
from fractalis.validator import validate_json, validate_schema
data_blueprint = Blueprint('data_blueprint', __name__)
logger = logging.getLogger(__name__)
@data_blueprint.route('', methods=['POST'])
@validate_json
@validate_schema(CreateDataSchema)
@authorized
def create_data_task() -> Tuple[Response, int]:
"""Submit new ETL tasks based on the payload of the request body.
See doc/api/ for more information.
:return: Empty response. Everything important is stored in the session.
"""
logger.debug("Received POST request on /data.")
wait = request.args.get('wait') == '1'
payload = request.get_json(force=True)
try:
etl_handler = ETLHandler.factory(service_name=payload['service'],
auth=payload['auth'])
except Exception as e:
return jsonify({'error': str(e)}), 400
task_ids = etl_handler.handle(descriptors=payload['descriptors'],
data_tasks=session['data_tasks'],
use_existing=False,
wait=wait)
session['data_tasks'] += task_ids
session['data_tasks'] = list(set(session['data_tasks']))
logger.debug("Tasks successfully submitted. Sending response.")
return jsonify(''), 201
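# Illustrative request shape for the POST /data route above (an assumption added
# for clarity; the exact descriptor fields are defined by CreateDataSchema and the
# configured ETL handler, not by this comment):
#   POST /data?wait=1
#   {"service": "<etl service name>", "auth": {...}, "descriptors": [{...}, ...]}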
def get_data_state_for_task_id(task_id: str, wait: bool) -> Union[dict, None]:
"""Return data state associated with task id.
:param task_id: The id associated with the ETL task.
:param wait: If true and ETL is still running wait for it.
:return: Data state that has been stored in Redis.
"""
async_result = celery.AsyncResult(task_id)
if wait and async_result.state == 'SUBMITTED':
logger.debug("'wait' was set. Waiting for tasks to finish ...")
async_result.get(propagate=False)
value = redis.get('data:{}'.format(task_id))
if not value:
return None
data_state = json.loads(value)
# add additional information to data_state
result = async_result.result
if isinstance(result, Exception): # Exception -> str
result = "{}: {}".format(type(result).__name__, str(result))
data_state['etl_message'] = result
data_state['etl_state'] = async_result.state
return data_state
@data_blueprint.route('', methods=['GET'])
def get_all_data() -> Tuple[Response, int]:
"""Get information for all tasks that have been submitted in the lifetime
of the current session.
See doc/api/ for more information.
:return: Information associated with each submitted task
"""
logger.debug("Received GET request on /data.")
wait = request.args.get('wait') == '1'
data_states = []
existing_data_tasks = []
for task_id in session['data_tasks']:
data_state = get_data_state_for_task_id(task_id, wait)
if data_state is None:
warning = "Data state with task_id '{}' expired.".format(task_id)
logger.warning(warning)
continue
# remove internal information from response
del data_state['file_path']
del data_state['meta']
# add additional information to response
data_states.append(data_state)
existing_data_tasks.append(task_id)
session['data_tasks'] = existing_data_tasks
logger.debug("Data states collected. Sending response.")
return jsonify({'data_states': data_states}), 200
@data_blueprint.route('/<uuid:task_id>', methods=['DELETE'])
@authorized
def delete_data(task_id: UUID) -> Tuple[Response, int]:
"""Remove all traces of the data associated with the given task id.
:param task_id: The id associated with the data
See doc/api/ for more information.
:return: Empty response.
"""
logger.debug("Received DELETE request on /data/task_id.")
wait = request.args.get('wait') == '1'
task_id = str(task_id)
if task_id not in session['data_tasks']:
error = "Task ID '{}' not found in session. " \
"Refusing access.".format(task_id)
logger.warning(error)
return jsonify({'error': error}), 403
session['data_tasks'].remove(task_id)
# possibly dangerous: http://stackoverflow.com/a/29627549
celery.control.revoke(task_id, terminate=True, signal='SIGUSR1', wait=wait)
remove_data(task_id=task_id)
logger.debug("Successfully removed data from session. Sending response.")
return jsonify(''), 200
@data_blueprint.route('', methods=['DELETE'])
@authorized
def delete_all_data() -> Tuple[Response, int]:
"""Remove all traces of all data associated with this session.
:return: Empty response.
"""
logger.debug("Received DELETE request on /data.")
wait = request.args.get('wait') == '1'
for task_id in session['data_tasks']:
remove_data(task_id=task_id)
# possibly dangerous: http://stackoverflow.com/a/29627549
celery.control.revoke(task_id, terminate=True,
signal='SIGUSR1', wait=wait)
session['data_tasks'] = []
logger.debug("Successfully removed all data from session. "
"Sending response.")
return jsonify(''), 200
@data_blueprint.route('/meta/<uuid:task_id>', methods=['GET'])
@authorized
def get_meta_information(task_id: UUID) -> Tuple[Response, int]:
"""Get meta information for given task id.
:return: meta information object stored in redis.
"""
logger.debug("Received GET request on /data/meta/task_id.")
wait = request.args.get('wait') == '1'
task_id = str(task_id)
if task_id not in session['data_tasks']:
error = "Task ID '{}' not found in session. " \
"Refusing access.".format(task_id)
logger.warning(error)
return jsonify({'error': error}), 403
data_state = get_data_state_for_task_id(task_id, wait)
if data_state is None:
error = "Could not find redis entry for this task id '{}'. " \
"The entry probably expired.".format(task_id)
logger.error(error)
return jsonify({'error': error}), 404
logger.debug("Successfully gather meta information. Sending response.")
return jsonify({'meta': data_state['meta']}), 200
| 40.304878
| 79
| 0.66823
|
4b0ce87cf4b738c3cc7bd7bc0aea39b7af534687
| 2,079
|
py
|
Python
|
tests/contrib/djangorestframework/test_djangorestframework.py
|
tophatmonocle/dd-trace-py
|
7db12f1c398c07cd5baf91c571aed672dbb6496d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/contrib/djangorestframework/test_djangorestframework.py
|
tophatmonocle/dd-trace-py
|
7db12f1c398c07cd5baf91c571aed672dbb6496d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/contrib/djangorestframework/test_djangorestframework.py
|
tophatmonocle/dd-trace-py
|
7db12f1c398c07cd5baf91c571aed672dbb6496d
|
[
"BSD-3-Clause"
] | null | null | null |
import django
from django.apps import apps
from nose.tools import ok_, eq_
from unittest import skipIf
from tests.contrib.django.utils import DjangoTraceTestCase
@skipIf(django.VERSION < (1, 10), 'requires django version >= 1.10')
class RestFrameworkTest(DjangoTraceTestCase):
def setUp(self):
super(RestFrameworkTest, self).setUp()
        # imported inside setUp: importing at module level would raise an exception on unsupported (old) Django versions
from rest_framework.views import APIView
from ddtrace.contrib.django.restframework import unpatch_restframework
self.APIView = APIView
self.unpatch_restframework = unpatch_restframework
def test_setup(self):
ok_(apps.is_installed('rest_framework'))
ok_(hasattr(self.APIView, '_datadog_patch'))
def test_unpatch(self):
self.unpatch_restframework()
ok_(not getattr(self.APIView, '_datadog_patch'))
response = self.client.get('/users/')
# Our custom exception handler is setting the status code to 500
eq_(response.status_code, 500)
# check for spans
spans = self.tracer.writer.pop()
eq_(len(spans), 1)
sp = spans[0]
eq_(sp.name, 'django.request')
eq_(sp.resource, 'app.views.UserViewSet')
eq_(sp.error, 0)
eq_(sp.span_type, 'http')
eq_(sp.get_tag('http.status_code'), '500')
eq_(sp.get_tag('error.msg'), None)
def test_trace_exceptions(self):
response = self.client.get('/users/')
# Our custom exception handler is setting the status code to 500
eq_(response.status_code, 500)
# check for spans
spans = self.tracer.writer.pop()
eq_(len(spans), 1)
sp = spans[0]
eq_(sp.name, 'django.request')
eq_(sp.resource, 'app.views.UserViewSet')
eq_(sp.error, 1)
eq_(sp.span_type, 'http')
eq_(sp.get_tag('http.method'), 'GET')
eq_(sp.get_tag('http.status_code'), '500')
eq_(sp.get_tag('error.msg'), 'Authentication credentials were not provided.')
ok_('NotAuthenticated' in sp.get_tag('error.stack'))
| 33.532258
| 85
| 0.645503
|
382aa8d4df63d72d6b215d860563b364ea64cdf7
| 1,452
|
py
|
Python
|
MTandHJ/p34.py
|
MTandHJ/leetcode
|
f3832ed255d259cb881666ec8bd3de090d34e883
|
[
"MIT"
] | null | null | null |
MTandHJ/p34.py
|
MTandHJ/leetcode
|
f3832ed255d259cb881666ec8bd3de090d34e883
|
[
"MIT"
] | null | null | null |
MTandHJ/p34.py
|
MTandHJ/leetcode
|
f3832ed255d259cb881666ec8bd3de090d34e883
|
[
"MIT"
] | null | null | null |
from typing import List
from base import version
class Solution:
@version("36ms, 15.8mb")
def searchRange(self, nums: List[int], target: int) -> List[int]:
ans = [-1, -1]
if not len(nums):
return ans
l, r = 0, len(nums)
while l < r:
m = l + (r - l) // 2
if nums[m] >= target:
r = m
else:
l = m + 1
ans[0] = r
l, r = 0, len(nums)
while l < r:
m = l + (r - l) // 2
if nums[m] > target:
r = m
else:
l = m + 1
ans[1] = r - 1
if ans[0] == len(nums):
return [-1, -1]
if nums[ans[0]] == target:
return ans
else:
return [-1, -1]
@version("28ms, 15.8mb")
def searchRange(self, nums: List[int], target: int) -> List[int]:
nums = [-float('inf')] + nums + [float('inf')]
l, r = 0, len(nums)
while l < r:
m = l + (r - l) // 2
if nums[m] >= target:
r = m
else:
l = m + 1
if nums[r] != target:
return [-1, -1]
ans = [r-1, r-1]
l, r = r, len(nums)
while l < r:
m = l + (r - l) // 2
if nums[m] > target:
r = m
else:
l = m + 1
ans[1] = r - 2
return ans
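# Illustrative usage (added for clarity; not part of the original solution file):
#   Solution().searchRange([5, 7, 7, 8, 8, 10], 8)  ->  [3, 4]
#   Solution().searchRange([5, 7, 7, 8, 8, 10], 6)  ->  [-1, -1]
#   Solution().searchRange([], 0)                   ->  [-1, -1]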
| 23.419355
| 69
| 0.353994
|
609ae867db6fa476a9cb52a7107f9a638ca7f0d5
| 535
|
py
|
Python
|
rotnet/rotnet.py
|
ZJCV/RotNet
|
bc3598acaad3895b7455add301194c3e7d60c5a4
|
[
"Apache-2.0"
] | 7
|
2021-03-23T16:07:48.000Z
|
2021-12-21T15:38:21.000Z
|
rotnet/rotnet.py
|
ZJCV/RotNet
|
bc3598acaad3895b7455add301194c3e7d60c5a4
|
[
"Apache-2.0"
] | 9
|
2021-03-23T16:00:59.000Z
|
2022-02-18T15:32:16.000Z
|
rotnet/rotnet.py
|
ZJCV/RotNet
|
bc3598acaad3895b7455add301194c3e7d60c5a4
|
[
"Apache-2.0"
] | 6
|
2021-06-23T07:42:07.000Z
|
2021-11-26T13:16:04.000Z
|
# -*- coding: utf-8 -*-
"""
@date: 2021/3/15 4:25 PM
@file: rotnet.py
@author: zj
@description:
"""
config_file = 'configs/mbv3_small_se_hsigmoid_fmnist_224_e100.yaml'
import torch
from zcls.config import cfg
from zcls.model.recognizers.build import build_recognizer
def rotnet(pretrained=False, **kwargs):
cfg.merge_from_file(config_file)
model = build_recognizer(cfg, torch.device('cpu'))
if pretrained:
checkpoint = torch.load('weights/model.pth')
model.load_state_dict(checkpoint)
return model
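# Minimal usage sketch (an assumption added for clarity, not part of the original
# file): the factory above builds the zcls recognizer from the YAML config and
# optionally loads local weights.
#   model = rotnet(pretrained=False)
#   model.eval()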
| 22.291667
| 67
| 0.721495
|
1c879158f0cd6ac6c8110835ee188665a34d1472
| 5,737
|
py
|
Python
|
SlideServer.py
|
cjchirag7/SlideLoader
|
74aa4aa7c05b8c76814569c7325b92890c8e18cb
|
[
"BSD-3-Clause"
] | null | null | null |
SlideServer.py
|
cjchirag7/SlideLoader
|
74aa4aa7c05b8c76814569c7325b92890c8e18cb
|
[
"BSD-3-Clause"
] | null | null | null |
SlideServer.py
|
cjchirag7/SlideLoader
|
74aa4aa7c05b8c76814569c7325b92890c8e18cb
|
[
"BSD-3-Clause"
] | null | null | null |
import base64
import json
import os
import random
import shutil
import string
import sys
import pyvips
import flask
import flask_cors
import openslide
from werkzeug.utils import secure_filename
import dev_utils
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
app = flask.Flask(__name__)
flask_cors.CORS(app)
# where to put and get slides
app.config['UPLOAD_FOLDER'] = "/images/"
app.config['TEMP_FOLDER'] = "/images/uploading/"
app.config['TOKEN_SIZE'] = 10
app.config['SECRET_KEY'] = os.urandom(24)
ALLOWED_EXTENSIONS = set(['svs', 'tif', 'tiff', 'vms', 'vmu', 'ndpi', 'scn', 'mrxs', 'bif', 'svslide'])
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def getThumbnail(filename, size=50):
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
if not os.path.isfile(filepath):
return {"error": "No such file"}
try:
slide = openslide.OpenSlide(filepath)
thumb = slide.get_thumbnail((size, size))
buffer = BytesIO()
thumb.save(buffer, format="PNG")
data = 'data:image/png;base64,' + str(base64.b64encode(buffer.getvalue()))[2:-1]
return {"slide": data, "size": size}
except BaseException as e:
return {"type": "Openslide", "error": str(e)}
@app.route('/slide/<filename>/pyramid/<dest>', methods=['POST'])
def makePyramid(filename, dest):
try:
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
destpath = os.path.join(app.config['UPLOAD_FOLDER'], dest)
pyvips.Image.new_from_file(filepath, access='sequential').tiffsave(destpath, tile=True, compression="lzw", tile_width=256, tile_height=256, pyramid=True, bigtiff=True, xres=0.254, yres=0.254)
return flask.Response(json.dumps({"status": "OK"}), status=200)
except BaseException as e:
return flask.Response(json.dumps({"type": "pyvips", "error": str(e)}), status=500)
# routes
# start a file upload by registering the intent to upload, get a token to be used in future upload requests
@app.route('/upload/start', methods=['POST'])
def start_upload():
token = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(app.config['TOKEN_SIZE']))
token = secure_filename(token)
tmppath = os.path.join(app.config['TEMP_FOLDER'], token)
# regenerate if we happen to collide
while os.path.isfile(tmppath):
token = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(app.config['TOKEN_SIZE']))
token = secure_filename(token)
tmppath = os.path.join(app.config['TEMP_FOLDER'], token)
f = open(tmppath, 'a')
f.close()
return flask.Response(json.dumps({"upload_token": token}), status=200)
# using the token from the start upload endpoint, post data given offset.
@app.route('/upload/continue/<token>', methods=['POST'])
def continue_file(token):
token = secure_filename(token)
print(token, file=sys.stderr)
tmppath = os.path.join(app.config['TEMP_FOLDER'], token)
if os.path.isfile(tmppath):
body = flask.request.get_json()
if not body:
return flask.Response(json.dumps({"error": "Missing JSON body"}), status=400)
offset = body['offset'] or 0
        if 'data' not in body:
return flask.Response(json.dumps({"error": "File data not found in body"}), status=400)
else:
data = base64.b64decode(body['data'])
f = open(tmppath, "ab")
f.seek(int(offset))
f.write(data)
f.close()
return flask.Response(json.dumps({"status": "OK"}), status=200)
else:
return flask.Response(json.dumps({"error": "Token Not Recognised"}), status=400)
# end the upload, by removing the in progress indication; locks further modification
@app.route('/upload/finish/<token>', methods=['POST', "GET"])
def finish_upload(token):
body = flask.request.get_json()
if not body:
return flask.Response(json.dumps({"error": "Missing JSON body"}), status=400)
token = secure_filename(token)
filename = body['filename']
if filename and allowed_file(filename):
filename = secure_filename(filename)
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
tmppath = os.path.join(app.config['TEMP_FOLDER'], token)
if not os.path.isfile(filepath):
if os.path.isfile(tmppath):
shutil.move(tmppath, filepath)
return flask.Response(json.dumps({"ended": token, "filepath": filepath}))
else:
return flask.Response(json.dumps({"error": "Token Not Recognised"}), status=400)
else:
return flask.Response(json.dumps({"error": "File with name '" + filename + "' already exists"}), status=400)
else:
return flask.Response(json.dumps({"error": "Invalid filename"}), status=400)
# check for token
# get info associated with token
# move the file out of temp to upload dir
@app.route("/test", methods=['GET'])
def testRoute():
return '{"Status":"up"}'
@app.route("/data/one/<filepath>", methods=['GET'])
def singleSlide(filepath):
return json.dumps(dev_utils.getMetadata(filepath, app.config['UPLOAD_FOLDER']))
@app.route("/data/thumbnail/<filepath>", methods=['GET'])
def singleThumb(filepath):
size = flask.request.args.get('size', default=50, type=int)
return json.dumps(getThumbnail(filepath, size))
@app.route("/data/many/<filepathlist>", methods=['GET'])
def multiSlide(filepathlist):
return json.dumps(dev_utils.getMetadataList(json.loads(filepathlist), app.config['UPLOAD_FOLDER']))
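# Summary of the chunked upload flow implemented by the routes above (comment
# added for clarity):
#   1. POST /upload/start                  -> {"upload_token": "<token>"}
#   2. POST /upload/continue/<token>       JSON body {"offset": <int>, "data": "<base64 chunk>"}
#      repeated until the whole file has been transferred
#   3. POST /upload/finish/<token>         JSON body {"filename": "<name>.<allowed extension>"}
#      which moves the temp file into UPLOAD_FOLDER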
| 37.496732
| 199
| 0.662541
|
3f9e3f15d46eea06a5ebf7bfe5351199f789b4a8
| 3,494
|
py
|
Python
|
armada_command/command_dockyard.py
|
firesoft/armada
|
245115fcf21d988db5da71f18b3123479de5f2c1
|
[
"Apache-2.0"
] | null | null | null |
armada_command/command_dockyard.py
|
firesoft/armada
|
245115fcf21d988db5da71f18b3123479de5f2c1
|
[
"Apache-2.0"
] | null | null | null |
armada_command/command_dockyard.py
|
firesoft/armada
|
245115fcf21d988db5da71f18b3123479de5f2c1
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
from armada_command.armada_utils import print_err, print_table
from armada_command.dockyard import alias
from armada_command.dockyard.alias import print_http_dockyard_unavailability_warning
from armada_command.dockyard.dockyard import dockyard_factory, DockyardFactoryException, DockyardDetectionException
def add_arguments(parser):
subparsers = parser.add_subparsers(help='Manage dockyard aliases', dest='dockyard_command')
parser_set_help = 'Set dockyard alias'
parser_set = subparsers.add_parser('set', help=parser_set_help, description=parser_set_help)
parser_set.add_argument('name', help='Name of the dockyard alias')
parser_set.add_argument('address', help='ip[:port] of the dockyard')
parser_set.add_argument('--user', help='user')
parser_set.add_argument('--password', help='password')
parser_set.set_defaults(dockyard_func=command_dockyard_set)
subparsers.add_parser(parser_set)
parser_list_help = 'List dockyard aliases'
parser_list = subparsers.add_parser('list', help=parser_list_help, description=parser_list_help)
parser_list.set_defaults(dockyard_func=command_dockyard_list)
subparsers.add_parser(parser_list)
parser_remove_help = 'Delete dockyard alias'
parser_remove = subparsers.add_parser('delete', help=parser_remove_help, description=parser_remove_help)
parser_remove.add_argument('name', help='Name of the dockyard alias')
parser_remove.set_defaults(dockyard_func=command_dockyard_remove)
subparsers.add_parser(parser_remove)
parser_default_help = 'Get or set default alias'
parser_default = subparsers.add_parser('default', help=parser_default_help, description=parser_default_help)
parser_default.add_argument('name', help='Name of the dockyard alias', nargs='?')
parser_default.set_defaults(dockyard_func=command_dockyard_default)
subparsers.add_parser(parser_default)
def command_dockyard(args):
args.dockyard_func(args)
def command_dockyard_set(args):
warning_header = " Warning!\n Your dockyard alias has been set BUT:"
alias.set_alias(args.name, args.address, args.user, args.password)
try:
dockyard = dockyard_factory(args.address, args.user, args.password)
if dockyard.is_http():
print_http_dockyard_unavailability_warning(args.address, args.name, warning_header)
except (DockyardFactoryException, DockyardDetectionException) as e:
print_err('{}\n{}'.format(warning_header, e))
def command_dockyard_list(args):
output_header = ['Default', 'Alias', 'Address', 'User', 'Password']
output_rows = [output_header]
alias_list = alias.get_list()
for alias_dict in alias_list:
default_string = '->'.rjust(len(output_header[0])) if alias_dict['is_default'] else ''
row = [default_string, alias_dict['name'], alias_dict['address'], alias_dict.get('user', ''),
_hide_password(alias_dict.get('password', ''))]
output_rows.append(row)
print_table(output_rows)
def _hide_password(password):
return '****' if password else ''
def command_dockyard_remove(args):
alias.remove_alias(args.name)
def command_dockyard_default(args):
if args.name:
alias.set_default(args.name)
else:
default_alias = alias.get_default()
if default_alias:
print('Default alias for dockyard is: {default_alias}'.format(**locals()))
else:
print('No default alias set')
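# CLI usage sketch derived from the parsers above (illustrative; assumes this
# module backs an `armada dockyard` subcommand):
#   armada dockyard set <name> <ip[:port]> [--user USER] [--password PASSWORD]
#   armada dockyard list
#   armada dockyard delete <name>
#   armada dockyard default [<name>]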
| 42.096386
| 115
| 0.747853
|
8e33c1fc3074544bc5bf07d94c110c60efe8fdab
| 395
|
py
|
Python
|
compileio/asgi.py
|
unownone/Online_Compiler
|
b10f147f25210c4daad987a8cee30f5185e2d332
|
[
"MIT"
] | 2
|
2021-07-23T14:01:14.000Z
|
2021-07-23T14:47:08.000Z
|
compileio/asgi.py
|
unownone/Online_Compiler
|
b10f147f25210c4daad987a8cee30f5185e2d332
|
[
"MIT"
] | null | null | null |
compileio/asgi.py
|
unownone/Online_Compiler
|
b10f147f25210c4daad987a8cee30f5185e2d332
|
[
"MIT"
] | 1
|
2021-11-01T06:12:44.000Z
|
2021-11-01T06:12:44.000Z
|
"""
ASGI config for compileio project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'compileio.settings')
application = get_asgi_application()
| 23.235294
| 78
| 0.787342
|
50e51bddced908231d1cadef0abe1ae69e87cad2
| 292
|
py
|
Python
|
scrapy_test/scrapy_test/pipelines.py
|
czl0325/suningbook_scrapy
|
50458c3e7108fcc42132aaba13b29e9a6b8b659b
|
[
"Apache-2.0"
] | 2
|
2021-03-26T05:18:44.000Z
|
2021-03-31T14:14:11.000Z
|
scrapy_test/scrapy_test/pipelines.py
|
czl0325/suningbook_scrapy
|
50458c3e7108fcc42132aaba13b29e9a6b8b659b
|
[
"Apache-2.0"
] | 1
|
2021-08-31T08:44:49.000Z
|
2021-08-31T08:44:49.000Z
|
scrapy_test/scrapy_test/pipelines.py
|
czl0325/suningbook_scrapy
|
50458c3e7108fcc42132aaba13b29e9a6b8b659b
|
[
"Apache-2.0"
] | 1
|
2021-03-26T05:18:46.000Z
|
2021-03-26T05:18:46.000Z
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class ScrapyTestPipeline(object):
def process_item(self, item, spider):
return item
| 24.333333
| 66
| 0.715753
|
1bd507090dc843d5192abbf4b80122af877137d0
| 4,539
|
py
|
Python
|
pastepwn/analyzers/tests/emailpasswordpairanalyzer_test.py
|
robotboyfriend/pastepwn
|
ca6dd87afd053b5032857eb0615a947c3b9dfad9
|
[
"MIT"
] | 113
|
2018-09-06T22:14:52.000Z
|
2022-02-17T01:32:29.000Z
|
pastepwn/analyzers/tests/emailpasswordpairanalyzer_test.py
|
robotboyfriend/pastepwn
|
ca6dd87afd053b5032857eb0615a947c3b9dfad9
|
[
"MIT"
] | 199
|
2018-09-15T22:17:58.000Z
|
2022-01-23T23:45:09.000Z
|
pastepwn/analyzers/tests/emailpasswordpairanalyzer_test.py
|
robotboyfriend/pastepwn
|
ca6dd87afd053b5032857eb0615a947c3b9dfad9
|
[
"MIT"
] | 88
|
2018-09-09T13:02:06.000Z
|
2022-01-23T22:56:09.000Z
|
# -*- coding: utf-8 -*-
import unittest
from unittest import mock
from pastepwn.analyzers.emailpasswordpairanalyzer import EmailPasswordPairAnalyzer
class TestEmailPasswordPairAnalyzer(unittest.TestCase):
def setUp(self):
self.paste = mock.Mock()
def test_positive(self):
"""Test single matches in a paste"""
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "estocanam2@gmail.com:Firebird1@"
self.assertTrue(analyzer.match(self.paste))
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "test+test@gmail.com:abcd"
self.assertTrue(analyzer.match(self.paste))
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "estocanam2@gmail.com:aq12ws"
self.assertTrue(analyzer.match(self.paste))
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "estocanam2@apple.com:Fireb§"
self.assertTrue(analyzer.match(self.paste))
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "g@bb.com:Firebird1@"
self.assertTrue(analyzer.match(self.paste))
def test_negative(self):
"""Tests if it does not match on wrong strings"""
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "This is a Test"
self.assertFalse(analyzer.match(self.paste))
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "{a: 'b'}"
self.assertFalse(analyzer.match(self.paste))
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = ""
self.assertFalse(analyzer.match(self.paste))
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "\t\n"
self.assertFalse(analyzer.match(self.paste))
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "\n\n"
self.assertFalse(analyzer.match(self.paste))
def test_match_multiple(self):
"""Test multiple matches in a single paste"""
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "estocanam2@apple.com:Fireb§\n" \
"g@bb.com:Firebird1@\n" \
"Some comment\n" \
"test+test@gmail.com:abcd"
match = analyzer.match(self.paste)
self.assertTrue(match)
self.assertEqual("estocanam2@apple.com:Fireb§", match[0])
self.assertEqual("g@bb.com:Firebird1@", match[1])
self.assertEqual("test+test@gmail.com:abcd", match[2])
def test_min_match(self):
"""Test if the setting for minimal matches works"""
analyzer = EmailPasswordPairAnalyzer(None, min_amount=1)
self.paste.body = "estocanam2@apple.com:Fireb§\n" \
"g@bb.com:Firebird1@\n" \
"Some comment\n" \
"test+test@gmail.com:abcd"
self.assertEqual(3, len(analyzer.match(self.paste)))
self.assertTrue(analyzer.match(self.paste))
analyzer = EmailPasswordPairAnalyzer(None, min_amount=3)
self.assertTrue(analyzer.match(self.paste))
self.assertEqual(3, len(analyzer.match(self.paste)))
analyzer = EmailPasswordPairAnalyzer(None, min_amount=4)
self.assertFalse(analyzer.match(self.paste))
self.assertEqual(bool, type(analyzer.match(self.paste)))
def test_intext(self):
"""Test if matches inside text are recognized"""
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "An email/pw combo estocanam2@apple.com:Fireb§ inside a text"
match = analyzer.match(self.paste)
self.assertTrue(match)
self.assertEqual("estocanam2@apple.com:Fireb§", match[0])
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "also middle inside\nof test+test@gmail.com:abcd of a string!"
match = analyzer.match(self.paste)
self.assertTrue(match)
self.assertEqual("test+test@gmail.com:abcd", match[0])
def test_multiple(self):
"""Test if multiple matches are recognized"""
analyzer = EmailPasswordPairAnalyzer(None)
self.paste.body = "An email/pw combo estocanam2@apple.com:Fireb§ and also another inside\nof test+test@gmail.com:abcd of a string!"
match = analyzer.match(self.paste)
self.assertTrue(match)
self.assertEqual("estocanam2@apple.com:Fireb§", match[0])
self.assertEqual("test+test@gmail.com:abcd", match[1])
if __name__ == "__main__":
unittest.main()
| 40.168142
| 139
| 0.649042
|
64c23121b9356b636e9e6c1557b1d8f26f3bb4d4
| 17,456
|
py
|
Python
|
blender/arm/logicnode/arm_nodes.py
|
SunDaw/armory
|
84663bc981a26287cbac0aa60d73b139a0308b78
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/arm_nodes.py
|
SunDaw/armory
|
84663bc981a26287cbac0aa60d73b139a0308b78
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/arm_nodes.py
|
SunDaw/armory
|
84663bc981a26287cbac0aa60d73b139a0308b78
|
[
"Zlib"
] | null | null | null |
import itertools
from collections import OrderedDict
from typing import Any, Generator, List, Optional, Type, Dict
from typing import OrderedDict as ODict # Prevent naming conflicts
import bpy.types
from bpy.props import *
from nodeitems_utils import NodeItem
# Pass NodeReplacement forward to individual node modules that import arm_nodes
from arm.logicnode.replacement import NodeReplacement
import arm.node_utils
# When passed as a category to add_node(), this will use the capitalized
# name of the package of the node as the category to make renaming
# categories easier.
PKG_AS_CATEGORY = "__pkgcat__"
nodes = []
category_items: ODict[str, List['ArmNodeCategory']] = OrderedDict()
array_nodes = dict()
class ArmLogicTreeNode(bpy.types.Node):
arm_category = PKG_AS_CATEGORY
arm_section = 'default'
arm_is_obsolete = False
def init(self, context):
# make sure a given node knows the version of the NodeClass from when it was created
if isinstance(type(self).arm_version, int):
self.arm_version = type(self).arm_version
else:
self.arm_version = 1
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'ArmLogicTreeType'
@classmethod
def on_register(cls):
"""Don't call this method register() as it will be triggered before Blender registers the class, resulting in
a double registration."""
add_node(cls, cls.arm_category, cls.arm_section, cls.arm_is_obsolete)
@classmethod
def on_unregister(cls):
pass
def get_replacement_node(self, node_tree: bpy.types.NodeTree):
# needs to be overridden by individual node classes with arm_version>1
"""(only called if the node's version is inferior to the node class's version)
Help with the node replacement process, by explaining how a node (`self`) should be replaced.
This method can either return a NodeReplacement object (see `nodes_logic.py`), or a brand new node.
If a new node is returned, then the following needs to be already set:
- the node's links to the other nodes
- the node's properties
- the node inputs's default values
If more than one node need to be created (for example, if an input needs a type conversion after update),
please return all the nodes in a list.
please raise a LookupError specifically when the node's version isn't handled by the function.
note that the lowest 'defined' version should be 1. if the node's version is 0, it means that it has been saved before versioning was a thing.
NODES OF VERSION 1 AND VERSION 0 SHOULD HAVE THE SAME CONTENTS
"""
if self.arm_version == 0 and type(self).arm_version == 1:
# In case someone doesn't implement this function, but the node has version 0
return NodeReplacement.Identity(self)
else:
raise LookupError(f"the current node class {repr(type(self)):s} does not implement get_replacement_node() even though it has updated")
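    # Illustrative override sketch (hypothetical, added for clarity): a node class
    # whose arm_version was bumped to 2 could implement the protocol described in
    # the docstring above roughly like this, assuming its socket layout stayed
    # compatible between versions:
    #
    #     def get_replacement_node(self, node_tree):
    #         if self.arm_version not in (0, 1):
    #             raise LookupError()
    #         return NodeReplacement.Identity(self)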
def add_input(self, socket_type: str, socket_name: str, default_value: Any = None, is_var: bool = False) -> bpy.types.NodeSocket:
"""Adds a new input socket to the node.
If `is_var` is true, a dot is placed inside the socket to denote
that this socket can be used for variable access (see
SetVariable node).
"""
socket = self.inputs.new(socket_type, socket_name)
if default_value is not None:
socket.default_value = default_value
if is_var and not socket.display_shape.endswith('_DOT'):
socket.display_shape += '_DOT'
return socket
def add_output(self, socket_type: str, socket_name: str, default_value: Any = None, is_var: bool = False) -> bpy.types.NodeSocket:
"""Adds a new output socket to the node.
If `is_var` is true, a dot is placed inside the socket to denote
that this socket can be used for variable access (see
SetVariable node).
"""
socket = self.outputs.new(socket_type, socket_name)
if default_value is not None:
socket.default_value = default_value
if is_var and not socket.display_shape.endswith('_DOT'):
socket.display_shape += '_DOT'
return socket
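# Illustrative subclass sketch (hypothetical node, added for clarity; the names
# below are assumptions, not part of this module). A concrete logic node sets the
# category attributes and creates its sockets in init():
#
#     class ExampleNode(ArmLogicTreeNode):
#         bl_idname = 'LNExampleNode'
#         bl_label = 'Example'
#         arm_version = 1
#
#         def init(self, context):
#             super().init(context)
#             self.add_input('ArmNodeSocketAction', 'In')
#             self.add_output('ArmNodeSocketAction', 'Out')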
class ArmNodeAddInputButton(bpy.types.Operator):
"""Add a new input socket to the node set by node_index."""
bl_idname = 'arm.node_add_input'
bl_label = 'Add Input'
node_index: StringProperty(name='Node Index', default='')
socket_type: StringProperty(name='Socket Type', default='NodeSocketShader')
name_format: StringProperty(name='Name Format', default='Input {0}')
index_name_offset: IntProperty(name='Index Name Offset', default=0)
def execute(self, context):
global array_nodes
inps = array_nodes[self.node_index].inputs
inps.new(self.socket_type, self.name_format.format(str(len(inps) + self.index_name_offset)))
# Reset to default again for subsequent calls of this operator
self.node_index = ''
self.socket_type = 'NodeSocketShader'
self.name_format = 'Input {0}'
self.index_name_offset = 0
return{'FINISHED'}
class ArmNodeAddInputValueButton(bpy.types.Operator):
"""Add new input"""
bl_idname = 'arm.node_add_input_value'
bl_label = 'Add Input'
node_index: StringProperty(name='Node Index', default='')
socket_type: StringProperty(name='Socket Type', default='NodeSocketShader')
def execute(self, context):
global array_nodes
inps = array_nodes[self.node_index].inputs
inps.new(self.socket_type, 'Value')
return{'FINISHED'}
class ArmNodeRemoveInputButton(bpy.types.Operator):
"""Remove last input"""
bl_idname = 'arm.node_remove_input'
bl_label = 'Remove Input'
node_index: StringProperty(name='Node Index', default='')
def execute(self, context):
global array_nodes
node = array_nodes[self.node_index]
inps = node.inputs
min_inps = 0 if not hasattr(node, 'min_inputs') else node.min_inputs
if len(inps) > min_inps:
inps.remove(inps.values()[-1])
return{'FINISHED'}
class ArmNodeRemoveInputValueButton(bpy.types.Operator):
"""Remove last input"""
bl_idname = 'arm.node_remove_input_value'
bl_label = 'Remove Input'
node_index: StringProperty(name='Node Index', default='')
def execute(self, context):
global array_nodes
node = array_nodes[self.node_index]
inps = node.inputs
min_inps = 0 if not hasattr(node, 'min_inputs') else node.min_inputs
if len(inps) > min_inps and inps[-1].name == 'Value':
inps.remove(inps.values()[-1])
return{'FINISHED'}
class ArmNodeAddOutputButton(bpy.types.Operator):
"""Add a new output socket to the node set by node_index"""
bl_idname = 'arm.node_add_output'
bl_label = 'Add Output'
node_index: StringProperty(name='Node Index', default='')
socket_type: StringProperty(name='Socket Type', default='NodeSocketShader')
name_format: StringProperty(name='Name Format', default='Output {0}')
index_name_offset: IntProperty(name='Index Name Offset', default=0)
def execute(self, context):
global array_nodes
outs = array_nodes[self.node_index].outputs
outs.new(self.socket_type, self.name_format.format(str(len(outs) + self.index_name_offset)))
# Reset to default again for subsequent calls of this operator
self.node_index = ''
self.socket_type = 'NodeSocketShader'
self.name_format = 'Output {0}'
self.index_name_offset = 0
return{'FINISHED'}
class ArmNodeRemoveOutputButton(bpy.types.Operator):
"""Remove last output"""
bl_idname = 'arm.node_remove_output'
bl_label = 'Remove Output'
node_index: StringProperty(name='Node Index', default='')
def execute(self, context):
global array_nodes
node = array_nodes[self.node_index]
outs = node.outputs
min_outs = 0 if not hasattr(node, 'min_outputs') else node.min_outputs
if len(outs) > min_outs:
outs.remove(outs.values()[-1])
return{'FINISHED'}
class ArmNodeAddInputOutputButton(bpy.types.Operator):
"""Add new input and output"""
bl_idname = 'arm.node_add_input_output'
bl_label = 'Add Input Output'
node_index: StringProperty(name='Node Index', default='')
in_socket_type: StringProperty(name='In Socket Type', default='NodeSocketShader')
out_socket_type: StringProperty(name='Out Socket Type', default='NodeSocketShader')
in_name_format: StringProperty(name='In Name Format', default='Input {0}')
out_name_format: StringProperty(name='Out Name Format', default='Output {0}')
in_index_name_offset: IntProperty(name='Index Name Offset', default=0)
def execute(self, context):
global array_nodes
node = array_nodes[self.node_index]
inps = node.inputs
outs = node.outputs
inps.new(self.in_socket_type, self.in_name_format.format(str(len(inps) + self.in_index_name_offset)))
outs.new(self.out_socket_type, self.out_name_format.format(str(len(outs))))
# Reset to default again for subsequent calls of this operator
self.node_index = ''
self.in_socket_type = 'NodeSocketShader'
self.out_socket_type = 'NodeSocketShader'
self.in_name_format = 'Input {0}'
self.out_name_format = 'Output {0}'
self.in_index_name_offset = 0
return{'FINISHED'}
class ArmNodeRemoveInputOutputButton(bpy.types.Operator):
"""Remove last input and output"""
bl_idname = 'arm.node_remove_input_output'
bl_label = 'Remove Input Output'
node_index: StringProperty(name='Node Index', default='')
def execute(self, context):
global array_nodes
node = array_nodes[self.node_index]
inps = node.inputs
outs = node.outputs
min_inps = 0 if not hasattr(node, 'min_inputs') else node.min_inputs
min_outs = 0 if not hasattr(node, 'min_outputs') else node.min_outputs
if len(inps) > min_inps:
inps.remove(inps.values()[-1])
if len(outs) > min_outs:
outs.remove(outs.values()[-1])
return{'FINISHED'}
class ArmNodeSearch(bpy.types.Operator):
bl_idname = "arm.node_search"
bl_label = "Search..."
bl_options = {"REGISTER"}
bl_property = "item"
def get_search_items(self, context):
items = []
for node in get_all_nodes():
items.append((node.nodetype, node.label, ""))
return items
item: EnumProperty(items=get_search_items)
@classmethod
def poll(cls, context):
return context.space_data.tree_type == 'ArmLogicTreeType' and context.space_data.edit_tree
@classmethod
def description(cls, context, properties):
if cls.poll(context):
return "Search for a logic node"
else:
return "Search for a logic node. This operator is not available" \
" without an active node tree"
def invoke(self, context, event):
context.window_manager.invoke_search_popup(self)
return {"CANCELLED"}
def execute(self, context):
"""Called when a node is added."""
bpy.ops.node.add_node('INVOKE_DEFAULT', type=self.item, use_transform=True)
return {"FINISHED"}
class ArmNodeCategory:
"""Represents a category (=directory) of logic nodes."""
def __init__(self, name: str, icon: str, description: str):
self.name = name
self.icon = icon
self.description = description
self.node_sections: ODict[str, List[NodeItem]] = OrderedDict()
self.deprecated_nodes: List[NodeItem] = []
def register_node(self, node_type: Type[bpy.types.Node], node_section: str) -> None:
"""Registers a node to this category so that it will be
displayed in the `Add node` menu."""
self.add_node_section(node_section)
self.node_sections[node_section].append(arm.node_utils.nodetype_to_nodeitem(node_type))
def register_deprecated_node(self, node_type: Type[bpy.types.Node]) -> None:
if hasattr(node_type, 'arm_is_obsolete') and node_type.arm_is_obsolete:
self.deprecated_nodes.append(arm.node_utils.nodetype_to_nodeitem(node_type))
def get_all_nodes(self) -> Generator[NodeItem, None, None]:
"""Returns all nodes that are registered into this category."""
yield from itertools.chain(*self.node_sections.values())
def add_node_section(self, name: str):
"""Adds a node section to this category."""
if name not in self.node_sections:
self.node_sections[name] = []
def sort_nodes(self):
for node_section in self.node_sections:
self.node_sections[node_section] = sorted(self.node_sections[node_section], key=lambda item: item.label)
def category_exists(name: str) -> bool:
for category_section in category_items:
for c in category_items[category_section]:
if c.name == name:
return True
return False
def get_category(name: str) -> Optional[ArmNodeCategory]:
for category_section in category_items:
for c in category_items[category_section]:
if c.name == name:
return c
return None
def get_all_categories() -> Generator[ArmNodeCategory, None, None]:
for section_categories in category_items.values():
yield from itertools.chain(section_categories)
def get_all_nodes() -> Generator[NodeItem, None, None]:
for category in get_all_categories():
yield from itertools.chain(category.get_all_nodes())
def add_category_section(name: str) -> None:
"""Adds a section of categories to the node menu to group multiple
categories visually together. The given name only acts as an ID and
is not displayed in the user interface."""
global category_items
if name not in category_items:
category_items[name] = []
def add_node_section(name: str, category: str) -> None:
"""Adds a section of nodes to the sub menu of the given category to
group multiple nodes visually together. The given name only acts as
an ID and is not displayed in the user interface."""
node_category = get_category(category)
if node_category is not None:
node_category.add_node_section(name)
def add_category(category: str, section: str = 'default', icon: str = 'BLANK1', description: str = '') -> Optional[ArmNodeCategory]:
"""Adds a category of nodes to the node menu."""
global category_items
add_category_section(section)
if not category_exists(category):
node_category = ArmNodeCategory(category, icon, description)
category_items[section].append(node_category)
return node_category
return None
def add_node(node_type: Type[bpy.types.Node], category: str, section: str = 'default', is_obsolete: bool = False) -> None:
"""
Registers a node to the given category. If no section is given, the
node is put into the default section that does always exist.
Warning: Make sure that this function is not called multiple times per node!
"""
global nodes
if category == PKG_AS_CATEGORY:
category = node_type.__module__.rsplit('.', 2)[-2].capitalize()
nodes.append(node_type)
node_category = get_category(category)
if node_category is None:
node_category = add_category(category)
if is_obsolete:
# We need the obsolete nodes to be registered in order to have them replaced,
# but do not add them to the menu.
if node_category is not None:
# Make the deprecated nodes available for documentation purposes
node_category.register_deprecated_node(node_type)
return
node_category.register_node(node_type, section)
node_type.bl_icon = node_category.icon
def deprecated(*alternatives: str, message=""):
"""Class decorator to deprecate logic node classes. You can pass multiple string
arguments with the names of the available alternatives as well as a message
(keyword-param only) with further information about the deprecation."""
def wrapper(cls: ArmLogicTreeNode) -> ArmLogicTreeNode:
cls.bl_label += ' (Deprecated)'
cls.bl_description = f'Deprecated. {cls.bl_description}'
cls.bl_icon = 'ERROR'
cls.arm_is_obsolete = True
if cls.__doc__ is None:
cls.__doc__ = ''
if len(alternatives) > 0:
cls.__doc__ += '\n' + f'@deprecated {",".join(alternatives)}: {message}'
else:
cls.__doc__ += '\n' + f'@deprecated : {message}'
return cls
return wrapper
def reset_globals():
global nodes
global category_items
nodes = []
category_items = OrderedDict()
bpy.utils.register_class(ArmNodeSearch)
bpy.utils.register_class(ArmNodeAddInputButton)
bpy.utils.register_class(ArmNodeAddInputValueButton)
bpy.utils.register_class(ArmNodeRemoveInputButton)
bpy.utils.register_class(ArmNodeRemoveInputValueButton)
bpy.utils.register_class(ArmNodeAddOutputButton)
bpy.utils.register_class(ArmNodeRemoveOutputButton)
bpy.utils.register_class(ArmNodeAddInputOutputButton)
bpy.utils.register_class(ArmNodeRemoveInputOutputButton)
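# --- Editor's addition: an illustrative sketch, not part of the original module ---
# The add_category()/add_node() helpers defined above are the registration API that
# logic-node packages are expected to call. The never-invoked function below shows
# one hypothetical way to use them; the category name and description are invented,
# and `example_node_class` stands in for a concrete ArmLogicTreeNode subclass.
def _example_node_registration(example_node_class):
    """Register a hypothetical node class under an invented category (illustration only)."""
    add_category('Example Category', section='default', icon='BLANK1',
                 description='Editor-invented category, for illustration only')
    add_node(example_node_class, category='Example Category', section='default')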
| 37.62069
| 150
| 0.68412
|
638c462b3f387ba03a8ac9c251f8e8cb3b3397b5
| 7,308
|
py
|
Python
|
RL_brain.py
|
TissueC/DQN-mountain-car
|
6a9c3b271c79579a966bf91f2995b0e31fc59f17
|
[
"MIT"
] | null | null | null |
RL_brain.py
|
TissueC/DQN-mountain-car
|
6a9c3b271c79579a966bf91f2995b0e31fc59f17
|
[
"MIT"
] | null | null | null |
RL_brain.py
|
TissueC/DQN-mountain-car
|
6a9c3b271c79579a966bf91f2995b0e31fc59f17
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
# Deep Q Network off-policy
class DeepQNetwork:
def __init__(
self,
n_actions,
n_features,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=500,
memory_size=3000,
batch_size=32,
e_greedy_increment=None,
output_graph=False,
):
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
self.epsilon_increment = e_greedy_increment
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
# total learning step
self.learn_step_counter = 0
# initialize zero memory [s, a, r, s_]
self.memory = np.zeros((self.memory_size, n_features * 2 + 2))
# consist of [target_net, evaluate_net]
self._build_net()
t_params = tf.get_collection('target_net_params')
e_params = tf.get_collection('eval_net_params')
self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
self.sess = tf.Session()
if output_graph:
# $ tensorboard --logdir=logs
# tf.train.SummaryWriter will soon be deprecated, use the following
tf.summary.FileWriter("logs/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.cost_his = []
def _build_net(self):
# ------------------ build evaluate_net ------------------
self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # input
self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # for calculating loss
with tf.variable_scope('eval_net'):
# c_names(collections_names) are the collections to store variables
c_names, n_l1, w_initializer, b_initializer = \
['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \
tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers
# first layer. collections is used later when assign to target net
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)
# second layer. collections is used later when assign to target net
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_eval = tf.matmul(l1, w2) + b2
with tf.variable_scope('loss'):
self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
with tf.variable_scope('train'):
self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
# ------------------ build target_net ------------------
self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # input
with tf.variable_scope('target_net'):
# c_names(collections_names) are the collections to store variables
c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
# first layer. collections is used later when assign to target net
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
# second layer. collections is used later when assign to target net
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_next = tf.matmul(l1, w2) + b2
def store_transition(self, s, a, r, s_):
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
transition = np.hstack((s, [a, r], s_))
# replace the old memory with new memory
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition
self.memory_counter += 1
def choose_action(self, observation):
# to have batch dimension when feed into tf placeholder
observation = observation[np.newaxis, :]
if np.random.uniform() < self.epsilon:
# forward feed the observation and get the Q value for every action
actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
action = np.argmax(actions_value)
else:
action = np.random.randint(0, self.n_actions)
return action
def learn(self):
# check to replace target parameters
if self.learn_step_counter % self.replace_target_iter == 0:
self.sess.run(self.replace_target_op)
print('target_params_replaced')
# sample batch memory from all memory
if self.memory_counter > self.memory_size:
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
else:
sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
q_next, q_eval = self.sess.run(
[self.q_next, self.q_eval],
feed_dict={
self.s_: batch_memory[:, -self.n_features:], # fixed params
self.s: batch_memory[:, :self.n_features], # newest params
})
# change q_target w.r.t q_eval's action
q_target = q_eval.copy()
batch_index = np.arange(self.batch_size, dtype=np.int32)
eval_act_index = batch_memory[:, self.n_features].astype(int)
reward = batch_memory[:, self.n_features + 1]
q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)
# train eval network
_, self.cost = self.sess.run([self._train_op, self.loss],
feed_dict={self.s: batch_memory[:, :self.n_features],
self.q_target: q_target})
self.cost_his.append(self.cost)
# increasing epsilon
self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
self.learn_step_counter += 1
def plot_cost(self):
import matplotlib.pyplot as plt
plt.plot(np.arange(len(self.cost_his)), self.cost_his)
plt.ylabel('Cost')
plt.xlabel('Training steps')
plt.show()
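# --- Editor's addition: an illustrative sketch, not part of the original file ---
# A minimal training loop showing how this DeepQNetwork class is typically driven.
# It assumes the classic OpenAI Gym API (4-tuple step return) and the
# 'MountainCar-v0' environment; the episode count and the learning-start threshold
# are arbitrary choices made only for illustration.
def _example_training_loop(episodes=10):
    import gym  # assumed to be installed; not a dependency of the original file
    env = gym.make('MountainCar-v0')
    rl = DeepQNetwork(n_actions=env.action_space.n,
                      n_features=env.observation_space.shape[0],
                      learning_rate=0.001, e_greedy_increment=0.0001)
    total_steps = 0
    for _ in range(episodes):
        observation = env.reset()
        done = False
        while not done:
            action = rl.choose_action(observation)
            observation_, reward, done, _info = env.step(action)
            rl.store_transition(observation, action, reward, observation_)
            if total_steps > 1000:  # let the replay memory fill up before learning
                rl.learn()
            observation = observation_
            total_steps += 1
    rl.plot_cost()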
| 42.988235
| 117
| 0.613027
|
0b55ad849d5bb331da2a4c5ee95359b4b760b608
| 473
|
py
|
Python
|
setup.py
|
greyltc/pywrap
|
07f85fc5d1c05a56d4121c9daa369e842b1c5ac3
|
[
"Apache-2.0"
] | 1
|
2018-09-11T00:46:10.000Z
|
2018-09-11T00:46:10.000Z
|
setup.py
|
CadQuery/cpp-py-bindgen
|
66e7376d3a27444393fc99acbdbef40bbc7031ae
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
CadQuery/cpp-py-bindgen
|
66e7376d3a27444393fc99acbdbef40bbc7031ae
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
setup(
name='pywrap',
version="0.1dev",
entry_points={'console_scripts': 'pywrap = bindgen.__main__:main'},
packages=['bindgen'],
include_package_data = True,
install_requires=[
'click',
'logzero',
'path',
'clang',
'toml',
'pandas',
'joblib',
'tqdm',
'jinja2',
'toposort',
'pyparsing',
'pybind11',
'schema'
]
)
| 18.92
| 71
| 0.501057
|
f0e65c626f36a15c41863aa26aa4997515ae1ba0
| 4,413
|
py
|
Python
|
website/util/sanitize.py
|
erichilarysmithsr/osf.io
|
c92f7994ec26642f5ca2ead8248536f6855a6d17
|
[
"Apache-2.0"
] | null | null | null |
website/util/sanitize.py
|
erichilarysmithsr/osf.io
|
c92f7994ec26642f5ca2ead8248536f6855a6d17
|
[
"Apache-2.0"
] | 7
|
2021-03-02T01:33:58.000Z
|
2022-03-03T23:18:17.000Z
|
website/util/sanitize.py
|
erichilarysmithsr/osf.io
|
c92f7994ec26642f5ca2ead8248536f6855a6d17
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import collections
import json
import bleach
def strip_html(unclean, tags=None):
"""Sanitize a string, removing (as opposed to escaping) HTML tags
:param unclean: A string to be stripped of HTML tags
:return: stripped string
:rtype: str
"""
if not tags:
tags = []
# We make this noop for non-string, non-collection inputs so this function can be used with higher-order
# functions, such as rapply (recursively applies a function to collections)
if not isinstance(unclean, basestring) and not is_iterable(unclean) and unclean is not None:
return unclean
return bleach.clean(unclean, strip=True, tags=tags, attributes=[], styles=[])
# TODO: Not used anywhere except unit tests? Review for deletion
def clean_tag(data):
"""Format as a valid Tag
:param data: A string to be cleaned
:return: cleaned string
:rtype: str
"""
# TODO: make this a method of Tag?
return escape_html(data).replace('"', '&quot;').replace("'", '&#39;')
def is_iterable(obj):
return isinstance(obj, collections.Iterable)
def is_iterable_but_not_string(obj):
"""Return True if ``obj`` is an iterable object that isn't a string."""
return (is_iterable(obj) and not hasattr(obj, 'strip'))
def escape_html(data):
"""Escape HTML characters in data (as opposed to stripping them out entirely). Will ignore whitelisted tags.
:param data: A string, dict, or list to clean of HTML characters
:return: A cleaned object
:rtype: str or list or dict
"""
if isinstance(data, dict):
return {
key: escape_html(value)
for (key, value) in data.iteritems()
}
if is_iterable_but_not_string(data):
return [
escape_html(value)
for value in data
]
if isinstance(data, basestring):
return bleach.clean(data)
return data
# FIXME: Doesn't raise either type of exception expected, and can probably be deleted along with sole use
def assert_clean(data):
"""Ensure that data is cleaned
:raise: AssertionError
"""
def _ensure_clean(value):
if value != bleach.clean(value):
raise ValueError
return escape_html(data)
# TODO: Remove unescape_entities when mako html safe comes in
def unescape_entities(value, safe=None):
"""
Convert HTML-encoded data (stored in the database) to literal characters.
Intended primarily for endpoints consumed by frameworks that handle their own escaping (eg Knockout)
:param value: A string, dict, or list
:param safe: A dict of escape sequences and characters that can be used to extend the set of
characters that this function will unescape. Use with caution as there are few cases in which
there will be reason to unescape characters beyond '&amp;'.
:return: A string or list or dict without html escape characters
"""
safe_characters = {
'&amp;': '&',
}
if safe and isinstance(safe, dict):
safe_characters.update(safe)
if isinstance(value, dict):
return {
key: unescape_entities(value, safe=safe_characters)
for (key, value) in value.iteritems()
}
if is_iterable_but_not_string(value):
return [
unescape_entities(each, safe=safe_characters)
for each in value
]
if isinstance(value, basestring):
for escape_sequence, character in safe_characters.items():
value = value.replace(escape_sequence, character)
return value
return value
def temp_ampersand_fixer(s):
"""As a workaround for ampersands stored as escape sequences in database, unescape text before use on a safe page
Explicitly differentiate from safe_unescape_html in case use cases/behaviors diverge
"""
return s.replace('&amp;', '&')
def safe_json(value):
"""
Dump a string to JSON in a manner that can be used for JS strings in mako templates.
Providing additional forward-slash escaping to prevent injection of closing markup in strings. See:
http://benalpert.com/2012/08/03/preventing-xss-json.html
:param value: A string to be converted
:return: A JSON-formatted string that explicitly escapes forward slashes when needed
"""
return json.dumps(value).replace('</', '<\\/') # Fix injection of closing markup in strings
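# --- Editor's addition: an illustrative sketch, not part of the original file ---
# Example calls showing the intended behaviour of the helpers above. This module
# targets Python 2 (it relies on `basestring` and `iteritems`), so the never-invoked
# function below assumes a Python 2 interpreter with `bleach` installed.
def _example_usage():
    assert strip_html('<b>bold</b> text') == 'bold text'      # tags removed
    assert escape_html('<script>') == '&lt;script&gt;'        # tags escaped, not removed
    assert unescape_entities('a &amp; b') == 'a & b'          # ampersand restored
    assert safe_json('</script>') == '"<\\/script>"'          # forward slash escaped for JS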
| 31.521429
| 117
| 0.673918
|
5afdaecfd3deae62e17b704d472032f8ee43591f
| 1,437
|
py
|
Python
|
src/app/addr_modify.py
|
jack139/fair
|
fe0ff64f8edbd794c3fb951ab6af420054e9e585
|
[
"BSD-3-Clause"
] | 1
|
2019-07-16T09:46:39.000Z
|
2019-07-16T09:46:39.000Z
|
src/app/addr_modify.py
|
jack139/fair
|
fe0ff64f8edbd794c3fb951ab6af420054e9e585
|
[
"BSD-3-Clause"
] | null | null | null |
src/app/addr_modify.py
|
jack139/fair
|
fe0ff64f8edbd794c3fb951ab6af420054e9e585
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import web, json, time
from config import setting
import app_helper
db = setting.db_web
url = ('/app/addr_modify')
# Modify a shipping address
class handler:
def POST(self):
web.header('Content-Type', 'application/json')
param = web.input(app_id='', session='', addr_id='', name='', tel='', addr='', sign='')
if '' in (param.app_id, param.session, param.addr_id,
param.name, param.tel, param.addr, param.sign):
return json.dumps({'ret' : -2, 'msg' : '参数错误'})
uname = app_helper.logged(param.session) # check the session login
if uname:
# verify the signature
md5_str = app_helper.generate_sign([param.app_id, param.session,
param.addr_id, param.name, param.tel, param.addr])
if md5_str!=param.sign:
return json.dumps({'ret' : -1, 'msg' : '签名验证错误'})
# Need to check whether the address maps to a store; otherwise it is outside the delivery range
# app_helper.check_address()
# find and update the shipping address
r = db.app_user.find_one({'uname':uname}, {'address' : 1})
new_addr = []
for i in r['address']:
if i[0]==param.addr_id:
new_addr.append((
param.addr_id,
param.name.strip(),
param.tel.strip(),
param.addr.strip(),
int(time.time())
))
else:
new_addr.append(i)
r = db.app_user.update_one({'uname':uname}, {'$set' : {'address' : new_addr}})
# return the result
return json.dumps({'ret' : 0, 'data' : {
'addr_id' : param.addr_id,
}})
else:
return json.dumps({'ret' : -4, 'msg' : '无效的session'})
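# --- Editor's addition: an illustrative sketch, not part of the original file ---
# A hypothetical client-side call against this endpoint. The real signature value
# comes from app_helper.generate_sign() (not shown in this file), so a placeholder
# marks where it would go; the host, port and all parameter values are invented.
def _example_client_request():
    import requests  # assumed to be available on the client side
    payload = {
        'app_id': 'demo-app',
        'session': 'demo-session-token',
        'addr_id': 'addr-001',
        'name': 'Zhang San',
        'tel': '13800000000',
        'addr': 'No. 1 Example Road',
        'sign': '<md5 computed via app_helper.generate_sign(...)>',
    }
    resp = requests.post('http://localhost:8080/app/addr_modify', data=payload)
    return resp.json()  # {'ret': 0, 'data': {'addr_id': ...}} on success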
| 25.210526
| 89
| 0.60682
|
21b7c3e58634d91a092fe4ffddb4194990d56179
| 228
|
py
|
Python
|
hands/system_management/doctype/supplier_evaluation_template/test_supplier_evaluation_template.py
|
gchartas/hands
|
480032a6399ee4730585f85469a048b2c3fa6163
|
[
"MIT"
] | null | null | null |
hands/system_management/doctype/supplier_evaluation_template/test_supplier_evaluation_template.py
|
gchartas/hands
|
480032a6399ee4730585f85469a048b2c3fa6163
|
[
"MIT"
] | null | null | null |
hands/system_management/doctype/supplier_evaluation_template/test_supplier_evaluation_template.py
|
gchartas/hands
|
480032a6399ee4730585f85469a048b2c3fa6163
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, EngLandGR and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestSupplierEvaluationTemplate(unittest.TestCase):
pass
| 20.727273
| 56
| 0.780702
|
63704136645501997e12d527d47ab80a98b11e14
| 1,464
|
py
|
Python
|
SpeedPunk.roboFontExt/lib/SpeedPunkTool.py
|
yanone/speedpunk
|
d92c147dd269168f5ca046aa3687d4ca7d63782b
|
[
"Apache-2.0"
] | 41
|
2019-12-01T21:04:05.000Z
|
2021-11-14T18:13:06.000Z
|
SpeedPunk.roboFontExt/lib/SpeedPunkTool.py
|
yanone/speedpunk
|
d92c147dd269168f5ca046aa3687d4ca7d63782b
|
[
"Apache-2.0"
] | 7
|
2019-12-17T14:36:55.000Z
|
2022-02-10T18:23:40.000Z
|
SpeedPunk.roboFontExt/lib/SpeedPunkTool.py
|
yanone/speedpunk
|
d92c147dd269168f5ca046aa3687d4ca7d63782b
|
[
"Apache-2.0"
] | 5
|
2019-12-21T15:27:35.000Z
|
2021-04-29T18:33:30.000Z
|
##########################################################################################
#
# Speed Punk
# Visualisation tool of outline curvature for font editors.
#
# Distributed under Apache 2.0 license
#
##########################################################################################
import traceback
from AppKit import NSLog
try:
from mojo.events import installTool, EditingTool
from deYanoneRoboFontSpeedpunk import speedpunklib
from mojo.extensions import ExtensionBundle
bundle = ExtensionBundle("SpeedPunk")
################################################################################################################
class SpeedPunkTool(EditingTool):
def becomeActive(self):
self.speedpunklib = speedpunklib.SpeedPunkLib()
self.speedpunklib.tool = self
self.speedpunklib.Open()
def becomeInactive(self):
self.speedpunklib.Close()
def drawBackground(self, scale):
try:
if self.getGlyph() != None:
self.speedpunklib.UpdateGlyph(self.getGlyph())
except:
NSLog('Speed Punk:\n%s' % traceback.format_exc())
def glyphWindowWillClose(self, a):
self.speedpunklib.Close()
def glyphWindowDidOpen(self, a):
self.speedpunklib.Open()
def getToolbarTip(self):
return "Speed Punk"
def getToolbarIcon(self):
image = bundle.getResourceImage("toolbar")
if image:
return image
installTool(SpeedPunkTool())
except:
NSLog('Speed Punk:\n%s' % traceback.format_exc())
| 24.813559
| 113
| 0.588115
|
6924f5894d43e4c793cc0ea4522ac03cd4fbaaaa
| 268
|
py
|
Python
|
python/dazl/ledger/config/exc.py
|
DACH-NY/dazl-client
|
56c8b1be047415b2bcb35b6558de4a780a402458
|
[
"Apache-2.0"
] | null | null | null |
python/dazl/ledger/config/exc.py
|
DACH-NY/dazl-client
|
56c8b1be047415b2bcb35b6558de4a780a402458
|
[
"Apache-2.0"
] | null | null | null |
python/dazl/ledger/config/exc.py
|
DACH-NY/dazl-client
|
56c8b1be047415b2bcb35b6558de4a780a402458
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017-2022 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
__all__ = ["ConfigError", "ConfigWarning"]
class ConfigError(ValueError):
pass
class ConfigWarning(Warning):
pass
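# --- Editor's addition: an illustrative sketch, not part of the original file ---
# A minimal example of how these two types could be used when validating ledger
# connection settings; the function, message texts and the URL check are invented.
def _example_validate_url(url):
    import warnings
    if not url:
        raise ConfigError('a ledger URL is required')
    if url.startswith('http://'):
        warnings.warn('connecting without TLS', ConfigWarning)
    return url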
| 20.615385
| 102
| 0.742537
|
0efe60be64e69ce3cd9c038ad240a0a19dbe365c
| 117,230
|
py
|
Python
|
postman/tests.py
|
Boondockers-Welcome/django-postman
|
22818ea019ffaa28014f4a7d5b3365720ccdeb6b
|
[
"BSD-3-Clause"
] | null | null | null |
postman/tests.py
|
Boondockers-Welcome/django-postman
|
22818ea019ffaa28014f4a7d5b3365720ccdeb6b
|
[
"BSD-3-Clause"
] | null | null | null |
postman/tests.py
|
Boondockers-Welcome/django-postman
|
22818ea019ffaa28014f4a7d5b3365720ccdeb6b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Test suite.
- Do not put 'mailer' in INSTALLED_APPS, it disturbs the counting of emails.
- Make sure these templates are accessible:
registration/login.html
base.html
404.html
To have a fast test session, set a minimal configuration as:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': ':memory:', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
# 'django.contrib.sites', # is optional
'django.contrib.admin',
# 'dj_pagination', # has to be before postman ; or use the mock
# 'ajax_select', # is an option
# 'notification', # is an option
'postman',
)
"""
from __future__ import unicode_literals
import copy
from datetime import datetime, timedelta
import re
import sys
from django import VERSION
from django.conf import settings
from django.contrib.auth import get_user_model, REDIRECT_FIELD_NAME
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.models import Site
from django.core import mail
from django.core.exceptions import ValidationError
from django.core.management import call_command
if VERSION < (1, 10):
from django.core.urlresolvers import clear_url_caches, get_resolver, get_urlconf, resolve, reverse
else:
from django.urls import clear_url_caches, get_resolver, get_urlconf, resolve, reverse
from django.db.models import Q
from django.http import QueryDict
from django.template import Context, Template, TemplateDoesNotExist, TemplateSyntaxError
from django.test import TestCase, TransactionTestCase
if VERSION >= (1, 10):
from django.test import override_settings
from django.utils.encoding import force_text
from django.utils.formats import localize
from django.utils import six
from django.utils.six import StringIO
from django.utils.six.moves import reload_module
from django.utils.timezone import localtime, now
from django.utils.translation import activate
from .api import pm_broadcast, pm_write
# because of reload()'s, do "from postman.fields import CommaSeparatedUserField" just before needs
# because of reload()'s, do "from postman.forms import xxForm" just before needs
from .models import OPTION_MESSAGES, ORDER_BY_KEY, ORDER_BY_MAPPER, Message, PendingMessage,\
STATUS_PENDING, STATUS_ACCEPTED, STATUS_REJECTED,\
get_order_by, get_user_representation, get_user_name
# because of reload()'s, do "from postman.utils import notification" just before needs
from .utils import format_body, format_subject, email, email_visitor, notify_user
# added for 1.8, for the client side, to supersede the default language that gets set as soon as auth's permissions are created,
# a step initiated via a post_migrate signal.
activate('en')
class GenericTest(TestCase):
"""
Usual generic tests.
"""
def test_version(self):
self.assertEqual(sys.modules['postman'].__version__, "3.6.2.post1")
class TransactionViewTest(TransactionTestCase):
"""
Test some transactional behavior.
Can't use Django TestCase class, because it has a special treatment for commit/rollback to speed up the database resetting.
"""
if VERSION < (1, 10):
urls = 'postman.urls_for_tests'
def setUp(self):
settings.LANGUAGE_CODE = 'en' # do not bother about translation ; needed for the server side
self.user1 = get_user_model().objects.create_user('foo', 'foo@domain.com', 'pass')
self.user2 = get_user_model().objects.create_user('bar', 'bar@domain.com', 'pass')
for a in (
'POSTMAN_NAME_USER_AS',
):
if hasattr(settings, a):
delattr(settings, a)
def test(self):
"Test possible clash between transaction.commit_on_success and transaction.atomic (Django 1.6)."
url = reverse('postman:write')
data = {'recipients': self.user2.get_username(), 'subject': 's'}
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.post(url, data)
self.assertTrue(Message.objects.get())
if VERSION >= (1, 10):
TransactionViewTest = override_settings(ROOT_URLCONF='postman.urls_for_tests')(TransactionViewTest)
class BaseTest(TestCase):
"""
Common configuration and helper functions for all tests.
"""
if VERSION < (1, 10): # see comments about the decoration below
urls = 'postman.urls_for_tests'
def setUp(self):
settings.LANGUAGE_CODE = 'en' # do not bother about translation ; needed for the server side
for a in (
'POSTMAN_DISALLOW_ANONYMOUS',
'POSTMAN_DISALLOW_MULTIRECIPIENTS',
'POSTMAN_DISALLOW_COPIES_ON_REPLY',
'POSTMAN_DISABLE_USER_EMAILING',
'POSTMAN_FROM_EMAIL',
'POSTMAN_AUTO_MODERATE_AS',
'POSTMAN_NOTIFIER_APP',
'POSTMAN_SHOW_USER_AS',
'POSTMAN_NAME_USER_AS',
'POSTMAN_QUICKREPLY_QUOTE_BODY',
):
if hasattr(settings, a):
delattr(settings, a)
settings.POSTMAN_MAILER_APP = None
settings.POSTMAN_AUTOCOMPLETER_APP = {
'arg_default': 'postman_single_as1-1', # no default, mandatory to enable the feature
}
self.reload_modules()
self.user1 = get_user_model().objects.create_user('foo', 'foo@domain.com', 'pass')
self.user2 = get_user_model().objects.create_user('bar', 'bar@domain.com', 'pass')
self.user3 = get_user_model().objects.create_user('baz', 'baz@domain.com', 'pass')
self.email = 'qux@domain.com'
def check_now(self, dt):
"Check that a date is now. Well... almost."
delta = dt - now()
seconds = delta.days * (24*60*60) + delta.seconds
self.assertTrue(-2 <= seconds <= 1) # -1 is not enough for Mysql
def check_status(self, m, status=STATUS_PENDING, is_new=True, is_replied=False, parent=None, thread=None,
moderation_date=False, moderation_by=None, moderation_reason='',
sender_archived=False, recipient_archived=False,
sender_deleted_at=False, recipient_deleted_at=False):
"Check a bunch of properties of a message."
self.assertEqual(m.is_pending(), status==STATUS_PENDING)
self.assertEqual(m.is_rejected(), status==STATUS_REJECTED)
self.assertEqual(m.is_accepted(), status==STATUS_ACCEPTED)
self.assertEqual(m.is_new, is_new)
self.assertEqual(m.is_replied, is_replied)
self.check_now(m.sent_at)
self.assertEqual(m.parent, parent)
self.assertEqual(m.thread, thread)
self.assertEqual(m.sender_archived, sender_archived)
self.assertEqual(m.recipient_archived, recipient_archived)
if sender_deleted_at:
if isinstance(sender_deleted_at, datetime):
self.assertEqual(m.sender_deleted_at, sender_deleted_at)
else:
self.assertNotEqual(m.sender_deleted_at, None)
else:
self.assertEqual(m.sender_deleted_at, None)
if recipient_deleted_at:
if isinstance(recipient_deleted_at, datetime):
self.assertEqual(m.recipient_deleted_at, recipient_deleted_at)
else:
self.assertNotEqual(m.recipient_deleted_at, None)
else:
self.assertEqual(m.recipient_deleted_at, None)
if moderation_date:
if isinstance(moderation_date, datetime):
self.assertEqual(m.moderation_date, moderation_date)
else:
self.assertNotEqual(m.moderation_date, None)
else:
self.assertEqual(m.moderation_date, None)
self.assertEqual(m.moderation_by, moderation_by)
self.assertEqual(m.moderation_reason, moderation_reason)
def create(self, *args, **kwargs):
"Create a message."
kwargs.update(subject='s')
return Message.objects.create(*args, **kwargs)
def create_accepted(self, *args, **kwargs):
"Create a message with a default status as 'accepted'."
kwargs.setdefault('moderation_status', STATUS_ACCEPTED)
return self.create(*args, **kwargs)
# set of message creations
def c12(self, *args, **kwargs):
kwargs.update(sender=self.user1, recipient=self.user2)
return self.create_accepted(*args, **kwargs)
def c13(self, *args, **kwargs):
kwargs.update(sender=self.user1, recipient=self.user3)
return self.create_accepted(*args, **kwargs)
def c21(self, *args, **kwargs):
kwargs.update(sender=self.user2, recipient=self.user1)
return self.create_accepted(*args, **kwargs)
def c23(self, *args, **kwargs):
kwargs.update(sender=self.user2, recipient=self.user3)
return self.create_accepted(*args, **kwargs)
def c32(self, *args, **kwargs):
kwargs.update(sender=self.user3, recipient=self.user2)
return self.create_accepted(*args, **kwargs)
def reload_modules(self):
"Reload some modules after a change in settings."
clear_url_caches()
try:
reload_module(sys.modules['postman.utils'])
reload_module(sys.modules['postman.fields'])
reload_module(sys.modules['postman.forms'])
reload_module(sys.modules['postman.views'])
except KeyError: # happens once at the setUp
pass
reload_module(get_resolver(get_urlconf()).urlconf_module)
# test_template() fails with the decorated way ; ticket/26427, fixed in 1.10a1
if VERSION >= (1, 10):
BaseTest = override_settings(ROOT_URLCONF='postman.urls_for_tests')(BaseTest)
class ViewTest(BaseTest):
"""
Test the views.
"""
def test_home(self):
response = self.client.get('/messages/')
self.assertRedirects(response, reverse('postman:inbox'), status_code=301, target_status_code=302)
def check_folder(self, folder):
url = reverse('postman:' + folder, args=[OPTION_MESSAGES])
template = "postman/{0}.html".format(folder)
# anonymous
response = self.client.get(url)
self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url))
# authenticated
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.get(url)
self.assertTemplateUsed(response, template)
url = reverse('postman:' + folder)
response = self.client.get(url)
self.assertTemplateUsed(response, template)
def test_inbox(self):
self.check_folder('inbox')
def test_sent(self):
self.check_folder('sent')
def test_archives(self):
self.check_folder('archives')
def test_trash(self):
self.check_folder('trash')
def test_i18n_urls(self):
"Test the POSTMAN_I18N_URLS setting."
settings.POSTMAN_I18N_URLS = True
self.reload_modules()
activate('fr')
url = reverse('postman:inbox') # do not test all urls, one is enough for proof
self.assertEqual(url, '/messages/re%C3%A7us/')
# reset, otherwise 'postman:inbox' keeps its lazy translation and the following test_inbox will fail
settings.POSTMAN_I18N_URLS = False
self.reload_modules()
def check_template(self, action, args):
# don't want to bother with additional templates; test only the parameter passing
url = reverse('postman:' + action + '_template', args=args)
self.assertRaises(TemplateDoesNotExist, self.client.get, url)
def test_template(self):
"Test the 'template_name' parameter."
m1 = self.c12()
m1.read_at, m1.thread = now(), m1
m2 = self.c21(parent=m1, thread=m1.thread)
m1.replied_at = m2.sent_at; m1.save()
self.assertTrue(self.client.login(username='foo', password='pass'))
for actions, args in [
(('inbox', 'sent', 'archives', 'trash', 'write'), []),
(('view', 'view_conversation'), [m1.pk]),
(('reply',), [m2.pk]),
]:
for action in actions:
self.check_template(action, args)
def test_write_authentication(self):
"Test permission and what template & form are used."
url = reverse('postman:write')
template = "postman/write.html"
# anonymous is allowed
response = self.client.get(url)
self.assertTemplateUsed(response, template)
from postman.forms import AnonymousWriteForm
self.assertTrue(isinstance(response.context['form'], AnonymousWriteForm))
# anonymous is not allowed
settings.POSTMAN_DISALLOW_ANONYMOUS = True
self.reload_modules()
response = self.client.get(url)
self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url))
# authenticated
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.get(url)
self.assertTemplateUsed(response, template)
from postman.forms import WriteForm
self.assertTrue(isinstance(response.context['form'], WriteForm))
def test_write_recipient(self):
"Test the passing of recipient names in URL."
template = "postman/write.html"
url = reverse('postman:write', args=['foo'])
response = self.client.get(url)
self.assertContains(response, 'value="foo"')
url = reverse('postman:write', args=['foo:bar'])
response = self.client.get(url)
self.assertContains(response, 'value="bar, foo"')
url = reverse('postman:write', args=[':foo::intruder:bar:a-b+c@d.com:foo:'])
response = self.client.get(url)
self.assertContains(response, 'value="bar, foo"')
# because of Custom User Model, do allow almost any character, not only '^[\w.@+-]+$' of the legacy django.contrib.auth.User model
get_user_model().objects.create_user("Le Créac'h", 'foobar@domain.com', 'pass') # even: space, accentued, quotes
url = reverse('postman:write', args=["Le Créac'h"])
response = self.client.get(url)
self.assertContains(response, 'value="Le Créac'h"')
settings.POSTMAN_NAME_USER_AS = 'id' # test int values compliance with processings using str.join()
url = reverse('postman:write', args=['1:2'])
response = self.client.get(url)
self.assertContains(response, 'value="1, 2"')
def test_write_auto_complete(self):
"Test the 'autocomplete_channels' parameter."
url = reverse('postman:write_auto_complete')
# anonymous
response = self.client.get(url)
f = response.context['form'].fields['recipients']
if hasattr(f, 'channel'): # app may not be in INSTALLED_APPS
self.assertEqual(f.channel, 'postman_single_as1-1')
# authenticated
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.get(url)
f = response.context['form'].fields['recipients']
if hasattr(f, 'channel'):
self.assertEqual(f.channel, 'postman_multiple_as1-1')
def check_init_by_query_string(self, action, args=[]):
template = "postman/{0}.html".format(action)
url = reverse('postman:' + action, args=args)
response = self.client.get(url + '?subject=that%20is%20the%20subject')
self.assertContains(response, 'value="that is the subject"')
response = self.client.get(url + '?body=this%20is%20my%20body')
# before Django 1.5: 'name="body">this is my body' ; after: 'name="body">\r\nthis is my body'
self.assertContains(response, 'this is my body</textarea>')
def test_write_querystring(self):
"Test the prefilling by query string."
self.check_init_by_query_string('write')
def check_message(self, m, is_anonymous=False, subject='s', body='b', recipient_username='bar'):
"Check some message properties, status, and that no mail is sent."
self.assertEqual(m.subject, subject)
self.assertEqual(m.body, body)
self.assertEqual(m.email, 'a@b.com' if is_anonymous else '')
self.assertEqual(m.sender, self.user1 if not is_anonymous else None)
self.assertEqual(m.recipient.get_username(), recipient_username)
if is_anonymous:
self.check_status(m, sender_deleted_at=True)
self.assertEqual(len(mail.outbox), 0)
def check_contrib_messages(self, response, text):
if 'messages' in response.context: # contrib\messages\context_processors.py may be not there
messages = response.context['messages']
if messages != []: # contrib\messages\middleware.py may be not there
self.assertEqual(len(messages), 1)
for message in messages: # can only be iterated
self.assertEqual(str(message), text)
def check_write_post(self, extra={}, is_anonymous=False):
"Check message generation, redirection, and mandatory fields."
url = reverse('postman:write')
url_with_success_url = reverse('postman:write_with_success_url_to_sent')
data = {'recipients': self.user2.get_username(), 'subject': 's', 'body': 'b'}
data.update(extra)
# default redirect is to the requestor page
response = self.client.post(url, data, HTTP_REFERER=url, follow=True)
self.assertRedirects(response, url)
self.check_contrib_messages(response, 'Message successfully sent.') # no such check for the following posts, one is enough
m = Message.objects.get()
pk = m.pk
self.check_message(m, is_anonymous)
# fallback redirect is to inbox. So redirect again when login is required
response = self.client.post(url, data)
self.assertRedirects(response, reverse('postman:inbox'), target_status_code=302 if is_anonymous else 200)
self.check_message(Message.objects.get(pk=pk+1), is_anonymous)
# redirect url may be superseded
response = self.client.post(url_with_success_url, data, HTTP_REFERER=url)
self.assertRedirects(response, reverse('postman:sent'), target_status_code=302 if is_anonymous else 200)
self.check_message(Message.objects.get(pk=pk+2), is_anonymous)
# query string has highest precedence ; scheme and domain are silently ignored
response = self.client.post(url_with_success_url + '?next=any://any.tld' + url, data, HTTP_REFERER='does not matter')
self.assertRedirects(response, url)
self.check_message(Message.objects.get(pk=pk+3), is_anonymous)
for f in data.keys():
if f in ('body',): continue
d = data.copy()
del d[f]
response = self.client.post(url, d, HTTP_REFERER=url)
self.assertFormError(response, 'form', f, 'This field is required.')
def test_write_post_anonymous(self):
self.check_write_post({'email': 'a@b.com'}, is_anonymous=True)
def test_write_post_authenticated(self):
self.assertTrue(self.client.login(username='foo', password='pass'))
self.check_write_post()
def test_write_post_multirecipient(self):
"Test number of recipients constraint."
from postman.fields import CommaSeparatedUserField
url = reverse('postman:write')
data = {
'email': 'a@b.com', 'subject': 's', 'body': 'b',
'recipients': '{0}, {1}'.format(self.user2.get_username(), self.user3.get_username())}
# anonymous
response = self.client.post(url, data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', CommaSeparatedUserField.default_error_messages['max'].format(limit_value=1, show_value=2))
# authenticated
self.assertTrue(self.client.login(username='foo', password='pass'))
del data['email']
response = self.client.post(url, data, HTTP_REFERER=url)
self.assertRedirects(response, url)
msgs = list(Message.objects.all())
self.check_message(msgs[0], recipient_username='baz')
self.check_message(msgs[1])
url_with_max = reverse('postman:write_with_max')
response = self.client.post(url_with_max, data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', CommaSeparatedUserField.default_error_messages['max'].format(limit_value=1, show_value=2))
settings.POSTMAN_DISALLOW_MULTIRECIPIENTS = True
response = self.client.post(url, data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', CommaSeparatedUserField.default_error_messages['max'].format(limit_value=1, show_value=2))
def test_write_post_filters(self):
"Test user- and exchange- filters."
url = reverse('postman:write')
data = {
'subject': 's', 'body': 'b',
'recipients': '{0}, {1}'.format(self.user2.get_username(), self.user3.get_username())}
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.post(reverse('postman:write_with_user_filter_reason'), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar (some reason).")
response = self.client.post(reverse('postman:write_with_user_filter_no_reason'), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar, baz.")
response = self.client.post(reverse('postman:write_with_user_filter_false'), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar, baz.")
response = self.client.post(reverse('postman:write_with_user_filter_exception'), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', ['first good reason',"anyway, I don't like bar"])
response = self.client.post(reverse('postman:write_with_exch_filter_reason'), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar (some reason).")
response = self.client.post(reverse('postman:write_with_exch_filter_no_reason'), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar, baz.")
response = self.client.post(reverse('postman:write_with_exch_filter_false'), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar, baz.")
response = self.client.post(reverse('postman:write_with_exch_filter_exception'), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', ['first good reason',"anyway, I don't like bar"])
def test_write_post_moderate(self):
"Test 'auto_moderators' parameter."
url = reverse('postman:write')
data = {'subject': 's', 'body': 'b', 'recipients': self.user2.get_username()}
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.post(reverse('postman:write_moderate'), data, HTTP_REFERER=url, follow=True)
self.assertRedirects(response, url)
self.check_contrib_messages(response, 'Message rejected for at least one recipient.')
self.check_status(Message.objects.get(), status=STATUS_REJECTED, recipient_deleted_at=True,
moderation_date=True, moderation_reason="some reason")
def test_write_notification(self):
"Test the fallback for the site name in the generation of a notification, when the django.contrib.sites app is not installed."
settings.POSTMAN_AUTO_MODERATE_AS = True # will generate an acceptance notification
url = reverse('postman:write')
data = {'subject': 's', 'body': 'b', 'recipients': self.user2.get_username()}
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.post(url, data, HTTP_REFERER=url)
self.assertRedirects(response, url)
self.check_status(Message.objects.get(), status=STATUS_ACCEPTED, moderation_date=True)
self.assertEqual(len(mail.outbox), 1)
# can't use get_current_site(response.request) because response.request is not an HttpRequest and doesn't have a get_host attribute
if Site._meta.installed:
sitename = Site.objects.get_current().name
else:
sitename = "testserver" # the SERVER_NAME environment variable is not accessible here
self.assertTrue(sitename in mail.outbox[0].subject)
def test_reply_authentication(self):
"Test permission and what template & form are used."
template = "postman/reply.html"
pk = self.c21(body="this is my body").pk
url = reverse('postman:reply', args=[pk])
# anonymous
response = self.client.get(url)
self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url))
# authenticated
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.get(url)
self.assertTemplateUsed(response, template)
from postman.forms import FullReplyForm
self.assertTrue(isinstance(response.context['form'], FullReplyForm))
self.assertContains(response, 'value="Re: s"')
self.assertContains(response, '\n\nbar wrote:\n&gt; this is my body\n</textarea>')
self.assertEqual(response.context['recipient'], 'bar')
settings.POSTMAN_QUICKREPLY_QUOTE_BODY = True # no influence here, acts only for Quick Reply
self.reload_modules()
response = self.client.get(url)
self.assertContains(response, 'value="Re: s"')
self.assertContains(response, '\n\nbar wrote:\n&gt; this is my body\n</textarea>')
def test_reply_formatters(self):
"Test the 'formatters' parameter."
template = "postman/reply.html"
pk = self.c21(body="this is my body").pk
url = reverse('postman:reply_formatters', args=[pk])
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.get(url)
self.assertTemplateUsed(response, template)
self.assertContains(response, 'value="Re_ s"')
self.assertContains(response, 'bar _ this is my body</textarea>') # POSTMAN_QUICKREPLY_QUOTE_BODY setting is not involved
def test_reply_auto_complete(self):
"Test the 'autocomplete_channel' parameter."
pk = self.c21().pk
url = reverse('postman:reply_auto_complete', args=[pk])
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.get(url)
f = response.context['form'].fields['recipients']
if hasattr(f, 'channel'):
self.assertEqual(f.channel, 'postman_multiple_as1-1')
def check_404(self, view_name, pk):
"Return is a 404 page."
url = reverse(view_name, args=[pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def check_reply_404(self, pk):
self.check_404('postman:reply', pk)
def test_reply_id(self):
"Test all sort of failures."
self.assertTrue(self.client.login(username='foo', password='pass'))
# invalid message id
self.check_reply_404(1000)
# existent message but you are the sender, not the recipient
self.check_reply_404(Message.objects.get(pk=self.c12().pk).pk) # create & verify really there
# existent message but not yours at all
self.check_reply_404(Message.objects.get(pk=self.c23().pk).pk)
# existent message but not yet visible to you
self.check_reply_404(Message.objects.get(pk=self.create(sender=self.user2, recipient=self.user1).pk).pk)
# cannot reply to a deleted message
self.check_reply_404(Message.objects.get(pk=self.c21(recipient_deleted_at=now()).pk).pk)
def test_reply_querystring(self):
"Test the prefilling by query string."
self.assertTrue(self.client.login(username='foo', password='pass'))
self.check_init_by_query_string('reply', [self.c21().pk])
def test_reply_post(self):
"Test message generation and redirection."
pk = self.c21().pk
url = reverse('postman:reply', args=[pk])
url_with_success_url = reverse('postman:reply_with_success_url_to_sent', args=[pk])
data = {'subject': 's', 'body': 'b'}
self.assertTrue(self.client.login(username='foo', password='pass'))
# default redirect is to the requestor page
response = self.client.post(url, data, HTTP_REFERER=url)
self.assertRedirects(response, url)
# the check_contrib_messages() in test_write_post() is enough
self.check_message(Message.objects.get(pk=pk+1))
# fallback redirect is to inbox
response = self.client.post(url, data)
self.assertRedirects(response, reverse('postman:inbox'))
self.check_message(Message.objects.get(pk=pk+2))
# redirect url may be superseded
response = self.client.post(url_with_success_url, data, HTTP_REFERER=url)
self.assertRedirects(response, reverse('postman:sent'))
self.check_message(Message.objects.get(pk=pk+3))
# query string has highest precedence ; scheme and domain are silently ignored
response = self.client.post(url_with_success_url + '?next=any://any.tld' + url, data, HTTP_REFERER='does not matter')
self.assertRedirects(response, url)
self.check_message(Message.objects.get(pk=pk+4))
# missing subject is valid, as in quick reply
response = self.client.post(url, {}, HTTP_REFERER=url)
self.assertRedirects(response, url)
self.check_message(Message.objects.get(pk=pk+5), subject='Re: s', body='')
def test_reply_post_copies(self):
"Test number of recipients constraint."
from postman.fields import CommaSeparatedUserField
pk = self.c21().pk
url = reverse('postman:reply', args=[pk])
data = {'subject': 's', 'body': 'b', 'recipients': self.user3.get_username()}
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.post(url, data, HTTP_REFERER=url)
self.assertRedirects(response, url)
self.check_message(Message.objects.get(pk=pk+1))
self.check_message(Message.objects.get(pk=pk+2), recipient_username='baz')
url_with_max = reverse('postman:reply_with_max', args=[pk])
data.update(recipients='{0}, {1}'.format(self.user2.get_username(), self.user3.get_username()))
response = self.client.post(url_with_max, data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', CommaSeparatedUserField.default_error_messages['max'].format(limit_value=1, show_value=2))
settings.POSTMAN_DISALLOW_COPIES_ON_REPLY = True
self.reload_modules()
response = self.client.post(url, data, HTTP_REFERER=url)
self.assertRedirects(response, url)
self.check_message(Message.objects.get(pk=pk+3))
self.assertRaises(Message.DoesNotExist, Message.objects.get, pk=pk+4)
def test_reply_post_filters(self):
"Test user- and exchange- filters."
pk = self.c21().pk
url = reverse('postman:reply', args=[pk])
data = {'subject': 's', 'body': 'b', 'recipients': '{0}, {1}'.format(self.user2.get_username(), self.user3.get_username())}
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.post(reverse('postman:reply_with_user_filter_reason', args=[pk]), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar (some reason).")
response = self.client.post(reverse('postman:reply_with_user_filter_no_reason', args=[pk]), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar, baz.")
response = self.client.post(reverse('postman:reply_with_user_filter_false', args=[pk]), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Some usernames are rejected: bar, baz.")
response = self.client.post(reverse('postman:reply_with_user_filter_exception', args=[pk]), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', ['first good reason',"anyway, I don't like bar"])
# filter is also applied to the implicit recipient
response = self.client.post(reverse('postman:reply_with_exch_filter_reason', args=[pk]), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar (some reason).")
self.assertFormError(response, 'form', None, "Writing to some users is not possible: bar (some reason).")
response = self.client.post(reverse('postman:reply_with_exch_filter_no_reason', args=[pk]), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar, baz.")
self.assertFormError(response, 'form', None, "Writing to some users is not possible: bar.")
response = self.client.post(reverse('postman:reply_with_exch_filter_false', args=[pk]), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', "Writing to some users is not possible: bar, baz.")
self.assertFormError(response, 'form', None, "Writing to some users is not possible: bar.")
response = self.client.post(reverse('postman:reply_with_exch_filter_exception', args=[pk]), data, HTTP_REFERER=url)
self.assertFormError(response, 'form', 'recipients', ['first good reason',"anyway, I don't like bar"])
self.assertFormError(response, 'form', None, ['first good reason',"anyway, I don't like bar"])
def test_reply_post_moderate(self):
"Test 'auto_moderators' parameter."
m = self.c21()
pk = m.pk
url = reverse('postman:reply', args=[pk])
data = {'subject': 's', 'body': 'b'}
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.post(reverse('postman:reply_moderate', args=[pk]), data, HTTP_REFERER=url)
self.assertRedirects(response, url)
# the check_contrib_messages() in test_write_post_moderate() is enough
self.check_status(Message.objects.get(pk=pk+1), status=STATUS_REJECTED, recipient_deleted_at=True,
parent=m, thread=m,
moderation_date=True, moderation_reason="some reason")
def test_view_authentication(self):
"Test permission, what template and form are used, set-as-read."
template = "postman/view.html"
pk1 = self.c12().pk
pk2 = self.c21(body="this is my body").pk
url = reverse('postman:view', args=[pk1])
# anonymous
response = self.client.get(url)
self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url))
# authenticated
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.get(url)
self.assertTemplateUsed(response, template)
self.assertFalse(response.context['archived'])
self.assertTrue(response.context['reply_to_pk'] is None)
self.assertTrue(response.context['form'] is None)
self.check_status(Message.objects.get(pk=pk1), status=STATUS_ACCEPTED)
url = reverse('postman:view', args=[pk2])
response = self.client.get(url)
self.assertFalse(response.context['archived'])
self.assertEqual(response.context['reply_to_pk'], pk2)
from postman.forms import QuickReplyForm
self.assertTrue(isinstance(response.context['form'], QuickReplyForm))
self.assertNotContains(response, 'value="Re: s"')
# before Django 1.11, in django\forms\widgets.py\Textarea : '>\r\n</textarea>'
# after, in django\forms\templates\django\forms\widgets\textarea.html : '>\n</textarea>'
self.assertContains(response, '\n</textarea>')
self.check_status(Message.objects.get(pk=pk2), status=STATUS_ACCEPTED, is_new=False)
settings.POSTMAN_QUICKREPLY_QUOTE_BODY = True
self.reload_modules()
response = self.client.get(url)
self.assertContains(response, '\n\nbar wrote:\n&gt; this is my body\n</textarea>')
def test_view_formatters(self):
"Test the 'formatters' parameter."
template = "postman/view.html"
pk = self.c21(body="this is my body").pk
url = reverse('postman:view_formatters', args=[pk])
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.get(url)
self.assertTemplateUsed(response, template)
self.assertNotContains(response, 'value="Re_ s"')
self.assertContains(response, 'bar _ this is my body</textarea>') # POSTMAN_QUICKREPLY_QUOTE_BODY setting is not involved
def check_view_404(self, pk):
self.check_404('postman:view', pk)
def test_view_id(self):
"Test all sort of failures."
self.assertTrue(self.client.login(username='foo', password='pass'))
# invalid message id
self.check_view_404(1000)
# existent message but not yours
self.check_view_404(Message.objects.get(pk=self.c23().pk).pk) # create & verify really there
# existent message but not yet visible to you
self.check_view_404(Message.objects.get(pk=self.create(sender=self.user2, recipient=self.user1).pk).pk)
def test_view_conversation_authentication(self):
"Test permission, what template and form are used, number of messages in the conversation, set-as-read."
template = "postman/view.html"
m1 = self.c12()
m1.read_at, m1.thread = now(), m1
m2 = self.c21(parent=m1, thread=m1.thread, body="this is my body")
m1.replied_at = m2.sent_at; m1.save()
url = reverse('postman:view_conversation', args=[m1.pk])
self.check_status(Message.objects.get(pk=m1.pk), status=STATUS_ACCEPTED, is_new=False, is_replied=True, thread=m1)
# anonymous
response = self.client.get(url)
self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url))
# authenticated
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.get(url)
self.assertTemplateUsed(response, template)
self.assertFalse(response.context['archived'])
self.assertEqual(response.context['reply_to_pk'], m2.pk)
from postman.forms import QuickReplyForm
self.assertTrue(isinstance(response.context['form'], QuickReplyForm))
self.assertNotContains(response, 'value="Re: s"')
self.assertContains(response, '\n</textarea>') # same comment as in test_view_authentication
self.assertEqual(len(response.context['pm_messages']), 2)
self.check_status(Message.objects.get(pk=m2.pk), status=STATUS_ACCEPTED, is_new=False, parent=m1, thread=m1)
settings.POSTMAN_QUICKREPLY_QUOTE_BODY = True
self.reload_modules()
response = self.client.get(url)
self.assertContains(response, '\n\nbar wrote:\n> this is my body\n</textarea>')
def check_view_conversation_404(self, thread_id):
self.check_404('postman:view_conversation', thread_id)
def test_view_conversation_id(self):
"Test all sort of failures."
self.assertTrue(self.client.login(username='foo', password='pass'))
# invalid conversation id
self.check_view_conversation_404(1000)
# existent conversation but not yours
m1 = self.c23()
m1.read_at, m1.thread = now(), m1
m2 = self.c32(parent=m1, thread=m1.thread)
m1.replied_at = m2.sent_at; m1.save()
self.check_view_conversation_404(m1.thread_id)
def test_view_conversation(self):
"Test message visibility."
m1 = self.c12()
m1.read_at, m1.thread = now(), m1
m1.save()
m2 = self.create(sender=self.user2, recipient=self.user1, parent=m1, thread=m1.thread)
url = reverse('postman:view_conversation', args=[m1.pk])
self.check_status(Message.objects.get(pk=m1.pk), status=STATUS_ACCEPTED, is_new=False, thread=m1)
# existent response but not yet visible to you
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.get(url)
self.assertEqual(len(response.context['pm_messages']), 1)
self.check_status(Message.objects.get(pk=m2.pk), parent=m1, thread=m1)
# complete view on the other side
self.assertTrue(self.client.login(username='bar', password='pass'))
response = self.client.get(url)
self.assertEqual(len(response.context['pm_messages']), 2)
def check_update(self, view_name, success_msg, field_bit, pk, field_value=None):
"Check permission, redirection, field updates, invalid cases."
# user1 user2 user3
# ----------- ----------- ----------- read repl
# arch del arch del arch del
# 1 ------>
# 1 <------
# 1 ------>
# -------------------->
# 1: initially set for the undelete test
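# the calling tests create four messages; 'pks' targets the first three (pk..pk+2), the fourth (pk+3) is left untouched as a control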
url = reverse(view_name)
url_with_success_url = reverse(view_name + '_with_success_url_to_archives')
data = {'pks': (str(pk), str(pk+1), str(pk+2))}
m1 = Message.objects.get(pk=pk) # keep an original copy
# anonymous
response = self.client.post(url, data)
self.assertRedirects(response, "{0}?{1}={2}".format(settings.LOGIN_URL, REDIRECT_FIELD_NAME, url))
# authenticated
self.assertTrue(self.client.login(username='foo', password='pass'))
# default redirect is to the requestor page
redirect_url = reverse('postman:sent')
response = self.client.post(url, data, HTTP_REFERER=redirect_url, follow=True) # 'follow' to access contrib messages
self.assertRedirects(response, redirect_url)
self.check_contrib_messages(response, success_msg)
sender_kw = 'sender_{0}'.format(field_bit)
recipient_kw = 'recipient_{0}'.format(field_bit)
self.check_status(Message.objects.get(pk=pk), status=STATUS_ACCEPTED, **{sender_kw: field_value})
self.check_status(Message.objects.get(pk=pk+1), status=STATUS_ACCEPTED, **{recipient_kw: field_value})
self.check_status(Message.objects.get(pk=pk+2), status=STATUS_ACCEPTED, **{sender_kw: field_value})
self.check_status(Message.objects.get(pk=pk+3), status=STATUS_ACCEPTED)
# fallback redirect is to inbox
m1.save() # restoring one message is enough to avoid the error when all are archived|deleted|undeleted
response = self.client.post(url, data)
self.assertRedirects(response, reverse('postman:inbox'))
# redirect url may be superseded
m1.save()
response = self.client.post(url_with_success_url, data, HTTP_REFERER=redirect_url)
self.assertRedirects(response, reverse('postman:archives'))
# query string has highest precedence; scheme and domain are silently ignored
m1.save()
response = self.client.post(url_with_success_url + '?next=any://any.tld' + redirect_url, data, HTTP_REFERER='does not matter')
self.assertRedirects(response, redirect_url)
# missing payload
response = self.client.post(url, follow=True)
self.assertRedirects(response, reverse('postman:inbox'))
self.check_contrib_messages(response, 'Select at least one object.')
# not a POST
response = self.client.get(url, data)
self.assertEqual(response.status_code, 405)
# not yours
self.assertTrue(self.client.login(username='baz', password='pass'))
response = self.client.post(url, data)
self.assertEqual(response.status_code, 404)
def check_update_conversation(self, view_name, root_msg, field_bit, field_value=None):
"Check redirection, field updates, invalid cases."
# user1 user2
# ----------- ----------- read repl
# arch del arch del
# 1 ------>| x x
# 1 <------|
# 1: initially set for the undelete test
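# only the thread root is posted via 'tpks'; both messages of the conversation are expected to be updated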
url = reverse(view_name)
pk = root_msg.pk
data = {'tpks': str(pk)}
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.post(url, data)
self.assertRedirects(response, reverse('postman:inbox'))
# contrib.messages are already tested with check_update()
sender_kw = 'sender_{0}'.format(field_bit)
recipient_kw = 'recipient_{0}'.format(field_bit)
self.check_status(Message.objects.get(pk=pk), status=STATUS_ACCEPTED, is_new=False, is_replied=True, thread=root_msg, **{sender_kw: field_value})
self.check_status(Message.objects.get(pk=pk+1), status=STATUS_ACCEPTED, parent=root_msg, thread=root_msg, **{recipient_kw: field_value})
# missing payload
response = self.client.post(url)
self.assertRedirects(response, reverse('postman:inbox'))
# not a POST
response = self.client.get(url, data)
self.assertEqual(response.status_code, 405)
# not yours
self.assertTrue(self.client.login(username='baz', password='pass'))
response = self.client.post(url, data)
self.assertEqual(response.status_code, 404)
def test_archive(self):
"Test archive action on messages."
pk = self.c12().pk
self.c21()
self.c12()
self.c13()
self.check_update('postman:archive', 'Messages or conversations successfully archived.', 'archived', pk, True)
def test_archive_conversation(self):
"Test archive action on conversations."
m1 = self.c12()
m1.read_at, m1.thread = now(), m1
m2 = self.c21(parent=m1, thread=m1.thread)
m1.replied_at = m2.sent_at; m1.save()
self.check_update_conversation('postman:archive', m1, 'archived', True)
def test_delete(self):
"Test delete action on messages."
pk = self.c12().pk
self.c21()
self.c12()
self.c13()
self.check_update('postman:delete', 'Messages or conversations successfully deleted.', 'deleted_at', pk, True)
def test_delete_conversation(self):
"Test delete action on conversations."
m1 = self.c12()
m1.read_at, m1.thread = now(), m1
m2 = self.c21(parent=m1, thread=m1.thread)
m1.replied_at = m2.sent_at; m1.save()
self.check_update_conversation('postman:delete', m1, 'deleted_at', True)
def test_undelete(self):
"Test undelete action on messages."
pk = self.c12(sender_deleted_at=now()).pk
self.c21(recipient_deleted_at=now())
self.c12(sender_deleted_at=now())
self.c13()
self.check_update('postman:undelete', 'Messages or conversations successfully recovered.', 'deleted_at', pk)
def test_undelete_conversation(self):
"Test undelete action on conversations."
m1 = self.c12(sender_deleted_at=now())
m1.read_at, m1.thread = now(), m1
m2 = self.c21(parent=m1, thread=m1.thread, recipient_deleted_at=now())
m1.replied_at = m2.sent_at; m1.save()
self.check_update_conversation('postman:undelete', m1, 'deleted_at')
def check_read(self, view_name, success_msg, pk, field_value=True):
"Check permission, redirection, field updates, invalid cases."
# user1 user2
# ----------- ----------- read
# <------ 1
# ------>
# <------ 1
# <------
# 1: initially set for the unread test
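# posting as another user ('baz') reports success but changes nothing; posting as the recipient ('foo') updates pk and pk+2 only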
url = reverse(view_name)
data = {'pks': (str(pk), str(pk+1), str(pk+2))}
# anonymous access and the various redirects are already tested with check_update()
# not yours; success but no changes
self.assertTrue(self.client.login(username='baz', password='pass'))
response = self.client.post(url, data, follow=True)
self.assertRedirects(response, reverse('postman:inbox'))
self.check_contrib_messages(response, success_msg)
self.check_status(Message.objects.get(pk=pk), status=STATUS_ACCEPTED, is_new=not field_value)
self.check_status(Message.objects.get(pk=pk+1), status=STATUS_ACCEPTED)
self.check_status(Message.objects.get(pk=pk+2), status=STATUS_ACCEPTED, is_new=not field_value)
self.check_status(Message.objects.get(pk=pk+3), status=STATUS_ACCEPTED)
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.post(url, data, follow=True)
self.assertRedirects(response, reverse('postman:inbox'))
self.check_contrib_messages(response, success_msg)
self.check_status(Message.objects.get(pk=pk), status=STATUS_ACCEPTED, is_new=field_value)
self.check_status(Message.objects.get(pk=pk+1), status=STATUS_ACCEPTED) # unchanged
self.check_status(Message.objects.get(pk=pk+2), status=STATUS_ACCEPTED, is_new=field_value)
self.check_status(Message.objects.get(pk=pk+3), status=STATUS_ACCEPTED) # unchanged
# missing payload
response = self.client.post(url, follow=True)
self.assertRedirects(response, reverse('postman:inbox'))
self.check_contrib_messages(response, 'Select at least one object.')
# not a POST
response = self.client.get(url, data)
self.assertEqual(response.status_code, 405)
def check_read_conversation(self, view_name, pk, field_value=True):
"Check redirection, field updates, invalid cases."
# user1 user2
# ----------- ----------- read
# |<------ 1
# |------>|
# <------| 1
# 1: initially set for the unread test
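# 'tpks' targets the whole thread; only the messages received by the requesting user are updated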
url = reverse(view_name)
data = {'tpks': str(pk)}
# not yours; success but no changes
self.assertTrue(self.client.login(username='baz', password='pass'))
response = self.client.post(url, data)
self.assertRedirects(response, reverse('postman:inbox'))
m1 = Message.objects.get(pk=pk)
self.check_status(m1, status=STATUS_ACCEPTED, is_new=not field_value, is_replied=True, thread=m1)
m2 = Message.objects.get(pk=pk+1)
self.check_status(m2, status=STATUS_ACCEPTED, parent=m1, is_replied=True, thread=m1)
self.check_status(Message.objects.get(pk=pk+2), status=STATUS_ACCEPTED, is_new=not field_value, parent=m2, thread=m1)
self.assertTrue(self.client.login(username='foo', password='pass'))
response = self.client.post(url, data)
self.assertRedirects(response, reverse('postman:inbox'))
# contrib.messages are already tested with check_read()
m1 = Message.objects.get(pk=pk)
self.check_status(m1, status=STATUS_ACCEPTED, is_new=field_value, is_replied=True, thread=m1)
m2 = Message.objects.get(pk=pk+1)
self.check_status(m2, status=STATUS_ACCEPTED, parent=m1, is_replied=True, thread=m1)
self.check_status(Message.objects.get(pk=pk+2), status=STATUS_ACCEPTED, is_new=field_value, parent=m2, thread=m1)
# missing payload
response = self.client.post(url)
self.assertRedirects(response, reverse('postman:inbox'))
# not a POST
response = self.client.get(url, data)
self.assertEqual(response.status_code, 405)
def test_read(self):
"Test mark-as-read action on messages."
pk = self.c21().pk
self.c12()
self.c21()
self.c21()
self.check_read('postman:mark-read', 'Messages or conversations successfully marked as read.', pk, False)
def test_read_conversation(self):
"Test mark-as-read action on conversations."
m1 = self.c21()
m1.thread = m1
m2 = self.c12(parent=m1, thread=m1.thread)
m1.replied_at = m2.sent_at; m1.save()
m3 = self.c21(parent=m2, thread=m2.thread)
m2.replied_at = m3.sent_at; m2.save()
self.check_read_conversation('postman:mark-read', m1.pk, False)
def test_unread(self):
"Test mark-as-unread action on messages."
m1 = self.c21()
m1.read_at = now(); m1.save()
self.c12()
m3 = self.c21()
m3.read_at = now(); m3.save()
self.c21()
self.check_read('postman:mark-unread', 'Messages or conversations successfully marked as unread.', m1.pk, True)
def test_unread_conversation(self):
"Test mark-as-unread action on conversations."
m1 = self.c21()
m1.read_at = now(); m1.thread = m1
m2 = self.c12(parent=m1, thread=m1.thread)
m1.replied_at = m2.sent_at; m1.save()
m3 = self.c21(parent=m2, thread=m2.thread)
m2.replied_at = m3.sent_at; m2.save()
m3.read_at = now(); m3.save()
self.check_read_conversation('postman:mark-unread', m1.pk, True)
class FieldTest(BaseTest):
"""
Test the CommaSeparatedUserField.
"""
def test_label(self):
"Test the plural/singular of the label."
from postman.fields import CommaSeparatedUserField
f = CommaSeparatedUserField(label=('plural','singular'))
self.assertEqual(f.label, 'plural')
f.set_max(1)
self.assertEqual(f.label, 'singular')
f = CommaSeparatedUserField(label=('plural','singular'), max=1)
self.assertEqual(f.label, 'singular')
f.set_max(2)
self.assertEqual(f.label, 'plural')
f = CommaSeparatedUserField(label=('plural','singular'), max=2)
self.assertEqual(f.label, 'plural')
f.set_max(1)
self.assertEqual(f.label, 'singular')
def test_to_python(self):
"Test the conversion to a python list."
from postman.fields import CommaSeparatedUserField
f = CommaSeparatedUserField()
self.assertEqual(f.to_python(''), [])
self.assertEqual(f.to_python('foo'), ['foo'])
self.assertEqual(frozenset(f.to_python('foo, bar')), frozenset(['foo', 'bar']))
self.assertEqual(frozenset(f.to_python('foo, bar,baz')), frozenset(['foo', 'bar', 'baz']))
self.assertEqual(f.to_python(' foo , foo '), ['foo'])
self.assertEqual(frozenset(f.to_python('foo,, bar,')), frozenset(['foo', 'bar']))
self.assertEqual(frozenset(f.to_python(',foo, \t , bar')), frozenset(['foo', 'bar']))
def test_clean(self):
"Test the 'clean' validation."
from postman.fields import CommaSeparatedUserField
f = CommaSeparatedUserField(required=False)
self.assertEqual(f.clean(''), [])
self.assertEqual(f.clean('foo'), [self.user1])
self.assertEqual(frozenset(f.clean('foo, bar')), frozenset([self.user1, self.user2]))
# 'intruder' is not a username
self.assertRaises(ValidationError, f.clean, 'foo, intruder, bar')
# only active users are considered
self.user1.is_active = False
self.user1.save()
self.assertRaises(ValidationError, f.clean, 'foo, bar')
# ensure int values are compatible with processing that relies on str.join()
settings.POSTMAN_NAME_USER_AS = 'id'
self.assertEqual(f.clean('2'), [self.user2])
def test_user_filter(self):
"Test the 'user_filter' argument."
from postman.fields import CommaSeparatedUserField
f = CommaSeparatedUserField(user_filter=lambda u: None)
self.assertEqual(frozenset(f.clean('foo, bar')), frozenset([self.user1, self.user2]))
# no reason
f = CommaSeparatedUserField(user_filter=lambda u: '' if u == self.user1 else None)
self.assertRaises(ValidationError, f.clean, 'foo, bar')
# with reason
f = CommaSeparatedUserField(user_filter=lambda u: 'some reason' if u == self.user1 else None)
self.assertRaises(ValidationError, f.clean, 'foo, bar')
def test_min(self):
"Test the 'min' argument."
from postman.fields import CommaSeparatedUserField
f = CommaSeparatedUserField(required=False, min=1)
self.assertEqual(f.clean(''), [])
f = CommaSeparatedUserField(min=1)
self.assertEqual(f.clean('foo'), [self.user1])
f = CommaSeparatedUserField(min=2)
self.assertEqual(frozenset(f.clean('foo, bar')), frozenset([self.user1, self.user2]))
self.assertRaises(ValidationError, f.clean, 'foo')
def test_max(self):
"Test the 'max' argument."
from postman.fields import CommaSeparatedUserField
f = CommaSeparatedUserField(max=1)
self.assertEqual(f.clean('foo'), [self.user1])
self.assertRaises(ValidationError, f.clean, 'foo, bar')
class MessageManagerTest(BaseTest):
"""
Test the Message manager.
"""
def test_num_queries(self):
"Test the number of queries."
pk = self.c12().pk
self.c21()
self.c12(sender_archived=True, recipient_deleted_at=now())
self.c21(sender_archived=True, recipient_deleted_at=now())
for u in (self.user1, self.user2):
with self.assertNumQueries(1):
msgs = list(Message.objects.sent(u, option=OPTION_MESSAGES))
user = msgs[0].recipient
with self.assertNumQueries(1):
msgs = list(Message.objects.inbox(u, option=OPTION_MESSAGES))
user = msgs[0].sender
with self.assertNumQueries(1):
msgs = list(Message.objects.archives(u, option=OPTION_MESSAGES))
user = msgs[0].sender
user = msgs[0].recipient
with self.assertNumQueries(1):
msgs = list(Message.objects.trash(u, option=OPTION_MESSAGES))
user = msgs[0].sender
user = msgs[0].recipient
with self.assertNumQueries(1):
msgs = list(Message.objects.thread(u, Q(pk=pk)))
user = msgs[0].sender
user = msgs[0].recipient
def test_query_compiler(self):
"Test our custom query compiler, in particular the right sequence of the sql parameters."
m = self.c12()
qs = Message.objects.inbox(self.user2) # important: do not use OPTION_MESSAGES
self.assertEqual(qs.count(), 1) # param '*', must stay at the beginning
self.assertListEqual(list(qs.filter(recipient_id=2)), [m]) # param 2, must stay at the end
def test(self):
"Test the manager."
# user1 user2
# ----------- ----------- read repl
# arch del arch del
# ---...
# ---X x
# ------>| x x
# |<------| x x
# |------>
# ------>
# ------> x
# <------
# ...---
# x X---
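# each row of the diagram maps, top to bottom, to m1..m10 created below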
m1 = self.c12(moderation_status=STATUS_PENDING)
m2 = self.c12(moderation_status=STATUS_REJECTED, recipient_deleted_at=now())
m3 = self.c12()
m3.read_at, m3.thread = now(), m3
m4 = self.c21(parent=m3, thread=m3.thread)
m3.replied_at = m4.sent_at; m3.save()
m4.read_at = now()
m5 = self.c12(parent=m4, thread=m4.thread)
m4.replied_at = m5.sent_at; m4.save()
m6 = self.c12()
m7 = self.c12()
m7.read_at = now(); m7.save()
m8 = self.c21()
m9 = self.c21(moderation_status=STATUS_PENDING)
m10 = self.c21(moderation_status=STATUS_REJECTED, recipient_deleted_at=now())
def pk(x): return x.pk
def pk_cnt(x): return (x.pk, x.count)
self.assertEqual(Message.objects.count(), 10)
self.assertEqual(Message.objects.inbox_unread_count(self.user1), 1)
self.assertEqual(Message.objects.inbox_unread_count(self.user2), 2)
self.assertEqual(self.user1.sent_messages.count(), 6)
self.assertEqual(self.user1.received_messages.count(), 4)
self.assertEqual(self.user2.sent_messages.count(), 4)
self.assertEqual(self.user2.received_messages.count(), 6)
self.assertEqual(set(m3.child_messages.all()), set([m3,m4,m5]))
self.assertEqual(list(m3.next_messages.all()), [m4])
self.assertEqual(m3.get_replies_count(), 1)
self.assertEqual(list(m4.next_messages.all()), [m5])
self.assertEqual(m4.get_replies_count(), 1)
self.assertEqual(m5.get_replies_count(), 0)
# by messages
self.assertQuerysetEqual(Message.objects.sent(self.user1, option=OPTION_MESSAGES), [m7.pk,m6.pk,m5.pk,m3.pk,m2.pk,m1.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.sent(self.user2, option=OPTION_MESSAGES), [m10.pk,m9.pk,m8.pk,m4.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.inbox(self.user1, option=OPTION_MESSAGES), [m8.pk,m4.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.inbox(self.user2, option=OPTION_MESSAGES), [m7.pk,m6.pk,m5.pk,m3.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.archives(self.user1, option=OPTION_MESSAGES), [], transform=pk)
self.assertQuerysetEqual(Message.objects.archives(self.user2, option=OPTION_MESSAGES), [], transform=pk)
self.assertQuerysetEqual(Message.objects.trash(self.user1, option=OPTION_MESSAGES), [], transform=pk)
self.assertQuerysetEqual(Message.objects.trash(self.user2, option=OPTION_MESSAGES), [], transform=pk)
# by conversations
self.assertQuerysetEqual(Message.objects.sent(self.user1), [(m7.pk,0),(m6.pk,0),(m5.pk,2),(m2.pk,0),(m1.pk,0)], transform=pk_cnt)
self.assertQuerysetEqual(Message.objects.sent(self.user2), [(m10.pk,0),(m9.pk,0),(m8.pk,0),(m4.pk,1)], transform=pk_cnt)
self.assertQuerysetEqual(Message.objects.inbox(self.user1), [(m8.pk,0),(m4.pk,1)], transform=pk_cnt)
self.assertQuerysetEqual(Message.objects.inbox(self.user2), [(m7.pk,0),(m6.pk,0),(m5.pk,2)], transform=pk_cnt)
self.assertQuerysetEqual(Message.objects.thread(self.user1, Q(thread=m3.pk)), [m3.pk,m4.pk,m5.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.thread(self.user1, Q(pk=m4.pk)), [m4.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.thread(self.user2, Q(thread=m3.pk)), [m3.pk,m4.pk,m5.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.thread(self.user2, Q(pk=m4.pk)), [m4.pk], transform=pk)
# mark as archived and deleted
# user1 user2
# ----------- ----------- read repl
# arch del arch del
# X ---...
# X ---X x
# X X ------>| x x
# |<------| X X x x
# |------>
# X ------> X
# ------> X x
# X <------
# ...--- X
# x X--- X
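# the updated diagram shows the flags after the archive/delete changes applied by the statements below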
m1.sender_archived = True; m1.save()
m2.sender_deleted_at = now(); m2.save()
m3.sender_archived, m3.sender_deleted_at = True, now(); m3.save()
m4.sender_archived, m4.sender_deleted_at = True, now(); m4.save()
m6.sender_archived, m6.recipient_archived = True, True; m6.save()
m7.recipient_deleted_at = now(); m7.save()
m8.recipient_deleted_at = now(); m8.save()
m9.sender_deleted_at = now(); m9.save()
m10.sender_archived = True; m10.save()
self.assertEqual(Message.objects.inbox_unread_count(self.user1), 0)
self.assertEqual(Message.objects.inbox_unread_count(self.user2), 1)
# by messages
self.assertQuerysetEqual(Message.objects.archives(self.user1, option=OPTION_MESSAGES), [m6.pk,m1.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.archives(self.user2, option=OPTION_MESSAGES), [m10.pk,m6.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.trash(self.user1, option=OPTION_MESSAGES), [m8.pk,m3.pk,m2.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.trash(self.user2, option=OPTION_MESSAGES), [m9.pk,m7.pk,m4.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.sent(self.user1, option=OPTION_MESSAGES), [m7.pk,m5.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.sent(self.user2, option=OPTION_MESSAGES), [m8.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.inbox(self.user1, option=OPTION_MESSAGES), [m4.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.inbox(self.user2, option=OPTION_MESSAGES), [m5.pk,m3.pk], transform=pk)
# by conversations
self.assertQuerysetEqual(Message.objects.sent(self.user1), [(m7.pk,0),(m5.pk,1)], transform=pk_cnt)
self.assertQuerysetEqual(Message.objects.sent(self.user2), [(m8.pk,0)], transform=pk_cnt)
self.assertQuerysetEqual(Message.objects.inbox(self.user1), [(m4.pk,1)], transform=pk_cnt)
self.assertQuerysetEqual(Message.objects.inbox(self.user2), [(m5.pk,2)], transform=pk_cnt)
self.assertQuerysetEqual(Message.objects.thread(self.user1, Q(thread=m3.pk)), [m3.pk,m4.pk,m5.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.thread(self.user1, Q(pk=m4.pk)), [m4.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.thread(self.user2, Q(thread=m3.pk)), [m3.pk,m4.pk,m5.pk], transform=pk)
self.assertQuerysetEqual(Message.objects.thread(self.user2, Q(pk=m4.pk)), [m4.pk], transform=pk)
# mark as read
self.assertEqual(Message.objects.set_read(self.user2, Q(thread=m3.pk)), 1)
m = Message.objects.get(pk=m5.pk)
self.check_status(m, status=STATUS_ACCEPTED, is_new=False, parent=m4, thread=m3)
self.check_now(m.read_at)
self.assertEqual(Message.objects.set_read(self.user2, Q(pk=m6.pk)), 1)
m = Message.objects.get(pk=m6.pk)
self.check_status(m, status=STATUS_ACCEPTED, is_new=False, sender_archived=True, recipient_archived=True)
self.check_now(m.read_at)
self.assertEqual(Message.objects.set_read(self.user1, Q(pk=m8.pk)), 1)
m = Message.objects.get(pk=m8.pk)
self.check_status(m, status=STATUS_ACCEPTED, is_new=False, recipient_deleted_at=True)
self.check_now(m.read_at)
class MessageTest(BaseTest):
"""
Test the Message model.
"""
def check_parties(self, m, s=None, r=None, email=''):
"Check party related properties."
obfuscated_email_re = re.compile('^[0-9a-f]{4}..[0-9a-f]{4}@domain$')
m.sender, m.recipient, m.email = s, r, email
if s or email:
m.clean()
else:
self.assertRaises(ValidationError, m.clean)
self.assertEqual(m.admin_sender(), s.get_username() if s else '<'+email+'>')
self.assertEqual(m.clear_sender, m.admin_sender())
if s:
self.assertEqual(m.obfuscated_sender, s.get_username())
elif email:
self.assertTrue(obfuscated_email_re.match(m.obfuscated_sender))
else:
self.assertEqual(m.obfuscated_sender, '')
self.assertEqual(m.admin_recipient(), r.get_username() if r else '<'+email+'>')
self.assertEqual(m.clear_recipient, m.admin_recipient())
if r:
self.assertEqual(m.obfuscated_recipient, r.get_username())
elif email:
self.assertTrue(obfuscated_email_re.match(m.obfuscated_recipient))
else:
self.assertEqual(m.obfuscated_recipient, '')
def test_parties(self):
"Test sender/recipient/email."
m = Message()
self.check_parties(m)
self.check_parties(m, s=self.user1)
self.check_parties(m, r=self.user2)
self.check_parties(m, s=self.user1, r=self.user2)
self.check_parties(m, s=self.user1, email=self.email )
self.check_parties(m, email=self.email, r=self.user2)
def test_representation(self):
"Test the message representation as text."
m = Message(sender=self.user1, recipient=self.user2)
m.subject = 'one two three four last'
self.assertEqual(str(m), 'foo>bar:one two three four last')
m.subject = 'one two three four last over'
self.assertEqual(str(m), 'foo>bar:one two three four last...')
def test_status(self):
"Test status."
m = Message.objects.create(subject='s')
self.check_status(m)
m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED)
self.check_status(m, status=STATUS_REJECTED)
m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED)
self.check_status(m, status=STATUS_ACCEPTED)
m = Message.objects.create(subject='s', read_at=now())
self.check_status(m, is_new=False)
m = Message.objects.create(subject='s', replied_at=now())
self.check_status(m, is_replied=True)
def test_moderated_count(self):
"Test 'moderated_messages' count."
msg = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED,
moderation_date=now(), moderation_by=self.user1)
msg.save()
self.assertEqual(list(self.user1.moderated_messages.all()), [msg])
def test_moderation_from_pending(self):
"Test moderation management when leaving 'pending' status."
msg = Message.objects.create(subject='s')
# pending -> pending: nothing changes
m = copy.copy(msg)
m.clean_moderation(STATUS_PENDING, self.user1)
self.check_status(m)
# pending -> rejected
m = copy.copy(msg)
m.moderation_status = STATUS_REJECTED
m.clean_moderation(STATUS_PENDING, self.user1) # one try with moderator
self.check_status(m, status=STATUS_REJECTED,
moderation_date=True, moderation_by=self.user1, recipient_deleted_at=True)
self.check_now(m.moderation_date)
self.check_now(m.recipient_deleted_at)
# pending -> accepted
m = copy.copy(msg)
m.moderation_status = STATUS_ACCEPTED
m.clean_moderation(STATUS_PENDING) # one try without moderator
self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True)
self.check_now(m.moderation_date)
def test_moderation_from_rejected(self):
"Test moderation management when leaving 'rejected' status."
date_in_past = now() - timedelta(days=2) # any value, just to avoid now()
reason = 'some good reason'
msg = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED,
moderation_date=date_in_past, moderation_by=self.user1, moderation_reason=reason,
recipient_deleted_at=date_in_past)
# rejected -> rejected: nothing changes
m = copy.copy(msg)
m.clean_moderation(STATUS_REJECTED, self.user2)
self.check_status(m, status=STATUS_REJECTED,
moderation_date=date_in_past, moderation_by=self.user1, moderation_reason=reason,
recipient_deleted_at=date_in_past)
# rejected -> pending
m = copy.copy(msg)
m.moderation_status = STATUS_PENDING
m.clean_moderation(STATUS_REJECTED) # one try without moderator
self.check_status(m, status=STATUS_PENDING,
moderation_date=True, moderation_reason=reason, recipient_deleted_at=False)
self.check_now(m.moderation_date)
# rejected -> accepted
m = copy.copy(msg)
m.moderation_status = STATUS_ACCEPTED
m.clean_moderation(STATUS_REJECTED, self.user2) # one try with moderator
self.check_status(m, status=STATUS_ACCEPTED,
moderation_date=True, moderation_by=self.user2, moderation_reason=reason,
recipient_deleted_at=False)
self.check_now(m.moderation_date)
def test_moderation_from_accepted(self):
"Test moderation management when leaving 'accepted' status."
date_in_past = now() - timedelta(days=2) # any value, just to avoid now()
msg = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED,
moderation_date=date_in_past, moderation_by=self.user1, recipient_deleted_at=date_in_past)
# accepted -> accepted: nothing changes
m = copy.copy(msg)
m.clean_moderation(STATUS_ACCEPTED, self.user2)
self.check_status(m, status=STATUS_ACCEPTED,
moderation_date=date_in_past, moderation_by=self.user1, recipient_deleted_at=date_in_past)
# accepted -> pending
m = copy.copy(msg)
m.moderation_status = STATUS_PENDING
m.clean_moderation(STATUS_ACCEPTED, self.user2) # one try with moderator
self.check_status(m, status=STATUS_PENDING,
moderation_date=True, moderation_by=self.user2, recipient_deleted_at=date_in_past)
self.check_now(m.moderation_date)
# accepted -> rejected
m = copy.copy(msg)
m.moderation_status = STATUS_REJECTED
m.clean_moderation(STATUS_ACCEPTED) # one try without moderator
self.check_status(m, status=STATUS_REJECTED, moderation_date=True, recipient_deleted_at=True)
self.check_now(m.moderation_date)
self.check_now(m.recipient_deleted_at)
def test_visitor(self):
"Test clean_for_visitor()."
date_in_past = now() - timedelta(days=2) # any value, just to avoid now()
# as the sender
m = Message.objects.create(subject='s', recipient=self.user1)
m.clean_for_visitor()
self.check_status(m, sender_deleted_at=True)
self.check_now(m.sender_deleted_at)
# as the recipient
msg = Message.objects.create(subject='s', sender=self.user1)
# pending
m = copy.copy(msg)
m.read_at=date_in_past
m.recipient_deleted_at=date_in_past
m.clean_for_visitor()
self.check_status(m, recipient_deleted_at=False)
# rejected
m = copy.copy(msg)
m.moderation_status = STATUS_REJECTED
m.read_at=date_in_past
m.recipient_deleted_at=date_in_past
m.clean_for_visitor()
self.check_status(m, status=STATUS_REJECTED, recipient_deleted_at=date_in_past)
# accepted
m = copy.copy(msg)
m.moderation_status = STATUS_ACCEPTED
m.clean_for_visitor()
self.check_status(m, status=STATUS_ACCEPTED, is_new=False, recipient_deleted_at=True)
self.check_now(m.read_at)
self.check_now(m.recipient_deleted_at)
def test_update_parent(self):
"Test update_parent()."
parent = Message.objects.create(subject='s', sender=self.user1, recipient=self.user2,
moderation_status=STATUS_ACCEPTED)
parent.thread = parent
parent.save()
# any previous rejected reply should not interfere
rejected_reply = Message.objects.create(subject='s', sender=self.user2, recipient=self.user1,
parent=parent, thread=parent.thread, moderation_status=STATUS_REJECTED)
# any previous pending reply should not interfere
pending_reply = Message.objects.create(subject='s', sender=self.user2, recipient=self.user1,
parent=parent, thread=parent.thread, moderation_status=STATUS_PENDING)
reply = Message.objects.create(subject='s', sender=self.user2, recipient=self.user1,
parent=parent, thread=parent.thread)
# the reply is accepted
r = copy.deepcopy(reply)
r.moderation_status = STATUS_ACCEPTED
# accepted -> accepted: no change
r.update_parent(STATUS_ACCEPTED)
self.check_status(r.parent, status=STATUS_ACCEPTED, thread=parent)
# pending -> accepted: parent is replied
r.update_parent(STATUS_PENDING)
p = Message.objects.get(pk=parent.pk) # better to ask the DB to check the save()
self.check_status(p, status=STATUS_ACCEPTED, thread=parent, is_replied=True)
self.assertEqual(p.replied_at.timetuple(), r.sent_at.timetuple()) # mysql doesn't store microseconds
# rejected -> accepted: same as pending -> accepted
# so check here the acceptance of an anterior date
# note: the same object is reused for convenience, but another reply would be more realistic
r.sent_at = r.sent_at - timedelta(days=1)
r.update_parent(STATUS_REJECTED)
p = Message.objects.get(pk=parent.pk)
self.check_status(p, status=STATUS_ACCEPTED, thread=parent, is_replied=True)
self.assertEqual(p.replied_at.timetuple(), r.sent_at.timetuple())
# a reply is withdrawn and no other reply
r = copy.deepcopy(reply)
r.parent.replied_at = r.sent_at
r.moderation_status = STATUS_REJECTED # could be STATUS_PENDING
# rejected -> rejected: no change. In a real case, parent.replied_at would already be empty
r.update_parent(STATUS_REJECTED)
self.check_status(r.parent, status=STATUS_ACCEPTED, thread=parent, is_replied=True)
# pending -> rejected: no change. In a real case, parent.replied_at would already be empty
r.update_parent(STATUS_PENDING)
self.check_status(r.parent, status=STATUS_ACCEPTED, thread=parent, is_replied=True)
# accepted -> rejected: parent is no more replied
r.update_parent(STATUS_ACCEPTED)
p = Message.objects.get(pk=parent.pk)
self.check_status(p, status=STATUS_ACCEPTED, thread=parent)
# note: accepted -> rejected, with the existence of another suitable reply
# is covered in the accepted -> pending case
# a reply is withdrawn but there is another suitable reply
other_reply = Message.objects.create(subject='s', sender=self.user2, recipient=self.user1,
parent=parent, thread=parent.thread, moderation_status=STATUS_ACCEPTED)
r = copy.deepcopy(reply)
r.parent.replied_at = r.sent_at
r.moderation_status = STATUS_PENDING # could be STATUS_REJECTED
# pending -> pending: no change. In a real case, parent.replied_at would come from another reply object
r.update_parent(STATUS_PENDING)
self.check_status(r.parent, status=STATUS_ACCEPTED, thread=parent, is_replied=True)
# rejected -> pending: no change. In a real case, parent.replied_at would come from another reply object
r.update_parent(STATUS_REJECTED)
self.check_status(r.parent, status=STATUS_ACCEPTED, thread=parent, is_replied=True)
# accepted -> pending: parent is still replied but by another object
r.update_parent(STATUS_ACCEPTED)
p = Message.objects.get(pk=parent.pk)
self.check_status(p, status=STATUS_ACCEPTED, thread=parent, is_replied=True)
self.assertEqual(p.replied_at.timetuple(), other_reply.sent_at.timetuple())
# note: accepted -> pending, with no other suitable reply
# is covered in the accepted -> rejected case
def check_notification(self, m, mail_number, email=None, is_auto_moderated=True, notice_label=None):
"Check number of mails, recipient, and notice creation."
m.notify_users(STATUS_PENDING, Site.objects.get_current() if Site._meta.installed else None, is_auto_moderated)
self.assertEqual(len(mail.outbox), mail_number)
if mail_number:
self.assertEqual(mail.outbox[0].to, [email])
from postman.utils import notification
if notification and notice_label:
if hasattr(notification, "Notice"): # exists for django-notification 0.2.0, but no more in 1.0
notice = notification.Notice.objects.get()
self.assertEqual(notice.notice_type.label, notice_label)
def test_notification_rejection_visitor(self):
"Test notify_users() for rejection, sender is a visitor."
m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED, email=self.email, recipient=self.user2)
self.check_notification(m, 1, self.email)
def test_notification_rejection_user(self):
"Test notify_users() for rejection, sender is a User."
m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED, sender=self.user1, recipient=self.user2)
self.check_notification(m, 1, self.user1.email, is_auto_moderated=False, notice_label='postman_rejection')
def test_notification_rejection_user_auto_moderated(self):
"Test notify_users() for rejection, sender is a User, and is alerted online."
m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED, sender=self.user1, recipient=self.user2)
self.check_notification(m, 0, is_auto_moderated=True)
def test_notification_rejection_user_inactive(self):
"Test notify_users() for rejection, sender is a User, but must be active."
m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED, sender=self.user1, recipient=self.user2)
self.user1.is_active = False
self.check_notification(m, 0, is_auto_moderated=False, notice_label='postman_rejection')
def test_notification_rejection_user_disable(self):
"Test notify_users() for rejection, sender is a User, but emailing is disabled."
m = Message.objects.create(subject='s', moderation_status=STATUS_REJECTED, sender=self.user1, recipient=self.user2)
settings.POSTMAN_DISABLE_USER_EMAILING = True
settings.POSTMAN_NOTIFIER_APP = None
self.reload_modules()
self.check_notification(m, 0, is_auto_moderated=False)
def test_notification_acceptance_visitor(self):
"Test notify_users() for acceptance, recipient is a visitor."
m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user1, email=self.email)
self.check_notification(m, 1, self.email)
def test_notification_acceptance_user(self):
"Test notify_users() for acceptance, recipient is a User."
m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user1, recipient=self.user2)
self.check_notification(m, 1, self.user2.email, notice_label='postman_message')
def test_notification_acceptance_user_inactive(self):
"Test notify_users() for acceptance, recipient is a User, but must be active."
m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user1, recipient=self.user2)
self.user2.is_active = False
self.check_notification(m, 0, notice_label='postman_message')
def test_notification_acceptance_user_disable(self):
"Test notify_users() for acceptance, recipient is a User, but emailing is disabled."
m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user1, recipient=self.user2)
settings.POSTMAN_DISABLE_USER_EMAILING = True
settings.POSTMAN_NOTIFIER_APP = None
self.reload_modules()
self.check_notification(m, 0, notice_label='postman_message')
def test_notification_acceptance_reply(self):
"Test notify_users() for acceptance, for a reply, recipient is a User."
p = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user2, recipient=self.user1)
m = Message.objects.create(subject='s', moderation_status=STATUS_ACCEPTED, sender=self.user1, recipient=self.user2,
parent=p, thread=p)
self.check_notification(m, 1, self.user2.email, notice_label='postman_reply')
def test_dates(self):
"Test set_dates(), get_dates()."
m = Message()
set = now(), now(), now()
m.set_dates(*set)
get = m.get_dates()
self.assertEqual(get, set)
def test_moderation(self):
"Test set_moderation(), get_moderation()."
m = Message()
set = STATUS_ACCEPTED, self.user1.pk, now(), 'some reason'
m.set_moderation(*set)
get = m.get_moderation()
self.assertEqual(get, set)
def check_auto_moderation(self, msg, seq, default):
"Check auto-moderation results."
for mod, result in seq:
m = copy.copy(msg)
m.auto_moderate(mod)
changes = {}
if result is True:
changes['status'] = STATUS_ACCEPTED
elif result is None:
changes['status'] = default
else:
changes['status'] = STATUS_REJECTED
changes['moderation_reason'] = result
m.sent_at = now() # refresh, as we recycle the same base message
self.check_status(m, **changes)
def test_auto_moderation(self):
"Test auto-moderation function combination."
msg = Message.objects.create(subject='s')
def moderate_as_none(m): return None
def moderate_as_true(m): return True
def moderate_as_false(m): return False
def moderate_as_0(m): return 0
def moderate_as_100(m): return 100
def moderate_as_50(m): return 50
def moderate_as_49_default_reason(m): return 49
moderate_as_49_default_reason.default_reason = 'moderate_as_49 default_reason'
def moderate_as_49_with_reason(m): return (49, 'moderate_as_49 with_reason')
moderate_as_49_with_reason.default_reason = 'is not used'
def moderate_as_1(m): return (1, 'moderate_as_1')
def moderate_as_1_no_reason(m): return (1, ' ')
def moderate_as_2(m): return (2, 'moderate_as_2')
def moderate_as_98(m): return 98
moderate_as_98.default_reason = 'useless; never used'
def moderate_badly_as_negative(m): return -1
def moderate_badly_as_too_high(m): return 101
def moderate_as_0_with_reason(m): return (0, 'moderate_as_0 with_reason')
def invalid_moderator_1(m): return (0, )
def invalid_moderator_2(m): return (0, 'reason', 'extra')
for mod in [invalid_moderator_1, invalid_moderator_2]:
m = copy.copy(msg)
self.assertRaises(ValueError, m.auto_moderate, mod)
seq = (
# no moderator, no valid rating, or the moderator is unable to decide: the default applies
([], None),
(moderate_badly_as_negative, None),
(moderate_badly_as_too_high, None),
(moderate_as_none, None),
# firm decision
(moderate_as_false, ''), (moderate_as_0, ''),
(moderate_as_true, True), (moderate_as_100, True),
# round to up
(moderate_as_50, True),
# reasons
(moderate_as_49_default_reason, moderate_as_49_default_reason.default_reason),
(moderate_as_49_with_reason, 'moderate_as_49 with_reason'),
# priority is left to right
([moderate_as_none, moderate_as_false, moderate_as_true], ''),
([moderate_as_none, moderate_as_true, moderate_as_false], True),
# keep only reasons for ratings below 50, non empty or whitespace
([moderate_as_1, moderate_as_98], 'moderate_as_1'),
([moderate_as_1, moderate_as_2, moderate_as_50], 'moderate_as_1, moderate_as_2'),
([moderate_as_1, moderate_as_1_no_reason, moderate_as_2], 'moderate_as_1, moderate_as_2'),
# a firm reject imposes its reason
([moderate_as_1, moderate_as_2, moderate_as_50, moderate_as_0_with_reason], 'moderate_as_0 with_reason'),
# neutral or invalid moderators do not count in the average
([moderate_as_50, moderate_as_none, moderate_badly_as_negative, moderate_badly_as_too_high], True),
)
# no default auto moderation
# settings.POSTMAN_AUTO_MODERATE_AS = None
self.check_auto_moderation(msg, seq, STATUS_PENDING)
# default is: accepted
settings.POSTMAN_AUTO_MODERATE_AS = True
self.check_auto_moderation(msg, seq, STATUS_ACCEPTED)
# default is: rejected
settings.POSTMAN_AUTO_MODERATE_AS = False
self.check_auto_moderation(msg, seq, STATUS_REJECTED)
class PendingMessageManagerTest(BaseTest):
"""
Test the PendingMessage manager.
"""
def test(self):
msg1 = self.create()
msg2 = self.create(moderation_status=STATUS_REJECTED)
msg3 = self.create(moderation_status=STATUS_ACCEPTED)
msg4 = self.create()
self.assertQuerysetEqual(PendingMessage.objects.all(), [msg4.pk, msg1.pk], transform=lambda x: x.pk)
class PendingMessageTest(BaseTest):
"""
Test the PendingMessage model.
"""
def test(self):
m = PendingMessage()
self.assertTrue(m.is_pending())
m.set_accepted()
self.assertTrue(m.is_accepted())
m.set_rejected()
self.assertTrue(m.is_rejected())
class FiltersTest(BaseTest):
"""
Test the filters.
"""
def check_sub(self, x, y, value):
t = Template("{% load postman_tags %}{% with "+x+"|sub:"+y+" as var %}{{ var }}{% endwith %}")
self.assertEqual(t.render(Context({})), value)
def test_sub(self):
"Test '|sub'."
self.check_sub('6', '2', '4')
self.check_sub('6', "'X'", '6')
self.check_sub("'X'", '2', 'X')
def check_or_me(self, x, value, user=None, m=None):
t = Template("{% load postman_tags %}{{ "+x+"|or_me:user }}") # do not load i18n to be able to check the untranslated pattern
self.assertEqual(t.render(Context({'user': user or AnonymousUser(), 'message': m})), value)
def test_or_me(self):
"Test '|or_me'."
self.check_or_me("'foo'", 'foo')
self.check_or_me("'foo'", '<me>', self.user1)
self.check_or_me("'bar'", 'bar', self.user1)
self.check_or_me("user", '<me>', self.user1)
m = self.c12()
self.check_or_me("message.obfuscated_sender", '<me>', self.user1, m=m)
self.check_or_me("message.obfuscated_recipient", 'bar', self.user1, m=m)
settings.POSTMAN_SHOW_USER_AS = 'email'
self.check_or_me("message.obfuscated_sender", '<me>', self.user1, m=m)
self.check_or_me("message.obfuscated_recipient", 'bar@domain.com', self.user1, m=m)
def check_compact_date(self, date, value, format='H:i,d b,d/m/y'):
# use 'H', 'd', 'm' instead of 'G', 'j', 'n' because the latter have no strftime equivalents
t = Template('{% load postman_tags %}{{ date|compact_date:"'+format+'" }}')
self.assertEqual(t.render(Context({'date': date})), value)
def test_compact_date(self):
"Test '|compact_date'."
dt = now()
dt = localtime(dt)
# (1.6) template/base.py/render_value_in_context()
default = force_text(localize(dt))
self.check_compact_date(dt, default, format='')
self.check_compact_date(dt, default, format='one')
self.check_compact_date(dt, default, format='one,two')
self.check_compact_date(dt, dt.strftime('%H:%M'))
dt2 = dt - timedelta(days=1) # known limitation: does not work on Jan 1st, because the year changes as well
self.check_compact_date(dt2, dt2.strftime('%d %b').lower()) # filter's 'b' is lowercase
dt2 = dt - timedelta(days=365)
self.check_compact_date(dt2, dt2.strftime('%d/%m/%y'))
class ContextProcessorsTest(BaseTest):
"""
Test the context processors.
"""
def _check_inbox(self, urlname, num_extra_queries, value=''):
url = reverse('postman:' + urlname)
# SELECT "django_session"."... -------------\
# SAVEPOINT "s????_x?" -\ always \ +2 if
# RELEASE SAVEPOINT "s????_x?" -/ these two / authenticated
# SELECT "auth_user"."... -------------/
# SELECT COUNT(*) ... -- when authenticated, +1 if the variable is evaluated
with self.assertNumQueries(2 + num_extra_queries):
response = self.client.get(url)
self.assertEqual(response.content, value.encode()) # content is bytestring
def check_inbox_without_eval(self, num_extra_queries=0):
return self._check_inbox('no_context_processors', num_extra_queries)
def check_inbox_with_eval(self, num_extra_queries=0, value=''):
return self._check_inbox('context_processors', num_extra_queries, value)
def test_inbox(self):
"Test {{ postman_unread_count }}."
self.check_inbox_without_eval()
self.check_inbox_with_eval()
self.assertTrue(self.client.login(username='foo', password='pass'))
self.check_inbox_without_eval(2)
self.check_inbox_with_eval(3, '0')
Message.objects.create(subject='s', recipient=self.user1) # its status is not enough
self.check_inbox_without_eval(2)
self.check_inbox_with_eval(3, '0')
Message.objects.create(subject='s', recipient=self.user1, moderation_status=STATUS_ACCEPTED)
self.check_inbox_without_eval(2)
self.check_inbox_with_eval(3, '1')
class TagsTest(BaseTest):
"""
Test the template tags.
"""
def check_postman_unread(self, value, user=None, asvar=''):
t = Template("{% load postman_tags %}{% postman_unread " + asvar +" %}")
ctx = Context({'user': user} if user else {})
self.assertEqual(t.render(ctx), value)
return ctx
def test_postman_unread(self):
"Test 'postman_unread'."
self.check_postman_unread('')
self.check_postman_unread('', AnonymousUser())
self.check_postman_unread('0', self.user1)
Message.objects.create(subject='s', recipient=self.user1)
self.check_postman_unread('0', self.user1)
Message.objects.create(subject='s', recipient=self.user1, moderation_status=STATUS_ACCEPTED)
self.check_postman_unread('1', self.user1)
ctx = self.check_postman_unread('', self.user1, 'as var')
self.assertEqual(ctx['var'], 1)
self.assertRaises(TemplateSyntaxError, self.check_postman_unread, '', self.user1, 'as var extra')
self.assertRaises(TemplateSyntaxError, self.check_postman_unread, '', self.user1, 'As var')
def check_order_by(self, keyword, value_list, context=None):
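# the tag output is a query string starting with '?'; parameters are compared as a set because their order is not guaranteed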
t = Template("{% load postman_tags %}{% postman_order_by " + keyword +" %}")
r = t.render(Context({'gets': QueryDict(context)} if context else {}))
self.assertEqual(r[0], '?')
self.assertEqual(set(r[1:].split('&')), set([k+'='+v for k, v in value_list]))
def test_order_by(self):
"Test 'postman_order_by'."
for k, v in ORDER_BY_MAPPER.items():
self.check_order_by(k, [(ORDER_BY_KEY, v)])
self.check_order_by('subject', [(ORDER_BY_KEY, 's')], ORDER_BY_KEY+'=foo')
self.check_order_by('subject', [(ORDER_BY_KEY, 'S')], ORDER_BY_KEY+'=s')
self.check_order_by('subject', [(ORDER_BY_KEY, 's'), ('page', '12')], 'page=12')
self.check_order_by('subject', [('foo', 'bar'), (ORDER_BY_KEY, 's'), ('baz', 'qux')], 'foo=bar&'+ORDER_BY_KEY+'=S&baz=qux')
self.assertRaises(TemplateSyntaxError, self.check_order_by, '', None)
self.assertRaises(TemplateSyntaxError, self.check_order_by, 'subject extra', None)
self.assertRaises(TemplateSyntaxError, self.check_order_by, 'unknown', None)
class UtilsTest(BaseTest):
"""
Test helper functions.
"""
def test_format_body(self):
"Test format_body()."
header = "\n\nfoo wrote:\n"
footer = "\n"
self.assertEqual(format_body(self.user1, "foo bar"), header+"> foo bar"+footer)
self.assertEqual(format_body(self.user1, "foo bar", indent='|_'), header+"|_foo bar"+footer)
self.assertEqual(format_body(self.user1, width=10, body="34 67 90"), header+"> 34 67 90"+footer)
self.assertEqual(format_body(self.user1, width=10, body="34 67 901"), header+"> 34 67\n> 901"+footer)
self.assertEqual(format_body(self.user1, width=10, body="> 34 67 901"), header+"> > 34 67 901"+footer)
self.assertEqual(format_body(self.user1, width=10,
body= "34 67\n" "\n" " \n" " .\n" "End"),
header+"> 34 67\n" "> \n" "> \n" "> .\n" "> End"+footer)
def test_format_subject(self):
"Test format_subject()."
self.assertEqual(format_subject("foo bar"), "Re: foo bar")
self.assertEqual(format_subject("Re: foo bar"), "Re: foo bar")
self.assertEqual(format_subject("rE: foo bar"), "rE: foo bar")
def check_email(self, message_template_name, recipient_list, object, action, site, parts=None):
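# 'parts' describes the expected payload: None means the template must not exist, a string a plain-text-only mail, a (text, html) tuple a multipart alternative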
mail.outbox = []
subject_template = 'postman/email_visitor_subject.txt'
message_template_name = 'postman_for_tests/' + message_template_name
if not parts:
self.assertRaises(TemplateDoesNotExist, email, subject_template, message_template_name, recipient_list, object, action, site)
else:
email(subject_template, message_template_name, recipient_list, object, action, site)
self.assertEqual(len(mail.outbox), 1)
msg = mail.outbox[0].message()
payload = msg.get_payload()
if isinstance(parts, tuple):
self.assertEqual(msg.is_multipart(), True)
for i, mimetype in ((0, 'text/plain'), (1, 'text/html')):
part = payload[i]
self.assertTupleEqual((part.get_content_type(), part.get_payload()), (mimetype, parts[i]))
else:
self.assertEqual(msg.is_multipart(), False)
self.assertEqual(payload, parts)
def test_email(self):
"Test email()."
m = self.c12()
recipient_list = ['recipient@domain.tld']
action = 'some_action'
site = Site.objects.get_current() if Site._meta.installed else None
self.check_email('nonexistent_template', recipient_list, m, action, site)
self.check_email('email_txt_only', recipient_list, m, action, site, 'Text only\n')
self.check_email('email_html_only', recipient_list, m, action, site, ('Html only\n', '<div>Html only</div>\n'))
self.check_email('email_html_and_empty_txt', recipient_list, m, action, site, ('Html and empty Text\n', '<div>Html and empty Text</div>\n'))
self.check_email('email_html_and_txt', recipient_list, m, action, site, ('Alternate Text\n', '<div>Html and Text</div>\n'))
def check_from_email(self, subject_template, message_template_name, recipient_list, object, action, site, from_email):
mail.outbox = []
email(subject_template, message_template_name, recipient_list, object, action, site)
self.assertEqual(mail.outbox[0].from_email, from_email)
def test_from_email(self):
"Test the POSTMAN_FROM_EMAIL setting."
m = self.c12()
recipient_list = ['recipient@domain.tld']
action = 'some_action'
site = None
subject_template = 'postman/email_visitor_subject.txt'
message_template_name = 'postman_for_tests/email_txt_only'
self.check_from_email(subject_template, message_template_name, recipient_list, m, action, site, settings.DEFAULT_FROM_EMAIL)
custom_from_email = 'postman@host.tld'
settings.POSTMAN_FROM_EMAIL = custom_from_email
self.reload_modules()
self.check_from_email(subject_template, message_template_name, recipient_list, m, action, site, custom_from_email)
def test_params_email(self):
"Test the POSTMAN_PARAMS_EMAIL setting."
m = self.c12()
action = 'acceptance'
site = None
settings.POSTMAN_PARAMS_EMAIL = lambda context: {
'reply_to': ['someone@domain.tld'],
'headers': {'X-my-choice': 'my-value'}
}
self.reload_modules()
# sender+email+recipient is not a regular combination; Django 1.10 doesn't mind if the email is missing,
# but Django 1.11 doesn't send without at least one recipient (empty strings are filtered).
m.email = self.email
email_visitor(m, action, site)
msg = mail.outbox[0]
self.assertEqual(msg.reply_to, ['someone@domain.tld'])
self.assertEqual(msg.extra_headers['X-my-choice'], 'my-value')
notify_user(m, action, site)
msg = mail.outbox[1]
self.assertEqual(msg.reply_to, ['someone@domain.tld'])
self.assertEqual(msg.extra_headers['X-my-choice'], 'my-value')
def check_notification_approval(self, m, setting, mail_number, email=None):
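# 'setting' may be a constant, a callable, a user-model method name or a dotted path to a function; mail_number is the expected outbox length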
settings.POSTMAN_NOTIFICATION_APPROVAL = setting
self.reload_modules()
notify_user(m, 'acceptance', None)
self.assertEqual(len(mail.outbox), mail_number)
if mail_number:
self.assertEqual(mail.outbox[0].to, [email])
mail.outbox = []
def test_notification_approval(self):
"Test the POSTMAN_NOTIFICATION_APPROVAL setting."
m = self.c12()
# a constant
self.check_notification_approval(m, False, 0)
# a function
self.check_notification_approval(m, lambda user, action, site: None, 0)
self.check_notification_approval(m, lambda user, action, site: False, 0)
self.check_notification_approval(m, lambda user, action, site: True, 1, self.user2.email)
self.check_notification_approval(m,
lambda user, action, site: '{}_{}@domain.tld'.format(user.username, action), 1, 'bar_acceptance@domain.tld')
# for the following syntaxes, the other returned value variants are already checked with the preceding lambda functions
# a method name
self.user2.notification_approval = lambda action, site: 'bar_' + action # patch to emulate a method of a custom user model
self.check_notification_approval(m, 'notification_approval', 1, 'bar_acceptance')
# a path to a function
self.check_notification_approval(m, 'postman.module_for_tests.notification_approval', 1, 'bar_acceptance@domain.tld')
def test_get_order_by(self):
"Test get_order_by()."
self.assertEqual(get_order_by({}), None)
self.assertEqual(get_order_by({ORDER_BY_KEY: 'f'}), 'sender__{0}'.format(get_user_model().USERNAME_FIELD))
self.assertEqual(get_order_by({ORDER_BY_KEY: 'D'}), '-sent_at')
def test_get_user_representation(self):
"Test get_user_representation()."
# no setting
self.assertEqual(get_user_representation(self.user1), "foo")
# a wrong setting
settings.POSTMAN_SHOW_USER_AS = 'unknown_attribute'
self.assertEqual(get_user_representation(self.user1), "foo")
# a valid setting but an empty attribute
settings.POSTMAN_SHOW_USER_AS = 'first_name'
self.assertEqual(get_user_representation(self.user1), "foo")
# a property name
settings.POSTMAN_SHOW_USER_AS = 'email'
self.assertEqual(get_user_representation(self.user1), "foo@domain.com")
if not six.PY3: # avoid six.PY2, not available in six 1.2.0
settings.POSTMAN_SHOW_USER_AS = b'email' # usage on PY3 is nonsense
self.assertEqual(get_user_representation(self.user1), "foo@domain.com")
# a method name
# can't use get_full_name(), an empty string in our case
# get_absolute_url() doesn't exist anymore since Django 1.7
settings.POSTMAN_SHOW_USER_AS = 'natural_key' # avoid get_username(), already used for the default representation
self.assertEqual(get_user_representation(self.user1), "(u'foo',)" if not six.PY3 else "('foo',)")
# a function
settings.POSTMAN_SHOW_USER_AS = lambda u: u.natural_key()
self.assertEqual(get_user_representation(self.user1), "(u'foo',)" if not six.PY3 else "('foo',)")
# a path to a function or a class
settings.POSTMAN_SHOW_USER_AS = 'postman.module_for_tests.user_representation'
self.assertEqual(get_user_representation(self.user1), "nick_foo")
settings.POSTMAN_SHOW_USER_AS = 'postman.module_for_tests.UserRepresentation'
self.assertEqual(get_user_representation(self.user1), "nick_foo")
def test_get_user_name(self):
"Test get_user_name()."
# no setting
self.assertEqual(get_user_name(self.user1), "foo")
# a wrong setting
settings.POSTMAN_NAME_USER_AS = 'unknown_attribute'
self.assertRaises(AttributeError, get_user_name, self.user1)
# a property name
settings.POSTMAN_NAME_USER_AS = 'email'
self.assertEqual(get_user_name(self.user1), "foo@domain.com")
settings.POSTMAN_NAME_USER_AS = 'id' # a string is always returned even for not character types
self.assertEqual(get_user_name(self.user1), "1")
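# ---------------------------------------------------------------------------
# Editorial sketch, not part of django-postman: the tests above exercise
# settings (POSTMAN_NOTIFICATION_APPROVAL, POSTMAN_SHOW_USER_AS,
# POSTMAN_NAME_USER_AS) that may be a constant, a callable, an attribute or
# method name on the user model, or a dotted path to a function. A minimal
# resolver for such a setting could look like the following; the function
# name and signature are illustrative assumptions, not the library's API.
# ---------------------------------------------------------------------------
def resolve_flexible_setting(setting, user, *args):
    """Resolve a postman-style setting against a user, for illustration only."""
    if callable(setting):                        # a function or lambda
        return setting(user, *args)
    if isinstance(setting, str):
        if '.' in setting:                       # a dotted path to a function or class
            from importlib import import_module
            module_path, attr_name = setting.rsplit('.', 1)
            target = getattr(import_module(module_path), attr_name)
            return target(user, *args) if callable(target) else target
        value = getattr(user, setting, None)     # an attribute or method name on the user model
        return value(*args) if callable(value) else value
    return setting                               # a plain constant such as False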
class ApiTest(BaseTest):
"""
Test the API functions.
"""
def check_message(self, m, subject='s', body='b', recipient_username='bar'):
"Check some message properties."
self.assertEqual(m.subject, subject)
self.assertEqual(m.body, body)
self.assertEqual(m.email, '')
self.assertEqual(m.sender, self.user1)
self.assertEqual(m.recipient.get_username(), recipient_username)
def test_pm_broadcast(self):
"Test the case of a single recipient."
pm_broadcast(sender=self.user1, recipients=self.user2, subject='s', body='b')
m = Message.objects.get()
self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True,
sender_archived=True, sender_deleted_at=True)
self.check_now(m.sender_deleted_at)
self.check_now(m.moderation_date)
self.check_message(m)
self.assertEqual(len(mail.outbox), 1)
def test_pm_broadcast_skip_notification(self):
"Test the notification skipping."
pm_broadcast(sender=self.user1, recipients=self.user2, subject='s', skip_notification=True)
self.assertEqual(len(mail.outbox), 0)
def test_pm_broadcast_multi(self):
"Test the case of more than a single recipient."
pm_broadcast(sender=self.user1, recipients=[self.user2, self.user3], subject='s', body='b')
msgs = list(Message.objects.all())
self.check_message(msgs[0], recipient_username='baz')
self.check_message(msgs[1])
def test_pm_write(self):
"Test the basic minimal use."
msg = pm_write(sender=self.user1, recipient=self.user2, subject='s', body='b')
self.assertTrue(isinstance(msg, Message))
m = Message.objects.get()
self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True)
self.check_now(m.moderation_date)
self.check_message(m)
self.assertEqual(len(mail.outbox), 1) # notify the recipient
def test_pm_write_skip_notification(self):
"Test the notification skipping."
msg = pm_write(sender=self.user1, recipient=self.user2, subject='s', skip_notification=True)
self.assertTrue(isinstance(msg, Message))
self.assertEqual(len(mail.outbox), 0)
def test_pm_write_auto_archive(self):
"Test the auto_archive parameter."
msg = pm_write(sender=self.user1, recipient=self.user2, subject='s', auto_archive=True)
self.assertTrue(isinstance(msg, Message))
m = Message.objects.get()
self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True, sender_archived=True)
def test_pm_write_auto_delete(self):
"Test the auto_delete parameter."
msg = pm_write(sender=self.user1, recipient=self.user2, subject='s', auto_delete=True)
self.assertTrue(isinstance(msg, Message))
m = Message.objects.get()
self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True, sender_deleted_at=True)
self.check_now(m.sender_deleted_at)
def test_pm_write_auto_moderators_accepted(self):
"Test the auto_moderators parameter, moderate as accepted."
msg = pm_write(sender=self.user1, recipient=self.user2, subject='s', auto_moderators=lambda m: True)
self.assertTrue(isinstance(msg, Message))
m = Message.objects.get()
self.check_status(m, status=STATUS_ACCEPTED, moderation_date=True)
def test_pm_write_auto_moderators_pending(self):
"Test the auto_moderators parameter, no moderation decision is taken. Test the parameter as a list."
msg = pm_write(sender=self.user1, recipient=self.user2, subject='s', auto_moderators=[lambda m: None])
self.assertTrue(isinstance(msg, Message))
m = Message.objects.get()
self.check_status(m)
self.assertEqual(len(mail.outbox), 0) # no one to notify
def test_pm_write_auto_moderators_rejected(self):
"Test the auto_moderators parameter, moderate as rejected. Test the parameter as a tuple."
msg = pm_write(sender=self.user1, recipient=self.user2, subject='s', auto_moderators=(lambda m: False, ))
self.assertTrue(isinstance(msg, Message))
m = Message.objects.get()
self.check_status(m, status=STATUS_REJECTED, moderation_date=True, recipient_deleted_at=True)
self.check_now(m.moderation_date)
self.check_now(m.recipient_deleted_at)
self.assertEqual(len(mail.outbox), 0) # sender is not notified in the case of auto moderation
class CommandTest(BaseTest):
"""
Test the custom commands.
"""
def test_cleanup_empty(self):
"Basic, without messages."
out = StringIO()
call_command('postman_cleanup', verbosity=1, stdout=out) # 1 is the only implemented verbosity level
self.assertTrue(out.getvalue().startswith('Erase messages '))
def test_cleanup_messages(self):
"Only individual messages."
good_date = now() - timedelta(days=30) # the default delay
m1 = self.create()
m2 = self.create(sender_deleted_at=good_date, recipient_deleted_at=good_date) # the only good candidate
m3 = self.create(sender_deleted_at=good_date, recipient_deleted_at=now())
m4 = self.create(sender_deleted_at=now(), recipient_deleted_at=good_date)
m5 = self.create(sender_deleted_at=good_date)
m6 = self.create(recipient_deleted_at=good_date)
out = StringIO()
call_command('postman_cleanup', verbosity=0, stdout=out)
self.assertEqual(out.getvalue(), '')
self.assertListEqual(list(Message.objects.all()), [m6, m5, m4, m3, m1])
def test_cleanup_conversations(self):
# user1 user2
# ----------- -----------
# del del
# ok ------>| ok
# ok <------| ok good candidate
# ok ------>| ok
# ok <------| bad date is not old enough (could be any of the four dates)
# ok ------>| ok
# missing <------| ok one message not deleted by one of the participants (could be any of the four dates)
good_date = now() - timedelta(days=30) # the default delay
c1_m1 = self.c12(sender_deleted_at=good_date, recipient_deleted_at=good_date)
c1_m1.thread = c1_m1; c1_m1.save()
c1_m2 = self.c21(parent=c1_m1, thread=c1_m1.thread, sender_deleted_at=good_date, recipient_deleted_at=good_date)
c2_m1 = self.c12(sender_deleted_at=good_date, recipient_deleted_at=good_date)
c2_m1.thread = c2_m1; c2_m1.save()
c2_m2 = self.c21(parent=c2_m1, thread=c2_m1.thread, sender_deleted_at=now(), recipient_deleted_at=good_date)
c3_m1 = self.c12(sender_deleted_at=good_date, recipient_deleted_at=good_date)
c3_m1.thread = c3_m1; c3_m1.save()
c3_m2 = self.c21(parent=c3_m1, thread=c3_m1.thread, sender_deleted_at=good_date) # missing recipient_deleted_at
out = StringIO()
call_command('postman_cleanup', verbosity=0, stdout=out)
self.assertEqual(out.getvalue(), '')
self.assertListEqual(list(Message.objects.all()), [c3_m2, c3_m1, c2_m2, c2_m1])
def test_cleanup_days(self):
"Test the 'days' option."
delay = 5 # any but the default delay
good_date = now() - timedelta(days=delay)
m1 = self.create(sender_deleted_at=good_date, recipient_deleted_at=good_date) # the only good candidate
m2 = self.create(sender_deleted_at=good_date, recipient_deleted_at=good_date + timedelta(days=1))
out = StringIO()
call_command('postman_cleanup', verbosity=0, stdout=out, days=delay)
self.assertEqual(out.getvalue(), '')
self.assertListEqual(list(Message.objects.all()), [m2])
def test_checkup_empty(self):
"Basic, without messages."
out = StringIO()
call_command('postman_checkup', verbosity=1, stdout=out) # 1 is the only implemented verbosity level
lines = out.getvalue().splitlines()
self.assertTrue(lines[0].startswith('Checking messages ', 9)) # begin with "HH:MM:SS "
self.assertTrue(lines[1].startswith('All is correct.', 9))
def check_checkup(self, reasons):
count = len(reasons)
out, err = StringIO(), StringIO()
call_command('postman_checkup', verbosity=1, stdout=out, stderr=err)
lines = out.getvalue().splitlines()
self.assertTrue(lines[-1].startswith('Number of inconsistencies found: {0}'.format(count), 9))
lines = err.getvalue().splitlines()
for i, reason in enumerate(reasons):
# because of possible WARNINGS in some Dj versions, do a reverse read, from the end
self.assertEqual(lines[-3 * (1+i)], reason)
def test_checkup_parties(self):
m = self.create()
self.check_checkup(["Visitor's email is missing.", 'Sender and Recipient cannot be both undefined.'])
def test_checkup_email(self):
m = self.c12(email=self.email)
self.check_checkup(["Visitor's email is in excess."])
def test_checkup_dates(self):
delta = timedelta(minutes=1)
m = self.c12(
read_at=now() - delta,
sender_deleted_at=now() - delta,
recipient_deleted_at=now() - delta,
)
self.check_checkup([
"Deletion date by recipient must be later than sending date.",
"Deletion date by sender must be later than sending date.",
"Reading date must be later than sending date.",
])
def test_checkup_replied(self):
delta = timedelta(minutes=1)
m = self.c12(replied_at=now() - delta)
self.check_checkup([
"The message cannot be replied without being in a conversation.",
"Response date cannot be set without at least one reply.",
"The message cannot be replied without having been read.",
"Response date must be later than sending date.",
])
def test_checkup_reply_after_read(self):
delta = timedelta(minutes=1)
m = self.c12(
read_at=now() + 2 * delta,
replied_at=now() + delta,
)
self.check_checkup([
"The message cannot be replied without being in a conversation.",
"Response date cannot be set without at least one reply.",
"Response date must be later than reading date.",
])
def test_checkup_reply(self):
p = self.c12()
m = self.c21(parent=p) # thread is missing
self.check_checkup(["The message cannot be a reply without being in a conversation."])
def test_checkup_in_conversation(self):
p = self.c12() # thread is missing
m = self.c21(parent=p, thread=p)
self.check_checkup(["The reply and its parent are not in a conversation in common."])
def test_checkup_same_conversation(self):
"""
parent thread
m1
m2 1 <- would be 2 if correct
m3 2 2
"""
fake = self.c12()
p = self.c12(thread=fake) # thread is incorrect
m = self.c21(parent=p, thread=p)
self.check_checkup(["The reply and its parent are not in a conversation in common."])
| 50.748918
| 153
| 0.654918
|
51dbc2fb6ecaf4002210863b43d73efd54f41d43
| 12,390
|
py
|
Python
|
scipy/linalg/_expm_frechet.py
|
smola/scipy
|
ff8b9d9e87a585a820846d7f459d6156ba621c4d
|
[
"BSD-3-Clause"
] | 2
|
2020-06-20T14:11:14.000Z
|
2020-10-12T07:11:36.000Z
|
scipy/linalg/_expm_frechet.py
|
smola/scipy
|
ff8b9d9e87a585a820846d7f459d6156ba621c4d
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/linalg/_expm_frechet.py
|
smola/scipy
|
ff8b9d9e87a585a820846d7f459d6156ba621c4d
|
[
"BSD-3-Clause"
] | null | null | null |
"""Frechet derivative of the matrix exponential."""
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
__all__ = ['expm_frechet', 'expm_cond']
def expm_frechet(A, E, method=None, compute_expm=True, check_finite=True):
"""
Frechet derivative of the matrix exponential of A in the direction E.
Parameters
----------
A : (N, N) array_like
Matrix of which to take the matrix exponential.
E : (N, N) array_like
Matrix direction in which to take the Frechet derivative.
method : str, optional
Choice of algorithm. Should be one of
- `SPS` (default)
- `blockEnlarge`
compute_expm : bool, optional
Whether to compute also `expm_A` in addition to `expm_frechet_AE`.
Default is True.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
expm_A : ndarray
Matrix exponential of A.
expm_frechet_AE : ndarray
Frechet derivative of the matrix exponential of A in the direction E.
For ``compute_expm = False``, only `expm_frechet_AE` is returned.
See also
--------
expm : Compute the exponential of a matrix.
Notes
-----
This section describes the available implementations that can be selected
by the `method` parameter. The default method is *SPS*.
Method *blockEnlarge* is a naive algorithm.
Method *SPS* is Scaling-Pade-Squaring [1]_.
It is a sophisticated implementation which should take
only about 3/8 as much time as the naive implementation.
The asymptotics are the same.
.. versionadded:: 0.13.0
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
Computing the Frechet Derivative of the Matrix Exponential,
with an application to Condition Number Estimation.
SIAM Journal On Matrix Analysis and Applications.,
30 (4). pp. 1639-1657. ISSN 1095-7162
Examples
--------
>>> import scipy.linalg
>>> A = np.random.randn(3, 3)
>>> E = np.random.randn(3, 3)
>>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E)
>>> expm_A.shape, expm_frechet_AE.shape
((3, 3), (3, 3))
>>> import scipy.linalg
>>> A = np.random.randn(3, 3)
>>> E = np.random.randn(3, 3)
>>> expm_A, expm_frechet_AE = scipy.linalg.expm_frechet(A, E)
>>> M = np.zeros((6, 6))
>>> M[:3, :3] = A; M[:3, 3:] = E; M[3:, 3:] = A
>>> expm_M = scipy.linalg.expm(M)
>>> np.allclose(expm_A, expm_M[:3, :3])
True
>>> np.allclose(expm_frechet_AE, expm_M[:3, 3:])
True
"""
if check_finite:
A = np.asarray_chkfinite(A)
E = np.asarray_chkfinite(E)
else:
A = np.asarray(A)
E = np.asarray(E)
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be a square matrix')
if E.ndim != 2 or E.shape[0] != E.shape[1]:
raise ValueError('expected E to be a square matrix')
if A.shape != E.shape:
raise ValueError('expected A and E to be the same shape')
if method is None:
method = 'SPS'
if method == 'SPS':
expm_A, expm_frechet_AE = expm_frechet_algo_64(A, E)
elif method == 'blockEnlarge':
expm_A, expm_frechet_AE = expm_frechet_block_enlarge(A, E)
else:
raise ValueError('Unknown implementation %s' % method)
if compute_expm:
return expm_A, expm_frechet_AE
else:
return expm_frechet_AE
def expm_frechet_block_enlarge(A, E):
"""
This is a helper function, mostly for testing and profiling.
Return expm(A), frechet(A, E)
"""
n = A.shape[0]
M = np.vstack([
np.hstack([A, E]),
np.hstack([np.zeros_like(A), A])])
expm_M = scipy.linalg.expm(M)
return expm_M[:n, :n], expm_M[:n, n:]
"""
Maximal values ell_m of ||2**-s A|| such that the backward error bound
does not exceed 2**-53.
"""
ell_table_61 = (
None,
# 1
2.11e-8,
3.56e-4,
1.08e-2,
6.49e-2,
2.00e-1,
4.37e-1,
7.83e-1,
1.23e0,
1.78e0,
2.42e0,
# 11
3.13e0,
3.90e0,
4.74e0,
5.63e0,
6.56e0,
7.52e0,
8.53e0,
9.56e0,
1.06e1,
1.17e1,
)
# The b vectors and U and V are copypasted
# from scipy.sparse.linalg.matfuncs.py.
# M, Lu, Lv follow (6.11), (6.12), (6.13), (3.3)
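# Editorial note: each M_k below is the Frechet derivative of the matrix
# power A**k in the direction E, obtained from the product rule, e.g.
#     d/dt (A + t*E)**2 at t=0  ->  M2 = A*E + E*A
#     d/dt (A + t*E)**4 at t=0  ->  M4 = A2*M2 + M2*A2   (A4 = A2*A2)
#     d/dt (A + t*E)**6 at t=0  ->  M6 = A4*M2 + M4*A2   (A6 = A4*A2; powers of A commute)
# U and V collect the odd and even terms of the Pade approximant, so that the
# final step computes R = (V - U)^{-1} (V + U); Lu and Lv are the directional
# derivatives of U and V assembled from the same M_k terms.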
def _diff_pade3(A, E, ident):
b = (120., 60., 12., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
U = A.dot(b[3]*A2 + b[1]*ident)
V = b[2]*A2 + b[0]*ident
Lu = A.dot(b[3]*M2) + E.dot(b[3]*A2 + b[1]*ident)
Lv = b[2]*M2
return U, V, Lu, Lv
def _diff_pade5(A, E, ident):
b = (30240., 15120., 3360., 420., 30., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
A4 = np.dot(A2, A2)
M4 = np.dot(A2, M2) + np.dot(M2, A2)
U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[4]*A4 + b[2]*A2 + b[0]*ident
Lu = (A.dot(b[5]*M4 + b[3]*M2) +
E.dot(b[5]*A4 + b[3]*A2 + b[1]*ident))
Lv = b[4]*M4 + b[2]*M2
return U, V, Lu, Lv
def _diff_pade7(A, E, ident):
b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
A4 = np.dot(A2, A2)
M4 = np.dot(A2, M2) + np.dot(M2, A2)
A6 = np.dot(A2, A4)
M6 = np.dot(A4, M2) + np.dot(M4, A2)
U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
Lu = (A.dot(b[7]*M6 + b[5]*M4 + b[3]*M2) +
E.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
Lv = b[6]*M6 + b[4]*M4 + b[2]*M2
return U, V, Lu, Lv
def _diff_pade9(A, E, ident):
b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
2162160., 110880., 3960., 90., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
A4 = np.dot(A2, A2)
M4 = np.dot(A2, M2) + np.dot(M2, A2)
A6 = np.dot(A2, A4)
M6 = np.dot(A4, M2) + np.dot(M4, A2)
A8 = np.dot(A4, A4)
M8 = np.dot(A4, M4) + np.dot(M4, A4)
U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
Lu = (A.dot(b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2) +
E.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
Lv = b[8]*M8 + b[6]*M6 + b[4]*M4 + b[2]*M2
return U, V, Lu, Lv
def expm_frechet_algo_64(A, E):
n = A.shape[0]
s = None
ident = np.identity(n)
A_norm_1 = scipy.linalg.norm(A, 1)
m_pade_pairs = (
(3, _diff_pade3),
(5, _diff_pade5),
(7, _diff_pade7),
(9, _diff_pade9))
for m, pade in m_pade_pairs:
if A_norm_1 <= ell_table_61[m]:
U, V, Lu, Lv = pade(A, E, ident)
s = 0
break
if s is None:
# scaling
s = max(0, int(np.ceil(np.log2(A_norm_1 / ell_table_61[13]))))
A = A * 2.0**-s
E = E * 2.0**-s
# pade order 13
A2 = np.dot(A, A)
M2 = np.dot(A, E) + np.dot(E, A)
A4 = np.dot(A2, A2)
M4 = np.dot(A2, M2) + np.dot(M2, A2)
A6 = np.dot(A2, A4)
M6 = np.dot(A4, M2) + np.dot(M4, A2)
b = (64764752532480000., 32382376266240000., 7771770303897600.,
1187353796428800., 129060195264000., 10559470521600.,
670442572800., 33522128640., 1323241920., 40840800., 960960.,
16380., 182., 1.)
W1 = b[13]*A6 + b[11]*A4 + b[9]*A2
W2 = b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident
Z1 = b[12]*A6 + b[10]*A4 + b[8]*A2
Z2 = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
W = np.dot(A6, W1) + W2
U = np.dot(A, W)
V = np.dot(A6, Z1) + Z2
Lw1 = b[13]*M6 + b[11]*M4 + b[9]*M2
Lw2 = b[7]*M6 + b[5]*M4 + b[3]*M2
Lz1 = b[12]*M6 + b[10]*M4 + b[8]*M2
Lz2 = b[6]*M6 + b[4]*M4 + b[2]*M2
Lw = np.dot(A6, Lw1) + np.dot(M6, W1) + Lw2
Lu = np.dot(A, Lw) + np.dot(E, W)
Lv = np.dot(A6, Lz1) + np.dot(M6, Z1) + Lz2
# factor once and solve twice
lu_piv = scipy.linalg.lu_factor(-U + V)
R = scipy.linalg.lu_solve(lu_piv, U + V)
L = scipy.linalg.lu_solve(lu_piv, Lu + Lv + np.dot((Lu - Lv), R))
# squaring
for k in range(s):
L = np.dot(R, L) + np.dot(L, R)
R = np.dot(R, R)
return R, L
def vec(M):
"""
Stack columns of M to construct a single vector.
This is somewhat standard notation in linear algebra.
Parameters
----------
M : 2-D array_like
Input matrix
Returns
-------
v : 1-D ndarray
Output vector
"""
return M.T.ravel()
def expm_frechet_kronform(A, method=None, check_finite=True):
"""
Construct the Kronecker form of the Frechet derivative of expm.
Parameters
----------
A : array_like with shape (N, N)
Matrix to be expm'd.
method : str, optional
Extra keyword to be passed to expm_frechet.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
K : 2-D ndarray with shape (N*N, N*N)
Kronecker form of the Frechet derivative of the matrix exponential.
Notes
-----
This function is used to help compute the condition number
of the matrix exponential.
See also
--------
expm : Compute a matrix exponential.
expm_frechet : Compute the Frechet derivative of the matrix exponential.
expm_cond : Compute the relative condition number of the matrix exponential
in the Frobenius norm.
"""
if check_finite:
A = np.asarray_chkfinite(A)
else:
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
n = A.shape[0]
ident = np.identity(n)
cols = []
for i in range(n):
for j in range(n):
E = np.outer(ident[i], ident[j])
F = expm_frechet(A, E,
method=method, compute_expm=False, check_finite=False)
cols.append(vec(F))
return np.vstack(cols).T
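# Editorial sketch (not part of SciPy): a small consistency check relating the
# Kronecker form above to expm_frechet; the helper name is illustrative only.
def _check_kronform_consistency(A, E):
    """
    Verify the Kronecker form against the Frechet derivative.

    With the loop ordering used above, the column of K associated with the
    basis direction e_i e_j^T sits at index i*n + j, so K acts on the
    row-major flattening ``E.ravel()`` and yields ``vec`` of the Frechet
    derivative:  K @ E.ravel() ~= vec(expm_frechet(A, E)).
    """
    K = expm_frechet_kronform(A)
    LAE = expm_frechet(A, E, compute_expm=False)
    return np.allclose(np.dot(K, np.ravel(E)), vec(LAE))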
def expm_cond(A, check_finite=True):
"""
Relative condition number of the matrix exponential in the Frobenius norm.
Parameters
----------
A : 2-D array_like
Square input matrix with shape (N, N).
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
kappa : float
The relative condition number of the matrix exponential
in the Frobenius norm
Notes
-----
A faster estimate for the condition number in the 1-norm
has been published but is not yet implemented in SciPy.
.. versionadded:: 0.14.0
See also
--------
expm : Compute the exponential of a matrix.
expm_frechet : Compute the Frechet derivative of the matrix exponential.
Examples
--------
>>> from scipy.linalg import expm_cond
>>> A = np.array([[-0.3, 0.2, 0.6], [0.6, 0.3, -0.1], [-0.7, 1.2, 0.9]])
>>> k = expm_cond(A)
>>> k
1.7787805864469866
"""
if check_finite:
A = np.asarray_chkfinite(A)
else:
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
X = scipy.linalg.expm(A)
K = expm_frechet_kronform(A, check_finite=False)
# The following norm choices are deliberate.
# The norms of A and X are Frobenius norms,
# and the norm of K is the induced 2-norm.
A_norm = scipy.linalg.norm(A, 'fro')
X_norm = scipy.linalg.norm(X, 'fro')
K_norm = scipy.linalg.norm(K, 2)
kappa = (K_norm * A_norm) / X_norm
return kappa
| 30.072816
| 79
| 0.559403
|
a52434b282ffea2344e3110751288ba0c6972587
| 590
|
py
|
Python
|
examples/pylab_examples/agg_buffer_to_array.py
|
SoftwareDev/mat-plot-lib
|
abaf94859d5ef6e653a4d8a7ce2c59cea1724a57
|
[
"MIT",
"BSD-3-Clause"
] | 16
|
2016-06-14T19:45:35.000Z
|
2020-11-30T19:02:58.000Z
|
lib/mpl_examples/pylab_examples/agg_buffer_to_array.py
|
yingkailiang/matplotlib
|
255a79b106c98c1904489afe6a754e4d943179d6
|
[
"MIT",
"BSD-3-Clause"
] | 7
|
2015-05-08T19:36:25.000Z
|
2015-06-30T15:32:17.000Z
|
lib/mpl_examples/pylab_examples/agg_buffer_to_array.py
|
yingkailiang/matplotlib
|
255a79b106c98c1904489afe6a754e4d943179d6
|
[
"MIT",
"BSD-3-Clause"
] | 6
|
2015-06-05T03:34:06.000Z
|
2022-01-25T09:07:10.000Z
|
import matplotlib.pyplot as plt
import numpy as np
# make an agg figure
fig, ax = plt.subplots()
ax.plot([1,2,3])
ax.set_title('a simple figure')
fig.canvas.draw()
# grab the pixel buffer and dump it into a numpy array
buf = fig.canvas.buffer_rgba()
l, b, w, h = fig.bbox.bounds
# The array needs to be copied, because the underlying buffer
# may be reallocated when the window is resized.
X = np.frombuffer(buf, np.uint8).copy()
X.shape = int(h), int(w), 4  # bbox bounds are floats; shape entries must be ints
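# Editorial note (not part of the original example): newer Matplotlib versions
# (3.1+) expose the RGBA buffer with shape information attached, so the manual
# reshape above can be avoided. This is an illustrative alternative and assumes
# such a Matplotlib version is installed.
X_alt = np.asarray(fig.canvas.buffer_rgba())   # already (H, W, 4), dtype uint8
print(X_alt.shape, X_alt.dtype)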
# now display the array X as an Axes in a new figure
fig2 = plt.figure()
ax2 = fig2.add_subplot(111, frameon=False)
ax2.imshow(X)
plt.show()
| 25.652174
| 61
| 0.722034
|
f841f0b32cb8b2d2ead57e6972cabbb050084401
| 3,221
|
py
|
Python
|
tests/test_update_association_compare.py
|
flying-sheep/goatools
|
1e3a74faa17cbdeef02550c7ddf17b65cf47d34a
|
[
"BSD-2-Clause"
] | 477
|
2015-02-10T06:54:42.000Z
|
2022-03-15T12:36:11.000Z
|
tests/test_update_association_compare.py
|
flying-sheep/goatools
|
1e3a74faa17cbdeef02550c7ddf17b65cf47d34a
|
[
"BSD-2-Clause"
] | 174
|
2015-02-05T18:11:14.000Z
|
2022-03-29T10:24:19.000Z
|
tests/test_update_association_compare.py
|
flying-sheep/goatools
|
1e3a74faa17cbdeef02550c7ddf17b65cf47d34a
|
[
"BSD-2-Clause"
] | 202
|
2015-01-21T12:29:23.000Z
|
2022-03-01T13:26:05.000Z
|
#!/usr/bin/env python
"""Compare new propagate counts function with original function. Test assc results is same."""
__copyright__ = "Copyright (C) 2016-present, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
import os
from os.path import join
from sys import stdout
import timeit
from goatools.obo_parser import GODag
from goatools.base import get_godag
from goatools.associations import dnld_assc
from goatools.anno.update_association import update_association
from goatools.anno.factory import get_objanno
from goatools.godag.prttime import prt_hms
REPO = join(os.path.dirname(os.path.abspath(__file__)), "..")
# pylint: disable=superfluous-parens
def test_update_association():
"""Compare new propagate cnts function with original function. Test assc results is same."""
print('\n1) READ GODAG:')
assc_name = "goa_human.gaf" # gene_association.fb gene_association.mgi
obo = join(REPO, "go-basic.obo")
tic = timeit.default_timer()
godag = get_godag(obo)
tic = prt_hms(tic, "Created two GODags: One for original and one for new propagate counts")
print('\n2) READ ANNOTATIONS:')
assc_orig = dnld_assc(join(REPO, assc_name), godag, prt=stdout)
tic = prt_hms(tic, "Associations Read")
objanno = get_objanno(join(REPO, assc_name), 'gaf', godag=godag)
tic = prt_hms(tic, "Associations Read")
print('\n3) MAKE COPIES OF ASSOCIATIONS:')
assc1 = {g:set(gos) for g, gos in assc_orig.items()}
assc2 = {g:set(gos) for g, gos in assc_orig.items()}
tic = prt_hms(tic, "Associations Copied: One for original and one for new")
print('\n4) UPDATE ASSOCIATIONS (PROPAGATE COUNTS):')
godag.update_association(assc1)
tic = prt_hms(tic, "ORIG: godag.update_association(assc)")
update_association(assc2, godag)
tic = prt_hms(tic, "NEW SA: update_association(go2obj, assc_orig)")
assc3 = objanno.get_id2gos(namespace='BP', propagate_counts=True)
tic = prt_hms(tic, "NEW BASE: update_association(go2obj, assc_orig)")
print('\n5) RUN CHECKS')
_chk_assc(assc1, assc2)
_chk_assc(assc1, assc3)
_chk_godag(godag, obo)
def _chk_godag(go2obj_act, obo):
"""Check that the update_association function did not alter godag."""
go2obj_exp = GODag(join(os.path.dirname(os.path.abspath(__file__)), "../..", obo))
assert len(go2obj_act) == len(go2obj_exp)
assert set(go2obj_act) == set(go2obj_exp)
for go_act, obj_act in go2obj_act.items():
obj_exp = go2obj_exp[go_act]
act_gos = set(o.id for o in obj_act.parents)
exp_gos = set(o.id for o in obj_exp.parents)
assert act_gos == exp_gos, "\nACT: {A}\nEXP: {E}".format(A=act_gos, E=exp_gos)
def _chk_assc(assc1, assc2):
"""Ensure the two associations are the same."""
assert len(assc1) == len(assc2)
assert set(assc1) == set(assc2)
for gene, gos1 in assc1.items():
gos2 = assc2[gene]
assert gos1 == gos2, '{}\nGOs1 {}\n GOs2 {}'.format(
gos1.symmetric_difference(gos2),
sorted(gos1),
sorted(gos2))
if __name__ == '__main__':
test_update_association()
# Copyright (C) 2016-present, DV Klopfenstein, H Tang, All rights reserved.
| 39.280488
| 96
| 0.696678
|
a3038c6b4e6235de5e044619a10c1f889ade84b1
| 786
|
py
|
Python
|
editing/lightweight/rules/__init__.py
|
pulp-platform/quantlib
|
bff5351f937c7dfd88e1ae44a146a257beca0585
|
[
"Apache-2.0"
] | null | null | null |
editing/lightweight/rules/__init__.py
|
pulp-platform/quantlib
|
bff5351f937c7dfd88e1ae44a146a257beca0585
|
[
"Apache-2.0"
] | null | null | null |
editing/lightweight/rules/__init__.py
|
pulp-platform/quantlib
|
bff5351f937c7dfd88e1ae44a146a257beca0585
|
[
"Apache-2.0"
] | 1
|
2022-01-02T10:10:46.000Z
|
2022-01-02T10:10:46.000Z
|
#
# __init__.py
#
# Author(s):
# Matteo Spallanzani <spmatteo@iis.ee.ethz.ch>
#
# Copyright (c) 2020-2021 ETH Zurich.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .filters import *
from .rules import LightweightRule
from . import inq
from . import ana
from . import pact
| 28.071429
| 74
| 0.74173
|
2e1a0cfb468961f0005428b783937b011c778212
| 8,119
|
py
|
Python
|
ClemBot.Bot/bot/cogs/random_cog/random_cog.py
|
Iapetus-11/ClemBot
|
ec32c4491e988b934d3a14fc4709726af6be4e02
|
[
"MIT"
] | null | null | null |
ClemBot.Bot/bot/cogs/random_cog/random_cog.py
|
Iapetus-11/ClemBot
|
ec32c4491e988b934d3a14fc4709726af6be4e02
|
[
"MIT"
] | null | null | null |
ClemBot.Bot/bot/cogs/random_cog/random_cog.py
|
Iapetus-11/ClemBot
|
ec32c4491e988b934d3a14fc4709726af6be4e02
|
[
"MIT"
] | null | null | null |
import asyncio
import json
import logging
import random
import time
import typing
from datetime import datetime
import aiohttp
import discord
import discord.ext.commands as commands
import bot.extensions as ext
from bot.consts import Colors
from bot.messaging.events import Events
from bot.utils.converters import Duration
log = logging.getLogger(__name__)
SLOTS_COMMAND_COOLDOWN = 30
class RandomCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@ext.command()
@ext.long_help(
'Simply flips a coin in discord'
)
@ext.short_help('Flip a coin!')
@ext.example('flip')
async def flip(self, ctx):
random.seed(time.time())
embed = discord.Embed(title='Coin Flip', color=Colors.ClemsonOrange)
heads = discord.File(filename='Heads.jpg',
fp='bot/cogs/random_cog/assets/Heads.jpg')
tails = discord.File(filename='Tails.jpg',
fp='bot/cogs/random_cog/assets/Tails.jpg')
if random.randint(0, 1) == 1:
attachment = heads
embed.set_thumbnail(url='attachment://Heads.jpg')
else:
attachment = tails
embed.set_thumbnail(url='attachment://Tails.jpg')
await ctx.send(embed=embed, file=attachment)
@ext.command(aliases=['roll', 'dice'])
@ext.long_help(
"""
        Rolls dice in an XdY format, where X is the number of dice and Y is the number of sides per die.
Example:
1d6 - Rolls 1 die with 6 sides
2d8 - Rolls 2 die with 8 sides
3d10 - Rolls 3 die with 10 sides
4d20 - Rolls 4 die with 20 sides
"""
)
@ext.short_help('Rolls any type of dice in discord')
@ext.example(('roll 1d6', 'roll 4d20'))
async def diceroll(self, ctx, dice: str):
try:
rolls, limit = map(int, dice.split('d'))
except Exception:
await ctx.send('Entry has to be in a XdY format! See the help command for an example.')
return
result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))
embed = discord.Embed(title='Dice Roller', description=f'{ctx.message.author.mention} rolled **{dice}**', color=Colors.ClemsonOrange)
embed.add_field(name='Here are the results of their rolls: ', value=result, inline=False)
await ctx.send(embed=embed)
@ext.command(aliases=['8ball', '🎱'])
@ext.long_help(
        'Rolls a magic 8ball to tell you your future, guaranteed to work!'
)
@ext.short_help('Know your future')
@ext.example(('ball Will I have a good day today?', '8ball Will I have a bad day today?'))
async def ball(self, ctx, *, question):
responses = [
'It is certain.',
'It is decidedly so.',
'Without a doubt.',
'Yes – definitely.',
'You may rely on it.',
'As I see it, yes.',
'Most likely.',
'Outlook good.',
'Yes.',
'Signs point to yes.',
'Reply hazy, try again.',
'Ask again later.',
'Better not tell you now.',
'Cannot predict now.',
'Concentrate and ask again.',
'Don\'t count on it.',
'My reply is no.',
'My sources say no.',
'Outlook not so good.',
'Very doubtful.'
]
embed = discord.Embed(title='🎱', description=f'{random.choice(responses)}', color=Colors.ClemsonOrange)
await ctx.send(embed=embed)
@ext.command(hidden=True)
@commands.cooldown(1, SLOTS_COMMAND_COOLDOWN, commands.BucketType.user)
@ext.long_help(
'A slot machine inside discord with a chance to win fame and fortune'
)
@ext.short_help('How lucky are you?')
@ext.example('slots')
async def ogslots(self, ctx):
emojis = "🍎🍊🍐🍋🍉🍇🍓🍒"
a = random.choice(emojis)
b = random.choice(emojis)
c = random.choice(emojis)
blank = '⬜'
slotset = {a, b, c}
if (len(slotset) == 1):
message = f'{ctx.message.author.mention} won!'
elif (len(slotset) == 2):
message = f'{ctx.message.author.mention} almost won, 2/3!'
else:
message = f'{ctx.message.author.mention} lost, no matches.'
slotstitle = '💎 Slot Machine 💎'
async def slotsrolling(input, spinstatus, waittime):
slotembed = discord.Embed(title=f'{slotstitle}', color=Colors.ClemsonOrange,
description=f'**{ctx.message.author.name} has rolled the slots**')
slotembed.add_field(name=input, value=spinstatus, inline=False)
await asyncio.sleep(waittime)
return slotembed
embed = await slotsrolling(f'{blank} | {blank} | {blank}', 'Spinning', 0)
msg = await ctx.send(embed=embed)
embed = await slotsrolling(f'{a} | {blank} | {blank}', 'Spinning', 1)
await msg.edit(embed=embed)
embed = await slotsrolling(f'{a} | {b} | {blank}', 'Spinning', 1)
await msg.edit(embed=embed)
embed = await slotsrolling(f'{a} | {b} | {c}', f'**{message}**', 1)
await msg.edit(embed=embed)
@ext.command()
@ext.long_help(
'Creates a raffle for giveaways inside discord and picks a random winner from all reactors after a specified time frame'
)
@ext.short_help('Create giveaways!')
@ext.example(('raffle 1h this is fun', 'raffle 1d a whole day raffle!'))
async def raffle(self, ctx, time: typing.Optional[Duration] = 5, *, reason):
if isinstance(time, datetime):
delay_time = (time - datetime.utcnow()).total_seconds()
else:
delay_time = time
description = f'Raffle for {reason}\nReact with :tickets: to enter the raffle'
embed = discord.Embed(title='RAFFLE', color=Colors.ClemsonOrange, description=description)
msg = await ctx.send(embed=embed)
await msg.add_reaction('🎟️')
await asyncio.sleep(delay_time)
cache_msg = await ctx.fetch_message(msg.id)
for reaction in cache_msg.reactions:
if reaction.emoji == '🎟️':
if reaction.count == 1:
description += '\n\nNo one entered the raffle :('
embed = discord.Embed(title='RAFFLE', color=Colors.ClemsonOrange, description=description)
await msg.edit(embed=embed)
else:
reactors = await reaction.users().flatten()
# remove first user b/c first user is always bot
reactors.pop(0)
winner = random.choice(reactors).name
description += f'\n\n🎉 Winner is {winner} 🎉'
embed = discord.Embed(title='RAFFLE', color=Colors.ClemsonOrange, description=description)
await msg.edit(embed=embed)
@ext.command(aliases=['relevant'])
@ext.long_help(
        'There\'s always a relevant xkcd for any situation, see if you get lucky with a random one!'
)
@ext.short_help('"relevant xkcd"')
@ext.example('xkcd')
async def xkcd(self, ctx):
async with aiohttp.ClientSession() as session:
async with await session.get(url='https://c.xkcd.com/random/comic/') as resp:
if (resp.status == 200):
msg = await ctx.send(resp.url)
await self.bot.messenger.publish(Events.on_set_deletable, msg=msg, author=ctx.author, timeout=60)
else:
response_info = json.loads(await resp.text())['meta']
embed = discord.Embed(title='xkcd', color=Colors.Error)
embed.add_field(name='Error', value=f"{response_info['status']}: {response_info['msg']}")
msg = await ctx.send(embed=embed)
await self.bot.messenger.publish(Events.on_set_deletable, msg=msg, author=ctx.author, timeout=60)
def setup(bot):
bot.add_cog(RandomCog(bot))
| 38.117371
| 141
| 0.582215
|
634ba3bdcdd9d0f4bee23d72496bf79df0ba0a34
| 19,860
|
py
|
Python
|
sql/query_privileges.py
|
hujingguang/Archery
|
819c77745fe7f70899dfbe3ead013d0b0caf00be
|
[
"Apache-2.0"
] | 1
|
2019-11-12T08:32:01.000Z
|
2019-11-12T08:32:01.000Z
|
sql/query_privileges.py
|
hujingguang/Archery
|
819c77745fe7f70899dfbe3ead013d0b0caf00be
|
[
"Apache-2.0"
] | 1
|
2021-06-10T22:04:38.000Z
|
2021-06-10T22:04:38.000Z
|
sql/query_privileges.py
|
hujingguang/Archery
|
819c77745fe7f70899dfbe3ead013d0b0caf00be
|
[
"Apache-2.0"
] | 1
|
2020-09-23T05:43:34.000Z
|
2020-09-23T05:43:34.000Z
|
# -*- coding: UTF-8 -*-
"""
@author: hhyo
@license: Apache Licence
@file: query_privileges.py
@time: 2019/03/24
"""
import logging
import datetime
import re
import traceback
import simplejson as json
from django.contrib.auth.decorators import permission_required
from django.db import transaction
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django_q.tasks import async_task
from common.config import SysConfig
from common.utils.const import WorkflowDict
from common.utils.extend_json_encoder import ExtendJSONEncoder
from sql.engines.inception import InceptionEngine
from sql.models import QueryPrivilegesApply, QueryPrivileges, Instance, ResourceGroup
from sql.notify import notify_for_audit
from sql.utils.resource_group import user_groups, user_instances
from sql.utils.workflow_audit import Audit
from sql.utils.sql_utils import extract_tables
logger = logging.getLogger('default')
__author__ = 'hhyo'
# TODO: move the SQL parsing and permission checks inside the permission validation into each engine implementation
def query_priv_check(user, instance, db_name, sql_content, limit_num):
"""
    Query permission check.
:param user:
:param instance:
:param db_name:
:param sql_content:
:param limit_num:
:return:
"""
result = {'status': 0, 'msg': 'ok', 'data': {'priv_check': True, 'limit_num': 0}}
    # Users with can_query_all_instance are treated as administrators; only the limit value is resolved
    # A superuser has full privileges; no special handling is needed
if user.has_perm('sql.query_all_instances'):
priv_limit = int(SysConfig().get('admin_query_limit', 5000))
result['data']['limit_num'] = min(priv_limit, limit_num) if limit_num else priv_limit
return result
    # Users with can_query_resource_group_instance are resource-group admins and may query every instance in their groups
if user.has_perm('sql.query_resource_group_instance'):
if user_instances(user, tag_codes=['can_read']).filter(pk=instance.pk).exists():
priv_limit = int(SysConfig().get('admin_query_limit', 5000))
result['data']['limit_num'] = min(priv_limit, limit_num) if limit_num else priv_limit
return result
    # explain and show create statements skip the permission check
if re.match(r"^explain|^show\s+create", sql_content, re.I):
return result
    # Otherwise try to parse the statement with Inception
try:
        # Try to validate table-level privileges via Inception
table_ref = _table_ref(f"{sql_content.rstrip(';')};", instance, db_name)
        # Check privileges in a loop; a single query only touches a limited number of databases/tables, so the cost is negligible
for table in table_ref:
            # Neither database-level nor table-level privilege
if not _db_priv(user, instance, table['db']) and not _tb_priv(user, instance, db_name, table['table']):
result['status'] = 1
result['msg'] = f"你无{db_name}.{table['table']}表的查询权限!请先到查询权限管理进行申请"
return result
        # Take the smallest limit among the involved database/table privileges and compare it with the value passed from the front end, keeping the minimum
        # Looping again is cheap for the same reason as above
for table in table_ref:
priv_limit = _priv_limit(user, instance, db_name=table['db'], tb_name=table['table'])
limit_num = min(priv_limit, limit_num) if limit_num else priv_limit
result['data']['limit_num'] = limit_num
except SyntaxError as msg:
result['status'] = 1
result['msg'] = f"SQL语法错误,{msg}"
return result
except Exception as msg:
        # If table-level validation failed, fall back to database-level checks
        # First collect the databases referenced by the statement
if instance.db_type in ['redis', 'mssql']:
dbs = [db_name]
else:
dbs = [i['schema'].strip('`') for i in extract_tables(sql_content) if i['schema'] is not None]
dbs.append(db_name)
        # Deduplicate databases
dbs = list(set(dbs))
        # Sort for stable ordering
dbs.sort()
        # Check database privileges; return immediately if any is missing
for db_name in dbs:
if not _db_priv(user, instance, db_name):
result['status'] = 1
result['msg'] = f"你无{db_name}数据库的查询权限!请先到查询权限管理进行申请"
return result
        # With privileges on all databases, resolve the smallest limit
for db_name in dbs:
priv_limit = _priv_limit(user, instance, db_name=db_name)
limit_num = min(priv_limit, limit_num) if limit_num else priv_limit
result['data']['limit_num'] = limit_num
        # For MySQL instances, the query_check setting decides how to proceed
if instance.db_type == 'mysql':
            # query_check enabled: refuse to execute
if SysConfig().get('query_check'):
result['status'] = 1
result['msg'] = f"无法校验查询语句权限,请检查语法是否正确或联系管理员,错误信息:{msg}"
return result
            # query_check disabled: mark the permission check as skipped and allow execution
else:
result['data']['priv_check'] = False
return result
@permission_required('sql.menu_queryapplylist', raise_exception=True)
def query_priv_apply_list(request):
"""
    Return the list of query-permission applications.
:param request:
:return:
"""
user = request.user
limit = int(request.POST.get('limit', 0))
offset = int(request.POST.get('offset', 0))
limit = offset + limit
search = request.POST.get('search', '')
query_privs = QueryPrivilegesApply.objects.all()
    # Apply the search filter; fuzzy match on title and user
if search:
query_privs = query_privs.filter(Q(title__icontains=search) | Q(user_display__icontains=search))
    # Administrators can see everything
if user.is_superuser:
query_privs = query_privs
    # Users with review permission can see all tickets in their resource groups
elif user.has_perm('sql.query_review'):
        # First get the resource groups the user belongs to
group_list = user_groups(user)
group_ids = [group.group_id for group in group_list]
query_privs = query_privs.filter(group_id__in=group_ids)
    # Everyone else only sees their own tickets
else:
query_privs = query_privs.filter(user_name=user.username)
count = query_privs.count()
lists = query_privs.order_by('-apply_id')[offset:limit].values(
'apply_id', 'title', 'instance__instance_name', 'db_list', 'priv_type', 'table_list', 'limit_num', 'valid_date',
'user_display', 'status', 'create_time', 'group_name'
)
    # Serialize the QuerySet
rows = [row for row in lists]
result = {"total": count, "rows": rows}
    # Return the query result
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
@permission_required('sql.query_applypriv', raise_exception=True)
def query_priv_apply(request):
"""
    Apply for query permissions.
:param request:
:return:
"""
title = request.POST['title']
instance_name = request.POST.get('instance_name')
group_name = request.POST.get('group_name')
group_id = ResourceGroup.objects.get(group_name=group_name).group_id
priv_type = request.POST.get('priv_type')
db_name = request.POST.get('db_name')
db_list = request.POST.getlist('db_list[]')
table_list = request.POST.getlist('table_list[]')
valid_date = request.POST.get('valid_date')
limit_num = request.POST.get('limit_num')
    # Get the requesting user
user = request.user
    # Server-side parameter validation
result = {'status': 0, 'msg': 'ok', 'data': []}
if int(priv_type) == 1:
if not (title and instance_name and db_list and valid_date and limit_num):
result['status'] = 1
result['msg'] = '请填写完整'
return HttpResponse(json.dumps(result), content_type='application/json')
elif int(priv_type) == 2:
if not (title and instance_name and db_name and valid_date and table_list and limit_num):
result['status'] = 1
result['msg'] = '请填写完整'
return HttpResponse(json.dumps(result), content_type='application/json')
try:
user_instances(request.user, tag_codes=['can_read']).get(instance_name=instance_name)
except Instance.DoesNotExist:
result['status'] = 1
result['msg'] = '你所在组未关联该实例!'
return HttpResponse(json.dumps(result), content_type='application/json')
    # Database-level privileges
ins = Instance.objects.get(instance_name=instance_name)
if int(priv_type) == 1:
        # Check whether the applicant already has query privileges on the database
for db_name in db_list:
if _db_priv(user, ins, db_name):
result['status'] = 1
result['msg'] = f'你已拥有{instance_name}实例{db_name}库权限,不能重复申请'
return HttpResponse(json.dumps(result), content_type='application/json')
    # Table-level privileges
elif int(priv_type) == 2:
        # First check for an existing database-level privilege
if _db_priv(user, ins, db_name):
result['status'] = 1
result['msg'] = f'你已拥有{instance_name}实例{db_name}库的全部权限,不能重复申请'
return HttpResponse(json.dumps(result), content_type='application/json')
        # Check whether the applicant already has query privileges on the table
for tb_name in table_list:
if _tb_priv(user, ins, db_name, tb_name):
result['status'] = 1
result['msg'] = f'你已拥有{instance_name}实例{db_name}.{tb_name}表的查询权限,不能重复申请'
return HttpResponse(json.dumps(result), content_type='application/json')
    # Use a transaction to keep the data consistent
try:
with transaction.atomic():
            # Save the application to the database
applyinfo = QueryPrivilegesApply(
title=title,
group_id=group_id,
group_name=group_name,
audit_auth_groups=Audit.settings(group_id, WorkflowDict.workflow_type['query']),
user_name=user.username,
user_display=user.display,
instance=ins,
priv_type=int(priv_type),
valid_date=valid_date,
status=WorkflowDict.workflow_status['audit_wait'],
limit_num=limit_num
)
if int(priv_type) == 1:
applyinfo.db_list = ','.join(db_list)
applyinfo.table_list = ''
elif int(priv_type) == 2:
applyinfo.db_list = db_name
applyinfo.table_list = ','.join(table_list)
applyinfo.save()
apply_id = applyinfo.apply_id
            # Create the workflow audit record; query-permission applications use workflow_type=1
audit_result = Audit.add(WorkflowDict.workflow_type['query'], apply_id)
if audit_result['status'] == 0:
                # Update the application status and decide whether to insert the privileges
_query_apply_audit_call_back(apply_id, audit_result['data']['workflow_status'])
except Exception as msg:
logger.error(traceback.format_exc())
result['status'] = 1
result['msg'] = str(msg)
else:
result = audit_result
        # Send notifications
audit_id = Audit.detail_by_workflow_id(workflow_id=apply_id,
workflow_type=WorkflowDict.workflow_type['query']).audit_id
async_task(notify_for_audit, audit_id=audit_id, timeout=60)
return HttpResponse(json.dumps(result), content_type='application/json')
@permission_required('sql.menu_queryapplylist', raise_exception=True)
def user_query_priv(request):
"""
    Manage users' query privileges.
:param request:
:return:
"""
user = request.user
user_display = request.POST.get('user_display', 'all')
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
limit = offset + limit
search = request.POST.get('search', '')
user_query_privs = QueryPrivileges.objects.filter(is_deleted=0, valid_date__gte=datetime.datetime.now())
    # Apply the search filter; fuzzy match on user, database and table
if search:
user_query_privs = user_query_privs.filter(Q(user_display__icontains=search) |
Q(db_name__icontains=search) |
Q(table_name__icontains=search))
    # Filter by user
if user_display != 'all':
user_query_privs = user_query_privs.filter(user_display=user_display)
    # Administrators can see everything
if user.is_superuser:
user_query_privs = user_query_privs
    # Users with management permission can see all tickets in their resource groups
elif user.has_perm('sql.query_mgtpriv'):
        # First get the resource groups the user belongs to
group_list = user_groups(user)
group_ids = [group.group_id for group in group_list]
user_query_privs = user_query_privs.filter(instance__queryprivilegesapply__group_id__in=group_ids)
    # Everyone else only sees their own records
else:
user_query_privs = user_query_privs.filter(user_name=user.username)
privileges_count = user_query_privs.distinct().count()
privileges_list = user_query_privs.distinct().order_by('-privilege_id')[offset:limit].values(
'privilege_id', 'user_display', 'instance__instance_name', 'db_name', 'priv_type',
'table_name', 'limit_num', 'valid_date'
)
    # Serialize the QuerySet
rows = [row for row in privileges_list]
result = {"total": privileges_count, "rows": rows}
    # Return the query result
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
@permission_required('sql.query_mgtpriv', raise_exception=True)
def query_priv_modify(request):
"""
    Modify privilege records.
:param request:
:return:
"""
privilege_id = request.POST.get('privilege_id')
type = request.POST.get('type')
result = {'status': 0, 'msg': 'ok', 'data': []}
    # type=1 deletes the privilege, type=2 modifies it
try:
privilege = QueryPrivileges.objects.get(privilege_id=int(privilege_id))
except QueryPrivileges.DoesNotExist:
result['msg'] = '待操作权限不存在'
result['status'] = 1
return HttpResponse(json.dumps(result), content_type='application/json')
if int(type) == 1:
        # Delete the privilege
privilege.is_deleted = 1
privilege.save(update_fields=['is_deleted'])
return HttpResponse(json.dumps(result), content_type='application/json')
elif int(type) == 2:
        # Modify the privilege
valid_date = request.POST.get('valid_date')
limit_num = request.POST.get('limit_num')
privilege.valid_date = valid_date
privilege.limit_num = limit_num
privilege.save(update_fields=['valid_date', 'limit_num'])
return HttpResponse(json.dumps(result), content_type='application/json')
@permission_required('sql.query_review', raise_exception=True)
def query_priv_audit(request):
"""
    Review query-permission applications.
:param request:
:return:
"""
    # Get the requesting user
user = request.user
apply_id = int(request.POST['apply_id'])
audit_status = int(request.POST['audit_status'])
audit_remark = request.POST.get('audit_remark')
if audit_remark is None:
audit_remark = ''
if Audit.can_review(request.user, apply_id, 1) is False:
context = {'errMsg': '你无权操作当前工单!'}
return render(request, 'error.html', context)
    # Use a transaction to keep the data consistent
try:
with transaction.atomic():
audit_id = Audit.detail_by_workflow_id(workflow_id=apply_id,
workflow_type=WorkflowDict.workflow_type['query']).audit_id
            # Call the workflow API to record the review
audit_result = Audit.audit(audit_id, audit_status, user.username, audit_remark)
            # Update the application status according to the review result
audit_detail = Audit.detail(audit_id)
if audit_detail.workflow_type == WorkflowDict.workflow_type['query']:
                # Update the application status and insert the privileges
_query_apply_audit_call_back(audit_detail.workflow_id, audit_result['data']['workflow_status'])
except Exception as msg:
logger.error(traceback.format_exc())
context = {'errMsg': msg}
return render(request, 'error.html', context)
else:
        # Send notifications
async_task(notify_for_audit, audit_id=audit_id, audit_remark=audit_remark, timeout=60)
return HttpResponseRedirect(reverse('sql:queryapplydetail', args=(apply_id,)))
def _table_ref(sql_content, instance, db_name):
"""
    Parse the syntax tree and return the tables referenced by the statement, used to enforce query permissions.
:param sql_content:
:param instance:
:param db_name:
:return:
"""
if instance.db_type != 'mysql':
raise RuntimeError('Inception Error: 仅支持MySQL实例')
inception_engine = InceptionEngine()
query_tree = inception_engine.query_print(instance=instance, db_name=db_name, sql=sql_content)
table_ref = query_tree.get('table_ref', [])
db_list = [table_info['db'] for table_info in table_ref]
table_list = [table_info['table'] for table_info in table_ref]
    # Cases where parsing produced unusable results
if '' in db_list or '*' in table_list:
raise RuntimeError('Inception Error: 存在空数据库表信息')
if not (db_list or table_list):
raise RuntimeError('Inception Error: 未解析到任何库表信息')
return table_ref
def _db_priv(user, instance, db_name):
"""
    Check whether the user has privileges on the given database.
    :param user: user object
    :param instance: instance object
    :param db_name: database name
    :return: the limit_num of the matching privilege if it exists, otherwise False
    TODO: unify the return type as int and return 0 when absent (in Python 0 == False anyway)
"""
    # Get the user's database-level privileges
user_privileges = QueryPrivileges.objects.filter(user_name=user.username, instance=instance, db_name=str(db_name),
valid_date__gte=datetime.datetime.now(), is_deleted=0,
priv_type=1)
if user.is_superuser:
return int(SysConfig().get('admin_query_limit', 5000))
else:
if user_privileges.exists():
return user_privileges.first().limit_num
return False
def _tb_priv(user, instance, db_name, tb_name):
"""
    Check whether the user has privileges on the given table.
    :param user: user object
    :param instance: instance object
    :param db_name: database name
    :param tb_name: table name
    :return: the limit_num of the matching privilege if it exists, otherwise False
"""
    # Get the user's table-level privileges
user_privileges = QueryPrivileges.objects.filter(user_name=user.username, instance=instance, db_name=str(db_name),
table_name=str(tb_name), valid_date__gte=datetime.datetime.now(),
is_deleted=0, priv_type=2)
if user.is_superuser:
return int(SysConfig().get('admin_query_limit', 5000))
else:
if user_privileges.exists():
return user_privileges.first().limit_num
return False
def _priv_limit(user, instance, db_name, tb_name=None):
"""
    Return the smallest limit among the user's applicable query privileges, used to cap the result set.
    :param db_name:
    :param tb_name: optional; when omitted, only the database-level privilege is considered
:return:
"""
    # Get the database/table privilege limits
db_limit_num = _db_priv(user, instance, db_name)
if tb_name:
tb_limit_num = _tb_priv(user, instance, db_name, tb_name)
else:
tb_limit_num = None
    # Return the minimum
if db_limit_num and tb_limit_num:
return min(db_limit_num, tb_limit_num)
elif db_limit_num:
return db_limit_num
elif tb_limit_num:
return tb_limit_num
else:
raise RuntimeError('用户无任何有效权限!')
def _query_apply_audit_call_back(apply_id, workflow_status):
"""
    Workflow audit callback for query-permission applications.
    :param apply_id: application id
    :param workflow_status: review result
:return:
"""
    # Update the application status
apply_info = QueryPrivilegesApply.objects.get(apply_id=apply_id)
apply_info.status = workflow_status
apply_info.save()
    # On approval, insert the privileges in bulk to reduce overhead
if workflow_status == WorkflowDict.workflow_status['audit_success']:
apply_queryset = QueryPrivilegesApply.objects.get(apply_id=apply_id)
        # Database-level privileges
if apply_queryset.priv_type == 1:
insert_list = [QueryPrivileges(
user_name=apply_queryset.user_name,
user_display=apply_queryset.user_display,
instance=apply_queryset.instance,
db_name=db_name,
table_name=apply_queryset.table_list, valid_date=apply_queryset.valid_date,
limit_num=apply_queryset.limit_num, priv_type=apply_queryset.priv_type) for db_name in
apply_queryset.db_list.split(',')]
        # Table-level privileges
elif apply_queryset.priv_type == 2:
insert_list = [QueryPrivileges(
user_name=apply_queryset.user_name,
user_display=apply_queryset.user_display,
instance=apply_queryset.instance,
db_name=apply_queryset.db_list,
table_name=table_name, valid_date=apply_queryset.valid_date,
limit_num=apply_queryset.limit_num, priv_type=apply_queryset.priv_type) for table_name in
apply_queryset.table_list.split(',')]
QueryPrivileges.objects.bulk_create(insert_list)
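# ---------------------------------------------------------------------------
# Editorial sketch, not part of Archery: the decision order implemented by
# query_priv_check() and _priv_limit() above, restated without the Django ORM
# so it can be read (and run) in isolation. The function name, its plain-value
# parameters and the 5000 default are illustrative assumptions; the real code
# reads the admin limit from SysConfig and the privileges from QueryPrivileges.
# ---------------------------------------------------------------------------
def _example_effective_limit(is_admin, db_limit, tb_limit, requested_limit, admin_limit=5000):
    """Return (allowed, effective_limit) following the rules above."""
    if is_admin:
        # administrators are only capped by the configured admin limit
        return True, min(admin_limit, requested_limit) if requested_limit else admin_limit
    if not db_limit and not tb_limit:
        # neither database-level nor table-level privilege: refuse the query
        return False, 0
    # otherwise the smallest applicable privilege limit wins, further capped by the request
    priv_limit = min(limit for limit in (db_limit, tb_limit) if limit)
    return True, min(priv_limit, requested_limit) if requested_limit else priv_limit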
| 37.330827
| 120
| 0.642095
|
0d99fea6f98bbab78299b21a188a76390bbf9391
| 37,112
|
py
|
Python
|
third_party/catapult/dashboard/dashboard/start_try_job_test.py
|
maidiHaitai/haitaibrowser
|
a232a56bcfb177913a14210e7733e0ea83a6b18d
|
[
"BSD-3-Clause"
] | 1
|
2020-09-15T08:43:34.000Z
|
2020-09-15T08:43:34.000Z
|
third_party/catapult/dashboard/dashboard/start_try_job_test.py
|
maidiHaitai/haitaibrowser
|
a232a56bcfb177913a14210e7733e0ea83a6b18d
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/catapult/dashboard/dashboard/start_try_job_test.py
|
maidiHaitai/haitaibrowser
|
a232a56bcfb177913a14210e7733e0ea83a6b18d
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import unittest
import mock
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import can_bisect
from dashboard import namespaced_stored_object
from dashboard import rietveld_service
from dashboard import start_try_job
from dashboard import testing_common
from dashboard.models import bug_data
from dashboard.models import graph_data
from dashboard.models import try_job
# TODO(qyearsley): Shorten this module.
# See https://github.com/catapult-project/catapult/issues/1917
# pylint: disable=too-many-lines
# Below is a series of test strings which may contain long lines.
# pylint: disable=line-too-long
_EXPECTED_BISECT_CONFIG_DIFF = """config = {
- 'command': '',
- 'good_revision': '',
- 'bad_revision': '',
- 'metric': '',
- 'repeat_count':'',
- 'max_time_minutes': '',
+ "bad_revision": "215828",
+ "bisect_mode": "mean",
+ "bug_id": "12345",
+ "builder_type": "",
+ "command": "tools/perf/run_benchmark -v --browser=release --output-format=buildbot --upload-results --also-run-disabled-tests dromaeo.jslibstylejquery",
+ "good_revision": "215806",
+ "max_time_minutes": "20",
+ "metric": "jslib/jslib",
+ "repeat_count": "20",
+ "target_arch": "ia32",
+ "try_job_id": 1
}
"""
_EXPECTED_BISECT_CONFIG_DIFF_FOR_INTERNAL_TEST = """config = {
- 'command': '',
- 'good_revision': '',
- 'bad_revision': '',
- 'metric': '',
- 'repeat_count':'',
- 'max_time_minutes': '',
+ "bad_revision": "f14a8f733cce874d5d66e8e6b86e75bbac240b0e",
+ "bisect_mode": "mean",
+ "bug_id": "12345",
+ "builder_type": "",
+ "command": "tools/perf/run_benchmark -v --browser=android-chrome --upload-results --also-run-disabled-tests start_with_url.cold.startup_pages",
+ "good_revision": "d82ccc77c8a86ce9893a8035fb55aca666f044c8",
+ "max_time_minutes": "20",
+ "metric": "foreground_tab_request_start/foreground_tab_request_start",
+ "repeat_count": "20",
+ "target_arch": "ia32",
+ "try_job_id": 1
}
"""
_EXPECTED_BISECT_CONFIG_DIFF_WITH_ARCHIVE = """config = {
- 'command': '',
- 'good_revision': '',
- 'bad_revision': '',
- 'metric': '',
- 'repeat_count':'',
- 'max_time_minutes': '',
+ "bad_revision": "215828",
+ "bisect_mode": "mean",
+ "bug_id": "12345",
+ "builder_type": "perf",
+ "command": "tools/perf/run_benchmark -v --browser=release --output-format=buildbot --upload-results --also-run-disabled-tests dromaeo.jslibstylejquery",
+ "good_revision": "215806",
+ "max_time_minutes": "20",
+ "metric": "jslib/jslib",
+ "repeat_count": "20",
+ "target_arch": "ia32",
+ "try_job_id": 1
}
"""
_EXPECTED_PERF_CONFIG_DIFF = """config = {
- 'command': '',
- 'metric': '',
- 'repeat_count': '',
- 'max_time_minutes': '',
+ "bad_revision": "215828",
+ "command": "tools/perf/run_benchmark -v --browser=release --output-format=buildbot --upload-results --also-run-disabled-tests dromaeo.jslibstylejquery",
+ "good_revision": "215806",
+ "max_time_minutes": "60",
+ "repeat_count": "1",
+ "try_job_id": 1
}
"""
_FAKE_XSRF_TOKEN = '1234567890'
_ISSUE_CREATED_RESPONSE = """Issue created. https://test-rietveld.appspot.com/33001
1
1001 filename
"""
_BISECT_CONFIG_CONTENTS = """# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
\"\"\"Config file for Run Performance Test Bisect Tool
This script is intended for use by anyone that wants to run a remote bisection
on a range of revisions to look for a performance regression. Modify the config
below and add the revision range, performance command, and metric. You can then
run a git try <bot>.
Changes to this file should never be submitted.
Args:
'command': This is the full command line to pass to the
bisect-perf-regression.py script in order to execute the test.
'good_revision': An svn or git revision where the metric hadn't regressed yet.
'bad_revision': An svn or git revision sometime after the metric had
regressed.
'metric': The name of the metric to parse out from the results of the
performance test. You can retrieve the metric by looking at the stdio of
the performance test. Look for lines of the format:
RESULT <graph>: <trace>= <value> <units>
The metric name is "<graph>/<trace>".
'repeat_count': The number of times to repeat the performance test.
'max_time_minutes': The script will attempt to run the performance test
"repeat_count" times, unless it exceeds "max_time_minutes".
Sample config:
config = {
'command': './out/Release/performance_ui_tests' +
' --gtest_filter=PageCyclerTest.Intl1File',
'good_revision': '179755',
'bad_revision': '179782',
'metric': 'times/t',
'repeat_count': '20',
'max_time_minutes': '20',
}
On Windows:
config = {
'command': 'tools/perf/run_benchmark -v --browser=release kraken',
'good_revision': '185319',
'bad_revision': '185364',
'metric': 'Total/Total',
'repeat_count': '20',
'max_time_minutes': '20',
}
On ChromeOS:
- Script accepts either ChromeOS versions, or unix timestamps as revisions.
- You don't need to specify --identity and --remote, they will be added to
the command using the bot's BISECT_CROS_IP and BISECT_CROS_BOARD values.
config = {
'command': './tools/perf/run_benchmark -v '\
'--browser=cros-chrome-guest '\
'dromaeo tools/perf/page_sets/dromaeo/jslibstylejquery.json',
'good_revision': '4086.0.0',
'bad_revision': '4087.0.0',
'metric': 'jslib/jslib',
'repeat_count': '20',
'max_time_minutes': '20',
}
\"\"\"
config = {
'command': '',
'good_revision': '',
'bad_revision': '',
'metric': '',
'repeat_count':'',
'max_time_minutes': '',
}
# Workaround git try issue, see crbug.com/257689"""
_PERF_CONFIG_CONTENTS = """# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
\"\"\"Config file for Run Performance Test Bot
This script is intended for use by anyone that wants to run a remote performance
test. Modify the config below and add the command to run the performance test,
the metric you're interested in, and repeat/discard parameters. You can then
run a git try <bot>.
Changes to this file should never be submitted.
Args:
'command': This is the full command line to pass to the
bisect-perf-regression.py script in order to execute the test.
'metric': The name of the metric to parse out from the results of the
performance test. You can retrieve the metric by looking at the stdio of
the performance test. Look for lines of the format:
RESULT <graph>: <trace>= <value> <units>
The metric name is "<graph>/<trace>".
'repeat_count': The number of times to repeat the performance test.
'max_time_minutes': The script will attempt to run the performance test
"repeat_count" times, unless it exceeds "max_time_minutes".
Sample config:
config = {
'command': './tools/perf/run_benchmark --browser=release smoothness.key_mobile_sites',
'metric': 'mean_frame_time/mean_frame_time',
'repeat_count': '20',
'max_time_minutes': '20',
}
On Windows:
config = {
'command': 'tools/perf/run_benchmark -v --browser=release \
smoothness.key_mobile_sites',
'metric': 'mean_frame_time/mean_frame_time',
'repeat_count': '20',
'max_time_minutes': '20',
}
On ChromeOS:
- Script accepts either ChromeOS versions, or unix timestamps as revisions.
- You don't need to specify --identity and --remote, they will be added to
the command using the bot's BISECT_CROS_IP and BISECT_CROS_BOARD values.
config = {
'command': './tools/perf/run_benchmark -v '\
'--browser=cros-chrome-guest '\
'smoothness.key_mobile_sites',
'metric': 'mean_frame_time/mean_frame_time',
'repeat_count': '20',
'max_time_minutes': '20',
}
\"\"\"
config = {
'command': '',
'metric': '',
'repeat_count': '',
'max_time_minutes': '',
}
# Workaround git try issue, see crbug.com/257689"""
# pylint: enable=line-too-long
# These globals are set in tests and checked in _MockMakeRequest.
_EXPECTED_CONFIG_DIFF = None
_TEST_EXPECTED_BOT = None
_TEST_EXPECTED_CONFIG_CONTENTS = None
def _MockFetch(url=None):
if start_try_job._BISECT_CONFIG_PATH in url:
return testing_common.FakeResponseObject(
200, base64.encodestring(_BISECT_CONFIG_CONTENTS))
elif start_try_job._PERF_CONFIG_PATH in url:
return testing_common.FakeResponseObject(
200, base64.encodestring(_PERF_CONFIG_CONTENTS))
def _MockFailedFetch(url=None): # pylint: disable=unused-argument
return testing_common.FakeResponseObject(404, {})
def _MockMakeRequest(path, *args, **kwargs): # pylint: disable=unused-argument
"""Mocks out a request, returning a canned response."""
if path.endswith('xsrf_token'):
assert kwargs['headers']['X-Requesting-XSRF-Token'] == 1
return testing_common.FakeResponseObject(200, _FAKE_XSRF_TOKEN)
if path == 'upload':
assert kwargs['method'] == 'POST'
assert _EXPECTED_CONFIG_DIFF in kwargs['body'], (
'%s\nnot in\n%s\n' % (_EXPECTED_CONFIG_DIFF, kwargs['body']))
return testing_common.FakeResponseObject(200, _ISSUE_CREATED_RESPONSE)
if path == '33001/upload_content/1/1001':
assert kwargs['method'] == 'POST'
assert _TEST_EXPECTED_CONFIG_CONTENTS in kwargs['body']
return testing_common.FakeResponseObject(200, 'Dummy content')
if path == '33001/upload_complete/1':
assert kwargs['method'] == 'POST'
return testing_common.FakeResponseObject(200, 'Dummy content')
if path == '33001/try/1':
assert _TEST_EXPECTED_BOT in kwargs['body']
return testing_common.FakeResponseObject(200, 'Dummy content')
assert False, 'Invalid url %s requested!' % path
class StartBisectTest(testing_common.TestCase):
def setUp(self):
super(StartBisectTest, self).setUp()
app = webapp2.WSGIApplication(
[('/start_try_job', start_try_job.StartBisectHandler)])
self.testapp = webtest.TestApp(app)
namespaced_stored_object.Set(
can_bisect.BISECT_BOT_MAP_KEY,
{
'ChromiumPerf': [
('nexus4', 'android_nexus4_perf_bisect'),
('nexus7', 'android_nexus7_perf_bisect'),
('win8', 'win_8_perf_bisect'),
('xp', 'win_xp_perf_bisect'),
('android', 'android_nexus7_perf_bisect'),
('mac', 'mac_perf_bisect'),
('win', 'win_perf_bisect'),
('linux', 'linux_perf_bisect'),
('', 'linux_perf_bisect'),
],
})
namespaced_stored_object.Set(
start_try_job._BUILDER_TYPES_KEY,
{'ChromiumPerf': 'perf', 'OtherMaster': 'foo'})
namespaced_stored_object.Set(
start_try_job._BOT_BROWSER_MAP_KEY,
[
['android', 'android-chromium'],
['winx64', 'release_x64'],
['win_x64', 'release_x64'],
['', 'release'],
])
namespaced_stored_object.Set(
start_try_job._TESTER_DIRECTOR_MAP_KEY,
{
'ChromiumPerf': {
'linux_perf_tester': 'linux_perf_bisector',
'win64_nv_tester': 'linux_perf_bisector',
}
})
namespaced_stored_object.Set(
start_try_job._MASTER_BUILDBUCKET_MAP_KEY,
{
'ChromiumPerf': 'master.tryserver.chromium.perf'
})
testing_common.SetSheriffDomains(['chromium.org'])
# Add fake Rietveld auth info.
rietveld_config = rietveld_service.RietveldConfig(
id='default_rietveld_config',
client_email='sullivan@chromium.org',
service_account_key='Fake Account Key',
server_url='https://test-rietveld.appspot.com')
rietveld_config.put()
def testPost_InvalidUser_ShowsErrorMessage(self):
self.SetCurrentUser('foo@yahoo.com')
response = self.testapp.post('/start_try_job', {
'test_path': 'ChromiumPerf/win7/morejs/times/page_load_time',
'step': 'prefill-info',
})
self.assertEqual(
{'error': 'User "foo@yahoo.com" not authorized.'},
json.loads(response.body))
def testPost_PrefillInfoStep(self):
self.SetCurrentUser('foo@chromium.org')
testing_common.AddTests(
['ChromiumPerf'],
[
'win7',
'android-nexus7',
'chromium-rel-win8-dual',
'chromium-rel-xp-single'
],
{
'page_cycler.morejs': {
'times': {
'page_load_time': {},
'page_load_time_ref': {},
'blog.chromium.org': {},
'dev.chromium.org': {},
'test.blogspot.com': {},
'http___test.com_': {}
},
'vm_final_size_renderer': {
'ref': {},
'vm_final_size_renderer_extcs1': {}
},
},
'blink_perf': {
'Animation_balls': {}
}
})
tests = graph_data.Test.query().fetch()
for test in tests:
name = test.key.string_id()
if name in ('times', 'page_cycler.morejs', 'blink_perf'):
continue
test.has_rows = True
ndb.put_multi(tests)
response = self.testapp.post('/start_try_job', {
'test_path': ('ChromiumPerf/win7/page_cycler.morejs/'
'times/page_load_time'),
'step': 'prefill-info',
})
info = json.loads(response.body)
self.assertEqual('win_perf_bisect', info['bisect_bot'])
self.assertEqual('foo@chromium.org', info['email'])
self.assertEqual('page_cycler.morejs', info['suite'])
self.assertEqual('times/page_load_time', info['default_metric'])
self.assertEqual('ChromiumPerf', info['master'])
self.assertFalse(info['internal_only'])
self.assertTrue(info['use_archive'])
self.assertEqual(
[
'android_nexus4_perf_bisect',
'android_nexus7_perf_bisect',
'linux_perf_bisect',
'mac_perf_bisect',
'win_8_perf_bisect',
'win_perf_bisect',
'win_xp_perf_bisect',
], info['all_bots'])
self.assertEqual(
[
'times/blog.chromium.org',
'times/dev.chromium.org',
'times/http___test.com_',
'times/page_load_time',
'times/test.blogspot.com'
],
info['all_metrics'])
response = self.testapp.post('/start_try_job', {
'test_path': ('ChromiumPerf/win7/page_cycler.morejs/'
'vm_final_size_renderer'),
'step': 'prefill-info',
})
info = json.loads(response.body)
self.assertEqual(
['vm_final_size_renderer/vm_final_size_renderer',
'vm_final_size_renderer/vm_final_size_renderer_extcs1'],
info['all_metrics'])
response = self.testapp.post('/start_try_job', {
'test_path': 'ChromiumPerf/win7/blink_perf/Animation_balls',
'step': 'prefill-info',
})
info = json.loads(response.body)
self.assertEqual('Animation_balls/Animation_balls', info['default_metric'])
response = self.testapp.post('/start_try_job', {
'test_path': 'ChromiumPerf/android-nexus7/blink_perf/Animation_balls',
'step': 'prefill-info',
})
info = json.loads(response.body)
self.assertEqual('android_nexus7_perf_bisect', info['bisect_bot'])
response = self.testapp.post('/start_try_job', {
'test_path': ('ChromiumPerf/chromium-rel-win8-dual/'
'blink_perf/Animation_balls'),
'step': 'prefill-info',
})
info = json.loads(response.body)
self.assertEqual('win_8_perf_bisect', info['bisect_bot'])
response = self.testapp.post('/start_try_job', {
'test_path': ('ChromiumPerf/chromium-rel-xp-single/'
'blink_perf/Animation_balls'),
'step': 'prefill-info',
})
info = json.loads(response.body)
self.assertEqual('win_xp_perf_bisect', info['bisect_bot'])
def _TestGetBisectConfig(self, parameters, expected_config_dict):
"""Helper method to test get-config requests."""
response = start_try_job.GetBisectConfig(**parameters)
self.assertEqual(expected_config_dict, response)
def testGetConfig_EmptyUseArchiveParameter_GivesEmptyBuilderType(self):
self._TestGetBisectConfig(
{
'bisect_bot': 'linux_perf_bisect',
'master_name': 'ChromiumPerf',
'suite': 'page_cycler.moz',
'metric': 'times/page_load_time',
'good_revision': '265549',
'bad_revision': '265556',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'use_archive': '',
},
{
'command': ('tools/perf/run_benchmark -v '
'--browser=release --output-format=buildbot '
'--upload-results '
'--also-run-disabled-tests '
'page_cycler.moz'),
'good_revision': '265549',
'bad_revision': '265556',
'metric': 'times/page_load_time',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'builder_type': '',
'target_arch': 'ia32',
'bisect_mode': 'mean',
})
def testGetConfig_UseBuildbucket_ChangesTelemetryOutputFormat(self):
self._TestGetBisectConfig(
{
'bisect_bot': 'linux_perf_tester',
'master_name': 'ChromiumPerf',
'suite': 'page_cycler.moz',
'metric': 'times/page_load_time',
'good_revision': '265549',
'bad_revision': '265556',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'use_archive': 'true',
'use_buildbucket': True,
},
{
'command': ('src/tools/perf/run_benchmark -v '
'--browser=release --output-format=chartjson '
'--upload-results '
'--also-run-disabled-tests '
'page_cycler.moz'),
'good_revision': '265549',
'bad_revision': '265556',
'metric': 'times/page_load_time',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'builder_type': 'perf',
'target_arch': 'ia32',
'bisect_mode': 'mean',
'recipe_tester_name': 'linux_perf_tester',
})
def testGetConfig_NonEmptyUseArchiveParameter_GivesNonEmptyBuilderType(self):
# Any non-empty value for use_archive means that archives should be used.
# Even if value of use_archive is "false", archives will still be used!
self._TestGetBisectConfig(
{
'bisect_bot': 'linux_perf_bisect',
'master_name': 'ChromiumPerf',
'suite': 'page_cycler.moz',
'metric': 'times/page_load_time',
'good_revision': '265549',
'bad_revision': '265556',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'use_archive': 'false',
},
{
'command': ('tools/perf/run_benchmark -v '
'--browser=release --output-format=buildbot '
'--upload-results '
'--also-run-disabled-tests '
'page_cycler.moz'),
'good_revision': '265549',
'bad_revision': '265556',
'metric': 'times/page_load_time',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'builder_type': 'perf',
'target_arch': 'ia32',
'bisect_mode': 'mean',
})
def testGetConfig_TelemetryTest(self):
self._TestGetBisectConfig(
{
'bisect_bot': 'win_perf_bisect',
'master_name': 'ChromiumPerf',
'suite': 'page_cycler.morejs',
'metric': 'times/page_load_time',
'good_revision': '12345',
'bad_revision': '23456',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
},
{
'command': ('tools/perf/run_benchmark -v '
'--browser=release --output-format=buildbot '
'--upload-results '
'--also-run-disabled-tests '
'page_cycler.morejs'),
'good_revision': '12345',
'bad_revision': '23456',
'metric': 'times/page_load_time',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'builder_type': '',
'target_arch': 'ia32',
'bisect_mode': 'mean',
})
def testGetConfig_BisectModeSetToReturnCode(self):
self._TestGetBisectConfig(
{
'bisect_bot': 'linux_perf_bisect',
'master_name': 'ChromiumPerf',
'suite': 'page_cycler.moz',
'metric': 'times/page_load_time',
'good_revision': '265549',
'bad_revision': '265556',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'use_archive': '',
'bisect_mode': 'return_code',
},
{
'command': ('tools/perf/run_benchmark -v '
'--browser=release --output-format=buildbot '
'--upload-results '
'--also-run-disabled-tests '
'page_cycler.moz'),
'good_revision': '265549',
'bad_revision': '265556',
'metric': 'times/page_load_time',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'builder_type': '',
'target_arch': 'ia32',
'bisect_mode': 'return_code',
})
def _TestGetConfigCommand(self, expected_command, **params_to_override):
"""Helper method to test the command returned for a get-config request."""
parameters = dict(
{
'bisect_bot': 'linux_perf_bisect',
'suite': 'page_cycler.moz',
'master_name': 'ChromiumPerf',
'metric': 'times/page_load_time',
'good_revision': '265549',
'bad_revision': '265556',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'use_archive': '',
'use_buildbucket': False,
}, **params_to_override)
response = start_try_job.GetBisectConfig(**parameters)
self.assertEqual(expected_command, response.get('command'))
def testGetConfig_AndroidTelemetryTest(self):
self._TestGetConfigCommand(
('tools/perf/run_benchmark -v '
'--browser=android-chromium --output-format=buildbot '
'--upload-results '
'--also-run-disabled-tests '
'page_cycler.morejs'),
bisect_bot='android_nexus7_perf_bisect',
suite='page_cycler.morejs')
def testGetConfig_CCPerftests(self):
self._TestGetConfigCommand(
('./out/Release/cc_perftests '
'--test-launcher-print-test-stdio=always'),
bisect_bot='linux_perf_bisect',
suite='cc_perftests')
def testGetConfig_AndroidCCPerftests(self):
self._TestGetConfigCommand(
'build/android/test_runner.py gtest --release -s cc_perftests',
bisect_bot='android_nexus7_perf_bisect',
suite='cc_perftests')
def testGetConfig_IdbPerf(self):
self._TestGetConfigCommand(
(r'.\out\Release\performance_ui_tests.exe '
'--gtest_filter=IndexedDBTest.Perf'),
bisect_bot='win_perf_bisect',
suite='idb_perf')
def testGetConfig_PerformanceBrowserTests(self):
self._TestGetConfigCommand(
('./out/Release/performance_browser_tests '
'--test-launcher-print-test-stdio=always '
'--enable-gpu'),
bisect_bot='linux_perf_bisect',
suite='performance_browser_tests')
def testGuessBisectBot_FetchesNameFromBisectBotMap(self):
namespaced_stored_object.Set(
can_bisect.BISECT_BOT_MAP_KEY,
{'OtherMaster': [('foo', 'super_foo_bisect_bot')]})
self.assertEqual(
'super_foo_bisect_bot',
start_try_job.GuessBisectBot('OtherMaster', 'foo'))
def testGuessBisectBot_PlatformNotFound_UsesFallback(self):
namespaced_stored_object.Set(
can_bisect.BISECT_BOT_MAP_KEY,
{'OtherMaster': [('foo', 'super_foo_bisect_bot')]})
self.assertEqual(
'linux_perf_bisect',
start_try_job.GuessBisectBot('OtherMaster', 'bar'))
def testGuessBisectBot_TreatsMasterNameAsPrefix(self):
namespaced_stored_object.Set(
can_bisect.BISECT_BOT_MAP_KEY,
{'OtherMaster': [('foo', 'super_foo_bisect_bot')]})
self.assertEqual(
'super_foo_bisect_bot',
start_try_job.GuessBisectBot('OtherMasterFyi', 'foo'))
@mock.patch.object(start_try_job.buildbucket_service, 'PutJob',
mock.MagicMock(return_value='1234567'))
def testPerformBuildbucketBisect(self):
self.SetCurrentUser('foo@chromium.org')
bug_data.Bug(id=12345).put()
query_parameters = {
'bisect_bot': 'linux_perf_tester',
'suite': 'dromaeo.jslibstylejquery',
'metric': 'jslib/jslib',
'good_revision': '215806',
'bad_revision': '215828',
'repeat_count': '20',
'max_time_minutes': '20',
'bug_id': 12345,
'use_archive': '',
'step': 'perform-bisect',
}
response = self.testapp.post('/start_try_job', query_parameters)
response_dict = json.loads(response.body)
self.assertEqual(response_dict['issue_id'], '1234567')
self.assertIn('1234567', response_dict['issue_url'])
job_entities = try_job.TryJob.query(
try_job.TryJob.buildbucket_job_id == '1234567').fetch()
self.assertEqual(1, len(job_entities))
self.assertTrue(job_entities[0].use_buildbucket)
def testPerformBisect_InvalidConfig_ReturnsError(self):
bisect_job = try_job.TryJob(
bot='foo',
config='config = {}',
master_name='ChromiumPerf',
internal_only=False,
job_type='bisect',
use_buildbucket=True)
self.assertEqual(
{'error': 'No "recipe_tester_name" given.'},
start_try_job.PerformBisect(bisect_job))
@mock.patch(
'google.appengine.api.urlfetch.fetch',
mock.MagicMock(side_effect=_MockFetch))
@mock.patch.object(
start_try_job.rietveld_service.RietveldService, 'MakeRequest',
mock.MagicMock(side_effect=_MockMakeRequest))
def testPerformBisect(self):
self.SetCurrentUser('foo@chromium.org')
# Create bug.
bug_data.Bug(id=12345).put()
query_parameters = {
'bisect_bot': 'win_perf_bisect',
'suite': 'dromaeo.jslibstylejquery',
'metric': 'jslib/jslib',
'good_revision': '215806',
'bad_revision': '215828',
'repeat_count': '20',
'max_time_minutes': '20',
'bug_id': 12345,
'use_archive': '',
'step': 'perform-bisect',
}
global _EXPECTED_CONFIG_DIFF
global _TEST_EXPECTED_BOT
global _TEST_EXPECTED_CONFIG_CONTENTS
_EXPECTED_CONFIG_DIFF = _EXPECTED_BISECT_CONFIG_DIFF
_TEST_EXPECTED_BOT = 'win_perf_bisect'
_TEST_EXPECTED_CONFIG_CONTENTS = _BISECT_CONFIG_CONTENTS
response = self.testapp.post('/start_try_job', query_parameters)
self.assertEqual(
json.dumps({'issue_id': '33001',
'issue_url': 'https://test-rietveld.appspot.com/33001'}),
response.body)
@mock.patch(
'google.appengine.api.urlfetch.fetch',
mock.MagicMock(side_effect=_MockFetch))
@mock.patch.object(
start_try_job.rietveld_service.RietveldService, 'MakeRequest',
mock.MagicMock(side_effect=_MockMakeRequest))
def testPerformPerfTry(self):
self.SetCurrentUser('foo@chromium.org')
query_parameters = {
'bisect_bot': 'linux_perf_bisect',
'suite': 'dromaeo.jslibstylejquery',
'good_revision': '215806',
'bad_revision': '215828',
'step': 'perform-perf-try',
'rerun_option': '',
}
global _EXPECTED_CONFIG_DIFF
global _TEST_EXPECTED_CONFIG_CONTENTS
global _TEST_EXPECTED_BOT
_EXPECTED_CONFIG_DIFF = _EXPECTED_PERF_CONFIG_DIFF
_TEST_EXPECTED_CONFIG_CONTENTS = _PERF_CONFIG_CONTENTS
_TEST_EXPECTED_BOT = 'linux_perf_bisect'
response = self.testapp.post('/start_try_job', query_parameters)
self.assertEqual(json.dumps({'issue_id': '33001'}), response.body)
@mock.patch(
'google.appengine.api.urlfetch.fetch',
mock.MagicMock(side_effect=_MockFailedFetch))
@mock.patch.object(
start_try_job.rietveld_service.RietveldService, 'MakeRequest',
mock.MagicMock(side_effect=_MockMakeRequest))
def testPerformBisectStep_DeleteJobOnFailedBisect(self):
self.SetCurrentUser('foo@chromium.org')
query_parameters = {
'bisect_bot': 'linux_perf_bisect',
'suite': 'dromaeo.jslibstylejquery',
'good_revision': '215806',
'bad_revision': '215828',
'rerun_option': '',
}
global _EXPECTED_CONFIG_DIFF
global _TEST_EXPECTED_CONFIG_CONTENTS
global _TEST_EXPECTED_BOT
_EXPECTED_CONFIG_DIFF = _EXPECTED_PERF_CONFIG_DIFF
_TEST_EXPECTED_CONFIG_CONTENTS = _PERF_CONFIG_CONTENTS
_TEST_EXPECTED_BOT = 'linux_perf_bisect'
query_parameters['step'] = 'perform-bisect'
self.testapp.post('/start_try_job', query_parameters)
try_jobs = try_job.TryJob.query().fetch()
self.assertEqual(0, len(try_jobs))
@mock.patch(
'google.appengine.api.urlfetch.fetch',
mock.MagicMock(side_effect=_MockFailedFetch))
@mock.patch.object(
start_try_job.rietveld_service.RietveldService, 'MakeRequest',
mock.MagicMock(side_effect=_MockMakeRequest))
def testPerformPerfTryStep_DeleteJobOnFailedBisect(self):
self.SetCurrentUser('foo@chromium.org')
query_parameters = {
'bisect_bot': 'linux_perf_bisect',
'suite': 'dromaeo.jslibstylejquery',
'good_revision': '215806',
'bad_revision': '215828',
'rerun_option': '',
}
global _EXPECTED_CONFIG_DIFF
global _TEST_EXPECTED_CONFIG_CONTENTS
global _TEST_EXPECTED_BOT
_EXPECTED_CONFIG_DIFF = _EXPECTED_PERF_CONFIG_DIFF
_TEST_EXPECTED_CONFIG_CONTENTS = _PERF_CONFIG_CONTENTS
_TEST_EXPECTED_BOT = 'linux_perf_bisect'
query_parameters['step'] = 'perform-perf-try'
self.testapp.post('/start_try_job', query_parameters)
try_jobs = try_job.TryJob.query().fetch()
self.assertEqual(0, len(try_jobs))
@mock.patch(
'google.appengine.api.urlfetch.fetch',
mock.MagicMock(side_effect=_MockFetch))
@mock.patch(
'google.appengine.api.app_identity.get_default_version_hostname',
mock.MagicMock(return_value='my-dashboard.appspot.com'))
@mock.patch.object(start_try_job.buildbucket_service, 'PutJob',
mock.MagicMock(return_value='1234567'))
def testPerformBisectWithArchive(self):
self.SetCurrentUser('foo@chromium.org')
# Create bug.
bug_data.Bug(id=12345).put()
query_parameters = {
'bisect_bot': 'linux_perf_tester',
'suite': 'dromaeo.jslibstylejquery',
'metric': 'jslib/jslib',
'good_revision': '215806',
'bad_revision': '215828',
'repeat_count': '20',
'max_time_minutes': '20',
'bug_id': 12345,
'use_archive': 'true',
'bisect_mode': 'mean',
'step': 'perform-bisect',
}
response = self.testapp.post('/start_try_job', query_parameters)
self.assertEqual(
json.dumps({'issue_id': '1234567',
'issue_url': ('https://my-dashboard.appspot.com'
'/buildbucket_job_status/1234567')}),
response.body)
def testGetBisectConfig_UseArchive(self):
self._TestGetBisectConfig(
{
'bisect_bot': 'win_perf_bisect',
'master_name': 'ChromiumPerf',
'suite': 'page_cycler.morejs',
'metric': 'times/page_load_time',
'good_revision': '12345',
'bad_revision': '23456',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'use_archive': 'true',
},
{
'command': ('tools/perf/run_benchmark -v '
'--browser=release --output-format=buildbot '
'--upload-results '
'--also-run-disabled-tests '
'page_cycler.morejs'),
'good_revision': '12345',
'bad_revision': '23456',
'metric': 'times/page_load_time',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'builder_type': 'perf',
'target_arch': 'ia32',
'bisect_mode': 'mean',
})
def testGetBisectConfig_WithTargetArch(self):
self._TestGetBisectConfig(
{
'bisect_bot': 'win_x64_perf_bisect',
'master_name': 'ChromiumPerf',
'suite': 'page_cycler.moz',
'metric': 'times/page_load_time',
'good_revision': '265549',
'bad_revision': '265556',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'use_archive': ''
},
{
'command': ('tools/perf/run_benchmark -v '
'--browser=release_x64 --output-format=buildbot '
'--upload-results '
'--also-run-disabled-tests '
'page_cycler.moz'),
'good_revision': '265549',
'bad_revision': '265556',
'metric': 'times/page_load_time',
'repeat_count': '15',
'max_time_minutes': '8',
'bug_id': '-1',
'builder_type': '',
'target_arch': 'x64',
'bisect_mode': 'mean',
})
def testGetConfig_UseBuildbucket_AndroidTelemetryTest(self):
self._TestGetConfigCommand(
('src/tools/perf/run_benchmark -v '
'--browser=android-chromium --output-format=chartjson '
'--upload-results '
'--also-run-disabled-tests '
'page_cycler.morejs'),
bisect_bot='android_nexus7_perf_bisect',
suite='page_cycler.morejs',
use_buildbucket=True)
def testGetConfig_UseBuildbucket_CCPerftests(self):
self._TestGetConfigCommand(
('./src/out/Release/cc_perftests '
'--test-launcher-print-test-stdio=always'),
bisect_bot='linux_perf_bisect',
suite='cc_perftests',
use_buildbucket=True)
def testGetConfig_UseBuildbucket_AndroidCCPerftests(self):
self._TestGetConfigCommand(
'src/build/android/test_runner.py gtest --release -s cc_perftests',
bisect_bot='android_nexus7_perf_bisect',
suite='cc_perftests',
use_buildbucket=True)
def testGetConfig_UseBuildbucket_IdbPerf(self):
self._TestGetConfigCommand(
('.\\src\\out\\Release\\performance_ui_tests.exe '
'--gtest_filter=IndexedDBTest.Perf'),
bisect_bot='win_perf_bisect',
suite='idb_perf',
use_buildbucket=True)
def testGetConfig_UseBuildbucket_PerformanceBrowserTests(self):
self._TestGetConfigCommand(
('./src/out/Release/performance_browser_tests '
'--test-launcher-print-test-stdio=always '
'--enable-gpu'),
bisect_bot='linux_perf_bisect',
suite='performance_browser_tests',
use_buildbucket=True)
def testGetConfig_X64Bot_UsesX64ReleaseDirectory(self):
self._TestGetConfigCommand(
('.\\src\\out\\Release_x64\\performance_browser_tests.exe '
'--test-launcher-print-test-stdio=always '
'--enable-gpu'),
bisect_bot='winx64nvidia_perf_bisect',
suite='performance_browser_tests',
use_buildbucket=True)
def testGuessMetric_SummaryMetricWithNoTIRLabel(self):
testing_common.AddTests(
['M'], ['b'],
{'benchmark': {'chart': {'page': {}}}})
self.assertEqual(
'chart/chart',
start_try_job.GuessMetric('M/b/benchmark/chart'))
def testGuessMetric_SummaryMetricWithTIRLabel(self):
testing_common.AddTests(
['M'], ['b'],
{'benchmark': {'chart': {'tir_label': {'page': {}}}}})
self.assertEqual(
'tir_label-chart/tir_label-chart',
start_try_job.GuessMetric('M/b/benchmark/chart/tir_label'))
if __name__ == '__main__':
unittest.main()
| 35.143939
| 155
| 0.622305
|
87036fec380d6dd77cfb803bba9064346f638e1c
| 30,290
|
py
|
Python
|
dask/dataframe/shuffle.py
|
galipremsagar/dask
|
134182e05009dbb20bd8e59ccf8bf771e5d4399a
|
[
"BSD-3-Clause"
] | null | null | null |
dask/dataframe/shuffle.py
|
galipremsagar/dask
|
134182e05009dbb20bd8e59ccf8bf771e5d4399a
|
[
"BSD-3-Clause"
] | null | null | null |
dask/dataframe/shuffle.py
|
galipremsagar/dask
|
134182e05009dbb20bd8e59ccf8bf771e5d4399a
|
[
"BSD-3-Clause"
] | null | null | null |
import contextlib
import logging
import math
import shutil
import tempfile
import uuid
import numpy as np
import pandas as pd
import tlz as toolz
from .. import base, config
from ..base import compute, compute_as_if_collection, is_dask_collection, tokenize
from ..highlevelgraph import HighLevelGraph
from ..layers import ShuffleLayer, SimpleShuffleLayer
from ..sizeof import sizeof
from ..utils import M, digit
from . import methods
from .core import DataFrame, Series, _Frame, map_partitions, new_dd_object
from .utils import group_split_dispatch, hash_object_dispatch
logger = logging.getLogger(__name__)
def _calculate_divisions(
df, partition_col, repartition, npartitions, upsample=1.0, partition_size=128e6
):
"""
Utility function to calculate divisions for calls to `map_partitions`
"""
sizes = df.map_partitions(sizeof) if repartition else []
divisions = partition_col._repartition_quantiles(npartitions, upsample=upsample)
mins = partition_col.map_partitions(M.min)
maxes = partition_col.map_partitions(M.max)
divisions, sizes, mins, maxes = base.compute(divisions, sizes, mins, maxes)
divisions = methods.tolist(divisions)
if type(sizes) is not list:
sizes = methods.tolist(sizes)
mins = methods.tolist(mins)
maxes = methods.tolist(maxes)
empty_dataframe_detected = pd.isnull(divisions).all()
if repartition or empty_dataframe_detected:
total = sum(sizes)
npartitions = max(math.ceil(total / partition_size), 1)
npartitions = min(npartitions, df.npartitions)
n = len(divisions)
try:
divisions = np.interp(
x=np.linspace(0, n - 1, npartitions + 1),
xp=np.linspace(0, n - 1, n),
fp=divisions,
).tolist()
except (TypeError, ValueError): # str type
indexes = np.linspace(0, n - 1, npartitions + 1).astype(int)
divisions = [divisions[i] for i in indexes]
mins = remove_nans(mins)
maxes = remove_nans(maxes)
if pd.api.types.is_categorical_dtype(partition_col.dtype):
dtype = partition_col.dtype
mins = pd.Categorical(mins, dtype=dtype).codes.tolist()
maxes = pd.Categorical(maxes, dtype=dtype).codes.tolist()
return divisions, mins, maxes
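# Illustrative sketch (not part of the original module): the np.interp step in
# the repartition branch above thins an over-sampled division list down to
# `npartitions + 1` evenly spaced boundary values. All numbers below are made
# up purely for demonstration.
import numpy as _np
_oversampled = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
_n = len(_oversampled)
_npartitions = 4
_x = _np.linspace(0, _n - 1, _npartitions + 1)
_xp = _np.linspace(0, _n - 1, _n)
_thinned = _np.interp(x=_x, xp=_xp, fp=_oversampled).tolist()
# _thinned == [0.0, 25.0, 50.0, 75.0, 100.0]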
def sort_values(
df,
by,
npartitions=None,
ascending=True,
upsample=1.0,
partition_size=128e6,
**kwargs,
):
""" See DataFrame.sort_values for docstring """
if not ascending:
raise NotImplementedError("The ascending= keyword is not supported")
if not isinstance(by, str):
# support ["a"] as input
if isinstance(by, list) and len(by) == 1 and isinstance(by[0], str):
by = by[0]
else:
raise NotImplementedError(
"Dataframe only supports sorting by a single column which must "
"be passed as a string or a list of a single string.\n"
"You passed %s" % str(by)
)
if npartitions == "auto":
repartition = True
npartitions = max(100, df.npartitions)
else:
if npartitions is None:
npartitions = df.npartitions
repartition = False
sort_by_col = df[by]
divisions, mins, maxes = _calculate_divisions(
df, sort_by_col, repartition, npartitions, upsample, partition_size
)
if (
mins == sorted(mins)
and maxes == sorted(maxes)
and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))
and npartitions == df.npartitions
):
# divisions are in the right place
return df.map_partitions(M.sort_values, by)
df = rearrange_by_divisions(df, by, divisions)
df = df.map_partitions(M.sort_values, by)
return df
def set_index(
df,
index,
npartitions=None,
shuffle=None,
compute=False,
drop=True,
upsample=1.0,
divisions=None,
partition_size=128e6,
**kwargs,
):
""" See _Frame.set_index for docstring """
if isinstance(index, Series) and index._name == df.index._name:
return df
if isinstance(index, (DataFrame, tuple, list)):
# Accept ["a"], but not [["a"]]
if (
isinstance(index, list)
and len(index) == 1
and not isinstance(index[0], list) # if index = [["a"]], leave it that way
):
index = index[0]
else:
raise NotImplementedError(
"Dask dataframe does not yet support multi-indexes.\n"
"You tried to index with this index: %s\n"
"Indexes must be single columns only." % str(index)
)
if npartitions == "auto":
repartition = True
npartitions = max(100, df.npartitions)
else:
if npartitions is None:
npartitions = df.npartitions
repartition = False
if not isinstance(index, Series):
index2 = df[index]
else:
index2 = index
if divisions is None:
divisions, mins, maxes = _calculate_divisions(
df, index2, repartition, npartitions, upsample, partition_size
)
if (
mins == sorted(mins)
and maxes == sorted(maxes)
and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))
and npartitions == df.npartitions
):
divisions = mins + [maxes[-1]]
result = set_sorted_index(df, index, drop=drop, divisions=divisions)
return result.map_partitions(M.sort_index)
return set_partition(
df, index, divisions, shuffle=shuffle, drop=drop, compute=compute, **kwargs
)
def remove_nans(divisions):
"""Remove nans from divisions
These sometimes pop up when we call min/max on an empty partition
Examples
--------
>>> remove_nans((np.nan, 1, 2))
[1, 1, 2]
>>> remove_nans((1, np.nan, 2))
[1, 2, 2]
>>> remove_nans((1, 2, np.nan))
[1, 2, 2]
"""
divisions = list(divisions)
for i in range(len(divisions) - 2, -1, -1):
if pd.isnull(divisions[i]):
divisions[i] = divisions[i + 1]
for i in range(len(divisions) - 1, -1, -1):
if not pd.isnull(divisions[i]):
for j in range(i + 1, len(divisions)):
divisions[j] = divisions[i]
break
return divisions
def set_partition(
df, index, divisions, max_branch=32, drop=True, shuffle=None, compute=None
):
"""Group DataFrame by index
Sets a new index and partitions data along that index according to
divisions. Divisions are often found by computing approximate quantiles.
The function ``set_index`` will do both of these steps.
Parameters
----------
df: DataFrame/Series
Data that we want to re-partition
index: string or Series
Column to become the new index
divisions: list
Values to form new divisions between partitions
drop: bool, default True
Whether to delete columns to be used as the new index
shuffle: str (optional)
Either 'disk' for an on-disk shuffle or 'tasks' to use the task
scheduling framework. Use 'disk' if you are on a single machine
and 'tasks' if you are on a distributed cluster.
max_branch: int (optional)
If using the task-based shuffle, the amount of splitting each
partition undergoes. Increase this for fewer copies but more
scheduler overhead.
See Also
--------
set_index
shuffle
partd
"""
meta = df._meta._constructor_sliced([0])
if isinstance(divisions, tuple):
# pd.isna considers tuples to be scalars. Convert to a list.
divisions = list(divisions)
if np.isscalar(index):
dtype = df[index].dtype
else:
dtype = index.dtype
if pd.isna(divisions).any() and pd.api.types.is_integer_dtype(dtype):
# Can't construct a Series[int64] when any / all of the divisions are NaN.
divisions = df._meta._constructor_sliced(divisions)
else:
divisions = df._meta._constructor_sliced(divisions, dtype=dtype)
if np.isscalar(index):
partitions = df[index].map_partitions(
set_partitions_pre, divisions=divisions, meta=meta
)
df2 = df.assign(_partitions=partitions)
else:
partitions = index.map_partitions(
set_partitions_pre, divisions=divisions, meta=meta
)
df2 = df.assign(_partitions=partitions, _index=index)
df3 = rearrange_by_column(
df2,
"_partitions",
max_branch=max_branch,
npartitions=len(divisions) - 1,
shuffle=shuffle,
compute=compute,
ignore_index=True,
)
if np.isscalar(index):
df4 = df3.map_partitions(
set_index_post_scalar,
index_name=index,
drop=drop,
column_dtype=df.columns.dtype,
)
else:
df4 = df3.map_partitions(
set_index_post_series,
index_name=index.name,
drop=drop,
column_dtype=df.columns.dtype,
)
df4.divisions = methods.tolist(divisions)
return df4.map_partitions(M.sort_index)
def shuffle(
df,
index,
shuffle=None,
npartitions=None,
max_branch=32,
ignore_index=False,
compute=None,
):
"""Group DataFrame by index
Hash grouping of elements. After this operation all elements that have
the same index will be in the same partition. Note that this requires
full dataset read, serialization and shuffle. This is expensive. If
possible you should avoid shuffles.
This does not preserve a meaningful index/partitioning scheme. This is not
deterministic if done in parallel.
See Also
--------
set_index
set_partition
shuffle_disk
"""
list_like = pd.api.types.is_list_like(index) and not is_dask_collection(index)
if shuffle == "tasks" and (isinstance(index, str) or list_like):
# Avoid creating the "_partitions" column if possible.
# We currently do this if the user is passing in
# specific column names (and shuffle == "tasks").
if isinstance(index, str):
index = [index]
else:
index = list(index)
nset = set(index)
if nset & set(df.columns) == nset:
return rearrange_by_column(
df,
index,
npartitions=npartitions,
max_branch=max_branch,
shuffle=shuffle,
ignore_index=ignore_index,
compute=compute,
)
if not isinstance(index, _Frame):
index = df._select_columns_or_index(index)
partitions = index.map_partitions(
partitioning_index,
npartitions=npartitions or df.npartitions,
meta=df._meta._constructor_sliced([0]),
transform_divisions=False,
)
df2 = df.assign(_partitions=partitions)
df2._meta.index.name = df._meta.index.name
df3 = rearrange_by_column(
df2,
"_partitions",
npartitions=npartitions,
max_branch=max_branch,
shuffle=shuffle,
compute=compute,
ignore_index=ignore_index,
)
del df3["_partitions"]
return df3
def rearrange_by_divisions(df, column, divisions, max_branch=None, shuffle=None):
""" Shuffle dataframe so that column separates along divisions """
divisions = df._meta._constructor_sliced(divisions)
meta = df._meta._constructor_sliced([0])
# Assign target output partitions to every row
partitions = df[column].map_partitions(
set_partitions_pre, divisions=divisions, meta=meta
)
df2 = df.assign(_partitions=partitions)
# Perform shuffle
df3 = rearrange_by_column(
df2,
"_partitions",
max_branch=max_branch,
npartitions=len(divisions) - 1,
shuffle=shuffle,
)
del df3["_partitions"]
return df3
def rearrange_by_column(
df,
col,
npartitions=None,
max_branch=None,
shuffle=None,
compute=None,
ignore_index=False,
):
shuffle = shuffle or config.get("shuffle", None) or "disk"
if shuffle == "disk":
return rearrange_by_column_disk(df, col, npartitions, compute=compute)
elif shuffle == "tasks":
df2 = rearrange_by_column_tasks(
df, col, max_branch, npartitions, ignore_index=ignore_index
)
if ignore_index:
df2._meta = df2._meta.reset_index(drop=True)
return df2
else:
raise NotImplementedError("Unknown shuffle method %s" % shuffle)
class maybe_buffered_partd:
"""
If serialized, will return non-buffered partd. Otherwise returns a buffered partd
"""
def __init__(self, buffer=True, tempdir=None):
self.tempdir = tempdir or config.get("temporary_directory", None)
self.buffer = buffer
self.compression = config.get("dataframe.shuffle-compression", None)
def __reduce__(self):
if self.tempdir:
return (maybe_buffered_partd, (False, self.tempdir))
else:
return (maybe_buffered_partd, (False,))
def __call__(self, *args, **kwargs):
import partd
path = tempfile.mkdtemp(suffix=".partd", dir=self.tempdir)
try:
partd_compression = (
getattr(partd.compressed, self.compression)
if self.compression
else None
)
except AttributeError as e:
raise ImportError(
"Not able to import and load {0} as compression algorithm."
"Please check if the library is installed and supported by Partd.".format(
self.compression
)
) from e
file = partd.File(path)
partd.file.cleanup_files.append(path)
# Envelope partd file with compression, if set and available
if partd_compression:
file = partd_compression(file)
if self.buffer:
return partd.PandasBlocks(partd.Buffer(partd.Dict(), file))
else:
return partd.PandasBlocks(file)
def rearrange_by_column_disk(df, column, npartitions=None, compute=False):
"""Shuffle using local disk
See Also
--------
rearrange_by_column_tasks:
Same function, but using tasks rather than partd
Has a more informative docstring
"""
if npartitions is None:
npartitions = df.npartitions
token = tokenize(df, column, npartitions)
always_new_token = uuid.uuid1().hex
p = ("zpartd-" + always_new_token,)
dsk1 = {p: (maybe_buffered_partd(),)}
# Partition data on disk
name = "shuffle-partition-" + always_new_token
dsk2 = {
(name, i): (shuffle_group_3, key, column, npartitions, p)
for i, key in enumerate(df.__dask_keys__())
}
dependencies = []
layer = {}
if compute:
graph = HighLevelGraph.merge(df.dask, dsk1, dsk2)
keys = [p, sorted(dsk2)]
pp, values = compute_as_if_collection(DataFrame, graph, keys)
dsk1 = {p: pp}
dsk2 = dict(zip(sorted(dsk2), values))
else:
dependencies.append(df)
# Barrier
barrier_token = "barrier-" + always_new_token
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = "shuffle-collect-" + token
dsk4 = {
(name, i): (collect, p, i, df._meta, barrier_token) for i in range(npartitions)
}
divisions = (None,) * (npartitions + 1)
layer = toolz.merge(dsk1, dsk2, dsk3, dsk4)
graph = HighLevelGraph.from_collections(name, layer, dependencies=dependencies)
return DataFrame(graph, name, df._meta, divisions)
def _noop(x, cleanup_token):
"""
A task that does nothing.
"""
return x
def rearrange_by_column_tasks(
df, column, max_branch=32, npartitions=None, ignore_index=False
):
"""Order divisions of DataFrame so that all values within column(s) align
This enacts a task-based shuffle. It contains most of the tricky logic
around the complex network of tasks. Typically before this function is
called a new column, ``"_partitions"`` has been added to the dataframe,
containing the output partition number of every row. This function
produces a new dataframe where every row is in the proper partition. It
accomplishes this by splitting each input partition into several pieces,
and then concatenating pieces from different input partitions into output
partitions. If there are enough partitions then it does this work in
stages to avoid scheduling overhead.
Let's explain the motivation for this further. Imagine that we have 1000
input partitions and 1000 output partitions. In theory we could split each
input into 1000 pieces, and then move the 1 000 000 resulting pieces
around, and then concatenate them all into 1000 output groups. This would
be fine, but the central scheduling overhead of 1 000 000 tasks would
become a bottleneck. Instead we do this in stages so that we split each of
the 1000 inputs into 30 pieces (we now have 30 000 pieces) move those
around, concatenate back down to 1000, and then do the same process again.
This has the same result as the full transfer, but now we've moved data
twice (expensive) but done so with only 60 000 tasks (cheap).
Note that the `column` input may correspond to a list of columns (rather
than just a single column name). In this case, the `shuffle_group` and
`shuffle_group_2` functions will use hashing to map each row to an output
partition. This approach may require the same rows to be hashed multiple
times, but avoids the need to assign a new "_partitions" column.
Parameters
----------
df: dask.dataframe.DataFrame
column: str or list
A column name on which we want to split, commonly ``"_partitions"``
which is assigned by functions upstream. This could also be a list of
columns (in which case shuffle_group will create a hash array/column).
max_branch: int
The maximum number of splits per input partition. Defaults to 32.
If there are more partitions than this then the shuffling will occur in
stages in order to avoid creating npartitions**2 tasks.
Increasing this number increases scheduling overhead but decreases the
number of full-dataset transfers that we have to make.
npartitions: Optional[int]
The desired number of output partitions
Returns
-------
df3: dask.dataframe.DataFrame
See also
--------
rearrange_by_column_disk: same operation, but uses partd
rearrange_by_column: parent function that calls this or rearrange_by_column_disk
shuffle_group: does the actual splitting per-partition
"""
max_branch = max_branch or 32
if (npartitions or df.npartitions) <= max_branch:
# We are creating a small number of output partitions.
# No need for staged shuffling. Staged shuffling will
# sometimes require extra work/communication in this case.
token = tokenize(df, column, npartitions)
shuffle_name = f"simple-shuffle-{token}"
npartitions = npartitions or df.npartitions
shuffle_layer = SimpleShuffleLayer(
shuffle_name,
column,
npartitions,
df.npartitions,
ignore_index,
df._name,
df._meta,
)
graph = HighLevelGraph.from_collections(
shuffle_name, shuffle_layer, dependencies=[df]
)
return new_dd_object(graph, shuffle_name, df._meta, [None] * (npartitions + 1))
n = df.npartitions
stages = int(math.ceil(math.log(n) / math.log(max_branch)))
if stages > 1:
k = int(math.ceil(n ** (1 / stages)))
else:
k = n
inputs = [tuple(digit(i, j, k) for j in range(stages)) for i in range(k ** stages)]
npartitions_orig = df.npartitions
token = tokenize(df, stages, column, n, k)
for stage in range(stages):
stage_name = f"shuffle-{stage}-{token}"
stage_layer = ShuffleLayer(
stage_name,
column,
inputs,
stage,
npartitions,
n,
k,
ignore_index,
df._name,
df._meta,
)
graph = HighLevelGraph.from_collections(
stage_name, stage_layer, dependencies=[df]
)
df = new_dd_object(graph, stage_name, df._meta, df.divisions)
if npartitions is not None and npartitions != npartitions_orig:
token = tokenize(df, npartitions)
repartition_group_token = "repartition-group-" + token
dsk = {
(repartition_group_token, i): (
shuffle_group_2,
k,
column,
ignore_index,
npartitions,
)
for i, k in enumerate(df.__dask_keys__())
}
repartition_get_name = "repartition-get-" + token
for p in range(npartitions):
dsk[(repartition_get_name, p)] = (
shuffle_group_get,
(repartition_group_token, p % npartitions_orig),
p,
)
graph2 = HighLevelGraph.from_collections(
repartition_get_name, dsk, dependencies=[df]
)
df2 = new_dd_object(
graph2, repartition_get_name, df._meta, [None] * (npartitions + 1)
)
else:
df2 = df
df2.divisions = (None,) * (npartitions_orig + 1)
return df2
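# Illustrative sketch (not part of the original module): the staging arithmetic
# used above, evaluated for the docstring's 1000-partition example.
import math as _math
_n, _max_branch = 1000, 32
_stages = int(_math.ceil(_math.log(_n) / _math.log(_max_branch)))  # -> 2
_k = int(_math.ceil(_n ** (1 / _stages)))  # -> 32
# Two stages of roughly 32-way splits move the data twice but keep the task
# count in the tens of thousands instead of the million tasks a single
# all-to-all split would create.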
########################################################
# Various convenience functions to be run by the above #
########################################################
def partitioning_index(df, npartitions):
"""
Computes a deterministic index mapping each record to a partition.
Identical rows are mapped to the same partition.
Parameters
----------
df : DataFrame/Series/Index
npartitions : int
The number of partitions to group into.
Returns
-------
partitions : ndarray
An array of int64 values mapping each record to a partition.
"""
return hash_object_dispatch(df, index=False) % int(npartitions)
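# Illustrative sketch (not part of the original module): for plain pandas data
# hash_object_dispatch behaves like pandas.util.hash_pandas_object, so the
# partition assignment is simply a stable row hash taken modulo npartitions.
import pandas as _pd
_frame = _pd.DataFrame({"x": [1, 2, 3, 2], "y": ["a", "b", "c", "b"]})
_assignment = _pd.util.hash_pandas_object(_frame, index=False) % 4
# Rows 1 and 3 are identical, so they always land in the same partition.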
def barrier(args):
list(args)
return 0
def cleanup_partd_files(p, keys):
"""
Cleanup the files in a partd.File dataset.
Parameters
----------
p : partd.Interface
File or Encode wrapping a file should be OK.
keys: List
Just for scheduling purposes, not actually used.
"""
import partd
if isinstance(p, partd.Encode):
maybe_file = p.partd
else:
maybe_file = p
if isinstance(maybe_file, partd.File):
path = maybe_file.path
else:
path = None
if path:
shutil.rmtree(path, ignore_errors=True)
def collect(p, part, meta, barrier_token):
""" Collect partitions from partd, yield dataframes """
with ensure_cleanup_on_exception(p):
res = p.get(part)
return res if len(res) > 0 else meta
def set_partitions_pre(s, divisions):
partitions = divisions.searchsorted(s, side="right") - 1
partitions[(s >= divisions.iloc[-1]).values] = len(divisions) - 2
return partitions
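# Illustrative sketch (not part of the original module): with divisions
# [0, 5, 10], searchsorted(side="right") - 1 assigns each value to a partition,
# and the final clamp keeps values equal to the last division in partition 1.
import pandas as _pd
_divisions = _pd.Series([0, 5, 10])
_values = _pd.Series([0, 3, 5, 9, 10])
_parts = _divisions.searchsorted(_values, side="right") - 1
_parts[(_values >= _divisions.iloc[-1]).values] = len(_divisions) - 2
# _parts is now array([0, 0, 1, 1, 1])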
def shuffle_group_2(df, cols, ignore_index, nparts):
if not len(df):
return {}, df
if isinstance(cols, str):
cols = [cols]
if cols and cols[0] == "_partitions":
ind = df[cols[0]].astype(np.int32)
else:
ind = (
hash_object_dispatch(df[cols] if cols else df, index=False) % int(nparts)
).astype(np.int32)
n = ind.max() + 1
result2 = group_split_dispatch(df, ind.values.view(), n, ignore_index=ignore_index)
return result2, df.iloc[:0]
def shuffle_group_get(g_head, i):
g, head = g_head
if i in g:
return g[i]
else:
return head
def shuffle_group(df, cols, stage, k, npartitions, ignore_index, nfinal):
"""Splits dataframe into groups
The group is determined by their final partition, and which stage we are in
in the shuffle
Parameters
----------
df: DataFrame
cols: str or list
Column name(s) on which to split the dataframe. If ``cols`` is not
"_partitions", hashing will be used to determine target partition
stage: int
We shuffle dataframes with many partitions in a few stages to avoid
a quadratic number of tasks. This number corresponds to which stage
we're in, starting from zero up to some small integer
k: int
Desired number of splits from this dataframe
npartitions: int
Total number of output partitions for the full dataframe
nfinal: int
Total number of output partitions after repartitioning
Returns
-------
out: Dict[int, DataFrame]
A dictionary mapping integers in {0..k} to dataframes such that the
hash values of ``df[col]`` are well partitioned.
"""
if isinstance(cols, str):
cols = [cols]
if cols and cols[0] == "_partitions":
ind = df[cols[0]]
else:
ind = hash_object_dispatch(df[cols] if cols else df, index=False)
if nfinal and nfinal != npartitions:
ind = ind % int(nfinal)
c = ind.values
typ = np.min_scalar_type(npartitions * 2)
c = np.mod(c, npartitions).astype(typ, copy=False)
np.floor_divide(c, k ** stage, out=c)
np.mod(c, k, out=c)
return group_split_dispatch(df, c, k, ignore_index=ignore_index)
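# Illustrative sketch (not part of the original module): how the stage
# arithmetic above routes hash values when npartitions = 4 and k = 2.
import numpy as _np
_hashes = _np.arange(8)
_npartitions, _k = 4, 2
_c = _hashes % _npartitions      # final target partition of each row
_stage0 = (_c // _k ** 0) % _k   # -> [0, 1, 0, 1, 0, 1, 0, 1]
_stage1 = (_c // _k ** 1) % _k   # -> [0, 0, 1, 1, 0, 0, 1, 1]
# Stage 0 splits on the low digit of the target partition and stage 1 on the
# next digit, so after both stages every row has reached its final partition.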
@contextlib.contextmanager
def ensure_cleanup_on_exception(p):
"""Ensure a partd.File is cleaned up.
We have several tasks referring to a `partd.File` instance. We want to
ensure that the file is cleaned up if and only if there's an exception
in the tasks using the `partd.File`.
"""
try:
yield
except Exception:
# the function (e.g. shuffle_group_3) had an internal exception.
# We'll cleanup our temporary files and re-raise.
try:
p.drop()
except Exception:
logger.exception("ignoring exception in ensure_cleanup_on_exception")
raise
def shuffle_group_3(df, col, npartitions, p):
with ensure_cleanup_on_exception(p):
g = df.groupby(col)
d = {i: g.get_group(i) for i in g.groups}
p.append(d, fsync=True)
def set_index_post_scalar(df, index_name, drop, column_dtype):
df2 = df.drop("_partitions", axis=1).set_index(index_name, drop=drop)
df2.columns = df2.columns.astype(column_dtype)
return df2
def set_index_post_series(df, index_name, drop, column_dtype):
df2 = df.drop("_partitions", axis=1).set_index("_index", drop=True)
df2.index.name = index_name
df2.columns = df2.columns.astype(column_dtype)
return df2
def drop_overlap(df, index):
return df.drop(index) if index in df.index else df
def get_overlap(df, index):
return df.loc[[index]] if index in df.index else df._constructor()
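# Illustrative sketch (not part of the original module): how the two helpers
# above split a partition at a division boundary, using a tiny pandas frame.
import pandas as _pd
_part = _pd.DataFrame({"v": [1, 2, 3]}, index=[0, 1, 2])
_moved = get_overlap(_part, 2)   # the index-2 row, destined for the next partition
_kept = drop_overlap(_part, 2)   # the same partition with the index-2 row removed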
def fix_overlap(ddf, overlap):
""" Ensures that the upper bound on each partition of ddf (except the last) is exclusive """
name = "fix-overlap-" + tokenize(ddf, overlap)
n = len(ddf.divisions) - 1
dsk = {(name, i): (ddf._name, i) for i in range(n)}
frames = []
for i in overlap:
# `frames` is a list of data from previous partitions that we may want to
# move to partition i. Here, we add "overlap" from the previous partition
# (i-1) to this list.
frames.append((get_overlap, (ddf._name, i - 1), ddf.divisions[i]))
# Make sure that any data added from partition i-1 to `frames` is removed
# from partition i-1.
dsk[(name, i - 1)] = (drop_overlap, dsk[(name, i - 1)], ddf.divisions[i])
# We do not want to move "overlap" from the previous partition (i-1) into
# this partition (i) if the data from this partition will need to be moved
# to the next partition (i+1) anyway. If we concatenate data too early,
# we may lose rows (https://github.com/dask/dask/issues/6972).
if i == ddf.npartitions - 2 or ddf.divisions[i] != ddf.divisions[i + 1]:
frames.append((ddf._name, i))
dsk[(name, i)] = (methods.concat, frames)
frames = []
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])
return new_dd_object(graph, name, ddf._meta, ddf.divisions)
def compute_and_set_divisions(df, **kwargs):
mins = df.index.map_partitions(M.min, meta=df.index)
maxes = df.index.map_partitions(M.max, meta=df.index)
mins, maxes = compute(mins, maxes, **kwargs)
mins = remove_nans(mins)
maxes = remove_nans(maxes)
if (
sorted(mins) != list(mins)
or sorted(maxes) != list(maxes)
or any(a > b for a, b in zip(mins, maxes))
):
raise ValueError(
"Partitions must be sorted ascending with the index", mins, maxes
)
df.divisions = tuple(mins) + (list(maxes)[-1],)
overlap = [i for i in range(1, len(mins)) if mins[i] >= maxes[i - 1]]
return fix_overlap(df, overlap) if overlap else df
def set_sorted_index(df, index, drop=True, divisions=None, **kwargs):
if not isinstance(index, Series):
meta = df._meta.set_index(index, drop=drop)
else:
meta = df._meta.set_index(index._meta, drop=drop)
result = map_partitions(M.set_index, df, index, drop=drop, meta=meta)
if not divisions:
return compute_and_set_divisions(result, **kwargs)
elif len(divisions) != len(df.divisions):
msg = (
"When doing `df.set_index(col, sorted=True, divisions=...)`, "
"divisions indicates known splits in the index column. In this "
"case divisions must be the same length as the existing "
"divisions in `df`\n\n"
"If the intent is to repartition into new divisions after "
"setting the index, you probably want:\n\n"
"`df.set_index(col, sorted=True).repartition(divisions=divisions)`"
)
raise ValueError(msg)
result.divisions = tuple(divisions)
return result
| 32.18916
| 96
| 0.628557
|
dfb14d8354450b350266f91e750cb32769110763
| 7,468
|
py
|
Python
|
geotrek/common/tests/test_admin.py
|
GeotrekCE/Geotrek
|
c1393925c1940ac795ab7fc04819cd8c78bc79fb
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/common/tests/test_admin.py
|
GeotrekCE/Geotrek
|
c1393925c1940ac795ab7fc04819cd8c78bc79fb
|
[
"BSD-2-Clause"
] | null | null | null |
geotrek/common/tests/test_admin.py
|
GeotrekCE/Geotrek
|
c1393925c1940ac795ab7fc04819cd8c78bc79fb
|
[
"BSD-2-Clause"
] | null | null | null |
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings
from django.urls import reverse
from tempfile import TemporaryDirectory
from geotrek.common.models import Attachment, FileType, Theme
from geotrek.common.tests.factories import AttachmentFactory, ThemeFactory
from geotrek.common.utils.testdata import get_dummy_uploaded_image
from geotrek.trekking.models import DifficultyLevel, POI, Trek
from geotrek.trekking.tests.factories import DifficultyLevelFactory, POIFactory, TrekFactory
from mapentity.tests.factories import SuperUserFactory
@override_settings(MEDIA_ROOT=TemporaryDirectory().name)
class AttachmentAdminTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = SuperUserFactory()
cls.poi = POIFactory.create(geom='SRID=%s;POINT(1 1)' % settings.SRID)
cls.picture = AttachmentFactory(content_object=cls.poi, title='img1',
attachment_file=get_dummy_uploaded_image())
cls.trek = TrekFactory.create(geom='SRID=%s;LINESTRING(0 0, 1 0, 2 0)' % settings.SRID)
cls.picture_2 = AttachmentFactory(content_object=cls.trek, title='img2',
attachment_file=get_dummy_uploaded_image())
cls.theme = ThemeFactory.create(label="Theme 1")
cls.picture_3 = AttachmentFactory(content_object=cls.theme, title='img3',
attachment_file=get_dummy_uploaded_image())
def setUp(self):
self.client.force_login(self.user)
def test_changelist_attachment(self):
list_url = reverse('admin:common_attachment_changelist')
response = self.client.get(list_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'img1.png')
self.assertContains(response, 'img2.png')
self.assertContains(response, self.poi.get_detail_url())
self.assertContains(response, self.trek.get_detail_url())
self.assertContains(response, self.theme.pk)
def test_changelist_attachment_filter_content_id(self):
list_url = reverse('admin:common_attachment_changelist')
data = {
'content_type': ContentType.objects.get_for_model(POI).pk
}
response = self.client.get(list_url, data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'img1.png')
self.assertNotContains(response, 'img2.png')
def test_attachment_can_be_change(self):
change_url = reverse('admin:common_attachment_change', args=[self.picture.pk])
file_type = FileType.objects.first()
response = self.client.post(change_url, {'title': 'Coucou', 'filetype': file_type.pk, 'starred': True})
self.assertEqual(response.status_code, 302)
attachment_modified = Attachment.objects.get(pk=self.picture.pk)
self.assertEqual(attachment_modified.title, self.picture.title)
# The title is not changed; it depends on the file title
self.assertEqual(attachment_modified.starred, True)
self.assertEqual(response.url, reverse('admin:common_attachment_changelist'))
class MergeActionAdminTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = SuperUserFactory.create()
cls.theme = ThemeFactory.create(label="Theme 1")
cls.theme_2 = ThemeFactory.create(label="Theme 2")
cls.theme_other = ThemeFactory.create(label="Autre theme")
cls.difficulty_1 = DifficultyLevelFactory.create(difficulty="Dif 1")
cls.difficulty_2 = DifficultyLevelFactory.create(difficulty="Dif 2")
cls.difficulty_3 = DifficultyLevelFactory.create(difficulty="Dif 3")
cls.trek = TrekFactory.create(geom='SRID=%s;LINESTRING(0 0, 1 0, 2 0)' % settings.SRID,
difficulty=cls.difficulty_2)
cls.trek.themes.add(cls.theme, cls.theme_2)
def setUp(self):
self.client.force_login(self.user)
def test_merge_actions_many2many(self):
"""
(A B C)
| | |
T
B main
A C tail
T linked to B only
"""
data = {'action': 'apply_merge', '_selected_action': Theme.objects.all().values_list('pk', flat=True)}
self.client.post(reverse("admin:common_theme_changelist"), data, follow=True)
self.assertEqual(Theme.objects.count(), 1)
self.assertEqual(Theme.objects.first().label, "Autre theme + Theme 1 + Theme 2")
self.assertEqual(self.trek.themes.first().label, "Autre theme + Theme 1 + Theme 2")
def test_merge_actions_2_many2many(self):
"""
(A B) C
| /
T
A main
B tail
T linked to A after merge and C
"""
data = {'action': 'apply_merge', '_selected_action': Theme.objects.filter(label__in=['Theme 1', 'Autre theme']).values_list('pk', flat=True)}
self.client.post(reverse("admin:common_theme_changelist"), data, follow=True)
self.assertEqual(Theme.objects.count(), 2)
self.assertEqual(Theme.objects.exclude(label="Theme 2").first().label, "Autre theme + Theme 1")
self.assertEqual(self.trek.themes.first().label, "Autre theme + Theme 1")
def test_merge_actions_fk(self):
"""
(A B) C
|
T
A main
B tail
T linked to A
"""
data = {'action': 'apply_merge', '_selected_action': DifficultyLevel.objects.filter(difficulty__in=['Dif 1', 'Dif 2']).values_list('pk', flat=True)}
self.client.post(reverse("admin:trekking_difficultylevel_changelist"), data, follow=True)
self.assertEqual(DifficultyLevel.objects.count(), 2)
self.assertEqual(DifficultyLevel.objects.exclude(difficulty="Dif 3").first().difficulty, "Dif 1 + Dif 2")
self.assertEqual(Trek.objects.first().difficulty.difficulty, "Dif 1 + Dif 2")
def test_merge_actions_one_element(self):
"""
A (B) C
|
T
A main
no tail
T linked to A
"""
data = {'action': 'apply_merge', '_selected_action': DifficultyLevel.objects.filter(difficulty="Dif 1").values_list('pk', flat=True)}
self.client.post(reverse("admin:trekking_difficultylevel_changelist"), data, follow=True)
self.assertEqual(DifficultyLevel.objects.count(), 3)
def test_merge_actions_long_name(self):
"""
(A B C)
| | |
T
B main
A C tail
        T linked to B only
"""
self.theme.label = '*' * 128
self.theme.save()
data = {'action': 'apply_merge', '_selected_action': Theme.objects.all().values_list('pk', flat=True)}
self.client.post(reverse("admin:common_theme_changelist"), data, follow=True)
self.assertEqual(Theme.objects.count(), 1)
self.assertEqual(len(Theme.objects.first().label), 128)
self.assertEqual(Theme.objects.first().label,
"*********************************************************************************************"
"******************************* ...")
self.assertEqual(self.trek.themes.first().label,
"*********************************************************************************************"
"******************************* ...")
| 44.718563
| 156
| 0.625736
|
c94c453248cbee7c7cd06236e9b92ec358e8cf82
| 61,931
|
py
|
Python
|
tests/calc/test_calc_tools.py
|
khintz/MetPy
|
d485069a275fa6de90f3b5b15f49cf9befc21745
|
[
"BSD-3-Clause"
] | null | null | null |
tests/calc/test_calc_tools.py
|
khintz/MetPy
|
d485069a275fa6de90f3b5b15f49cf9befc21745
|
[
"BSD-3-Clause"
] | null | null | null |
tests/calc/test_calc_tools.py
|
khintz/MetPy
|
d485069a275fa6de90f3b5b15f49cf9befc21745
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `tools` module."""
from collections import namedtuple
import cartopy.crs as ccrs
import numpy as np
import numpy.ma as ma
import pandas as pd
import pytest
import xarray as xr
from metpy.calc import (angle_to_direction, find_bounding_indices, find_intersections,
first_derivative, get_layer, get_layer_heights, gradient,
grid_deltas_from_dataarray, laplacian, lat_lon_grid_deltas,
nearest_intersection_idx, parse_angle, pressure_to_height_std,
reduce_point_density, resample_nn_1d, second_derivative,
regular_to_rotated, rotated_to_regular)
from metpy.calc.tools import (_delete_masked_points, _get_bound_pressure_height,
_greater_or_close, _less_or_close, _next_non_masked_element,
_remove_nans, BASE_DEGREE_MULTIPLIER, DIR_STRS, UND,
wrap_output_like)
from metpy.testing import (assert_almost_equal, assert_array_almost_equal, assert_array_equal)
from metpy.units import DimensionalityError, units
FULL_CIRCLE_DEGREES = np.arange(0, 360, BASE_DEGREE_MULTIPLIER.m) * units.degree
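# Editorial note: BASE_DEGREE_MULTIPLIER is 22.5 degrees, so FULL_CIRCLE_DEGREES holds the
# sixteen compass-point angles 0, 22.5, ..., 337.5 that pair one-to-one with DIR_STRS[:-1]
# (UND, the undefined marker, sits at index -1 and has no angle).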
def test_resample_nn():
"""Test 1d nearest neighbor functionality."""
a = np.arange(5.)
b = np.array([2, 3.8])
truth = np.array([2, 4])
assert_array_equal(truth, resample_nn_1d(a, b))
def test_nearest_intersection_idx():
"""Test nearest index to intersection functionality."""
x = np.linspace(5, 30, 17)
y1 = 3 * x**2
y2 = 100 * x - 650
truth = np.array([2, 12])
assert_array_equal(truth, nearest_intersection_idx(y1, y2))
@pytest.mark.parametrize('direction, expected', [
('all', np.array([[8.88, 24.44], [238.84, 1794.53]])),
('increasing', np.array([[24.44], [1794.53]])),
('decreasing', np.array([[8.88], [238.84]]))
])
def test_find_intersections(direction, expected):
"""Test finding the intersection of two curves functionality."""
x = np.linspace(5, 30, 17)
y1 = 3 * x**2
y2 = 100 * x - 650
# Note: Truth is what we will get with this sampling, not the mathematical intersection
assert_array_almost_equal(expected, find_intersections(x, y1, y2, direction=direction), 2)
def test_find_intersections_no_intersections():
"""Test finding the intersection of two curves with no intersections."""
x = np.linspace(5, 30, 17)
y1 = 3 * x + 0
y2 = 5 * x + 5
# Note: Truth is what we will get with this sampling, not the mathematical intersection
truth = np.array([[],
[]])
assert_array_equal(truth, find_intersections(x, y1, y2))
def test_find_intersections_invalid_direction():
"""Test exception if an invalid direction is given."""
x = np.linspace(5, 30, 17)
y1 = 3 * x ** 2
y2 = 100 * x - 650
with pytest.raises(ValueError):
find_intersections(x, y1, y2, direction='increaing')
def test_find_intersections_units():
"""Test handling of units when logarithmic interpolation is called."""
x = np.linspace(5, 30, 17) * units.hPa
y1 = 3 * x.m**2
y2 = 100 * x.m - 650
truth = np.array([24.43, 1794.54])
x_test, y_test = find_intersections(x, y1, y2, direction='increasing', log_x=True)
assert_array_almost_equal(truth, np.array([x_test.m, y_test.m]).flatten(), 2)
assert x_test.units == units.hPa
@pytest.mark.parametrize('direction, expected', [
('all', np.array([[0., 3.5, 4.33333333, 7., 9., 10., 11.5, 13.], np.zeros(8)])),
('increasing', np.array([[0., 4.333, 7., 11.5], np.zeros(4)])),
('decreasing', np.array([[3.5, 10.], np.zeros(2)]))
])
def test_find_intersections_intersections_in_data_at_ends(direction, expected):
"""Test finding intersections when intersections are in the data.
Test data includes points of intersection, sequential points of intersection, intersection
at the ends of the data, and intersections in increasing/decreasing direction.
"""
x = np.arange(14)
y1 = np.array([0, 3, 2, 1, -1, 2, 2, 0, 1, 0, 0, -2, 2, 0])
y2 = np.zeros_like(y1)
assert_array_almost_equal(expected, find_intersections(x, y1, y2, direction=direction), 2)
@pytest.mark.parametrize('mask, expected_idx, expected_element', [
([False, False, False, False, False], 1, 1),
([False, True, True, False, False], 3, 3),
([False, True, True, True, True], None, None)
])
def test_non_masked_elements(mask, expected_idx, expected_element):
"""Test with a valid element."""
a = ma.masked_array(np.arange(5), mask=mask)
idx, element = _next_non_masked_element(a, 1)
assert idx == expected_idx
assert element == expected_element
@pytest.fixture
def thin_point_data():
r"""Provide scattered points for testing."""
xy = np.array([[0.8793620, 0.9005706], [0.5382446, 0.8766988], [0.6361267, 0.1198620],
[0.4127191, 0.0270573], [0.1486231, 0.3121822], [0.2607670, 0.4886657],
[0.7132257, 0.2827587], [0.4371954, 0.5660840], [0.1318544, 0.6468250],
[0.6230519, 0.0682618], [0.5069460, 0.2326285], [0.1324301, 0.5609478],
[0.7975495, 0.2109974], [0.7513574, 0.9870045], [0.9305814, 0.0685815],
[0.5271641, 0.7276889], [0.8116574, 0.4795037], [0.7017868, 0.5875983],
[0.5591604, 0.5579290], [0.1284860, 0.0968003], [0.2857064, 0.3862123]])
return xy
@pytest.mark.parametrize('radius, truth',
[(2.0, np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.bool)),
(1.0, np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0], dtype=np.bool)),
(0.3, np.array([1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0], dtype=np.bool)),
(0.1, np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.bool))
])
def test_reduce_point_density(thin_point_data, radius, truth):
r"""Test that reduce_point_density works."""
assert_array_equal(reduce_point_density(thin_point_data, radius=radius), truth)
@pytest.mark.parametrize('radius, truth',
[(2.0, np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.bool)),
(1.0, np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0], dtype=np.bool)),
(0.3, np.array([1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0], dtype=np.bool)),
(0.1, np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.bool))
])
def test_reduce_point_density_units(thin_point_data, radius, truth):
r"""Test that reduce_point_density works with units."""
assert_array_equal(reduce_point_density(thin_point_data * units.dam,
radius=radius * units.dam), truth)
@pytest.mark.parametrize('radius, truth',
[(2.0, np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1], dtype=np.bool)),
(0.7, np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1], dtype=np.bool)),
(0.3, np.array([1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0,
0, 0, 0, 1, 0, 0, 0, 1, 0, 1], dtype=np.bool)),
(0.1, np.array([1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.bool))
])
def test_reduce_point_density_priority(thin_point_data, radius, truth):
r"""Test that reduce_point_density works properly with priority."""
key = np.array([8, 6, 2, 8, 6, 4, 4, 8, 8, 6, 3, 4, 3, 0, 7, 4, 3, 2, 3, 3, 9])
assert_array_equal(reduce_point_density(thin_point_data, radius, key), truth)
def test_reduce_point_density_1d():
r"""Test that reduce_point_density works with 1D points."""
x = np.array([1, 3, 4, 8, 9, 10])
assert_array_equal(reduce_point_density(x, 2.5),
np.array([1, 0, 1, 1, 0, 0], dtype=np.bool))
def test_delete_masked_points():
"""Test deleting masked points."""
a = ma.masked_array(np.arange(5), mask=[False, True, False, False, False])
b = ma.masked_array(np.arange(5), mask=[False, False, False, True, False])
expected = np.array([0, 2, 4])
a, b = _delete_masked_points(a, b)
assert_array_equal(a, expected)
assert_array_equal(b, expected)
def get_bounds_data():
"""Provide pressure and height data for testing layer bounds calculation."""
pressures = np.linspace(1000, 100, 10) * units.hPa
heights = pressure_to_height_std(pressures)
return pressures, heights
@pytest.mark.parametrize('pressure, bound, hgts, interp, expected', [
(get_bounds_data()[0], 900 * units.hPa, None, True,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 900 * units.hPa, None, False,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 870 * units.hPa, None, True,
(870 * units.hPa, 1.2665298 * units.kilometer)),
(get_bounds_data()[0], 870 * units.hPa, None, False,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 0.9880028 * units.kilometer, None, True,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 0.9880028 * units.kilometer, None, False,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 1.2665298 * units.kilometer, None, True,
(870 * units.hPa, 1.2665298 * units.kilometer)),
(get_bounds_data()[0], 1.2665298 * units.kilometer, None, False,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 900 * units.hPa, get_bounds_data()[1], True,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 900 * units.hPa, get_bounds_data()[1], False,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 870 * units.hPa, get_bounds_data()[1], True,
(870 * units.hPa, 1.2643214 * units.kilometer)),
(get_bounds_data()[0], 870 * units.hPa, get_bounds_data()[1], False,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 0.9880028 * units.kilometer, get_bounds_data()[1], True,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 0.9880028 * units.kilometer, get_bounds_data()[1], False,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 1.2665298 * units.kilometer, get_bounds_data()[1], True,
(870.9869087 * units.hPa, 1.2665298 * units.kilometer)),
(get_bounds_data()[0], 1.2665298 * units.kilometer, get_bounds_data()[1], False,
(900 * units.hPa, 0.9880028 * units.kilometer)),
(get_bounds_data()[0], 0.98800289 * units.kilometer, get_bounds_data()[1], True,
(900 * units.hPa, 0.9880028 * units.kilometer))
])
def test_get_bound_pressure_height(pressure, bound, hgts, interp, expected):
"""Test getting bounds in layers with various parameter combinations."""
bounds = _get_bound_pressure_height(pressure, bound, height=hgts, interpolate=interp)
assert_array_almost_equal(bounds[0], expected[0], 4)
assert_array_almost_equal(bounds[1], expected[1], 4)
def test_get_bound_invalid_bound_units():
"""Test that value error is raised with invalid bound units."""
p = np.arange(900, 300, -100) * units.hPa
with pytest.raises(ValueError):
_get_bound_pressure_height(p, 100 * units.degC)
def test_get_bound_pressure_out_of_range():
"""Test when bound is out of data range in pressure."""
p = np.arange(900, 300, -100) * units.hPa
with pytest.raises(ValueError):
_get_bound_pressure_height(p, 100 * units.hPa)
with pytest.raises(ValueError):
_get_bound_pressure_height(p, 1000 * units.hPa)
def test_get_bound_height_out_of_range():
"""Test when bound is out of data range in height."""
p = np.arange(900, 300, -100) * units.hPa
h = np.arange(1, 7) * units.kilometer
with pytest.raises(ValueError):
_get_bound_pressure_height(p, 8 * units.kilometer, height=h)
with pytest.raises(ValueError):
_get_bound_pressure_height(p, 100 * units.meter, height=h)
@pytest.mark.parametrize('flip_order', [(True, False)])
def test_get_layer_float32(flip_order):
"""Test that get_layer works properly with float32 data."""
p = np.asarray([940.85083008, 923.78851318, 911.42022705, 896.07220459,
876.89404297, 781.63330078], np.float32) * units('hPa')
hgt = np.asarray([563.671875, 700.93817139, 806.88098145, 938.51745605,
1105.25854492, 2075.04443359], dtype=np.float32) * units.meter
true_p_layer = np.asarray([940.85083008, 923.78851318, 911.42022705, 896.07220459,
876.89404297, 831.86472819], np.float32) * units('hPa')
true_hgt_layer = np.asarray([563.671875, 700.93817139, 806.88098145, 938.51745605,
1105.25854492, 1549.8079], dtype=np.float32) * units.meter
if flip_order:
p = p[::-1]
hgt = hgt[::-1]
p_layer, hgt_layer = get_layer(p, hgt, height=hgt, depth=1000. * units.meter)
assert_array_almost_equal(p_layer, true_p_layer, 4)
assert_array_almost_equal(hgt_layer, true_hgt_layer, 4)
def test_get_layer_ragged_data():
"""Test that an error is raised for unequal length pressure and data arrays."""
p = np.arange(10) * units.hPa
y = np.arange(9) * units.degC
with pytest.raises(ValueError):
get_layer(p, y)
def test_get_layer_invalid_depth_units():
"""Test that an error is raised when depth has invalid units."""
p = np.arange(10) * units.hPa
y = np.arange(9) * units.degC
with pytest.raises(ValueError):
get_layer(p, y, depth=400 * units.degC)
def layer_test_data():
"""Provide test data for testing of layer bounds."""
pressure = np.arange(1000, 10, -100) * units.hPa
temperature = np.linspace(25, -50, len(pressure)) * units.degC
return pressure, temperature
@pytest.mark.parametrize('pressure, variable, heights, bottom, depth, interp, expected', [
(layer_test_data()[0], layer_test_data()[1], None, None, 150 * units.hPa, True,
(np.array([1000, 900, 850]) * units.hPa,
np.array([25.0, 16.666666, 12.62262]) * units.degC)),
(layer_test_data()[0], layer_test_data()[1], None, None, 150 * units.hPa, False,
(np.array([1000, 900]) * units.hPa, np.array([25.0, 16.666666]) * units.degC)),
(layer_test_data()[0], layer_test_data()[1], None, 2 * units.km, 3 * units.km, True,
(np.array([794.85264282, 700., 600., 540.01696548]) * units.hPa,
np.array([7.93049516, 0., -8.33333333, -13.14758845]) * units.degC))
])
def test_get_layer(pressure, variable, heights, bottom, depth, interp, expected):
"""Test get_layer functionality."""
p_layer, y_layer = get_layer(pressure, variable, height=heights, bottom=bottom,
depth=depth, interpolate=interp)
assert_array_almost_equal(p_layer, expected[0], 4)
assert_array_almost_equal(y_layer, expected[1], 4)
def test_greater_or_close():
"""Test floating point greater or close to."""
x = np.array([0.0, 1.0, 1.49999, 1.5, 1.5000, 1.7])
comparison_value = 1.5
truth = np.array([False, False, True, True, True, True])
res = _greater_or_close(x, comparison_value)
assert_array_equal(res, truth)
def test_greater_or_close_mixed_types():
"""Test _greater_or_close with mixed Quantity and array errors."""
with pytest.raises(ValueError):
_greater_or_close(1000. * units.mbar, 1000.)
with pytest.raises(ValueError):
_greater_or_close(1000., 1000. * units.mbar)
def test_less_or_close():
"""Test floating point less or close to."""
x = np.array([0.0, 1.0, 1.49999, 1.5, 1.5000, 1.7])
comparison_value = 1.5
truth = np.array([True, True, True, True, True, False])
res = _less_or_close(x, comparison_value)
assert_array_equal(res, truth)
def test_less_or_close_mixed_types():
"""Test _less_or_close with mixed Quantity and array errors."""
with pytest.raises(ValueError):
_less_or_close(1000. * units.mbar, 1000.)
with pytest.raises(ValueError):
_less_or_close(1000., 1000. * units.mbar)
def test_get_layer_heights_interpolation():
"""Test get_layer_heights with interpolation."""
heights = np.arange(10) * units.km
data = heights.m * 2 * units.degC
heights, data = get_layer_heights(heights, 5000 * units.m, data, bottom=1500 * units.m)
heights_true = np.array([1.5, 2, 3, 4, 5, 6, 6.5]) * units.km
data_true = heights_true.m * 2 * units.degC
assert_array_almost_equal(heights_true, heights, 6)
assert_array_almost_equal(data_true, data, 6)
def test_get_layer_heights_no_interpolation():
"""Test get_layer_heights without interpolation."""
heights = np.arange(10) * units.km
data = heights.m * 2 * units.degC
heights, data = get_layer_heights(heights, 5000 * units.m, data,
bottom=1500 * units.m, interpolate=False)
heights_true = np.array([2, 3, 4, 5, 6]) * units.km
data_true = heights_true.m * 2 * units.degC
assert_array_almost_equal(heights_true, heights, 6)
assert_array_almost_equal(data_true, data, 6)
def test_get_layer_heights_agl():
"""Test get_layer_heights with interpolation."""
heights = np.arange(300, 1200, 100) * units.m
data = heights.m * 0.1 * units.degC
heights, data = get_layer_heights(heights, 500 * units.m, data, with_agl=True)
heights_true = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5]) * units.km
data_true = np.array([30, 40, 50, 60, 70, 80]) * units.degC
assert_array_almost_equal(heights_true, heights, 6)
assert_array_almost_equal(data_true, data, 6)
def test_get_layer_heights_agl_bottom_no_interp():
"""Test get_layer_heights with no interpolation and a bottom."""
heights_init = np.arange(300, 1200, 100) * units.m
data = heights_init.m * 0.1 * units.degC
heights, data = get_layer_heights(heights_init, 500 * units.m, data, with_agl=True,
interpolate=False, bottom=200 * units.m)
# Regression test for #789
assert_array_equal(heights_init[0], 300 * units.m)
heights_true = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7]) * units.km
data_true = np.array([50, 60, 70, 80, 90, 100]) * units.degC
assert_array_almost_equal(heights_true, heights, 6)
assert_array_almost_equal(data_true, data, 6)
def test_lat_lon_grid_deltas_1d():
"""Test for lat_lon_grid_deltas for variable grid."""
lat = np.arange(40, 50, 2.5)
lon = np.arange(-100, -90, 2.5)
dx, dy = lat_lon_grid_deltas(lon, lat)
dx_truth = np.array([[212943.5585, 212943.5585, 212943.5585],
[204946.2305, 204946.2305, 204946.2305],
[196558.8269, 196558.8269, 196558.8269],
[187797.3216, 187797.3216, 187797.3216]]) * units.meter
dy_truth = np.array([[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857]]) * units.meter
assert_almost_equal(dx, dx_truth, 4)
assert_almost_equal(dy, dy_truth, 4)
@pytest.mark.parametrize('flip_order', [(False, True)])
def test_lat_lon_grid_deltas_2d(flip_order):
"""Test for lat_lon_grid_deltas for variable grid with negative delta distances."""
lat = np.arange(40, 50, 2.5)
lon = np.arange(-100, -90, 2.5)
dx_truth = np.array([[212943.5585, 212943.5585, 212943.5585],
[204946.2305, 204946.2305, 204946.2305],
[196558.8269, 196558.8269, 196558.8269],
[187797.3216, 187797.3216, 187797.3216]]) * units.meter
dy_truth = np.array([[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857]]) * units.meter
if flip_order:
lon = lon[::-1]
lat = lat[::-1]
dx_truth = -1 * dx_truth[::-1]
dy_truth = -1 * dy_truth[::-1]
lon, lat = np.meshgrid(lon, lat)
dx, dy = lat_lon_grid_deltas(lon, lat)
assert_almost_equal(dx, dx_truth, 4)
assert_almost_equal(dy, dy_truth, 4)
def test_lat_lon_grid_deltas_extra_dimensions():
"""Test for lat_lon_grid_deltas with extra leading dimensions."""
lon, lat = np.meshgrid(np.arange(-100, -90, 2.5), np.arange(40, 50, 2.5))
lat = lat[None, None]
lon = lon[None, None]
dx_truth = np.array([[[[212943.5585, 212943.5585, 212943.5585],
[204946.2305, 204946.2305, 204946.2305],
[196558.8269, 196558.8269, 196558.8269],
[187797.3216, 187797.3216, 187797.3216]]]]) * units.meter
dy_truth = (np.array([[[[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857],
[277987.1857, 277987.1857, 277987.1857, 277987.1857]]]])
* units.meter)
dx, dy = lat_lon_grid_deltas(lon, lat)
assert_almost_equal(dx, dx_truth, 4)
assert_almost_equal(dy, dy_truth, 4)
def test_lat_lon_grid_deltas_mismatched_shape():
"""Test for lat_lon_grid_deltas for variable grid."""
lat = np.arange(40, 50, 2.5)
lon = np.array([[-100., -97.5, -95., -92.5],
[-100., -97.5, -95., -92.5],
[-100., -97.5, -95., -92.5],
[-100., -97.5, -95., -92.5]])
with pytest.raises(ValueError):
lat_lon_grid_deltas(lon, lat)
@pytest.fixture()
def deriv_1d_data():
"""Return 1-dimensional data for testing derivative functions."""
return namedtuple('D_1D_Test_Data', 'x values')(np.array([0, 1.25, 3.75]) * units.cm,
np.array([13.5, 12, 10]) * units.degC)
@pytest.fixture()
def deriv_2d_data():
"""Return 2-dimensional data for analytic function for testing derivative functions."""
ret = namedtuple('D_2D_Test_Data', 'x y x0 y0 a b f')(
np.array([0., 2., 7.]), np.array([1., 5., 11., 13.]), 3, 1.5, 0.5, 0.25, 0)
# Makes a value array with y changing along rows (axis 0) and x along columns (axis 1)
return ret._replace(f=ret.a * (ret.x - ret.x0)**2 + ret.b * (ret.y[:, None] - ret.y0)**2)
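# Editorial sketch (not in the original suite): the fixture above encodes the analytic
# surface f(x, y) = a*(x - x0)**2 + b*(y - y0)**2, so the derivative tests below compare
# against df/dx = 2*a*(x - x0), df/dy = 2*b*(y - y0), d2f/dx2 = 2*a, d2f/dy2 = 2*b and
# laplacian(f) = 2*(a + b). A self-contained helper reproducing those reference arrays:
def _deriv_2d_analytic_reference():
    x, y = np.array([0., 2., 7.]), np.array([1., 5., 11., 13.])
    x0, y0, a, b = 3, 1.5, 0.5, 0.25
    df_dx = np.tile(2 * a * (x - x0), (y.size, 1))    # varies along x (axis 1)
    df_dy = np.tile(2 * b * (y - y0), (x.size, 1)).T  # varies along y (axis 0)
    return df_dx, df_dy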
@pytest.fixture()
def deriv_4d_data():
"""Return simple 4-dimensional data for testing axis handling of derivative functions."""
return np.arange(3 * 3 * 4 * 4).reshape((3, 3, 4, 4))
def test_first_derivative(deriv_1d_data):
"""Test first_derivative with a simple 1D array."""
dv_dx = first_derivative(deriv_1d_data.values, x=deriv_1d_data.x)
# Worked by hand and taken from Chapra and Canale 23.2
truth = np.array([-1.333333, -1.06666667, -0.5333333]) * units('delta_degC / cm')
assert_array_almost_equal(dv_dx, truth, 5)
def test_first_derivative_2d(deriv_2d_data):
"""Test first_derivative with a full 2D array."""
df_dx = first_derivative(deriv_2d_data.f, x=deriv_2d_data.x, axis=1)
df_dx_analytic = np.tile(2 * deriv_2d_data.a * (deriv_2d_data.x - deriv_2d_data.x0),
(deriv_2d_data.f.shape[0], 1))
assert_array_almost_equal(df_dx, df_dx_analytic, 5)
df_dy = first_derivative(deriv_2d_data.f, x=deriv_2d_data.y, axis=0)
# Repeat each row, then flip to get variation along rows
df_dy_analytic = np.tile(2 * deriv_2d_data.b * (deriv_2d_data.y - deriv_2d_data.y0),
(deriv_2d_data.f.shape[1], 1)).T
assert_array_almost_equal(df_dy, df_dy_analytic, 5)
def test_first_derivative_too_small(deriv_1d_data):
"""Test first_derivative with too small an array."""
with pytest.raises(ValueError):
first_derivative(deriv_1d_data.values[None, :].T, x=deriv_1d_data.x, axis=1)
def test_first_derivative_scalar_delta():
"""Test first_derivative with a scalar passed for a delta."""
df_dx = first_derivative(np.arange(3), delta=1)
assert_array_almost_equal(df_dx, np.array([1., 1., 1.]), 6)
def test_first_derivative_masked():
"""Test that first_derivative properly propagates masks."""
data = np.ma.arange(7)
data[3] = np.ma.masked
df_dx = first_derivative(data, delta=1)
truth = np.ma.array([1., 1., 1., 1., 1., 1., 1.],
mask=[False, False, True, True, True, False, False])
assert_array_almost_equal(df_dx, truth)
assert_array_equal(df_dx.mask, truth.mask)
def test_second_derivative(deriv_1d_data):
"""Test second_derivative with a simple 1D array."""
d2v_dx2 = second_derivative(deriv_1d_data.values, x=deriv_1d_data.x)
# Worked by hand
truth = np.ones_like(deriv_1d_data.values) * 0.2133333 * units('delta_degC/cm**2')
assert_array_almost_equal(d2v_dx2, truth, 5)
def test_second_derivative_2d(deriv_2d_data):
"""Test second_derivative with a full 2D array."""
df2_dx2 = second_derivative(deriv_2d_data.f, x=deriv_2d_data.x, axis=1)
assert_array_almost_equal(df2_dx2,
np.ones_like(deriv_2d_data.f) * (2 * deriv_2d_data.a), 5)
df2_dy2 = second_derivative(deriv_2d_data.f, x=deriv_2d_data.y, axis=0)
assert_array_almost_equal(df2_dy2,
np.ones_like(deriv_2d_data.f) * (2 * deriv_2d_data.b), 5)
def test_second_derivative_too_small(deriv_1d_data):
"""Test second_derivative with too small an array."""
with pytest.raises(ValueError):
second_derivative(deriv_1d_data.values[None, :].T, x=deriv_1d_data.x, axis=1)
def test_second_derivative_scalar_delta():
"""Test second_derivative with a scalar passed for a delta."""
df_dx = second_derivative(np.arange(3), delta=1)
assert_array_almost_equal(df_dx, np.array([0., 0., 0.]), 6)
def test_laplacian(deriv_1d_data):
"""Test laplacian with simple 1D data."""
laplac = laplacian(deriv_1d_data.values, coordinates=(deriv_1d_data.x,))
# Worked by hand
truth = np.ones_like(deriv_1d_data.values) * 0.2133333 * units('delta_degC/cm**2')
assert_array_almost_equal(laplac, truth, 5)
def test_laplacian_2d(deriv_2d_data):
"""Test lapacian with full 2D arrays."""
laplac_true = 2 * (np.ones_like(deriv_2d_data.f) * (deriv_2d_data.a + deriv_2d_data.b))
laplac = laplacian(deriv_2d_data.f, coordinates=(deriv_2d_data.y, deriv_2d_data.x))
assert_array_almost_equal(laplac, laplac_true, 5)
def test_parse_angle_abbreviated():
    """Test abbreviated directional text in degrees."""
expected_angles_degrees = FULL_CIRCLE_DEGREES
output_angles_degrees = parse_angle(DIR_STRS[:-1])
assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_ext():
"""Test extended (unabbrieviated) directional text in degrees."""
test_dir_strs = ['NORTH', 'NORTHnorthEast', 'North_East', 'East__North_East',
'easT', 'east south east', 'south east', ' south southeast',
'SOUTH', 'SOUTH SOUTH WEST', 'southWEST', 'WEST south_WEST',
'WeSt', 'WestNorth West', 'North West', 'NORTH north_WeSt']
expected_angles_degrees = np.arange(0, 360, 22.5) * units.degree
output_angles_degrees = parse_angle(test_dir_strs)
assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_mix_multiple():
"""Test list of extended (unabbrieviated) directional text in degrees in one go."""
test_dir_strs = ['NORTH', 'nne', 'ne', 'east north east',
'easT', 'east se', 'south east', ' south southeast',
'SOUTH', 'SOUTH SOUTH WEST', 'sw', 'WEST south_WEST',
'w', 'wnw', 'North West', 'nnw']
expected_angles_degrees = FULL_CIRCLE_DEGREES
output_angles_degrees = parse_angle(test_dir_strs)
assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_none():
"""Test list of extended (unabbrieviated) directional text in degrees in one go."""
test_dir_strs = None
expected_angles_degrees = np.nan
output_angles_degrees = parse_angle(test_dir_strs)
assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_invalid_number():
"""Test list of extended (unabbrieviated) directional text in degrees in one go."""
test_dir_strs = 365.
expected_angles_degrees = np.nan
output_angles_degrees = parse_angle(test_dir_strs)
assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_invalid_arr():
"""Test list of extended (unabbrieviated) directional text in degrees in one go."""
test_dir_strs = ['nan', None, np.nan, 35, 35.5, 'north', 'andrewiscool']
expected_angles_degrees = [np.nan, np.nan, np.nan, np.nan, np.nan, 0, np.nan]
output_angles_degrees = parse_angle(test_dir_strs)
assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angle_mix_multiple_arr():
"""Test list of extended (unabbrieviated) directional text in degrees in one go."""
test_dir_strs = np.array(['NORTH', 'nne', 'ne', 'east north east',
'easT', 'east se', 'south east', ' south southeast',
'SOUTH', 'SOUTH SOUTH WEST', 'sw', 'WEST south_WEST',
'w', 'wnw', 'North West', 'nnw'])
expected_angles_degrees = FULL_CIRCLE_DEGREES
output_angles_degrees = parse_angle(test_dir_strs)
assert_array_almost_equal(output_angles_degrees, expected_angles_degrees)
def test_parse_angles_array():
"""Test array of angles to parse."""
angles = np.array(['N', 'S', 'E', 'W'])
expected_angles = np.array([0, 180, 90, 270]) * units.degree
calculated_angles = parse_angle(angles)
assert_array_almost_equal(calculated_angles, expected_angles)
def test_parse_angles_series():
"""Test pandas.Series of angles to parse."""
angles = pd.Series(['N', 'S', 'E', 'W'])
expected_angles = np.array([0, 180, 90, 270]) * units.degree
calculated_angles = parse_angle(angles)
assert_array_almost_equal(calculated_angles, expected_angles)
def test_parse_angles_single():
"""Test single input into `parse_angles`."""
calculated_angle = parse_angle('SOUTH SOUTH EAST')
expected_angle = 157.5 * units.degree
assert_almost_equal(calculated_angle, expected_angle)
def test_gradient_2d(deriv_2d_data):
"""Test gradient with 2D arrays."""
res = gradient(deriv_2d_data.f, coordinates=(deriv_2d_data.y, deriv_2d_data.x))
truth = (np.array([[-0.25, -0.25, -0.25],
[1.75, 1.75, 1.75],
[4.75, 4.75, 4.75],
[5.75, 5.75, 5.75]]),
np.array([[-3, -1, 4],
[-3, -1, 4],
[-3, -1, 4],
[-3, -1, 4]]))
assert_array_almost_equal(res, truth, 5)
def test_gradient_4d(deriv_4d_data):
"""Test gradient with 4D arrays."""
res = gradient(deriv_4d_data, deltas=(1, 1, 1, 1))
truth = tuple(factor * np.ones_like(deriv_4d_data) for factor in (48., 16., 4., 1.))
assert_array_almost_equal(res, truth, 8)
def test_gradient_restricted_axes(deriv_2d_data):
"""Test 2D gradient with 3D arrays and manual specification of axes."""
res = gradient(deriv_2d_data.f[..., None], coordinates=(deriv_2d_data.y, deriv_2d_data.x),
axes=(0, 1))
truth = (np.array([[[-0.25], [-0.25], [-0.25]],
[[1.75], [1.75], [1.75]],
[[4.75], [4.75], [4.75]],
[[5.75], [5.75], [5.75]]]),
np.array([[[-3], [-1], [4]],
[[-3], [-1], [4]],
[[-3], [-1], [4]],
[[-3], [-1], [4]]]))
assert_array_almost_equal(res, truth, 5)
def test_bounding_indices():
"""Test finding bounding indices."""
data = np.array([[1, 2, 3, 1], [5, 6, 7, 8]])
above, below, good = find_bounding_indices(data, [1.5, 7], axis=1, from_below=True)
assert_array_equal(above[1], np.array([[1, 0], [0, 3]]))
assert_array_equal(below[1], np.array([[0, -1], [-1, 2]]))
assert_array_equal(good, np.array([[True, False], [False, True]]))
def test_bounding_indices_above():
"""Test finding bounding indices from above."""
data = np.array([[1, 2, 3, 1], [5, 6, 7, 8]])
above, below, good = find_bounding_indices(data, [1.5, 7], axis=1, from_below=False)
assert_array_equal(above[1], np.array([[3, 0], [0, 3]]))
assert_array_equal(below[1], np.array([[2, -1], [-1, 2]]))
assert_array_equal(good, np.array([[True, False], [False, True]]))
def test_angle_to_direction():
"""Test single angle in degree."""
expected_dirs = DIR_STRS[:-1] # UND at -1
output_dirs = [angle_to_direction(angle) for angle in FULL_CIRCLE_DEGREES]
assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_edge():
"""Test single angle edge case (360 and no units) in degree."""
expected_dirs = 'N'
output_dirs = angle_to_direction(360)
assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_list():
"""Test list of angles in degree."""
expected_dirs = DIR_STRS[:-1]
output_dirs = list(angle_to_direction(FULL_CIRCLE_DEGREES))
assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_arr():
"""Test array of angles in degree."""
expected_dirs = DIR_STRS[:-1]
output_dirs = angle_to_direction(FULL_CIRCLE_DEGREES)
assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_full():
"""Test the `full` keyword argument, expecting unabbrieviated output."""
expected_dirs = [
'North', 'North North East', 'North East', 'East North East',
'East', 'East South East', 'South East', 'South South East',
'South', 'South South West', 'South West', 'West South West',
'West', 'West North West', 'North West', 'North North West'
]
output_dirs = angle_to_direction(FULL_CIRCLE_DEGREES, full=True)
assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_invalid_scalar():
"""Test invalid angle."""
expected_dirs = UND
output_dirs = angle_to_direction(None)
assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_invalid_arr():
"""Test array of invalid angles."""
expected_dirs = ['NE', UND, UND, UND, 'N']
output_dirs = angle_to_direction(['46', None, np.nan, None, '362.'])
assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_level_4():
"""Test non-existent level of complexity."""
with pytest.raises(ValueError) as exc:
angle_to_direction(FULL_CIRCLE_DEGREES, level=4)
assert 'cannot be less than 1 or greater than 3' in str(exc.value)
def test_angle_to_direction_level_3():
"""Test array of angles in degree."""
expected_dirs = DIR_STRS[:-1] # UND at -1
output_dirs = angle_to_direction(FULL_CIRCLE_DEGREES, level=3)
assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_level_2():
"""Test array of angles in degree."""
expected_dirs = [
'N', 'N', 'NE', 'NE', 'E', 'E', 'SE', 'SE',
'S', 'S', 'SW', 'SW', 'W', 'W', 'NW', 'NW'
]
output_dirs = angle_to_direction(FULL_CIRCLE_DEGREES, level=2)
assert_array_equal(output_dirs, expected_dirs)
def test_angle_to_direction_level_1():
"""Test array of angles in degree."""
expected_dirs = [
'N', 'N', 'N', 'E', 'E', 'E', 'E', 'S', 'S', 'S', 'S',
'W', 'W', 'W', 'W', 'N']
output_dirs = angle_to_direction(FULL_CIRCLE_DEGREES, level=1)
assert_array_equal(output_dirs, expected_dirs)
def test_3d_gradient_3d_data_no_axes(deriv_4d_data):
"""Test 3D gradient with 3D data and no axes parameter."""
test = deriv_4d_data[0]
res = gradient(test, deltas=(1, 1, 1))
truth = tuple(factor * np.ones_like(test) for factor in (16., 4., 1.))
assert_array_almost_equal(res, truth, 8)
def test_2d_gradient_3d_data_no_axes(deriv_4d_data):
"""Test for failure of 2D gradient with 3D data and no axes parameter."""
test = deriv_4d_data[0]
with pytest.raises(ValueError) as exc:
gradient(test, deltas=(1, 1))
assert 'must match the number of dimensions' in str(exc.value)
def test_3d_gradient_2d_data_no_axes(deriv_4d_data):
"""Test for failure of 3D gradient with 2D data and no axes parameter."""
test = deriv_4d_data[0, 0]
with pytest.raises(ValueError) as exc:
gradient(test, deltas=(1, 1, 1))
assert 'must match the number of dimensions' in str(exc.value)
def test_2d_gradient_4d_data_2_axes_3_deltas(deriv_4d_data):
"""Test 2D gradient of 4D data with 2 axes and 3 deltas."""
res = gradient(deriv_4d_data, deltas=(1, 1, 1), axes=(-2, -1))
truth = tuple(factor * np.ones_like(deriv_4d_data) for factor in (4., 1.))
assert_array_almost_equal(res, truth, 8)
def test_2d_gradient_4d_data_2_axes_2_deltas(deriv_4d_data):
"""Test 2D gradient of 4D data with 2 axes and 2 deltas."""
res = gradient(deriv_4d_data, deltas=(1, 1), axes=(0, 1))
truth = tuple(factor * np.ones_like(deriv_4d_data) for factor in (48., 16.))
assert_array_almost_equal(res, truth, 8)
def test_2d_gradient_4d_data_2_axes_1_deltas(deriv_4d_data):
"""Test for failure of 2D gradient of 4D data with 2 axes and 1 deltas."""
with pytest.raises(ValueError) as exc:
gradient(deriv_4d_data, deltas=(1, ), axes=(1, 2))
assert 'cannot be less than that of "axes"' in str(exc.value)
@pytest.fixture()
def test_da_lonlat():
"""Return a DataArray with a lon/lat grid and no time coordinate for use in tests."""
data = np.linspace(300, 250, 3 * 4 * 4).reshape((3, 4, 4))
ds = xr.Dataset(
{'temperature': (['isobaric', 'lat', 'lon'], data)},
coords={
'isobaric': xr.DataArray(
np.array([850., 700., 500.]),
name='isobaric',
dims=['isobaric'],
attrs={'units': 'hPa'}
),
'lat': xr.DataArray(
np.linspace(30, 40, 4),
name='lat',
dims=['lat'],
attrs={'units': 'degrees_north'}
),
'lon': xr.DataArray(
np.linspace(260, 270, 4),
name='lon',
dims=['lon'],
attrs={'units': 'degrees_east'}
)
}
)
ds['temperature'].attrs['units'] = 'kelvin'
return ds.metpy.parse_cf('temperature')
@pytest.fixture()
def test_da_xy():
"""Return a DataArray with a x/y grid and a time coordinate for use in tests."""
data = np.linspace(300, 250, 3 * 3 * 4 * 4).reshape((3, 3, 4, 4))
ds = xr.Dataset(
{'temperature': (['time', 'isobaric', 'y', 'x'], data),
'lambert_conformal': ([], '')},
coords={
'time': xr.DataArray(
np.array([np.datetime64('2018-07-01T00:00'),
np.datetime64('2018-07-01T06:00'),
np.datetime64('2018-07-01T12:00')]),
name='time',
dims=['time']
),
'isobaric': xr.DataArray(
np.array([850., 700., 500.]),
name='isobaric',
dims=['isobaric'],
attrs={'units': 'hPa'}
),
'y': xr.DataArray(
np.linspace(-1000, 500, 4),
name='y',
dims=['y'],
attrs={'units': 'km'}
),
'x': xr.DataArray(
np.linspace(0, 1500, 4),
name='x',
dims=['x'],
attrs={'units': 'km'}
)
}
)
ds['temperature'].attrs = {
'units': 'kelvin',
'grid_mapping': 'lambert_conformal'
}
ds['lambert_conformal'].attrs = {
'grid_mapping_name': 'lambert_conformal_conic',
'standard_parallel': 50.0,
'longitude_of_central_meridian': -107.0,
'latitude_of_projection_origin': 50.0,
'earth_shape': 'spherical',
'earth_radius': 6367470.21484375
}
return ds.metpy.parse_cf('temperature')
def test_grid_deltas_from_dataarray_lonlat(test_da_lonlat):
"""Test grid_deltas_from_dataarray with a lonlat grid."""
dx, dy = grid_deltas_from_dataarray(test_da_lonlat)
true_dx = np.array([[[321609.59212064, 321609.59212065, 321609.59212064],
[310320.85961483, 310320.85961483, 310320.85961483],
[297980.72966733, 297980.72966733, 297980.72966733],
[284629.6008561, 284629.6008561, 284629.6008561]]]) * units.m
true_dy = np.array([[[369603.78775948, 369603.78775948, 369603.78775948, 369603.78775948],
[369802.28173967, 369802.28173967, 369802.28173967, 369802.28173967],
[370009.56291098, 370009.56291098, 370009.56291098,
370009.56291098]]]) * units.m
assert_array_almost_equal(dx, true_dx, 5)
assert_array_almost_equal(dy, true_dy, 5)
def test_grid_deltas_from_dataarray_xy(test_da_xy):
"""Test grid_deltas_from_dataarray with a xy grid."""
dx, dy = grid_deltas_from_dataarray(test_da_xy)
true_dx = np.array([[[[500] * 3]]]) * units('km')
true_dy = np.array([[[[500]] * 3]]) * units('km')
assert_array_almost_equal(dx, true_dx, 5)
assert_array_almost_equal(dy, true_dy, 5)
def test_grid_deltas_from_dataarray_actual_xy(test_da_xy):
"""Test grid_deltas_from_dataarray with a xy grid and kind='actual'."""
# Construct lon/lat coordinates
y, x = xr.broadcast(*test_da_xy.metpy.coordinates('y', 'x'))
lonlat = (ccrs.Geodetic(test_da_xy.metpy.cartopy_globe)
.transform_points(test_da_xy.metpy.cartopy_crs, x.values, y.values))
lon = lonlat[..., 0]
lat = lonlat[..., 1]
test_da_xy = test_da_xy.assign_coords(
longitude=xr.DataArray(lon, dims=('y', 'x'), attrs={'units': 'degrees_east'}),
latitude=xr.DataArray(lat, dims=('y', 'x'), attrs={'units': 'degrees_north'}))
# Actually test calculation
dx, dy = grid_deltas_from_dataarray(test_da_xy, kind='actual')
true_dx = [[[[494426.3249766, 493977.6028005, 493044.0656467],
[498740.2046073, 498474.9771064, 497891.6588559],
[500276.2649627, 500256.3440237, 500139.9484845],
[498740.6956936, 499045.0391707, 499542.7244501]]]] * units.m
true_dy = [[[[496862.4106337, 496685.4729999, 496132.0732114, 495137.8882404],
[499774.9676486, 499706.3354977, 499467.5546773, 498965.2587818],
[499750.8962991, 499826.2263137, 500004.4977747, 500150.9897759]]]] * units.m
assert_array_almost_equal(dx, true_dx, 3)
assert_array_almost_equal(dy, true_dy, 3)
def test_grid_deltas_from_dataarray_nominal_lonlat(test_da_lonlat):
"""Test grid_deltas_from_dataarray with a lonlat grid and kind='nominal'."""
dx, dy = grid_deltas_from_dataarray(test_da_lonlat, kind='nominal')
true_dx = [[[3.333333] * 3]] * units.degrees
true_dy = [[[3.333333]] * 3] * units.degrees
assert_array_almost_equal(dx, true_dx, 5)
assert_array_almost_equal(dy, true_dy, 5)
def test_grid_deltas_from_dataarray_lonlat_assumed_order():
"""Test grid_deltas_from_dataarray when dim order must be assumed."""
# Create test dataarray
lat, lon = np.meshgrid(np.array([38., 40., 42]), np.array([263., 265., 267.]))
test_da = xr.DataArray(
np.linspace(300, 250, 3 * 3).reshape((3, 3)),
name='temperature',
dims=('dim_0', 'dim_1'),
coords={
'lat': xr.DataArray(lat, dims=('dim_0', 'dim_1'),
attrs={'units': 'degrees_north'}),
'lon': xr.DataArray(lon, dims=('dim_0', 'dim_1'), attrs={'units': 'degrees_east'})
},
attrs={'units': 'K'}).to_dataset().metpy.parse_cf('temperature')
# Run and check for warning
with pytest.warns(UserWarning, match=r'y and x dimensions unable to be identified.*'):
dx, dy = grid_deltas_from_dataarray(test_da)
# Check results
true_dx = [[222031.0111961, 222107.8492205],
[222031.0111961, 222107.8492205],
[222031.0111961, 222107.8492205]] * units.m
true_dy = [[175661.5413976, 170784.1311091, 165697.7563223],
[175661.5413976, 170784.1311091, 165697.7563223]] * units.m
assert_array_almost_equal(dx, true_dx, 5)
assert_array_almost_equal(dy, true_dy, 5)
def test_grid_deltas_from_dataarray_invalid_kind(test_da_xy):
"""Test grid_deltas_from_dataarray when kind is invalid."""
with pytest.raises(ValueError):
grid_deltas_from_dataarray(test_da_xy, kind='invalid')
def test_first_derivative_xarray_lonlat(test_da_lonlat):
"""Test first derivative with an xarray.DataArray on a lonlat grid in each axis usage."""
deriv = first_derivative(test_da_lonlat, axis='lon') # dimension coordinate name
deriv_alt1 = first_derivative(test_da_lonlat, axis='x') # axis type
deriv_alt2 = first_derivative(test_da_lonlat, axis=-1) # axis number
# Build the xarray of the desired values
partial = xr.DataArray(
np.array([-3.30782978e-06, -3.42816074e-06, -3.57012948e-06, -3.73759364e-06]),
coords=(('lat', test_da_lonlat['lat']),)
)
_, truth = xr.broadcast(test_da_lonlat, partial)
truth.coords['crs'] = test_da_lonlat['crs']
truth.attrs['units'] = 'kelvin / meter'
# Assert result matches expectation
xr.testing.assert_allclose(deriv, truth)
assert deriv.metpy.units == truth.metpy.units
# Assert alternative specifications give same result
xr.testing.assert_identical(deriv_alt1, deriv)
xr.testing.assert_identical(deriv_alt2, deriv)
def test_first_derivative_xarray_time_and_default_axis(test_da_xy):
"""Test first derivative with an xarray.DataArray over time as default first dimension."""
deriv = first_derivative(test_da_xy)
truth = xr.full_like(test_da_xy, -0.000777000777)
truth.attrs['units'] = 'kelvin / second'
xr.testing.assert_allclose(deriv, truth)
assert deriv.metpy.units == truth.metpy.units
def test_first_derivative_xarray_time_subsecond_precision():
"""Test time derivative with an xarray.DataArray where subsecond precision is needed."""
test_da = xr.DataArray([299.5, 300, 300.5],
dims='time',
coords={'time': np.array(['2019-01-01T00:00:00.0',
'2019-01-01T00:00:00.1',
'2019-01-01T00:00:00.2'],
dtype='datetime64[ms]')},
attrs={'units': 'kelvin'})
deriv = first_derivative(test_da)
truth = xr.full_like(test_da, 5.)
truth.attrs['units'] = 'kelvin / second'
xr.testing.assert_allclose(deriv, truth)
assert deriv.metpy.units == truth.metpy.units
def test_second_derivative_xarray_lonlat(test_da_lonlat):
"""Test second derivative with an xarray.DataArray on a lonlat grid."""
deriv = second_derivative(test_da_lonlat, axis='lat')
# Build the xarray of the desired values
partial = xr.DataArray(
np.array([1.67155420e-14, 1.67155420e-14, 1.74268211e-14, 1.74268211e-14]),
coords=(('lat', test_da_lonlat['lat']),)
)
_, truth = xr.broadcast(test_da_lonlat, partial)
truth.coords['crs'] = test_da_lonlat['crs']
truth.attrs['units'] = 'kelvin / meter^2'
xr.testing.assert_allclose(deriv, truth)
assert deriv.metpy.units == truth.metpy.units
def test_gradient_xarray(test_da_xy):
"""Test the 3D gradient calculation with a 4D DataArray in each axis usage."""
deriv_x, deriv_y, deriv_p = gradient(test_da_xy, axes=('x', 'y', 'isobaric'))
deriv_x_alt1, deriv_y_alt1, deriv_p_alt1 = gradient(test_da_xy,
axes=('x', 'y', 'vertical'))
deriv_x_alt2, deriv_y_alt2, deriv_p_alt2 = gradient(test_da_xy, axes=(3, 2, 1))
truth_x = xr.full_like(test_da_xy, -6.993007e-07)
truth_x.attrs['units'] = 'kelvin / meter'
truth_y = xr.full_like(test_da_xy, -2.797203e-06)
truth_y.attrs['units'] = 'kelvin / meter'
partial = xr.DataArray(
np.array([0.04129204, 0.03330003, 0.02264402]),
coords=(('isobaric', test_da_xy['isobaric']),)
)
_, truth_p = xr.broadcast(test_da_xy, partial)
truth_p.coords['crs'] = test_da_xy['crs']
truth_p.attrs['units'] = 'kelvin / hectopascal'
# Assert results match expectations
xr.testing.assert_allclose(deriv_x, truth_x)
assert deriv_x.metpy.units == truth_x.metpy.units
xr.testing.assert_allclose(deriv_y, truth_y)
assert deriv_y.metpy.units == truth_y.metpy.units
xr.testing.assert_allclose(deriv_p, truth_p)
assert deriv_p.metpy.units == truth_p.metpy.units
# Assert alternative specifications give same results (up to attribute differences)
xr.testing.assert_equal(deriv_x_alt1, deriv_x)
xr.testing.assert_equal(deriv_y_alt1, deriv_y)
xr.testing.assert_equal(deriv_p_alt1, deriv_p)
xr.testing.assert_equal(deriv_x_alt2, deriv_x)
xr.testing.assert_equal(deriv_y_alt2, deriv_y)
xr.testing.assert_equal(deriv_p_alt2, deriv_p)
def test_gradient_xarray_implicit_axes(test_da_xy):
"""Test the 2D gradient calculation with a 2D DataArray and no axes specified."""
data = test_da_xy.isel(time=0, isobaric=2)
deriv_y, deriv_x = gradient(data)
truth_x = xr.full_like(data, -6.993007e-07)
truth_x.attrs['units'] = 'kelvin / meter'
truth_y = xr.full_like(data, -2.797203e-06)
truth_y.attrs['units'] = 'kelvin / meter'
xr.testing.assert_allclose(deriv_x, truth_x)
assert deriv_x.metpy.units == truth_x.metpy.units
xr.testing.assert_allclose(deriv_y, truth_y)
assert deriv_y.metpy.units == truth_y.metpy.units
def test_gradient_xarray_implicit_axes_transposed(test_da_lonlat):
"""Test the 2D gradient with no axes specified but in x/y order."""
test_da = test_da_lonlat.isel(isobaric=0).transpose('lon', 'lat')
deriv_x, deriv_y = gradient(test_da)
truth_x = xr.DataArray(
np.array([[-3.30782978e-06, -3.42816074e-06, -3.57012948e-06, -3.73759364e-06],
[-3.30782978e-06, -3.42816074e-06, -3.57012948e-06, -3.73759364e-06],
[-3.30782978e-06, -3.42816074e-06, -3.57012948e-06, -3.73759364e-06],
[-3.30782978e-06, -3.42816074e-06, -3.57012948e-06, -3.73759364e-06]]),
dims=test_da.dims,
coords=test_da.coords,
attrs={'units': 'kelvin / meter'})
truth_y = xr.DataArray(
np.array([[-1.15162805e-05, -1.15101023e-05, -1.15037894e-05, -1.14973413e-05],
[-1.15162805e-05, -1.15101023e-05, -1.15037894e-05, -1.14973413e-05],
[-1.15162805e-05, -1.15101023e-05, -1.15037894e-05, -1.14973413e-05],
[-1.15162805e-05, -1.15101023e-05, -1.15037894e-05, -1.14973413e-05]]),
dims=test_da.dims,
coords=test_da.coords,
attrs={'units': 'kelvin / meter'})
xr.testing.assert_allclose(deriv_x, truth_x)
assert deriv_x.metpy.units == truth_x.metpy.units
xr.testing.assert_allclose(deriv_y, truth_y)
assert deriv_y.metpy.units == truth_y.metpy.units
def test_laplacian_xarray_lonlat(test_da_lonlat):
"""Test laplacian with an xarray.DataArray on a lonlat grid."""
laplac = laplacian(test_da_lonlat, axes=('lat', 'lon'))
# Build the xarray of the desired values
partial = xr.DataArray(
np.array([1.67155420e-14, 1.67155420e-14, 1.74268211e-14, 1.74268211e-14]),
coords=(('lat', test_da_lonlat['lat']),)
)
_, truth = xr.broadcast(test_da_lonlat, partial)
truth.coords['crs'] = test_da_lonlat['crs']
truth.attrs['units'] = 'kelvin / meter^2'
xr.testing.assert_allclose(laplac, truth)
assert laplac.metpy.units == truth.metpy.units
def test_first_derivative_xarray_pint_conversion(test_da_lonlat):
"""Test first derivative with implicit xarray to pint quantity conversion."""
dx, _ = grid_deltas_from_dataarray(test_da_lonlat)
deriv = first_derivative(test_da_lonlat, delta=dx, axis=-1)
truth = np.array([[[-3.30782978e-06] * 4, [-3.42816074e-06] * 4, [-3.57012948e-06] * 4,
[-3.73759364e-06] * 4]] * 3) * units('kelvin / meter')
assert_array_almost_equal(deriv, truth, 12)
def test_gradient_xarray_pint_conversion(test_da_xy):
"""Test the 2D gradient calculation with a 2D DataArray and implicit pint conversion."""
data = test_da_xy.isel(time=0, isobaric=2)
deriv_y, deriv_x = gradient(data, coordinates=(data.metpy.y, data.metpy.x))
truth_x = np.ones_like(data) * -6.993007e-07 * units('kelvin / meter')
truth_y = np.ones_like(data) * -2.797203e-06 * units('kelvin / meter')
assert_array_almost_equal(deriv_x, truth_x, 12)
assert_array_almost_equal(deriv_y, truth_y, 12)
def test_remove_nans():
"""Test removal of NaNs."""
x = np.array([3, 2, np.nan, 5, 6, np.nan])
y = np.arange(0, len(x))
y_test, x_test = _remove_nans(y, x)
x_expected = np.array([3, 2, 5, 6])
y_expected = np.array([0, 1, 3, 4])
assert_array_almost_equal(x_expected, x_test, 0)
assert_almost_equal(y_expected, y_test, 0)
@pytest.mark.parametrize('test, other, match_unit, expected', [
(np.arange(4), np.arange(4), False, np.arange(4) * units('dimensionless')),
(np.arange(4), np.arange(4), True, np.arange(4) * units('dimensionless')),
(np.arange(4), [0] * units.m, False, np.arange(4) * units('dimensionless')),
(np.arange(4), [0] * units.m, True, np.arange(4) * units.m),
(
np.arange(4),
xr.DataArray(
np.zeros(4),
dims=('x',),
coords={'x': np.linspace(0, 1, 4)},
attrs={'units': 'meter', 'description': 'Just some zeros'}
),
False,
xr.DataArray(
np.arange(4),
dims=('x',),
coords={'x': np.linspace(0, 1, 4)},
attrs={'units': ''}
)
),
(
np.arange(4),
xr.DataArray(
np.zeros(4),
dims=('x',),
coords={'x': np.linspace(0, 1, 4)},
attrs={'units': 'meter', 'description': 'Just some zeros'}
),
True,
xr.DataArray(
np.arange(4),
dims=('x',),
coords={'x': np.linspace(0, 1, 4)},
attrs={'units': 'meter'}
)
),
([2, 4, 8] * units.kg, [0] * units.m, False, [2, 4, 8] * units.kg),
([2, 4, 8] * units.kg, [0] * units.g, True, [2000, 4000, 8000] * units.g),
(
[2, 4, 8] * units.kg,
xr.DataArray(
np.zeros(3),
dims=('x',),
coords={'x': np.linspace(0, 1, 3)},
attrs={'units': 'meter'}
),
False,
xr.DataArray(
[2, 4, 8],
dims=('x',),
coords={'x': np.linspace(0, 1, 3)},
attrs={'units': 'kilogram'}
)
),
(
[2, 4, 8] * units.kg,
xr.DataArray(
np.zeros(3),
dims=('x',),
coords={'x': np.linspace(0, 1, 3)},
attrs={'units': 'gram'}
),
True,
xr.DataArray(
[2000, 4000, 8000],
dims=('x',),
coords={'x': np.linspace(0, 1, 3)},
attrs={'units': 'gram'}
)
),
(
xr.DataArray(
np.linspace(0, 1, 5),
attrs={'units': 'meter', 'description': 'A range of values'}
),
np.arange(4, dtype=np.float64),
False,
units.Quantity(np.linspace(0, 1, 5), 'meter')
),
(
xr.DataArray(
np.linspace(0, 1, 5),
attrs={'units': 'meter', 'description': 'A range of values'}
),
[0] * units.kg,
False,
np.linspace(0, 1, 5) * units.m
),
(
xr.DataArray(
np.linspace(0, 1, 5),
attrs={'units': 'meter', 'description': 'A range of values'}
),
[0] * units.cm,
True,
np.linspace(0, 100, 5) * units.cm
),
(
xr.DataArray(
np.linspace(0, 1, 5),
attrs={'units': 'meter', 'description': 'A range of values'}
),
xr.DataArray(
np.zeros(3),
dims=('x',),
coords={'x': np.linspace(0, 1, 3)},
attrs={'units': 'kilogram', 'description': 'Alternative data'}
),
False,
xr.DataArray(
np.linspace(0, 1, 5),
attrs={'units': 'meter', 'description': 'A range of values'}
)
),
(
xr.DataArray(
np.linspace(0, 1, 5),
attrs={'units': 'meter', 'description': 'A range of values'}
),
xr.DataArray(
np.zeros(3),
dims=('x',),
coords={'x': np.linspace(0, 1, 3)},
attrs={'units': 'centimeter', 'description': 'Alternative data'}
),
True,
xr.DataArray(
np.linspace(0, 100, 5),
attrs={'units': 'centimeter', 'description': 'A range of values'}
)
),
])
def test_wrap_output_like_with_other_kwarg(test, other, match_unit, expected):
"""Test the wrap output like decorator when using the output kwarg."""
@wrap_output_like(other=other, match_unit=match_unit)
def almost_identity(arg):
return arg
result = almost_identity(test)
if hasattr(expected, 'units'):
assert expected.units == result.units
if isinstance(expected, xr.DataArray):
xr.testing.assert_identical(result, expected)
else:
assert_array_equal(result, expected)
@pytest.mark.parametrize('test, other', [
([2, 4, 8] * units.kg, [0] * units.m),
(
[2, 4, 8] * units.kg,
xr.DataArray(
np.zeros(3),
dims=('x',),
coords={'x': np.linspace(0, 1, 3)},
attrs={'units': 'meter'}
)
),
(
xr.DataArray(
np.linspace(0, 1, 5),
attrs={'units': 'meter'}
),
[0] * units.kg
),
(
xr.DataArray(
np.linspace(0, 1, 5),
attrs={'units': 'meter'}
),
xr.DataArray(
np.zeros(3),
dims=('x',),
coords={'x': np.linspace(0, 1, 3)},
attrs={'units': 'kilogram'}
)
),
(
xr.DataArray(
np.linspace(0, 1, 5),
attrs={'units': 'meter', 'description': 'A range of values'}
),
np.arange(4, dtype=np.float64)
)
])
def test_wrap_output_like_with_other_kwarg_raising_dimensionality_error(test, other):
"""Test the wrap output like decorator when when a dimensionality error is raised."""
@wrap_output_like(other=other, match_unit=True)
def almost_identity(arg):
return arg
with pytest.raises(DimensionalityError):
almost_identity(test)
def test_wrap_output_like_with_argument_kwarg():
"""Test the wrap output like decorator with signature recognition."""
@wrap_output_like(argument='a')
def double(a):
return units.Quantity(2) * a.metpy.unit_array
test = xr.DataArray([1, 3, 5, 7], attrs={'units': 'm'})
expected = xr.DataArray([2, 6, 10, 14], attrs={'units': 'meter'})
xr.testing.assert_identical(double(test), expected)
def test_wrap_output_like_without_control_kwarg():
"""Test that the wrap output like decorator fails when not provided a control param."""
@wrap_output_like()
def func(arg):
"""Do nothing."""
with pytest.raises(ValueError) as exc:
func(0)
assert 'Must specify keyword' in str(exc)
def test_regular_to_rotated():
    """Test rotated-grid conversion for a single point at the origin."""
    result = regular_to_rotated(0, 0, 0, 0)
    expected = (0.0, -90.0)
    assert_array_almost_equal(expected, result, 2)
def test_rotated_to_regular():
    """Test rotated-grid conversion for a single point at latitude 90."""
    result = regular_to_rotated(0, 0, 0, 90.)
    expected = (0.0, 0.0)
    assert_array_almost_equal(expected, result, 2)
def test_regular_to_rotated_with_array():
    """Test rotated-grid conversion with arrays of longitudes and latitudes."""
    lats = np.array([0, 0])
    lons = np.array([0, 0])
    result = regular_to_rotated(0, 0, lons, lats)
    expected = np.array([[0, 0], [-90, -90]])
    assert_array_almost_equal(expected, result, 2)
def test_rotated_to_regular_with_array():
    """Test rotated-grid conversion with arrays and a shifted pole argument."""
    lats = np.array([0, 0])
    lons = np.array([0, 0])
    result = regular_to_rotated(0, 90, lons, lats)
    expected = np.array([[180, 180], [0, 0]])
    assert_array_almost_equal(expected, result, 2)
| 40.932584
| 94
| 0.617623
|
6e4035d43b78f892e0a8a060903de554dd822cfa
| 4,025
|
py
|
Python
|
tools/fuchsia/fidl/gen_response_file.py
|
beare/engine
|
35ee8bde43a8effdcb8cec1120855c7f97e9da9c
|
[
"BSD-3-Clause"
] | 4
|
2018-12-31T14:20:26.000Z
|
2019-08-06T10:44:39.000Z
|
tools/fuchsia/fidl/gen_response_file.py
|
beare/engine
|
35ee8bde43a8effdcb8cec1120855c7f97e9da9c
|
[
"BSD-3-Clause"
] | 2
|
2020-03-20T07:27:42.000Z
|
2021-01-05T07:09:27.000Z
|
tools/fuchsia/fidl/gen_response_file.py
|
beare/engine
|
35ee8bde43a8effdcb8cec1120855c7f97e9da9c
|
[
"BSD-3-Clause"
] | 4
|
2020-09-18T10:09:17.000Z
|
2020-11-27T05:42:32.000Z
|
#!/usr/bin/env python3.8
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import string
import sys
def read_libraries(libraries_path):
with open(libraries_path) as f:
lines = f.readlines()
return [l.rstrip("\n") for l in lines]
def write_libraries(libraries_path, libraries):
directory = os.path.dirname(libraries_path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(libraries_path, "w+") as f:
for library in libraries:
f.write(library)
f.write("\n")
def main():
parser = argparse.ArgumentParser(
description="Generate response file for FIDL frontend")
parser.add_argument(
"--out-response-file",
help="The path for for the response file to generate",
required=True)
parser.add_argument(
"--out-libraries",
help="The path for for the libraries file to generate",
required=True)
parser.add_argument(
"--json", help="The path for the JSON file to generate, if any")
parser.add_argument(
"--tables", help="The path for the tables file to generate, if any")
parser.add_argument(
"--deprecated-fuchsia-only-c-client",
help="The path for the C simple client file to generate, if any")
parser.add_argument(
"--deprecated-fuchsia-only-c-header",
help="The path for the C header file to generate, if any")
parser.add_argument(
"--deprecated-fuchsia-only-c-server",
help="The path for the C simple server file to generate, if any")
parser.add_argument(
"--name", help="The name for the generated FIDL library, if any")
parser.add_argument(
"--depfile", help="The name for the generated depfile, if any")
parser.add_argument(
"--sources", help="List of FIDL source files", nargs="*")
parser.add_argument(
"--dep-libraries", help="List of dependent libraries", nargs="*")
parser.add_argument(
"--experimental-flag",
help="List of experimental flags",
action="append")
args = parser.parse_args()
target_libraries = []
for dep_libraries_path in args.dep_libraries or []:
dep_libraries = read_libraries(dep_libraries_path)
for library in dep_libraries:
if library in target_libraries:
continue
target_libraries.append(library)
target_libraries.append(" ".join(sorted(args.sources)))
write_libraries(args.out_libraries, target_libraries)
response_file = []
response_file.append('--experimental allow_new_syntax')
if args.json:
response_file.append("--json %s" % args.json)
if args.tables:
response_file.append("--tables %s" % args.tables)
if args.deprecated_fuchsia_only_c_client:
response_file.append(
"--deprecated-fuchsia-only-c-client %s" %
args.deprecated_fuchsia_only_c_client)
if args.deprecated_fuchsia_only_c_header:
response_file.append(
"--deprecated-fuchsia-only-c-header %s" %
args.deprecated_fuchsia_only_c_header)
if args.deprecated_fuchsia_only_c_server:
response_file.append(
"--deprecated-fuchsia-only-c-server %s" %
args.deprecated_fuchsia_only_c_server)
if args.name:
response_file.append("--name %s" % args.name)
if args.depfile:
response_file.append("--depfile %s" % args.depfile)
if args.experimental_flag:
for experimental_flag in args.experimental_flag:
response_file.append("--experimental %s" % experimental_flag)
response_file.extend(
["--files %s" % library for library in target_libraries])
with open(args.out_response_file, "w+") as f:
f.write(" ".join(response_file))
f.write("\n")
if __name__ == "__main__":
sys.exit(main())
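# Editorial example (hypothetical file names, not from the original source): an invocation
# such as
#   gen_response_file.py --out-response-file out.rsp --out-libraries out.libs \
#       --json fidl.json --name fuchsia.example --sources b.fidl a.fidl
# would write "a.fidl b.fidl" (sources sorted and space-separated) to out.libs and a single
# line to out.rsp along the lines of:
#   --experimental allow_new_syntax --json fidl.json --name fuchsia.example --files a.fidl b.fidl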
| 32.723577
| 76
| 0.652422
|